gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from partition_baseline_support import *

use_timeit = True  # for timing runs (optional)
if use_timeit:
    import timeit
import os, sys, argparse

# Command-line interface: optional streaming-piece count (-p/--parts) and a
# positional graph-file prefix ('.tsv' / '_<part>.tsv' is appended by load_graph).
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--parts", type=int, required=False)
parser.add_argument("input_filename", nargs="?", type=str, default="../../data/static/simulated_blockmodel_graph_500_nodes")
args = parser.parse_args()
input_filename = args.input_filename

true_partition_available = True
visualize_graph = False  # whether to plot the graph layout colored with intermediate partitions
verbose = True  # whether to print updates of the partitioning

if not os.path.isfile(input_filename + '.tsv') and not os.path.isfile(input_filename + '_1.tsv'):
    print("File doesn't exist: '{}'!".format(input_filename))
    sys.exit(1)

# BUGFIX: args.parts is None when -p is omitted, and `None >= 1` raises
# TypeError on Python 3 — guard against None explicitly.
if args.parts is not None and args.parts >= 1:
    # Streaming input: load piece 1 (which carries the true partition), then
    # merge the remaining pieces into the same neighbor lists.
    print('\nLoading partition 1 of {} ({}) ...'.format(args.parts, input_filename + "_1.tsv"))
    out_neighbors, in_neighbors, N, E, true_partition = load_graph(input_filename, load_true_partition=true_partition_available, strm_piece_num=1)
    for part in range(2, args.parts + 1):  # BUGFIX: xrange does not exist on Python 3
        print('Loading partition {} of {} ({}) ...'.format(part, args.parts, input_filename + "_" + str(part) + ".tsv"))
        out_neighbors, in_neighbors, N, E = load_graph(input_filename, load_true_partition=False, strm_piece_num=part, out_neighbors=out_neighbors, in_neighbors=in_neighbors)
else:
    # Static input: the whole graph lives in a single .tsv file.
    out_neighbors, in_neighbors, N, E, true_partition = load_graph(input_filename, load_true_partition=true_partition_available)

if verbose:
    print('Number of nodes: {}'.format(N))
    print('Number of edges: {}'.format(E))

if use_timeit:
    t0 = timeit.default_timer()  # wall-clock start of the partitioning proper
# initialize by putting each node in its own block (N blocks)
num_blocks = N
partition = np.arange(num_blocks)  # idiomatic form of np.array(range(num_blocks))

# partition update parameters
beta = 3  # exploitation versus exploration (higher value favors exploitation)
use_sparse_matrix = False  # whether to represent the edge count matrix using sparse matrix
                           # Scipy's sparse matrix is slow but this may be necessary for large graphs

# agglomerative partition update parameters
num_agg_proposals_per_block = 10  # number of proposals per block
num_block_reduction_rate = 0.5  # fraction of blocks to reduce until the golden ratio bracket is established

# nodal partition updates parameters
max_num_nodal_itr = 100  # maximum number of iterations
delta_entropy_threshold1 = 5e-4  # stop iterating when the change in entropy falls below this fraction of the overall entropy
                                 # lowering this threshold results in more nodal update iterations and likely better performance, but longer runtime
delta_entropy_threshold2 = 1e-4  # threshold after the golden ratio bracket is established (typically lower to fine-tune to partition)
delta_entropy_moving_avg_window = 3  # width of the moving average window for the delta entropy convergence criterion

# initialize edge counts and block degrees from the trivial one-node-per-block partition
interblock_edge_count, block_degrees_out, block_degrees_in, block_degrees = initialize_edge_counts(out_neighbors,
                                                                                                   num_blocks,
                                                                                                   partition,
                                                                                                   use_sparse_matrix)

# initialize bookkeeping for the golden-ratio search over the number of blocks
optimal_num_blocks_found, old_partition, old_interblock_edge_count, old_block_degrees, old_block_degrees_out, old_block_degrees_in, old_overall_entropy, old_num_blocks, graph_object = initialize_partition_variables()
num_blocks_to_merge = int(num_blocks * num_block_reduction_rate)
# begin partitioning by finding the best partition with the optimal number of blocks
while not optimal_num_blocks_found:
    # --- agglomerative partition updates (block merging) ---
    if verbose:
        print("\nMerging down blocks from {} to {}".format(num_blocks, num_blocks - num_blocks_to_merge))
    best_merge_for_each_block = np.ones(num_blocks, dtype=int) * -1  # initialize to no merge
    # BUGFIX: np.Inf was removed in NumPy 2.0; np.inf is the portable spelling
    delta_entropy_for_each_block = np.ones(num_blocks) * np.inf  # initialize criterion
    block_partition = np.arange(num_blocks)  # each block represents itself here (array form matches the initial partition)
    for current_block in range(num_blocks):  # evaluate agglomerative updates for each block
        for proposal_idx in range(num_agg_proposals_per_block):
            # populate edges to neighboring blocks (block id in column 0, edge count in column 1)
            if use_sparse_matrix:
                out_blocks = interblock_edge_count[current_block, :].nonzero()[1]
                out_blocks = np.hstack((out_blocks.reshape([len(out_blocks), 1]),
                                        interblock_edge_count[current_block, out_blocks].toarray().transpose()))
            else:
                out_blocks = interblock_edge_count[current_block, :].nonzero()
                out_blocks = np.hstack(
                    (np.array(out_blocks).transpose(), interblock_edge_count[current_block, out_blocks].transpose()))
            if use_sparse_matrix:
                in_blocks = interblock_edge_count[:, current_block].nonzero()[0]
                in_blocks = np.hstack(
                    (in_blocks.reshape([len(in_blocks), 1]), interblock_edge_count[in_blocks, current_block].toarray()))
            else:
                in_blocks = interblock_edge_count[:, current_block].nonzero()
                in_blocks = np.hstack(
                    (np.array(in_blocks).transpose(), interblock_edge_count[in_blocks, current_block].transpose()))
            # propose a new block to merge with (flag 1 = agglomerative move)
            proposal, num_out_neighbor_edges, num_in_neighbor_edges, num_neighbor_edges = propose_new_partition(
                current_block, out_blocks, in_blocks, block_partition, interblock_edge_count, block_degrees, num_blocks,
                1, use_sparse_matrix)
            # compute the two new rows and columns of the interblock edge count matrix
            new_interblock_edge_count_current_block_row, new_interblock_edge_count_new_block_row, new_interblock_edge_count_current_block_col, new_interblock_edge_count_new_block_col = \
                compute_new_rows_cols_interblock_edge_count_matrix(interblock_edge_count, current_block, proposal,
                                                                   out_blocks[:, 0], out_blocks[:, 1], in_blocks[:, 0],
                                                                   in_blocks[:, 1],
                                                                   interblock_edge_count[current_block, current_block],
                                                                   1, use_sparse_matrix)
            # compute new block degrees
            block_degrees_out_new, block_degrees_in_new, block_degrees_new = compute_new_block_degrees(current_block,
                                                                                                       proposal,
                                                                                                       block_degrees_out,
                                                                                                       block_degrees_in,
                                                                                                       block_degrees,
                                                                                                       num_out_neighbor_edges,
                                                                                                       num_in_neighbor_edges,
                                                                                                       num_neighbor_edges)
            # compute change in entropy / posterior
            delta_entropy = compute_delta_entropy(current_block, proposal, interblock_edge_count,
                                                  new_interblock_edge_count_current_block_row,
                                                  new_interblock_edge_count_new_block_row,
                                                  new_interblock_edge_count_current_block_col,
                                                  new_interblock_edge_count_new_block_col, block_degrees_out,
                                                  block_degrees_in, block_degrees_out_new, block_degrees_in_new,
                                                  use_sparse_matrix)
            if delta_entropy < delta_entropy_for_each_block[current_block]:  # a better block candidate was found
                best_merge_for_each_block[current_block] = proposal
                delta_entropy_for_each_block[current_block] = delta_entropy

    # carry out the best merges
    partition, num_blocks = carry_out_best_merges(delta_entropy_for_each_block, best_merge_for_each_block, partition,
                                                  num_blocks, num_blocks_to_merge)

    # re-initialize edge counts and block degrees for the merged partition
    interblock_edge_count, block_degrees_out, block_degrees_in, block_degrees = initialize_edge_counts(out_neighbors,
                                                                                                       num_blocks,
                                                                                                       partition,
                                                                                                       use_sparse_matrix)

    # --- nodal partition updates (MCMC sweeps over individual nodes) ---
    if verbose:
        print("Beginning nodal updates")
    total_num_nodal_moves = 0
    itr_delta_entropy = np.zeros(max_num_nodal_itr)

    # compute the global entropy for MCMC convergence criterion
    overall_entropy = compute_overall_entropy(interblock_edge_count, block_degrees_out, block_degrees_in, num_blocks, N,
                                              E, use_sparse_matrix)

    for itr in range(max_num_nodal_itr):
        num_nodal_moves = 0
        itr_delta_entropy[itr] = 0
        for current_node in range(N):
            current_block = partition[current_node]
            # propose a new block for this node (flag 0 = nodal move)
            proposal, num_out_neighbor_edges, num_in_neighbor_edges, num_neighbor_edges = propose_new_partition(
                current_block, out_neighbors[current_node], in_neighbors[current_node], partition,
                interblock_edge_count, block_degrees, num_blocks, 0, use_sparse_matrix)
            # determine whether to accept or reject the proposal
            if (proposal != current_block):
                # compute block counts of in and out neighbors
                blocks_out, inverse_idx_out = np.unique(partition[out_neighbors[current_node][:, 0]],
                                                        return_inverse=True)
                count_out = np.bincount(inverse_idx_out, weights=out_neighbors[current_node][:, 1]).astype(int)
                blocks_in, inverse_idx_in = np.unique(partition[in_neighbors[current_node][:, 0]], return_inverse=True)
                count_in = np.bincount(inverse_idx_in, weights=in_neighbors[current_node][:, 1]).astype(int)
                # compute the two new rows and columns of the interblock edge count matrix
                self_edge_weight = np.sum(out_neighbors[current_node][np.where(
                    out_neighbors[current_node][:, 0] == current_node), 1])  # check if this node has a self edge
                new_interblock_edge_count_current_block_row, new_interblock_edge_count_new_block_row, new_interblock_edge_count_current_block_col, new_interblock_edge_count_new_block_col = \
                    compute_new_rows_cols_interblock_edge_count_matrix(interblock_edge_count, current_block, proposal,
                                                                       blocks_out, count_out, blocks_in, count_in,
                                                                       self_edge_weight, 0, use_sparse_matrix)
                # compute new block degrees
                block_degrees_out_new, block_degrees_in_new, block_degrees_new = compute_new_block_degrees(
                    current_block, proposal, block_degrees_out, block_degrees_in, block_degrees, num_out_neighbor_edges,
                    num_in_neighbor_edges, num_neighbor_edges)
                # compute the Hastings correction
                if num_neighbor_edges > 0:
                    Hastings_correction = compute_Hastings_correction(blocks_out, count_out, blocks_in, count_in, proposal,
                                                                      interblock_edge_count,
                                                                      new_interblock_edge_count_current_block_row,
                                                                      new_interblock_edge_count_current_block_col,
                                                                      num_blocks, block_degrees,
                                                                      block_degrees_new, use_sparse_matrix)
                else:  # if the node is an island, proposal is random and symmetric
                    Hastings_correction = 1
                # compute change in entropy / posterior
                delta_entropy = compute_delta_entropy(current_block, proposal, interblock_edge_count,
                                                      new_interblock_edge_count_current_block_row,
                                                      new_interblock_edge_count_new_block_row,
                                                      new_interblock_edge_count_current_block_col,
                                                      new_interblock_edge_count_new_block_col, block_degrees_out,
                                                      block_degrees_in, block_degrees_out_new, block_degrees_in_new,
                                                      use_sparse_matrix)
                # compute probability of acceptance (Metropolis-Hastings)
                p_accept = np.min([np.exp(-beta * delta_entropy) * Hastings_correction, 1])
                # if accept the proposal, update the partition, inter_block_edge_count, and block degrees
                if (np.random.uniform() <= p_accept):
                    total_num_nodal_moves += 1
                    num_nodal_moves += 1
                    itr_delta_entropy[itr] += delta_entropy
                    partition, interblock_edge_count, block_degrees_out, block_degrees_in, block_degrees = update_partition(
                        partition, current_node, current_block, proposal, interblock_edge_count,
                        new_interblock_edge_count_current_block_row, new_interblock_edge_count_new_block_row,
                        new_interblock_edge_count_current_block_col, new_interblock_edge_count_new_block_col,
                        block_degrees_out_new, block_degrees_in_new, block_degrees_new, use_sparse_matrix)
        if verbose:
            print("Itr: {}, number of nodal moves: {}, delta S: {:0.5f}".format(itr, num_nodal_moves,
                                                                                itr_delta_entropy[itr] / float(
                                                                                    overall_entropy)))
        if itr >= (
                delta_entropy_moving_avg_window - 1):  # exit MCMC if the recent change in entropy falls below a small fraction of the overall entropy
            if not (np.all(np.isfinite(old_overall_entropy))):  # golden ratio bracket not yet established
                if (-np.mean(itr_delta_entropy[(itr - delta_entropy_moving_avg_window + 1):itr]) < (
                        delta_entropy_threshold1 * overall_entropy)):
                    break
            else:  # golden ratio bracket is established. Fine-tuning partition.
                if (-np.mean(itr_delta_entropy[(itr - delta_entropy_moving_avg_window + 1):itr]) < (
                        delta_entropy_threshold2 * overall_entropy)):
                    break

    # compute the global entropy for determining the optimal number of blocks
    overall_entropy = compute_overall_entropy(interblock_edge_count, block_degrees_out, block_degrees_in, num_blocks, N,
                                              E, use_sparse_matrix)
    if verbose:
        print(
            "Total number of nodal moves: {}, overall_entropy: {:0.2f}".format(total_num_nodal_moves, overall_entropy))
    if visualize_graph:
        graph_object = plot_graph_with_partition(out_neighbors, partition, graph_object)

    # check whether the partition with optimal number of blocks has been found; if not, determine and prepare for the next number of blocks to try
    partition, interblock_edge_count, block_degrees, block_degrees_out, block_degrees_in, num_blocks, num_blocks_to_merge, old_partition, old_interblock_edge_count, old_block_degrees, old_block_degrees_out, old_block_degrees_in, old_overall_entropy, old_num_blocks, optimal_num_blocks_found = \
        prepare_for_partition_on_next_num_blocks(overall_entropy, partition, interblock_edge_count, block_degrees,
                                                 block_degrees_out, block_degrees_in, num_blocks, old_partition,
                                                 old_interblock_edge_count, old_block_degrees, old_block_degrees_out,
                                                 old_block_degrees_in, old_overall_entropy, old_num_blocks,
                                                 num_block_reduction_rate)
    if verbose:
        print('Overall entropy: {}'.format(old_overall_entropy))
        print('Number of blocks: {}'.format(old_num_blocks))
        if optimal_num_blocks_found:
            print('\nOptimal partition found with {} blocks'.format(num_blocks))
if use_timeit:
    # stop the timer started before partitioning and report wall-clock duration
    t1 = timeit.default_timer()
    elapsed = t1 - t0
    print('\nGraph partition took {} seconds'.format(elapsed))

# score the computed partition against the ground-truth block assignment
evaluate_partition(true_partition, partition)
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the boolean 'closed' column to bccf_event."""
    closed_field = self.gf('django.db.models.fields.BooleanField')(default=False)
    db.add_column(u'bccf_event', 'closed', closed_field,
                  keep_default=False)
def backwards(self, orm):
    """Revert the migration: drop the 'closed' column from bccf_event."""
    db.delete_column(u'bccf_event', 'closed')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bccf.article': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Article'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.bccfbabypage': {
'Meta': {'ordering': "('order',)", 'object_name': 'BCCFBabyPage', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'bccf.bccfchildpage': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BCCFChildPage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bccf_topic': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['bccf.BCCFTopic']", 'null': 'True', 'blank': 'True'}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'gparent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFPage']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'page_for': ('django.db.models.fields.CharField', [], {'default': "'parent'", 'max_length': '13', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFChildPage']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.bccfgenericpage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'BCCFGenericPage', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'show_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_rating': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_resources': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bccf.bccfpage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'BCCFPage', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.bccftopic': {
'Meta': {'object_name': 'BCCFTopic'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.blog': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Blog', '_ormbases': [u'bccf.BCCFChildPage']},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.campaign': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Campaign'},
'approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'approved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'by_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'campaigns'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'bccf.downloadableform': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'DownloadableForm'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.event': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Event', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event_product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event-product'", 'null': 'True', 'to': u"orm['shop.Product']"}),
'full': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_street2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'max_seats': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'program'", 'null': 'True', 'to': u"orm['bccf.Program']"}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': u"orm['auth.User']"}),
'survey_after': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_after'", 'null': 'True', 'to': u"orm['builder.FormPublished']"}),
'survey_before': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_before'", 'null': 'True', 'to': u"orm['builder.FormPublished']"})
},
u'bccf.eventregistration': {
'Meta': {'object_name': 'EventRegistration'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event'", 'to': u"orm['bccf.Event']"}),
'event_order': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'event-order'", 'null': 'True', 'blank': 'True', 'to': u"orm['shop.Order']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'passed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'reminder': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'bccf.footermarquee': {
'Meta': {'object_name': 'FooterMarquee'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.footermarqueeslide': {
'Meta': {'object_name': 'FooterMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.FooterMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'bccf.homemarquee': {
'Meta': {'object_name': 'HomeMarquee'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.homemarqueeslide': {
'Meta': {'object_name': 'HomeMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'linkLabel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.HomeMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'bccf.magazine': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Magazine'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.pagemarquee': {
'Meta': {'object_name': 'PageMarquee'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.pagemarqueeslide': {
'Meta': {'object_name': 'PageMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'linkLabel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.PageMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'bccf.podcast': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Podcast', '_ormbases': [u'bccf.BCCFChildPage']},
'attached_audio': ('mezzanine.core.fields.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.professionalpayment': {
'Meta': {'ordering': "('-paid_on',)", 'object_name': 'ProfessionalPayment'},
'amount': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paid_to'", 'to': u"orm['auth.User']"})
},
u'bccf.program': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Program', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'user_added': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'bccf.programrequest': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ProgramRequest'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'program_requests'", 'to': u"orm['auth.User']"})
},
u'bccf.settings': {
'Meta': {'object_name': 'Settings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.tipsheet': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'TipSheet'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'accreditation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['bccf.Program']", 'null': 'True', 'blank': 'True'}),
'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'avatar': ('bccf.fields.MyImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'male'", 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_mailing_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forum_moderator': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '10', 'blank': 'True'}),
'linkedin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membership_level': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}),
'membership_order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order'", 'null': 'True', 'to': u"orm['shop.Order']"}),
'membership_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['bccf.UserProfile']"}),
'payment': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'phone_mobile': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'phone_work': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'photo': ('bccf.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pinterest': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'requested_cancellation': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'show_in_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'youtube': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'bccf.video': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Video', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'video_url': ('embed_video.fields.EmbedVideoField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
u'builder.formpublished': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'FormPublished'},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['builder.FormStructure']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'builder.formstructure': {
'Meta': {'object_name': 'FormStructure'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Form Structure'", 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'JSON'", 'max_length': '4'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pages.page': {
'Meta': {'ordering': "(u'titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'shop.category': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Category', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': u"orm['shop.ProductOption']"}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
u'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'paypal'", 'max_length': '6'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tax_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'tax_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'shop.product': {
'Meta': {'object_name': 'Product'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'tax_exempt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"})
},
u'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': u"orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
# South convention: the apps whose frozen model definitions above are
# complete for this migration.
complete_apps = ['bccf']
| |
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import division
from math import exp
import warnings
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import numpy as np
from numpy.linalg import inv
import scipy
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
from filterpy.stats import norm_cdf, multivariate_gaussian, logpdf, mahalanobis
# Number of random trials used by test_mahalanobis' numerical-accuracy sweep.
ITERS = 10000
def test_mahalanobis():
    """Check filterpy.stats.mahalanobis against scipy's implementation.

    Covers scalar ints/floats, lists, 1d arrays, 2d row/column vectors,
    the mismatched-shape error case, and a randomized numerical sweep.
    """
    global a, b, S

    # int test
    a, b, S = 3, 1, 2
    assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12

    # int list
    assert abs(mahalanobis([a], [b], [S]) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
    assert abs(mahalanobis([a], b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12

    # float
    a, b, S = 3.123, 3.235235, .01234
    assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
    assert abs(mahalanobis([a], [b], [S]) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
    assert abs(mahalanobis([a], b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12

    # float array
    assert abs(mahalanobis(np.array([a]), b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12

    # 1d array
    a = np.array([1., 2.])
    b = np.array([1.4, 1.2])
    S = np.array([[1., 2.], [2., 4.001]])
    assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12

    # 2d array: every row/column-vector combination must agree
    a = np.array([[1., 2.]])
    b = np.array([[1.4, 1.2]])
    S = np.array([[1., 2.], [2., 4.001]])
    assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
    assert abs(mahalanobis(a.T, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
    assert abs(mahalanobis(a, b.T, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
    assert abs(mahalanobis(a.T, b.T, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12

    # Mismatched shapes must raise ValueError.
    # BUG FIX: the original wrote `assert "<message>"` in the fall-through
    # and catch-all branches; a non-empty string literal is always truthy,
    # so those assertions could never fail.  Fail explicitly instead, and
    # let any non-ValueError exception propagate as a genuine test error.
    try:
        mahalanobis([1], b, S)
    except ValueError:
        pass
    else:
        raise AssertionError("didn't catch vectors of different lengths")

    # okay, now check for numerical accuracy
    for _ in range(ITERS):
        N = np.random.randint(1, 20)
        a = np.random.randn(N)
        b = np.random.randn(N)
        S = np.random.randn(N, N)
        S = np.dot(S, S.T)  # ensure positive semi-definite
        assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
def test_multivariate_gaussian():
    """Check multivariate_gaussian treats lists and arrays identically,
    rejects a negative variance, and matches scipy.stats.multivariate_normal.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        # test that we treat lists and arrays the same
        mean = (0, 0)
        cov = [[1, .5], [.5, 1]]
        a = [[multivariate_gaussian((i, j), mean, cov)
              for i in (-1, 0, 1)]
             for j in (-1, 0, 1)]
        b = [[multivariate_gaussian((i, j), mean, np.asarray(cov))
              for i in (-1, 0, 1)]
             for j in (-1, 0, 1)]
        assert np.allclose(a, b)

        a = [[multivariate_gaussian((i, j), np.asarray(mean), cov)
              for i in (-1, 0, 1)]
             for j in (-1, 0, 1)]
        assert np.allclose(a, b)

        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions.
        try:
            multivariate_gaussian(1, 1, -1)
        except Exception:
            pass
        else:
            assert False, "negative variances are meaningless"

        # test that we get the same results as scipy.stats.multivariate_normal
        xs = np.random.randn(1000)
        mean = np.random.randn(1000)
        var = np.random.random(1000) * 5
        for x, m, v in zip(xs, mean, var):
            assert abs(multivariate_gaussian(x, m, v)
                       - scipy.stats.multivariate_normal(m, v).pdf(x)) < 1.e-12
def _is_inside_ellipse(x, y, ex, ey, orientation, width, height):
co = np.cos(orientation)
so = np.sin(orientation)
xx = x*co + y*so
yy = y*co - x*so
return (xx / width)**2 + (yy / height)**2 <= 1.
def do_plot_test():
    """Visual sanity check: scatter 5000 samples from a 2-d Gaussian and
    color each by whether it falls inside the 2-sigma covariance ellipse,
    then overlay the 1/2/3-sigma ellipses and print the inside fraction."""
    import matplotlib.pyplot as plt
    from numpy.random import multivariate_normal as mnormal
    from filterpy.stats import covariance_ellipse, plot_covariance

    cov = np.array([[32, 15], [15., 40.]])
    x, y = mnormal(mean=(0, 0), cov=cov, size=5000).T
    sd = 2
    a, w, h = covariance_ellipse(cov, sd)
    print(np.degrees(a), w, h)

    inside = [_is_inside_ellipse(xi, yi, 0, 0, a, w, h)
              for xi, yi in zip(x, y)]
    color = ['b' if flag else 'r' for flag in inside]
    count = sum(inside)

    plt.scatter(x, y, alpha=0.2, c=color)
    plt.axis('equal')

    plot_covariance(mean=(0., 0.),
                    cov=cov,
                    std=[1, 2, 3],
                    alpha=0.3,
                    facecolor='none')

    print(count / len(x))
def test_norm_cdf():
    """Validate norm_cdf using the 68-95-99.7 rule for N(5, 3**2)."""
    mu, std = 5, 3
    var = std * std

    one_sigma = norm_cdf((mu - std, mu + std), mu, var)
    assert abs(one_sigma - .6827) < .0001

    # endpoints may be given in either order, and the spread may be passed
    # as a standard deviation instead of a variance
    one_sigma = norm_cdf((mu + std, mu - std), mu, std=std)
    assert abs(one_sigma - .6827) < .0001

    half_sigma = norm_cdf((mu + std, mu), mu, var)
    assert abs(half_sigma - .6827 / 2) < .0001

    two_sigma = norm_cdf((mu - 2*std, mu + 2*std), mu, var)
    assert abs(two_sigma - .9545) < .0001

    three_sigma = norm_cdf((mu - 3*std, mu + 3*std), mu, var)
    assert abs(three_sigma - .9973) < .0001
def test_logpdf():
    """Spot-check logpdf: the pdf of N(1, .01) at its mean is ~3.989, and
    the multivariate form must accept both allow_singular modes."""
    for x, mean in ((1, 1), ([1], [1]), ([[1]], [[1]])):
        peak = exp(logpdf(x, mean, .01))
        assert 3.9 < peak < 4.

    cov = np.array([[1., 2], [2, 5]])
    logpdf([1., 2], [1.1, 2], cov=cov, allow_singular=False)
    logpdf([1., 2], [1.1, 2], cov=cov, allow_singular=True)
def covariance_3d_plot_test():
    """Visual check of plot_3d_covariance against 1000 samples drawn from
    the same 3-d Gaussian."""
    import matplotlib.pyplot as plt
    from filterpy.stats import plot_3d_covariance

    mu = [13456.3, 2320, 672.5]
    C = np.array([[1.0, .03, .2],
                  [.03, 4.0, .0],
                  [.2, .0, 16.1]])

    points = np.random.multivariate_normal(mu, C, size=1000)

    axes = plt.gcf().add_subplot(111, projection='3d')
    axes.scatter(xs=points[:, 0], ys=points[:, 1], zs=points[:, 2], s=1)
    plot_3d_covariance(mu, C, alpha=.4, std=3, limit_xyz=True, ax=axes)
if __name__ == "__main__":
    # The visual checks only run when executed as a script; pytest collects
    # the test_* functions above instead.
    covariance_3d_plot_test()
    plt.figure()
    do_plot_test()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""\
===================
IRC Channel Logger
===================
Logger writes all traffic it receives to text files, changing files once per
day. It is built using IRC_Client as its core.
Example Usage
-------------
To log the channel #sillyputty on server my.server.org::
Logger('#sillyputty', host="my.server.org").run()
It will now log all messages to #kamtest except those prefixed by "[off]".
More Detail
-----------
BasicLogger is a higher-level IRC client that is meant to link to the base
client found in IRCClient.py. It sends command tuples to its "irc" outbox, and
receives them via its "inbox", allowing it to implement login, and ping
response. It uses IRC_Client's tuple-based output format to achieve some
demultiplexing of IRC output as well, though not of the multiple-channel sort.
BasicLogger depends heavily on the LoggerFunctions module. See LoggerFunctions
for a list of queries it responds to, how it formats the date and time, and how
it determines file names.
Logger ultimately links BasicLogger's "irc" outbox to IRC_Client's "talk" inbox.
It also utilizes two Carousels and SimpleFileWriters.
How it works
-------------
Logger writes everything it hears to two files in the specified directory. The
filenames are in the format "givenchannelnameDD-MM-YYYY.log" and
"givenchannelnameDD-MM-YYYY.info".
BasicLogger writes all channel output to its "outbox" and all other messages to
its "system" box. Once per loop, it checks the current date against its stored
date. If the date has changed, then it changes the name of its logfiles to
reflect the new date and sends the new names to "log_next" and "info_next".
Logger uses this in conjunction with a Carousel to create a new logfile and
close the old one.
By default BasicLogger uses ::outformat::, defined in IRCClient, to format
messages from IRCClient.SimpleIRCClientPrefab before writing to the log. To
format messages differently, pass in a different function to its "formatter"
keyword.
Logger simply links BasicLogger with a IRCClient.SimpleIRCClientPrefab and two
Carousel-encapsulated SimpleFileWriters. It also slaps timestamps on messages.
It takes any keyword that BasicLogger or IRCClient.SimpleIRCClientPrefab will
take.
Command Line Usage
------------------
One can run Logger from the command line by entering::
./Logger.py \#somechannel desirednickname
"""
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Carousel import Carousel
from Axon.Component import component
import IRCClient
import LoggerFunctions
import time, os
# Components this module defines/uses, advertised for Kamaelia's
# documentation tooling.  NOTE(review): IRCClient here is the imported
# *module*, not a component class -- confirm this entry is intentional.
__kamaelia_components__ = (SimpleFileWriter, Graphline, Carousel, IRCClient)
class BasicLogger(component):
    """\
    BasicLogger(channel, **kwargs) -> new BasicLogger component

    Watches one IRC channel: formatted channel traffic is sent to "outbox"
    (destined for the log file) and everything else -- numeric replies,
    server notices -- to "system" (destined for the info file).

    Keyword arguments:

    - formatter -- function that takes an output tuple of IRC_Client's and
      outputs a string. Default outformat from IRCClient.py
    - name -- nickname of the logger bot. Default "jinnaslogbot"
    - logdir -- directory logs are to be put into. Default is the directory
      this module is in.
    """
    Outboxes = {"irc" : "to IRC, for user responses and login",
                "outbox" : "What we're interested in, the traffic over the channel",
                "system" : "Messages directed toward the client, numeric replies, etc.",
                "signal" : "Shutdown handling in the future",
                "log_next" : "for the Log Carousel",
                "info_next" : "for the Info Carousel"
                }

    def __init__(self,
                 channel,
                 formatter=IRCClient.outformat,
                 name="jinnaslogbot",
                 logdir="",
                 password=None):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(BasicLogger, self).__init__()
        self.channel = channel
        self.format = formatter
        self.name = name
        # Normalize the log directory: empty string falls back to the current
        # working directory, and a single trailing '/' is guaranteed.
        self.logdir = logdir.rstrip('/') or os.getcwd()
        self.logdir = self.logdir + '/'
        # Log/info filenames are (re)computed by changeDate() once running.
        self.logname = ""
        self.infoname = ""
        self.password = password
        self.debugger.addDebugSection("Logger.main", 0)

    def login(self):
        """registers with the IRC server"""
        self.send(("NICK", self.name), "irc")
        self.send(("USER", self.name, self.name, self.name, self.name), "irc")
        if self.password:
            # Identify with NickServ before joining, if a password was given.
            self.send(("PRIVMSG", 'nickserv', "identify " + self.password), "irc")
        self.send(("JOIN", self.channel), "irc")

    def main(self):
        """Main loop.

        Runs as an Axon generator -- each `yield` hands control back to the
        scheduler.  changeDate() re-runs whenever the calendar date rolls
        over, so the Carousels switch to fresh, date-stamped log files.
        """
        self.login()
        self.changeDate()  # also initializes self.lastdatestring
        yield 1
        while True:
            if self.currentDateString() != self.lastdatestring:
                self.changeDate()
            yield 1
            while self.dataReady("inbox"):
                data = self.recv("inbox")
                formatted_data = self.format(data)
                if (data[2] == self.channel or data[0] == 'NICK') and formatted_data: #format might return None
                    self.send(formatted_data, "outbox")
                    self.respondToQueries(data)
                elif formatted_data:
                    self.send(formatted_data, "system")
                self.respondToPings(data)

    def respondToPings(self, msg):
        # Keep the connection alive: answer server PINGs with a PONG.
        if msg[0] == 'PING':
            self.send(('PONG', msg[1]), 'irc')
            self.send("Sent PONG to %s \n" % msg[1], "system")

    def respondToQueries(self, msg):
        # Handles "<botname>: reload <module>" requests from the channel.
        # SECURITY NOTE(review): words[1] comes straight from a remote IRC
        # user and is interpolated into exec() -- arbitrary code execution
        # risk; confirm this debug hook is intentional and restricted.
        if msg[0] == 'PRIVMSG' and msg[3].split(':')[0] == self.name:
            words = msg[3].split()[1:]
            if len(words) > 1 and words[0] == "reload":
                try:
                    exec("reload(%s)" % words[1])
                    reply = "'%s' reloaded\n" % words[1]
                except:
                    # NOTE(review): bare except also hides unrelated errors.
                    reply = "'%s' isn't a module, or at least not one I can reload.\n" % words[1]
                self.send(('PRIVMSG', self.channel, reply), "irc")
                # NOTE(review): self.format normally receives a message tuple;
                # here it is handed a plain string -- verify the formatter
                # tolerates this.
                self.send(self.format(reply), "outbox")
        # Delegate any further query handling to the shared helper module.
        LoggerFunctions.respondToQueries(self, msg)

    def currentDateString(self):
        """returns the current date"""
        return LoggerFunctions.currentDateString()

    def currentTimeString(self):
        """returns current time"""
        return LoggerFunctions.currentTimeString()

    def getFilenames(self):
        """returns tuple (logname, infoname) according to the parameters given"""
        return LoggerFunctions.getFilenames(self.logdir, self.channel)

    def changeDate(self):
        """updates the date and requests new log files to reflect the date"""
        self.lastdatestring = self.currentDateString()
        self.logname, self.infoname = self.getFilenames()
        # Tell both Carousels to roll over to the new filenames.
        self.send(self.logname, "log_next")
        self.send(self.infoname, "info_next")
def Logger(channel,
           name=None,
           formatter=LoggerFunctions.TimedOutformat,
           logdir="",
           password=None,
           filewriter = LoggerFunctions.AppendingFileWriter,
           **irc_args):
    """\
    Logger(channel, **kwargs) -> IRC-logging prefab

    Wires a BasicLogger to an IRCClient.SimpleIRCClientPrefab and to two
    Carousel-wrapped file writers (one for channel traffic, one for
    system/info messages).

    Keyword arguments:

    - formatter -- formatter applied to incoming IRC_Client messages before
      they are written to the log. Default TimedOutformat.
    - name -- nickname of the logger bot. Default is BasicLogger's default.
    - logdir -- directory the logs are written into. Default is this
      module's directory.
    - **irc_args -- extra keywords forwarded to
      IRCClient.SimpleIRCClientPrefab.
    """
    # Outbox -> inbox wiring between the four children.
    wiring = {
        ("logger", "irc"): ("irc", "inbox"),
        ("irc", "outbox"): ("logger", "inbox"),
        ("logger", "log_next"): ("log", "next"),
        ("logger", "outbox"): ("log", "inbox"),
        ("logger", "info_next"): ("info", "next"),
        ("logger", "system"): ("info", "inbox"),
    }
    bot = BasicLogger(channel, name=name, formatter=formatter,
                      logdir=logdir, password=password)
    return Graphline(irc=IRCClient.SimpleIRCClientPrefab(**irc_args),
                     logger=bot,
                     log=Carousel(filewriter),
                     info=Carousel(filewriter),
                     linkages=wiring)
if __name__ == '__main__':
    import sys
    # Defaults, overridden positionally: ./Logger.py '#channel' nick password
    channel = "#kamtest"
    Name = "jinnaslogbot"
    pwd = None
    if len(sys.argv) > 1: channel = sys.argv[1]
    if len(sys.argv) > 2: Name = sys.argv[2]
    if len(sys.argv) > 3: pwd = sys.argv[3]
    from Kamaelia.Internet.TCPClient import TCPClient
    from Kamaelia.Util.Introspector import Introspector
    from Kamaelia.Chassis.Pipeline import Pipeline
    # Stream component-graph introspection data to a local visualiser
    # listening on port 1501 (optional debugging aid).
    Pipeline( Introspector(), TCPClient("127.0.0.1",1501) ).activate()
    print "Logging %s as %s" % (channel, Name)
    # Log the channel as HTML, using the appending LoggerWriter.
    Logger(channel,
           name=Name,
           password=pwd,
           formatter=(lambda data: LoggerFunctions.HTMLOutformat(data)),
           filewriter = LoggerFunctions.LoggerWriter,
           ).run()
| |
import json
import os
import zipfile
from tempfile import NamedTemporaryFile
import mock
import pytest
from nose.tools import eq_
from django import forms
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files import utils
from files.models import File
from versions.models import Version
# Every test in this module gets database access (pytest-django marker).
pytestmark = pytest.mark.django_db
def test_is_beta():
    """is_beta() accepts every beta-ish version tag, with any separator."""
    # A plain release version is not a beta.
    assert not utils.is_beta('1.2')
    # Every beta tag, optionally followed by a number with '', '.' or '-'
    # as separator, marks the version as beta.
    for tag in ('a', 'alpha', 'b', 'beta', 'pre', 'rc'):
        for suffix in ('', '1', '123', '.1', '.123', '-1', '-123'):
            assert utils.is_beta('1.2' + tag + suffix)
class TestFindJetpacks(amo.tests.TestCase):
    """Tests for files.utils.find_jetpacks()."""
    fixtures = ['base/addon_3615']

    def setUp(self):
        super(TestFindJetpacks, self).setUp()
        # Mark every fixture file as built with jetpack 1.0.
        File.objects.update(jetpack_version='1.0')
        self.file = File.objects.filter(version__addon=3615).get()

    def test_success(self):
        files = utils.find_jetpacks('1.0', '1.1')
        eq_(files, [self.file])

    def test_skip_autorepackage(self):
        # Add-ons that opted out of auto repackaging are excluded.
        Addon.objects.update(auto_repackage=False)
        eq_(utils.find_jetpacks('1.0', '1.1'), [])

    def test_minver(self):
        # File version below minver: returned, but flagged as not upgradeable.
        files = utils.find_jetpacks('1.1', '1.2')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_maxver(self):
        # File already at/above maxver: returned, but needs no upgrade.
        files = utils.find_jetpacks('.1', '1.0')
        eq_(files, [self.file])
        eq_(files[0].needs_upgrade, False)

    def test_unreviewed_files_plus_reviewed_file(self):
        # We upgrade unreviewed files up to the latest reviewed file.
        v = Version.objects.create(addon_id=3615)
        new_file = File.objects.create(version=v, jetpack_version='1.0')
        # NOTE(review): the Version created on the next line is never used --
        # new_file2 is attached to `v` as well. Confirm this is intentional.
        Version.objects.create(addon_id=3615)
        new_file2 = File.objects.create(version=v, jetpack_version='1.0')
        eq_(new_file.status, amo.STATUS_UNREVIEWED)
        eq_(new_file2.status, amo.STATUS_UNREVIEWED)
        files = utils.find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        assert all(f.needs_upgrade for f in files)
        # Now self.file will not need an upgrade since we skip old versions.
        new_file.update(status=amo.STATUS_PUBLIC)
        files = utils.find_jetpacks('1.0', '1.1')
        eq_(files, [self.file, new_file, new_file2])
        eq_(files[0].needs_upgrade, False)
        assert all(f.needs_upgrade for f in files[1:])
class TestExtractor(amo.tests.TestCase):
    """Tests that utils.Extractor.parse() dispatches to the right extractor
    depending on which manifest file exists in the package."""

    def os_path_exists_for(self, path_to_accept):
        """Helper function that returns a function for a mock.

        The returned function will return True if the path passed as parameter
        endswith the "path_to_accept".
        """
        return lambda path: path.endswith(path_to_accept)

    def test_no_manifest(self):
        # With no recognised manifest file at all, parsing must fail.
        with self.assertRaises(forms.ValidationError) as exc:
            utils.Extractor.parse('foobar')
        assert exc.exception.message == (
            "No install.rdf or package.json or manifest.json found")

    # NOTE: mock.patch decorators are applied bottom-up, so the first mock
    # argument of each test maps to the *last* decorator (os.path.exists).
    @mock.patch('files.utils.ManifestJSONExtractor')
    @mock.patch('files.utils.PackageJSONExtractor')
    @mock.patch('files.utils.RDFExtractor')
    @mock.patch('files.utils.os.path.exists')
    def test_parse_install_rdf(self, exists_mock, rdf_extractor,
                               package_json_extractor,
                               manifest_json_extractor):
        exists_mock.side_effect = self.os_path_exists_for('install.rdf')
        utils.Extractor.parse('foobar')
        # install.rdf present: only the RDF extractor is used.
        assert rdf_extractor.called
        assert not package_json_extractor.called
        assert not manifest_json_extractor.called

    @mock.patch('files.utils.ManifestJSONExtractor')
    @mock.patch('files.utils.PackageJSONExtractor')
    @mock.patch('files.utils.RDFExtractor')
    @mock.patch('files.utils.os.path.exists')
    def test_parse_package_json(self, exists_mock, rdf_extractor,
                                package_json_extractor,
                                manifest_json_extractor):
        exists_mock.side_effect = self.os_path_exists_for('package.json')
        utils.Extractor.parse('foobar')
        # package.json present: only the package.json extractor is used.
        assert not rdf_extractor.called
        assert package_json_extractor.called
        assert not manifest_json_extractor.called

    @mock.patch('files.utils.ManifestJSONExtractor')
    @mock.patch('files.utils.PackageJSONExtractor')
    @mock.patch('files.utils.RDFExtractor')
    @mock.patch('files.utils.os.path.exists')
    def test_parse_manifest_json(self, exists_mock, rdf_extractor,
                                 package_json_extractor,
                                 manifest_json_extractor):
        # Enable the waffle switch gating WebExtension support.
        self.create_switch('webextensions')
        exists_mock.side_effect = self.os_path_exists_for('manifest.json')
        utils.Extractor.parse('foobar')
        # manifest.json present + switch on: only the manifest extractor runs.
        assert not rdf_extractor.called
        assert not package_json_extractor.called
        assert manifest_json_extractor.called

    @mock.patch('files.utils.ManifestJSONExtractor')
    @mock.patch('files.utils.PackageJSONExtractor')
    @mock.patch('files.utils.RDFExtractor')
    @mock.patch('files.utils.os.path.exists')
    def test_parse_manifest_json_no_waffle(self, exists_mock, rdf_extractor,
                                           package_json_extractor,
                                           manifest_json_extractor):
        # Here we don't create the waffle switch to enable it.
        exists_mock.side_effect = self.os_path_exists_for('manifest.json')
        with self.assertRaises(forms.ValidationError) as exc:
            utils.Extractor.parse('foobar')
        assert exc.exception.message == "WebExtensions aren't allowed yet"
        assert not rdf_extractor.called
        assert not package_json_extractor.called
        assert not manifest_json_extractor.called
class TestPackageJSONExtractor(amo.tests.TestCase):
    """Tests for utils.PackageJSONExtractor (jetpack-style package.json)."""

    def parse(self, base_data):
        # Shortcut: run the extractor over a JSON-serialized dict.
        return utils.PackageJSONExtractor('/fake_path',
                                          json.dumps(base_data)).parse()

    def create_appversion(self, name, version):
        # AppVersions must exist in the DB for engine matching to succeed.
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)

    def test_instanciate_without_data(self):
        """Without data, we load the data from the file path."""
        data = {'id': 'some-id'}
        with NamedTemporaryFile() as file_:
            # NOTE(review): writes a str to a binary-mode temp file --
            # Python 2 semantics; would need bytes under Python 3.
            file_.write(json.dumps(data))
            file_.flush()
            pje = utils.PackageJSONExtractor(file_.name)
            assert pje.data == data

    def test_guid(self):
        """Use id for the guid."""
        assert self.parse({'id': 'some-id'})['guid'] == 'some-id'

    def test_name_for_guid_if_no_id(self):
        """Use the name for the guid if there is no id."""
        assert self.parse({'name': 'addon-name'})['guid'] == 'addon-name'

    def test_type(self):
        """Package.json addons are always ADDON_EXTENSION."""
        assert self.parse({})['type'] == amo.ADDON_EXTENSION

    def test_no_restart(self):
        """Package.json addons are always no-restart."""
        assert self.parse({})['no_restart'] is True

    def test_name_from_title_with_name(self):
        """Use the title for the name."""
        data = {'title': 'The Addon Title', 'name': 'the-addon-name'}
        assert self.parse(data)['name'] == 'The Addon Title'

    def test_name_from_name_without_title(self):
        """Use the name for the name if there is no title."""
        assert (
            self.parse({'name': 'the-addon-name'})['name'] == 'the-addon-name')

    def test_version(self):
        """Use version for the version."""
        assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'

    def test_homepage(self):
        """Use homepage for the homepage."""
        assert (
            self.parse({'homepage': 'http://my-addon.org'})['homepage'] ==
            'http://my-addon.org')

    def test_summary(self):
        """Use description for the summary."""
        assert (
            self.parse({'description': 'An addon.'})['summary'] == 'An addon.')

    def test_apps(self):
        """Use engines for apps."""
        firefox_version = self.create_appversion('firefox', '33.0a1')
        thunderbird_version = self.create_appversion('thunderbird', '33.0a1')
        data = {'engines': {'firefox': '>=33.0a1', 'thunderbird': '>=33.0a1'}}
        apps = self.parse(data)['apps']
        apps_dict = dict((app.appdata.short, app) for app in apps)
        assert sorted(apps_dict.keys()) == ['firefox', 'thunderbird']
        # Engine versions are used for both min and max.
        assert apps_dict['firefox'].min == firefox_version
        assert apps_dict['firefox'].max == firefox_version
        assert apps_dict['thunderbird'].min == thunderbird_version
        assert apps_dict['thunderbird'].max == thunderbird_version

    def test_unknown_apps_are_ignored(self):
        """Unknown engines get ignored."""
        self.create_appversion('firefox', '33.0a1')
        self.create_appversion('thunderbird', '33.0a1')
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'thunderbird': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        apps = self.parse(data)['apps']
        engines = [app.appdata.short for app in apps]
        assert sorted(engines) == ['firefox', 'thunderbird']  # Not node.

    def test_invalid_app_versions_are_ignored(self):
        """Valid engines with invalid versions are ignored."""
        firefox_version = self.create_appversion('firefox', '33.0a1')
        # No matching android AppVersion exists, so fennec is dropped.
        data = {
            'engines': {
                'firefox': '>=33.0a1',
                'fennec': '>=33.0a1',
            },
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 1
        assert apps[0].appdata.short == 'firefox'
        assert apps[0].min == firefox_version
        assert apps[0].max == firefox_version

    def test_fennec_is_treated_as_android(self):
        """Treat the fennec engine as android."""
        android_version = self.create_appversion('android', '33.0a1')
        data = {
            'engines': {
                'fennec': '>=33.0a1',
                'node': '>=0.10',
            },
        }
        apps = self.parse(data)['apps']
        assert apps[0].appdata.short == 'android'
        assert apps[0].min == android_version
        assert apps[0].max == android_version

    def test_is_webextension(self):
        """An add-on with a package.json file can't be a webextension."""
        assert 'is_webextension' not in self.parse({})
class TestManifestJSONExtractor(amo.tests.TestCase):
    """Tests for utils.ManifestJSONExtractor (WebExtension manifest.json)."""

    def parse(self, base_data):
        # Shortcut: run the extractor over a JSON-serialized dict.
        return utils.ManifestJSONExtractor('/fake_path',
                                           json.dumps(base_data)).parse()

    def create_appversion(self, name, version):
        # AppVersions must exist in the DB for version matching to succeed.
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)

    def test_instanciate_without_data(self):
        """Without data, we load the data from the file path."""
        data = {'id': 'some-id'}
        with NamedTemporaryFile() as file_:
            # NOTE(review): writes a str to a binary-mode temp file --
            # Python 2 semantics; would need bytes under Python 3.
            file_.write(json.dumps(data))
            file_.flush()
            mje = utils.ManifestJSONExtractor(file_.name)
            assert mje.data == data

    def test_guid(self):
        """Use applications>gecko>id for the guid."""
        assert self.parse(
            {'applications': {
                'gecko': {
                    'id': 'some-id'}}})['guid'] == 'some-id'

    def test_name_for_guid_if_no_id(self):
        """Use the name for the guid if there is no id."""
        assert self.parse({'name': 'addon-name'})['guid'] == 'addon-name'

    def test_type(self):
        """manifest.json addons are always ADDON_EXTENSION."""
        assert self.parse({})['type'] == amo.ADDON_EXTENSION

    def test_no_restart(self):
        """manifest.json addons are always no-restart."""
        assert self.parse({})['no_restart'] is True

    def test_name(self):
        """Use name for the name."""
        assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'

    def test_version(self):
        """Use version for the version."""
        assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'

    def test_homepage(self):
        """Use homepage_url for the homepage."""
        assert (
            self.parse({'homepage_url': 'http://my-addon.org'})['homepage'] ==
            'http://my-addon.org')

    def test_summary(self):
        """Use description for the summary."""
        assert (
            self.parse({'description': 'An addon.'})['summary'] == 'An addon.')

    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '30.0')
        firefox_max_version = self.create_appversion('firefox', '30.*')
        self.create_appversion('firefox', '42.0')  # Default AppVersions.
        self.create_appversion('firefox', '42.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=30.0',
                    'strict_max_version': '=30.*'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 1  # Only Firefox for now.
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version

    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        # Default AppVersions.
        firefox_min_version = self.create_appversion('firefox', '42.0')
        firefox_max_version = self.create_appversion('firefox', '42.*')
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 1  # Only Firefox for now.
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version

    def test_invalid_app_versions_are_ignored(self):
        """Invalid versions are ignored."""
        data = {
            'applications': {
                'gecko': {
                    # Not created, so are seen as invalid.
                    'strict_min_version': '>=30.0',
                    'strict_max_version': '=30.*'}}}
        assert not self.parse(data)['apps']

    def test_is_webextension(self):
        # Anything parsed from a manifest.json is flagged as a webextension.
        assert self.parse({})['is_webextension']
def test_zip_folder_content():
    """extract_zip() + zip_folder_content() round-trips an XPI's contents."""
    extension_file = 'apps/files/fixtures/files/extension.xpi'
    # Initialize both names so the finally block never hits a NameError if
    # extract_zip() or get_temp_filename() raises before assignment.
    temp_folder = temp_filename = None
    try:
        temp_folder = utils.extract_zip(extension_file)
        # os.listdir() order is platform/filesystem dependent: sort it.
        assert sorted(os.listdir(temp_folder)) == [
            'chrome', 'chrome.manifest', 'install.rdf']
        temp_filename = amo.tests.get_temp_filename()
        utils.zip_folder_content(temp_folder, temp_filename)
        # Make sure the zipped files contain the same files (the archive
        # entry order may differ, so compare sorted name lists).
        with zipfile.ZipFile(temp_filename, mode='r') as new:
            with zipfile.ZipFile(extension_file, mode='r') as orig:
                assert sorted(new.namelist()) == sorted(orig.namelist())
    finally:
        if temp_folder is not None and os.path.exists(temp_folder):
            amo.utils.rm_local_tmp_dir(temp_folder)
        if temp_filename is not None and os.path.exists(temp_filename):
            os.unlink(temp_filename)
def test_repack():
    """utils.repack() exposes the unzipped XPI and rezips it with changes."""
    # Warning: context managers all the way down. Because they're awesome.
    extension_file = 'apps/files/fixtures/files/extension.xpi'
    # We don't want to overwrite our fixture, so use a copy.
    with amo.tests.copy_file_to_temp(extension_file) as temp_filename:
        # This is where we're really testing the repack helper.
        with utils.repack(temp_filename) as folder_path:
            # Temporary folder contains the unzipped XPI.  Sort the listing:
            # os.listdir() order is platform/filesystem dependent.
            assert sorted(os.listdir(folder_path)) == [
                'chrome', 'chrome.manifest', 'install.rdf']
            # Add a file, which should end up in the repacked file.
            with open(os.path.join(folder_path, 'foo.bar'), 'w') as file_:
                file_.write('foobar')
        # Once we're done with the repack, the temporary folder is removed.
        assert not os.path.exists(folder_path)
        # And the repacked file has the added file.
        assert os.path.exists(temp_filename)
        with zipfile.ZipFile(temp_filename, mode='r') as zf:
            assert 'foo.bar' in zf.namelist()
            assert zf.read('foo.bar') == 'foobar'
@pytest.fixture
def file_obj():
    """A File from a freshly created add-on whose guid is 'xxxxx'."""
    addon = amo.tests.addon_factory()
    addon.update(guid='xxxxx')
    return addon.current_version.all_files[0]
def test_bump_version_in_install_rdf(file_obj):
    """update_version_number() rewrites the version inside install.rdf."""
    fixture = 'apps/files/fixtures/files/jetpack.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '1.3.1-signed')
        assert utils.parse_xpi(file_obj.file_path)['version'] == '1.3.1-signed'
def test_bump_version_in_alt_install_rdf(file_obj):
    """Version bumping also works on XPIs using an alternate install.rdf."""
    fixture = 'apps/files/fixtures/files/alt-rdf.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '2.1.106.1-signed')
        assert (utils.parse_xpi(file_obj.file_path)['version'] ==
                '2.1.106.1-signed')
def test_bump_version_in_package_json(file_obj):
    """Version bumping also works for package.json-based add-ons."""
    fixture = 'apps/files/fixtures/files/new-format-0.0.1.xpi'
    with amo.tests.copy_file(fixture, file_obj.file_path):
        utils.update_version_number(file_obj, '0.0.1.1-signed')
        assert (utils.parse_xpi(file_obj.file_path)['version'] ==
                '0.0.1.1-signed')
| |
import numpy as np
import pytest
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn.preprocessing.label import _encode
from sklearn import datasets
# Shared fixture: the iris dataset, loaded once at module import time.
iris = datasets.load_iris()
def toarray(a):
    """Return `a` densified: call .toarray() on sparse inputs, else pass through."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_label_binarizer():
    """LabelBinarizer round-trips one-, two- and multi-class input."""
    # one-class case defaults to negative label
    # For dense case:
    inp = ["pos", "pos", "pos", "pos"]
    lb = LabelBinarizer(sparse_output=False)
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # For sparse case:
    lb = LabelBinarizer(sparse_output=True)
    got = lb.fit_transform(inp)
    assert issparse(got)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got.toarray())
    assert_array_equal(lb.inverse_transform(got.toarray()), inp)
    lb = LabelBinarizer(sparse_output=False)
    # two-class case: a single 0/1 column is produced
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)
    # inverse_transform also accepts a full two-column indicator matrix
    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)
    # multi-class case: one-hot rows, classes sorted lexicographically
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    """Labels never seen during fit are encoded as all-zero rows."""
    binarizer = LabelBinarizer()
    fitted = binarizer.fit_transform(['b', 'd', 'e'])
    assert_array_equal(np.array([[1, 0, 0],
                                 [0, 1, 0],
                                 [0, 0, 1]]), fitted)
    # 'a', 'c' and 'f' were not fitted -> zero vectors.
    transformed = binarizer.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(np.array([[0, 0, 0],
                                 [1, 0, 0],
                                 [0, 0, 0],
                                 [0, 1, 0],
                                 [0, 0, 1],
                                 [0, 0, 0]]), transformed)
def test_label_binarizer_set_label_encoding():
    """Custom neg_label/pos_label values appear in the binarized output."""
    # Two-class case, with pos_label deliberately set to 0.
    binarizer = LabelBinarizer(neg_label=-2, pos_label=0)
    labels = np.array([0, 1, 1, 0])
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(np.array([[-2, 0, 0, -2]]).T, encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)
    # Multi-class case.
    binarizer = LabelBinarizer(neg_label=-2, pos_label=2)
    labels = np.array([3, 2, 1, 2, 0])
    encoded = binarizer.fit_transform(labels)
    assert_array_equal(np.array([[-2, -2, -2, +2],
                                 [-2, -2, +2, -2],
                                 [-2, +2, -2, -2],
                                 [-2, -2, +2, -2],
                                 [+2, -2, -2, -2]]), encoded)
    assert_array_equal(binarizer.inverse_transform(encoded), labels)
@ignore_warnings
def test_label_binarizer_errors():
    # Check that invalid arguments yield ValueError
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    # Multilabel input to a binarizer fitted on plain labels is invalid.
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    lb = LabelBinarizer()
    # Transforming before fit, or inverse-transforming nothing, must fail.
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    # Invalid neg_label/pos_label combinations (neg >= pos).
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
    # This neg_label/sparse_output combination is rejected too.
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)
    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)
    # Sequence of seq type should raise ValueError
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
    # Fail on the number of classes
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)
    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)
    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
@pytest.mark.parametrize(
    "values, classes, unknown",
    [(np.array([2, 1, 3, 1, 3], dtype='int64'),
      np.array([1, 2, 3], dtype='int64'), np.array([4], dtype='int64')),
     (np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
      np.array(['a', 'b', 'c'], dtype=object),
      np.array(['d'], dtype=object)),
     (np.array(['b', 'a', 'c', 'a', 'c']),
      np.array(['a', 'b', 'c']), np.array(['d']))],
    ids=['int64', 'object', 'str'])
def test_label_encoder(values, classes, unknown):
    # Test LabelEncoder's transform, fit_transform and
    # inverse_transform methods across int64 / object / str dtypes.
    le = LabelEncoder()
    le.fit(values)
    assert_array_equal(le.classes_, classes)
    assert_array_equal(le.transform(values), [1, 0, 2, 0, 2])
    assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values)
    le = LabelEncoder()
    ret = le.fit_transform(values)
    assert_array_equal(ret, [1, 0, 2, 0, 2])
    # A value never seen during fit must be rejected.
    with pytest.raises(ValueError, match="unseen labels"):
        le.transform(unknown)
def test_label_encoder_negative_ints():
    """Negative integer labels are encoded like any other value."""
    encoder = LabelEncoder()
    encoder.fit([1, 1, 4, 5, -1, 0])
    assert_array_equal(encoder.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(encoder.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(encoder.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # 6 was never seen during fit.
    assert_raises(ValueError, encoder.transform, [0, 6])
@pytest.mark.parametrize("dtype", ['str', 'object'])
def test_label_encoder_str_bad_shape(dtype):
    """Passing a scalar string to transform raises a shape ValueError."""
    encoder = LabelEncoder()
    encoder.fit(np.array(["apple", "orange"], dtype=dtype))
    assert_raise_message(ValueError, "bad input shape",
                         encoder.transform, "apple")
def test_label_encoder_errors():
    # Check that invalid arguments yield ValueError
    le = LabelEncoder()
    # Transform / inverse-transform before fit must fail.
    assert_raises(ValueError, le.transform, [])
    assert_raises(ValueError, le.inverse_transform, [])
    # Fail on unseen labels
    le = LabelEncoder()
    le.fit([1, 2, 3, -1, 1])
    msg = "contains previously unseen labels"
    assert_raise_message(ValueError, msg, le.inverse_transform, [-2])
    assert_raise_message(ValueError, msg, le.inverse_transform, [-2, -3, -4])
    # Fail on inverse_transform("")
    msg = "bad input shape ()"
    assert_raise_message(ValueError, msg, le.inverse_transform, "")
@pytest.mark.parametrize(
    "values",
    [np.array([2, 1, 3, 1, 3], dtype='int64'),
     np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
     np.array(['b', 'a', 'c', 'a', 'c'])],
    ids=['int64', 'object', 'str'])
def test_label_encoder_empty_array(values):
    """transform/inverse_transform of an empty list yield empty arrays."""
    encoder = LabelEncoder().fit(values)  # fit() returns self
    assert_array_equal(encoder.transform([]), np.array([]))
    assert_array_equal(encoder.inverse_transform([]), np.array([]))
def test_sparse_output_multilabel_binarizer():
    # test input as iterable of iterables; entries are factories so each use
    # gets a fresh (possibly one-shot) iterable.
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: ({2, 3}, {1}, {1, 2}),
        lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for sparse_output in [True, False]:
        for inp in inputs:
            # With fit_transform
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit_transform(inp())
            assert issparse(got) == sparse_output
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert got.indices.dtype == got.indptr.dtype
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert mlb.inverse_transform(got) == inverse
            # With fit
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit(inp()).transform(inp())
            assert issparse(got) == sparse_output
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert got.indices.dtype == got.indptr.dtype
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert mlb.inverse_transform(got) == inverse
    # Sparse inverse_transform rejects matrices with non-0/1 entries (uses
    # the mlb left over from the last loop iteration).
    assert_raises(ValueError, mlb.inverse_transform,
                  csr_matrix(np.array([[0, 1, 1],
                                       [2, 0, 0],
                                       [1, 1, 0]])))
def test_multilabel_binarizer():
    """MultiLabelBinarizer round-trips iterables of iterables."""
    # Each entry is a factory so every use gets a fresh (one-shot) iterable.
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: ({2, 3}, {1}, {1, 2}),
        lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for make_input in inputs:
        # Check both the fit_transform and the fit().transform() paths.
        for fit_then_transform in (False, True):
            mlb = MultiLabelBinarizer()
            if fit_then_transform:
                got = mlb.fit(make_input()).transform(make_input())
            else:
                got = mlb.fit_transform(make_input())
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert mlb.inverse_transform(got) == inverse
def test_multilabel_binarizer_empty_sample():
    """A sample with no labels becomes an all-zero indicator row."""
    samples = [[1, 2], [1], []]
    expected = np.array([[1, 1],
                         [1, 0],
                         [0, 0]])
    assert_array_equal(MultiLabelBinarizer().fit_transform(samples), expected)
def test_multilabel_binarizer_unknown_class():
    # Classes absent from fit (or from the explicit `classes` list) are
    # ignored at transform time, with a warning naming them.
    mlb = MultiLabelBinarizer()
    y = [[1, 2]]
    Y = np.array([[1, 0], [0, 1]])
    w = 'unknown class(es) [0, 4] will be ignored'
    matrix = assert_warns_message(UserWarning, w,
                                  mlb.fit(y).transform, [[4, 1], [2, 0]])
    assert_array_equal(matrix, Y)
    # Same with an explicit class list: 0 and 4 are still unknown.
    Y = np.array([[1, 0, 0], [0, 1, 0]])
    mlb = MultiLabelBinarizer(classes=[1, 2, 3])
    matrix = assert_warns_message(UserWarning, w,
                                  mlb.fit(y).transform, [[4, 1], [2, 0]])
    assert_array_equal(matrix, Y)
def test_multilabel_binarizer_given_classes():
    # An explicit `classes` list fixes both the column set and column order.
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # fit().transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure works with extra class
    mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp),
                       np.hstack(([[0], [0], [0]], indicator_mat)))
    assert_array_equal(mlb.classes_, [4, 1, 3, 2])
    # ensure fit is no-op as iterable is not consumed: with explicit classes,
    # fit() has nothing to discover, so transform() still sees every item.
    inp = iter(inp)
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    # ensure a ValueError is thrown if given duplicate classes
    err_msg = "The classes argument contains duplicate classes. Remove " \
              "these duplicates before passing them to MultiLabelBinarizer."
    mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3])
    assert_raise_message(ValueError, err_msg, mlb.fit, inp)
def test_multilabel_binarizer_multiple_calls():
    """Re-fitting after mutating `classes` honours the new class order."""
    inp = [(2, 3), (1,), (1, 2)]
    expected_132 = np.array([[0, 1, 1],
                             [1, 0, 0],
                             [1, 0, 1]])
    expected_123 = np.array([[0, 1, 1],
                             [1, 0, 0],
                             [1, 1, 0]])
    # First fit with explicit class order [1, 3, 2].
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), expected_132)
    # Mutating the `classes` attribute and refitting picks up the change.
    mlb.classes = [1, 2, 3]
    assert_array_equal(mlb.fit_transform(inp), expected_123)
def test_multilabel_binarizer_same_length_sequence():
    """Equal-length label sequences must not be mistaken for a 2-d array."""
    inp = [[1], [0], [2]]
    expected = np.array([[0, 1, 0],
                         [1, 0, 0],
                         [0, 0, 1]])
    # Exercise both the fit_transform and the fit().transform() paths.
    for transform in (lambda m: m.fit_transform(inp),
                      lambda m: m.fit(inp).transform(inp)):
        mlb = MultiLabelBinarizer()
        assert_array_equal(transform(mlb), expected)
        assert_array_equal(mlb.inverse_transform(expected), inp)
def test_multilabel_binarizer_non_integer_labels():
    """String, char and tuple labels must binarize just like integers."""
    tuple_classes = np.empty(3, dtype=object)
    tuple_classes[:] = [(1,), (2,), (3,)]
    cases = [
        ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
        ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
        ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
    ]
    expected = np.array([[0, 1, 1],
                         [1, 0, 0],
                         [1, 1, 0]])
    for samples, classes in cases:
        # fit_transform() and fit().transform() must agree
        for api in ('fit_transform', 'fit_then_transform'):
            mlb = MultiLabelBinarizer()
            if api == 'fit_transform':
                binarized = mlb.fit_transform(samples)
            else:
                binarized = mlb.fit(samples).transform(samples)
            assert_array_equal(binarized, expected)
            assert_array_equal(mlb.classes_, classes)
            assert_array_equal(mlb.inverse_transform(expected), samples)
    # unhashable labels must be rejected
    mlb = MultiLabelBinarizer()
    assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
    """Repeated labels within one sample count only once."""
    samples = [(1, 1, 1, 0)]
    binarizer = MultiLabelBinarizer()
    assert_array_equal(binarizer.fit_transform(samples), np.array([[1, 1]]))
def test_multilabel_binarizer_inverse_validation():
    """inverse_transform validates that input is binary with 2 columns."""
    mlb = MultiLabelBinarizer()
    mlb.fit_transform([(1, 1, 1, 0)])
    # Not binary
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
    # The following binary cases are fine, however
    for row in ([0, 0], [1, 1], [1, 0]):
        mlb.inverse_transform(np.array([row]))
    # Wrong shape
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    """The output column order must follow the classes argument."""
    assert_array_equal(label_binarize([1, 6], classes=[1, 2, 4, 6]),
                       np.array([[1, 0, 0, 0], [0, 0, 0, 1]]))
    # Modified class order
    assert_array_equal(label_binarize([1, 6], classes=[1, 6, 4, 2]),
                       np.array([[1, 0, 0, 0], [0, 1, 0, 0]]))
    assert_array_equal(label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1]),
                       np.array([[0, 0, 1, 0],
                                 [0, 0, 0, 1],
                                 [0, 1, 0, 0],
                                 [1, 0, 0, 0]]))
def check_binarized_results(y, classes, pos_label, neg_label, expected):
    """Check binarization round-trips for one (labels, classes) setting.

    Exercises both the label_binarize function and the LabelBinarizer
    class, for dense and sparse output, and checks that inverting the
    binarized matrix recovers the original ``y``.
    """
    for sparse_output in [True, False]:
        # sparse output cannot encode a nonzero neg_label / zero pos_label
        if ((pos_label == 0 or neg_label != 0) and sparse_output):
            assert_raises(ValueError, label_binarize, y, classes,
                          neg_label=neg_label, pos_label=pos_label,
                          sparse_output=sparse_output)
            continue
        # check label_binarize
        binarized = label_binarize(y, classes, neg_label=neg_label,
                                   pos_label=pos_label,
                                   sparse_output=sparse_output)
        assert_array_equal(toarray(binarized), expected)
        assert issparse(binarized) == sparse_output
        # check inverse
        y_type = type_of_target(y)
        if y_type == "multiclass":
            inversed = _inverse_binarize_multiclass(binarized, classes=classes)
        else:
            # threshold halfway between the two encoded label values
            inversed = _inverse_binarize_thresholding(binarized,
                                                      output_type=y_type,
                                                      classes=classes,
                                                      threshold=((neg_label +
                                                                  pos_label) /
                                                                 2.))
        assert_array_equal(toarray(inversed), toarray(y))
        # Check label binarizer
        lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
                            sparse_output=sparse_output)
        binarized = lb.fit_transform(y)
        assert_array_equal(toarray(binarized), expected)
        assert issparse(binarized) == sparse_output
        inverse_output = lb.inverse_transform(binarized)
        assert_array_equal(toarray(inverse_output), toarray(y))
        # sparseness of the inverse must match the sparseness of the input
        assert issparse(inverse_output) == issparse(y)
def test_label_binarize_binary():
    """Binary problems binarize to a single column."""
    # only the second column of the full 2-class indicator is kept
    expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
    check_binarized_results([0, 1, 0], [0, 1], 2, -1, expected)
    # Binary case where sparse_output = True will not result in a ValueError
    expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
    check_binarized_results([0, 1, 0], [0, 1], 3, 0, expected)
def test_label_binarize_multiclass():
    """Three classes give a (scaled) identity indicator matrix."""
    labels = [0, 1, 2]
    classes = [0, 1, 2]
    expected = 2 * np.eye(3)
    check_binarized_results(labels, classes, 2, 0, expected)
    # sparse output cannot represent a nonzero neg_label
    assert_raises(ValueError, label_binarize, labels, classes, neg_label=-1,
                  pos_label=2, sparse_output=True)
def test_label_binarize_multilabel():
    """Dense and all sparse indicator inputs must binarize identically."""
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    expected = pos_label * y_ind
    sparse_inputs = [fmt(y_ind) for fmt in (coo_matrix, csc_matrix,
                                            csr_matrix, dok_matrix,
                                            lil_matrix)]
    for y in [y_ind] + sparse_inputs:
        check_binarized_results(y, classes, pos_label, 0, expected)
    # nonzero neg_label is incompatible with sparse output
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
    # pos_label and neg_label must still differ after binarization
    assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
                  pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
    """Argmax tie-breaking: the first maximal column wins."""
    indicator = csr_matrix([[0, 1, 0],
                            [-1, 0, -1],
                            [0, 0, 0]])
    assert_array_equal(_inverse_binarize_multiclass(indicator, np.arange(3)),
                       np.array([1, 1, 0]))
@pytest.mark.parametrize(
    "values, expected",
    [(np.array([2, 1, 3, 1, 3], dtype='int64'),
      np.array([1, 2, 3], dtype='int64')),
     (np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
      np.array(['a', 'b', 'c'], dtype=object)),
     (np.array(['b', 'a', 'c', 'a', 'c']),
      np.array(['a', 'b', 'c']))],
    ids=['int64', 'object', 'str'])
def test_encode_util(values, expected):
    """_encode returns sorted uniques and, on request, encoded values."""
    codes = np.array([1, 0, 2, 0, 2])
    # uniques only
    assert_array_equal(_encode(values), expected)
    # uniques plus codes
    uniques, encoded = _encode(values, encode=True)
    assert_array_equal(uniques, expected)
    assert_array_equal(encoded, codes)
    # encoding against precomputed uniques
    _, encoded = _encode(values, uniques, encode=True)
    assert_array_equal(encoded, codes)
def test_encode_check_unknown():
    # test for the check_unknown parameter of _encode()
    known = np.array([1, 2, 3])
    seen = np.array([1, 2, 3, 4])
    # Default is True, raise error
    with pytest.raises(ValueError,
                       match='y contains previously unseen labels'):
        _encode(seen, known, encode=True, check_unknown=True)
    # dont raise error if False
    _encode(seen, known, encode=True, check_unknown=False)
    # parameter is ignored for object dtype
    known = np.array(['a', 'b', 'c'], dtype=object)
    seen = np.array(['a', 'b', 'c', 'd'], dtype=object)
    with pytest.raises(ValueError,
                       match='y contains previously unseen labels'):
        _encode(seen, known, encode=True, check_unknown=False)
| |
import string
import numpy as np
from pandas import (
DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range)
import pandas.util.testing as tm
class GetNumericData:
    # Benchmark DataFrame._get_numeric_data: selecting only the numeric
    # columns out of a frame mixing float and object dtypes.
    # NOTE(review): _get_numeric_data/_consolidate are private pandas
    # APIs; recent pandas may not provide them - confirm target version.
    def setup(self):
        # 25 float columns plus two object (string) columns
        self.df = DataFrame(np.random.randn(10000, 25))
        self.df['foo'] = 'bar'
        self.df['bar'] = 'baz'
        # consolidate internal blocks up front so the benchmark measures
        # the selection itself, not lazy consolidation on first access
        self.df = self.df._consolidate()
    def time_frame_get_numeric_data(self):
        self.df._get_numeric_data()
class Lookup:
    # Benchmark DataFrame.lookup, the label-based fancy getter.
    # NOTE(review): DataFrame.lookup was deprecated in pandas 1.2 and
    # later removed; running this requires an older pandas.
    def setup(self):
        self.df = DataFrame(np.random.randn(10000, 8),
                            columns=list('abcdefgh'))
        self.df['foo'] = 'bar'
        # 900 (row, column) pairs: every 10th row, columns cycled
        self.row_labels = list(self.df.index[::10])[:900]
        self.col_labels = list(self.df.columns) * 100
        # every (row, column) pair in the frame
        self.row_labels_all = np.array(
            list(self.df.index) * len(self.df.columns), dtype='object')
        self.col_labels_all = np.array(
            list(self.df.columns) * len(self.df.index), dtype='object')
    def time_frame_fancy_lookup(self):
        self.df.lookup(self.row_labels, self.col_labels)
    def time_frame_fancy_lookup_all(self):
        self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
    """Benchmarks for DataFrame.reindex along rows, columns and both."""
    def setup(self):
        size = 10**3
        self.df = DataFrame(np.random.randn(size * 10, size))
        self.idx = np.arange(4 * size, 7 * size)
        # each column gets a randomly chosen dtype so that reindexing
        # with missing labels forces an upcast
        self.df2 = DataFrame(
            {col: {0: np.random.randint(0, 2, size).astype(np.bool_),
                   1: np.random.randint(0, size, size).astype(np.int16),
                   2: np.random.randint(0, size, size).astype(np.int32),
                   3: np.random.randint(0, size, size).astype(np.int64)}
             [np.random.randint(0, 4)] for col in range(size)})
    def time_reindex_axis0(self):
        self.df.reindex(self.idx)
    def time_reindex_axis1(self):
        self.df.reindex(columns=self.idx)
    def time_reindex_both_axes(self):
        self.df.reindex(index=self.idx, columns=self.idx)
    def time_reindex_upcast(self):
        self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
    """Benchmarks for DataFrame.rename with dict mappers."""
    def setup(self):
        size = 10**3
        self.df = DataFrame(np.random.randn(size * 10, size))
        self.idx = np.arange(4 * size, 7 * size)
        self.dict_idx = {label: label for label in self.idx}
        # randomly typed columns, mirroring the Reindex benchmark
        self.df2 = DataFrame(
            {col: {0: np.random.randint(0, 2, size).astype(np.bool_),
                   1: np.random.randint(0, size, size).astype(np.int16),
                   2: np.random.randint(0, size, size).astype(np.int32),
                   3: np.random.randint(0, size, size).astype(np.int64)}
             [np.random.randint(0, 4)] for col in range(size)})
    def time_rename_single(self):
        self.df.rename({0: 0})
    def time_rename_axis0(self):
        self.df.rename(self.dict_idx)
    def time_rename_axis1(self):
        self.df.rename(columns=self.dict_idx)
    def time_rename_both_axes(self):
        self.df.rename(index=self.dict_idx, columns=self.dict_idx)
    def time_dict_rename_both_axes(self):
        self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
    # Benchmarks for the row/column iteration APIs: iteritems, itertuples
    # (named, raw and materialized variants) and iterrows.
    # NOTE(review): iteritems was removed in pandas 2.0 (use items);
    # this class needs an older pandas to run.
    # mem_itertuples_* benchmarks are slow
    timeout = 120
    def setup(self):
        N = 1000
        self.df = DataFrame(np.random.randn(N * 10, N))
        self.df2 = DataFrame(np.random.randn(N * 50, 10))
        self.df3 = DataFrame(np.random.randn(N, 5 * N),
                             columns=['C' + str(c) for c in range(N * 5)])
        self.df4 = DataFrame(np.random.randn(N * 1000, 10))
    def time_iteritems(self):
        # (monitor no-copying behaviour)
        if hasattr(self.df, '_item_cache'):
            self.df._item_cache.clear()
        for name, col in self.df.iteritems():
            pass
    def time_iteritems_cached(self):
        # same loop but without clearing the column cache first
        for name, col in self.df.iteritems():
            pass
    def time_iteritems_indexing(self):
        for col in self.df3:
            self.df3[col]
    def time_itertuples_start(self):
        # cost of creating the iterator only, no consumption
        self.df4.itertuples()
    def time_itertuples_read_first(self):
        next(self.df4.itertuples())
    def time_itertuples(self):
        for row in self.df4.itertuples():
            pass
    def time_itertuples_to_list(self):
        list(self.df4.itertuples())
    def mem_itertuples_start(self):
        return self.df4.itertuples()
    def peakmem_itertuples_start(self):
        self.df4.itertuples()
    def mem_itertuples_read_first(self):
        return next(self.df4.itertuples())
    def peakmem_itertuples(self):
        for row in self.df4.itertuples():
            pass
    def mem_itertuples_to_list(self):
        return list(self.df4.itertuples())
    def peakmem_itertuples_to_list(self):
        list(self.df4.itertuples())
    def time_itertuples_raw_start(self):
        # "raw" variant: plain tuples, no index, no namedtuple overhead
        self.df4.itertuples(index=False, name=None)
    def time_itertuples_raw_read_first(self):
        next(self.df4.itertuples(index=False, name=None))
    def time_itertuples_raw_tuples(self):
        for row in self.df4.itertuples(index=False, name=None):
            pass
    def time_itertuples_raw_tuples_to_list(self):
        list(self.df4.itertuples(index=False, name=None))
    def mem_itertuples_raw_start(self):
        return self.df4.itertuples(index=False, name=None)
    def peakmem_itertuples_raw_start(self):
        self.df4.itertuples(index=False, name=None)
    def peakmem_itertuples_raw_read_first(self):
        next(self.df4.itertuples(index=False, name=None))
    def peakmem_itertuples_raw(self):
        for row in self.df4.itertuples(index=False, name=None):
            pass
    def mem_itertuples_raw_to_list(self):
        return list(self.df4.itertuples(index=False, name=None))
    def peakmem_itertuples_raw_to_list(self):
        list(self.df4.itertuples(index=False, name=None))
    def time_iterrows(self):
        for row in self.df.iterrows():
            pass
class ToString:
    """Benchmark rendering a float frame with DataFrame.to_string."""
    def setup(self):
        values = np.random.randn(100, 10)
        self.df = DataFrame(values)
    def time_to_string_floats(self):
        self.df.to_string()
class ToHTML:
    """Benchmark DataFrame.to_html on mixed period/int/float columns."""
    def setup(self):
        n_rows = 500
        self.df2 = DataFrame(np.random.randn(n_rows, 10))
        # overwrite the first two columns with non-float data
        self.df2[0] = period_range('2000', periods=n_rows)
        self.df2[1] = range(n_rows)
    def time_to_html_mixed(self):
        self.df2.to_html()
class Repr:
    """Benchmarks for plain and HTML reprs of frames needing truncation."""
    def setup(self):
        n = 10000
        data = np.random.randn(n, 10)
        levels = np.tile(np.random.randn(3, int(n / 100)), 100)
        self.df3 = DataFrame(data, index=MultiIndex.from_arrays(levels))
        self.df4 = DataFrame(data, index=np.random.randn(n))
        self.df_tall = DataFrame(np.random.randn(n, 10))
        self.df_wide = DataFrame(np.random.randn(10, n))
    def time_html_repr_trunc_mi(self):
        # truncated HTML repr with a MultiIndex
        self.df3._repr_html_()
    def time_html_repr_trunc_si(self):
        # truncated HTML repr with a single float index
        self.df4._repr_html_()
    def time_repr_tall(self):
        repr(self.df_tall)
    def time_frame_repr_wide(self):
        repr(self.df_wide)
class MaskBool:
    """Benchmark DataFrame.mask on boolean and float-cast frames."""
    def setup(self):
        frame = DataFrame(np.random.randn(1000, 500))
        # half the cells become NaN, giving the mask something to hide
        frame = frame.where(frame > 0)
        self.bools = frame > 0
        self.mask = isnull(frame)
    def time_frame_mask_bools(self):
        self.bools.mask(self.mask)
    def time_frame_mask_floats(self):
        self.bools.astype(float).mask(self.mask)
class Isnull:
    """Benchmarks for pandas.isnull over float, string and object data."""
    def setup(self):
        N = 10**3
        self.df_no_null = DataFrame(np.random.randn(N, N))
        pool = np.array([np.nan, 1.0])
        self.df = DataFrame(np.random.choice(pool, (N, N)))
        pool = np.array(list(string.ascii_letters + string.whitespace))
        self.df_strings = DataFrame(np.random.choice(pool, (N, N)))
        # every flavour of "missing" plus definitely-present values
        pool = np.array([NaT, np.nan, None, np.datetime64('NaT'),
                         np.timedelta64('NaT'), 0, 1, 2.0, '', 'abcd'])
        self.df_obj = DataFrame(np.random.choice(pool, (N, N)))
    def time_isnull_floats_no_null(self):
        isnull(self.df_no_null)
    def time_isnull(self):
        isnull(self.df)
    def time_isnull_strngs(self):
        isnull(self.df_strings)
    def time_isnull_obj(self):
        isnull(self.df_obj)
class Fillna:
    # Benchmark DataFrame.fillna with pad/bfill, in place and copying.
    # NOTE(review): fillna(method=...) is deprecated in recent pandas in
    # favour of ffill()/bfill() - confirm target version.
    params = ([True, False], ['pad', 'bfill'])
    param_names = ['inplace', 'method']
    def setup(self, inplace, method):
        values = np.random.randn(10000, 100)
        # every other row is missing, so filling always has work to do
        values[::2] = np.nan
        self.df = DataFrame(values)
    def time_frame_fillna(self, inplace, method):
        self.df.fillna(inplace=inplace, method=method)
class Dropna:
    # Benchmark DataFrame.dropna for each `how` strategy along both axes,
    # on an all-float frame and on one with a mixed-in object column.
    # NOTE(review): .ix was deprecated in pandas 0.20 and later removed;
    # this class needs an older pandas to run.
    params = (['all', 'any'], [0, 1])
    param_names = ['how', 'axis']
    def setup(self, how, axis):
        self.df = DataFrame(np.random.randn(10000, 1000))
        # punch NaN holes into blocks of rows and columns
        self.df.ix[50:1000, 20:50] = np.nan
        self.df.ix[2000:3000] = np.nan
        self.df.ix[:, 60:70] = np.nan
        self.df_mixed = self.df.copy()
        self.df_mixed['foo'] = 'bar'
    def time_dropna(self, how, axis):
        self.df.dropna(how=how, axis=axis)
    def time_dropna_axis_mixed_dtypes(self, how, axis):
        self.df_mixed.dropna(how=how, axis=axis)
class Count:
    # Benchmark DataFrame.count(level=...) on MultiIndexed axes, for
    # float-only and mixed-dtype frames.
    # NOTE(review): .ix and count(level=...) were removed from modern
    # pandas; this class needs an older pandas to run.
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.df = DataFrame(np.random.randn(10000, 1000))
        # punch NaN holes so count() has something to skip
        self.df.ix[50:1000, 20:50] = np.nan
        self.df.ix[2000:3000] = np.nan
        self.df.ix[:, 60:70] = np.nan
        self.df_mixed = self.df.copy()
        self.df_mixed['foo'] = 'bar'
        # duplicate each axis into a two-level MultiIndex so that
        # count(level=1) has levels to group on
        self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
        self.df.columns = MultiIndex.from_arrays([self.df.columns,
                                                  self.df.columns])
        self.df_mixed.index = MultiIndex.from_arrays([self.df_mixed.index,
                                                      self.df_mixed.index])
        self.df_mixed.columns = MultiIndex.from_arrays([self.df_mixed.columns,
                                                        self.df_mixed.columns])
    def time_count_level_multi(self, axis):
        self.df.count(axis=axis, level=1)
    def time_count_level_mixed_dtypes_multi(self, axis):
        self.df_mixed.count(axis=axis, level=1)
class Apply:
    """Benchmarks for DataFrame.apply with various callables and axes."""
    def setup(self):
        self.df = DataFrame(np.random.randn(1000, 100))
        self.s = Series(np.arange(1028.0))
        self.df2 = DataFrame({col: self.s for col in range(1028)})
        self.df3 = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
    def time_apply_user_func(self):
        self.df2.apply(lambda col: np.corrcoef(col, self.s)[(0, 1)])
    def time_apply_axis_1(self):
        self.df.apply(lambda row: row + 1, axis=1)
    def time_apply_lambda_mean(self):
        self.df.apply(lambda col: col.mean())
    def time_apply_np_mean(self):
        self.df.apply(np.mean)
    def time_apply_pass_thru(self):
        self.df.apply(lambda col: col)
    def time_apply_ref_by_name(self):
        self.df3.apply(lambda row: row['A'] + row['B'], axis=1)
class Dtypes:
    """Benchmark the DataFrame.dtypes property on a wide float frame."""
    def setup(self):
        values = np.random.randn(1000, 1000)
        self.df = DataFrame(values)
    def time_frame_dtypes(self):
        self.df.dtypes
class Equals:
    """Benchmarks for DataFrame.equals on equal and unequal frames."""
    def setup(self):
        dim = 10**3
        self.float_df = DataFrame(np.random.randn(dim, dim))
        self.float_df_nan = self.float_df.copy()
        self.float_df_nan.iloc[-1, -1] = np.nan
        self.object_df = DataFrame('foo', index=range(dim), columns=range(dim))
        self.object_df_nan = self.object_df.copy()
        self.object_df_nan.iloc[-1, -1] = np.nan
        # duplicated column labels exercise a separate comparison path
        self.nonunique_cols = self.object_df.copy()
        self.nonunique_cols.columns = ['A'] * len(self.nonunique_cols.columns)
        self.nonunique_cols_nan = self.nonunique_cols.copy()
        self.nonunique_cols_nan.iloc[-1, -1] = np.nan
    def time_frame_float_equal(self):
        self.float_df.equals(self.float_df)
    def time_frame_float_unequal(self):
        self.float_df.equals(self.float_df_nan)
    def time_frame_nonunique_equal(self):
        self.nonunique_cols.equals(self.nonunique_cols)
    def time_frame_nonunique_unequal(self):
        self.nonunique_cols.equals(self.nonunique_cols_nan)
    def time_frame_object_equal(self):
        self.object_df.equals(self.object_df)
    def time_frame_object_unequal(self):
        self.object_df.equals(self.object_df_nan)
class Interpolate:
    # Benchmark DataFrame.interpolate with and without dtype downcasting.
    # NOTE(review): the downcast= keyword is deprecated in recent pandas.
    params = [None, 'infer']
    param_names = ['downcast']
    def setup(self, downcast):
        N = 10000
        # this is the worst case, where every column has NaNs.
        self.df = DataFrame(np.random.randn(N, 100))
        self.df.values[::2] = np.nan
        self.df2 = DataFrame({'A': np.arange(0, N),
                              'B': np.random.randint(0, 100, N),
                              'C': np.random.randn(N),
                              'D': np.random.randn(N)})
        # only some columns/rows are missing in the second frame
        self.df2.loc[1::5, 'A'] = np.nan
        self.df2.loc[1::5, 'C'] = np.nan
    def time_interpolate(self, downcast):
        self.df.interpolate(downcast=downcast)
    def time_interpolate_some_good(self, downcast):
        self.df2.interpolate(downcast=downcast)
class Shift:
    """Benchmark DataFrame.shift along rows and columns."""
    # frame shift speedup issue-5609
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        values = np.random.rand(10000, 500)
        self.df = DataFrame(values)
    def time_shift(self, axis):
        self.df.shift(1, axis=axis)
class Nunique:
    """Benchmark DataFrame.nunique over a large float frame."""
    def setup(self):
        values = np.random.randn(10000, 1000)
        self.df = DataFrame(values)
    def time_frame_nunique(self):
        self.df.nunique()
class Duplicated:
    """Benchmarks for DataFrame.duplicated on long and wide frames."""
    def setup(self):
        n = (1 << 20)
        # the value pools are 64x smaller than the frame, guaranteeing
        # plenty of duplicated rows
        stamps = date_range('2015-01-01', freq='S', periods=(n // 64))
        floats = np.random.randn(n // 64).round(2)
        self.df = DataFrame({'a': np.random.randint(-1 << 8, 1 << 8, n),
                             'b': np.random.choice(stamps, n),
                             'c': np.random.choice(floats, n)})
        # short but very wide string frame
        self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
    def time_frame_duplicated(self):
        self.df.duplicated()
    def time_frame_duplicated_wide(self):
        self.df2.duplicated()
class XS:
    """Benchmark DataFrame.xs (cross-section) on a square frame."""
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        self.N = 10**4
        self.df = DataFrame(np.random.randn(self.N, self.N))
    def time_frame_xs(self, axis):
        # take the middle row/column by label
        self.df.xs(self.N / 2, axis=axis)
class SortValues:
    """Benchmark DataFrame.sort_values in each direction."""
    params = [True, False]
    param_names = ['ascending']
    def setup(self, ascending):
        values = np.random.randn(1000000, 2)
        self.df = DataFrame(values, columns=list('AB'))
    def time_frame_sort_values(self, ascending):
        self.df.sort_values(by='A', ascending=ascending)
class SortIndexByColumns:
    # Benchmark multi-column DataFrame.sort_values on repeated string keys.
    # NOTE(review): tm.makeStringIndex was removed from pandas' testing
    # module in recent versions; this needs an older pandas to run.
    def setup(self):
        N = 10000
        K = 10
        # each key value repeats K times, so sorting must fall back to
        # the second key to break ties
        self.df = DataFrame({'key1': tm.makeStringIndex(N).values.repeat(K),
                             'key2': tm.makeStringIndex(N).values.repeat(K),
                             'value': np.random.randn(N * K)})
    def time_frame_sort_values_by_columns(self):
        self.df.sort_values(by=['key1', 'key2'])
class Quantile:
    """Benchmark DataFrame.quantile with multiple quantiles per axis."""
    params = [0, 1]
    param_names = ['axis']
    def setup(self, axis):
        values = np.random.randn(1000, 3)
        self.df = DataFrame(values, columns=list('ABC'))
    def time_frame_quantile(self, axis):
        self.df.quantile([0.1, 0.5], axis=axis)
class GetDtypeCounts:
    # 2807
    # Benchmarks DataFrame.get_dtype_counts and DataFrame.info on a very
    # wide frame.
    # NOTE(review): get_dtype_counts was deprecated in pandas 0.25 and
    # later removed; this needs an older pandas to run.
    def setup(self):
        self.df = DataFrame(np.random.randn(10, 10000))
    def time_frame_get_dtype_counts(self):
        self.df.get_dtype_counts()
    def time_info(self):
        self.df.info()
class NSort:
    """Benchmarks for nlargest/nsmallest with each `keep` strategy."""
    params = ['first', 'last', 'all']
    param_names = ['keep']
    def setup(self, keep):
        values = np.random.randn(100000, 3)
        self.df = DataFrame(values, columns=list('ABC'))
    def time_nlargest_one_column(self, keep):
        self.df.nlargest(100, 'A', keep=keep)
    def time_nlargest_two_columns(self, keep):
        self.df.nlargest(100, ['A', 'B'], keep=keep)
    def time_nsmallest_one_column(self, keep):
        self.df.nsmallest(100, 'A', keep=keep)
    def time_nsmallest_two_columns(self, keep):
        self.df.nsmallest(100, ['A', 'B'], keep=keep)
class Describe:
    """Benchmarks for Series.describe and DataFrame.describe."""
    def setup(self):
        columns = {name: np.random.randint(0, 100, int(1e6))
                   for name in 'abc'}
        self.df = DataFrame(columns)
    def time_series_describe(self):
        self.df['a'].describe()
    def time_dataframe_describe(self):
        self.df.describe()
from .pandas_vb_common import setup # noqa: F401
| |
#!/usr/bin/env python
"""
EVENNIA SERVER LAUNCHER SCRIPT
This is the start point for running Evennia.
Sets the appropriate environmental variables and launches the server
and portal through the evennia_runner. Run without arguments to get a
menu. Run the script with the -h flag to see usage information.
"""
from __future__ import print_function
from builtins import input, range
import os
import sys
import signal
import shutil
import importlib
from argparse import ArgumentParser
from subprocess import Popen, check_output, call, CalledProcessError, STDOUT
import django
# Signal processing
SIG = signal.SIGINT
# Set up the main python paths to Evennia
EVENNIA_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import evennia
EVENNIA_LIB = os.path.join(os.path.dirname(os.path.abspath(evennia.__file__)))
EVENNIA_SERVER = os.path.join(EVENNIA_LIB, "server")
EVENNIA_RUNNER = os.path.join(EVENNIA_SERVER, "evennia_runner.py")
EVENNIA_TEMPLATE = os.path.join(EVENNIA_LIB, "game_template")
EVENNIA_PROFILING = os.path.join(EVENNIA_SERVER, "profiling")
EVENNIA_DUMMYRUNNER = os.path.join(EVENNIA_PROFILING, "dummyrunner.py")
TWISTED_BINARY = "twistd"
# Game directory structure
SETTINGFILE = "settings.py"
SERVERDIR = "server"
CONFDIR = os.path.join(SERVERDIR, "conf")
SETTINGS_PATH = os.path.join(CONFDIR, SETTINGFILE)
SETTINGS_DOTPATH = "server.conf.settings"
CURRENT_DIR = os.getcwd()
GAMEDIR = CURRENT_DIR
# Operational setup
SERVER_LOGFILE = None
PORTAL_LOGFILE = None
HTTP_LOGFILE = None
SERVER_PIDFILE = None
PORTAL_PIDFILE = None
SERVER_RESTART = None
PORTAL_RESTART = None
SERVER_PY_FILE = None
PORTAL_PY_FILE = None
PYTHON_MIN = '2.7'
TWISTED_MIN = '16.0.0'
DJANGO_MIN = '1.8'
DJANGO_REC = '1.9'
sys.path[1] = EVENNIA_ROOT
#------------------------------------------------------------
#
# Messages
#
#------------------------------------------------------------
CREATED_NEW_GAMEDIR = \
"""
Welcome to Evennia!
Created a new Evennia game directory '{gamedir}'.
You can now optionally edit your new settings file
at {settings_path}. If you don't, the defaults
will work out of the box. When ready to continue, 'cd' to your
game directory and run:
evennia migrate
This initializes the database. To start the server for the first
time, run:
evennia start
Make sure to create a superuser when asked for it (the email can
be blank if you want). You should now be able to (by default)
connect to your server on 'localhost', port 4000 using a
telnet/mud client or http://localhost:8000 using your web browser.
If things don't work, check so those ports are open.
"""
ERROR_INPUT = \
"""
Command
{args} {kwargs}
raised an error: '{traceback}'.
"""
ERROR_NO_GAMEDIR = \
"""
ERROR: No Evennia settings file was found. Evennia looks for the
file in your game directory as server/conf/settings.py.
You must run this command from somewhere inside a valid game
directory first created with
evennia --init mygamename
If you are in a game directory but is missing a settings.py file,
it may be because you have git-cloned an existing game directory.
The settings.py file is not cloned by git (it's in .gitignore)
since it can contain sensitive and/or server-specific information.
You can create a new, empty settings file with
evennia --initsettings
If cloning the settings file is not a problem you could manually
copy over the old settings file or remove its entry in .gitignore
"""
WARNING_MOVING_SUPERUSER = \
"""
WARNING: Evennia expects a Player superuser with id=1. No such
Player was found. However, another superuser ('{other_key}',
id={other_id}) was found in the database. If you just created this
superuser and still see this text it is probably due to the
database being flushed recently - in this case the database's
internal auto-counter might just start from some value higher than
one.
We will fix this by assigning the id 1 to Player '{other_key}'.
Please confirm this is acceptable before continuing.
"""
WARNING_RUNSERVER = \
"""
WARNING: There is no need to run the Django development
webserver to test out Evennia web features (the web client
will in fact not work since the Django test server knows
nothing about MUDs). Instead, just start Evennia with the
webserver component active (this is the default).
"""
ERROR_SETTINGS = \
"""
ERROR: There was an error importing Evennia's config file
{settingspath}.
There is usually one of three reasons for this:
1) You are not running this command from your game directory.
Change directory to your game directory and try again (or
create a new game directory using evennia --init <dirname>)
2) The settings file contains a syntax error. If you see a
traceback above, review it, resolve the problem and try again.
3) Django is not correctly installed. This usually shows as
errors mentioning 'DJANGO_SETTINGS_MODULE'. If you run a
virtual machine, it might be worth to restart it to see if
this resolves the issue.
""".format(settingsfile=SETTINGFILE, settingspath=SETTINGS_PATH)
ERROR_INITSETTINGS = \
"""
ERROR: 'evennia --initsettings' must be called from the root of
your game directory, since it tries to (re)create the new
settings.py file in a subfolder server/conf/.
"""
RECREATED_SETTINGS = \
"""
(Re)created an empty settings file in server/conf/settings.py.
Note that if you were using an existing database, the password
salt of this new settings file will be different from the old one.
This means that any existing players may not be able to log in to
their accounts with their old passwords.
"""
ERROR_DATABASE = \
"""
ERROR: Your database does not seem to be set up correctly.
(error was '{traceback}')
Standing in your game directory, run
evennia migrate
to initialize/update the database according to your settings.
"""
ERROR_WINDOWS_WIN32API = \
"""
ERROR: Unable to import win32api, which Twisted requires to run.
You may download it from:
http://sourceforge.net/projects/pywin32/files/pywin32/
If you are running in a virtual environment, browse to the
location of the latest win32api exe file for your computer and
Python version and copy the url to it; then paste it into a call
to easy_install:
easy_install http://<url to win32api exe>
"""
INFO_WINDOWS_BATFILE = \
"""
INFO: Since you are running Windows, a file 'twistd.bat' was
created for you. This is a simple batch file that tries to call
the twisted executable. Evennia determined this to be:
{twistd_path}
If you run into errors at startup you might need to edit
twistd.bat to point to the actual location of the Twisted
executable (usually called twistd.py) on your machine.
This procedure is only done once. Run evennia.py again when you
are ready to start the server.
"""
CMDLINE_HELP = \
"""
Starts or operates the Evennia MU* server. Also allows for
initializing a new game directory and manages the game's database.
You can also pass most standard django-admin arguments and
options.
"""
VERSION_INFO = \
"""
Evennia {version}
OS: {os}
Python: {python}
Twisted: {twisted}
Django: {django}{about}
"""
ABOUT_INFO = \
"""
Evennia MUD/MUX/MU* development system
Licence: BSD 3-Clause Licence
Web: http://www.evennia.com
Irc: #evennia on FreeNode
Forum: http://www.evennia.com/discussions
Maintainer (2010-): Griatch (griatch AT gmail DOT com)
Maintainer (2006-10): Greg Taylor
Use -h for command line options.
"""
HELP_ENTRY = \
"""
Enter 'evennia -h' for command-line options.
Use option (1) in a production environment. During development (2) is
usually enough, portal debugging is usually only useful if you are
adding new protocols or are debugging Evennia itself.
Reload with (5) to update the server with your changes without
disconnecting any players.
Note: Reload and stop are sometimes poorly supported in Windows. If you
have issues, log into the game to stop or restart the server instead.
"""
MENU = \
"""
+----Evennia Launcher-------------------------------------------+
| |
+--- Starting --------------------------------------------------+
| |
| 1) (normal): All output to logfiles |
| 2) (server devel): Server logs to terminal (-i option) |
| 3) (portal devel): Portal logs to terminal |
| 4) (full devel): Both Server and Portal logs to terminal |
| |
+--- Restarting ------------------------------------------------+
| |
| 5) Reload the Server |
| 6) Reload the Portal (only works with portal/full debug) |
| |
+--- Stopping --------------------------------------------------+
| |
| 7) Stopping both Portal and Server |
| 8) Stopping only Server |
| 9) Stopping only Portal |
| |
+---------------------------------------------------------------+
| h) Help i) About info q) Abort |
+---------------------------------------------------------------+
"""
ERROR_LOGDIR_MISSING = \
"""
ERROR: One or more log-file directory locations could not be
found:
{logfiles}
This is simple to fix: Just manually create the missing log
directory (or directories) and re-launch the server (the log files
will be created automatically).
(Explanation: Evennia creates the log directory automatically when
initializating a new game directory. This error usually happens if
you used git to clone a pre-created game directory - since log
files are in .gitignore they will not be cloned, which leads to
the log directory also not being created.)
"""
ERROR_PYTHON_VERSION = \
"""
ERROR: Python {pversion} used. Evennia requires version
{python_min} or higher (but not 3.x).
"""
ERROR_TWISTED_VERSION = \
"""
ERROR: Twisted {tversion} found. Evennia requires
version {twisted_min} or higher.
"""
ERROR_NOTWISTED = \
"""
ERROR: Twisted does not seem to be installed.
"""
ERROR_DJANGO_MIN = \
"""
ERROR: Django {dversion} found. Evennia requires version {django_min}
or higher.
Install it with for example `pip install --upgrade django`
or with `pip install django=={django_min}` to get a specific version.
It's also a good idea to run `evennia migrate` after this upgrade.
"""
NOTE_DJANGO_MIN = \
"""
NOTE: Django {dversion} found. This will work, but v{django_rec}
is recommended for production.
"""
NOTE_DJANGO_NEW = \
"""
NOTE: Django {dversion} found. This is newer than Evennia's
recommended version (v{django_rec}). It might work, but may be new
enough to not be fully tested yet. Report any issues.
"""
ERROR_NODJANGO = \
"""
ERROR: Django does not seem to be installed.
"""
NOTE_KEYBOARDINTERRUPT = \
"""
STOP: Caught keyboard interrupt while in interactive mode.
"""
#------------------------------------------------------------
#
# Functions
#
#------------------------------------------------------------
def evennia_version():
    """
    Get the Evennia version info from the main package.

    Falls back to "Unknown" when the package cannot be imported and
    appends the short git revision when the checkout provides one.
    """
    try:
        import evennia
        version = evennia.__version__
    except ImportError:
        # package not importable - keep a placeholder
        version = "Unknown"
    try:
        rev = check_output(
            "git rev-parse --short HEAD",
            shell=True, cwd=EVENNIA_ROOT, stderr=STDOUT).strip()
    except (IOError, CalledProcessError):
        # not a git checkout, or git unavailable - leave version as-is
        pass
    else:
        version = "%s (rev %s)" % (version, rev)
    return version
EVENNIA_VERSION = evennia_version()
def _version_tuple(version_string):
    """Convert a dotted version string to a tuple of ints for comparison.

    Parsing stops at the first component that does not start with a
    digit, so '16.0.0rc1' still compares on its numeric prefix (16, 0, 0).
    """
    parts = []
    for piece in version_string.split("."):
        digits = ""
        for char in piece:
            if not char.isdigit():
                break
            digits += char
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)


def check_main_evennia_dependencies():
    """
    Checks and imports the Evennia dependencies. This must be done
    already before the paths are set up.

    Returns:
        not_error (bool): True if no dependency error was found.
    """
    error = False
    # Python
    # NOTE: versions are compared as numeric tuples. The previous code
    # compared the raw strings, which mis-orders multi-digit components
    # (e.g. '2.10' < '2.7' and '1.9' > '1.10' are both True for strings).
    pversion = ".".join(str(num) for num in sys.version_info if type(num) == int)
    if _version_tuple(pversion) < _version_tuple(PYTHON_MIN):
        print(ERROR_PYTHON_VERSION.format(pversion=pversion, python_min=PYTHON_MIN))
        error = True
    # Twisted
    try:
        import twisted
        tversion = twisted.version.short()
        if _version_tuple(tversion) < _version_tuple(TWISTED_MIN):
            print(ERROR_TWISTED_VERSION.format(
                tversion=tversion, twisted_min=TWISTED_MIN))
            error = True
    except ImportError:
        print(ERROR_NOTWISTED)
        error = True
    # Django
    try:
        dversion = ".".join(str(num) for num in django.VERSION if type(num) == int)
        # only the main version (1.5, not 1.5.4.0)
        dversion_main = ".".join(dversion.split(".")[:2])
        dtuple = _version_tuple(dversion)
        if dtuple < _version_tuple(DJANGO_MIN):
            print(ERROR_DJANGO_MIN.format(
                dversion=dversion_main, django_min=DJANGO_MIN))
            error = True
        elif _version_tuple(DJANGO_MIN) <= dtuple < _version_tuple(DJANGO_REC):
            print(NOTE_DJANGO_MIN.format(
                dversion=dversion_main, django_rec=DJANGO_REC))
        elif _version_tuple(dversion_main) > _version_tuple(DJANGO_REC):
            print(NOTE_DJANGO_NEW.format(
                dversion=dversion_main, django_rec=DJANGO_REC))
    except ImportError:
        print(ERROR_NODJANGO)
        error = True
    if error:
        # hard dependency failure - no point in continuing
        sys.exit()
    # return True/False if error was reported or not
    return not error
def set_gamedir(path):
    """
    Set GAMEDIR based on path, by figuring out where the setting file
    is inside the directory tree. Walks upwards from the current working
    directory, at most 10 levels, and exits with an error if no game
    directory is found.
    """
    global GAMEDIR
    settings_path = os.path.join("server", "conf", "settings.py")
    # climb towards the filesystem root, one level per iteration
    for _ in range(10):
        gpath = os.getcwd()
        # a game dir has a server/ subfolder holding the settings file
        if "server" in os.listdir(gpath) and os.path.isfile(settings_path):
            GAMEDIR = gpath
            return
        os.chdir(os.pardir)
    print(ERROR_NO_GAMEDIR)
    sys.exit()
def create_secret_key():
    """
    Randomly create the secret key for the settings file.

    Uses the OS-level CSPRNG (random.SystemRandom) rather than the default
    Mersenne Twister, since this key is security sensitive.

    Returns:
        secret_key (str): A 40-character key. Characters that would break
            the single-quoted settings template are substituted away
            (backslash removed, ' -> ", { -> _, } -> -).
    """
    import random
    import string
    # string.letters is Python2-only; fall back for Python3 compatibility
    letters = getattr(string, "letters", string.ascii_letters)
    secret_key = list((letters +
                       string.digits + string.punctuation).replace("\\", "")
                      .replace("'", '"').replace("{", "_").replace("}", "-"))
    # SystemRandom draws from os.urandom - suitable for secrets
    random.SystemRandom().shuffle(secret_key)
    secret_key = "".join(secret_key[:40])
    return secret_key
def create_settings_file(init=True):
    """
    Uses the template settings file to build a working settings file.

    Args:
        init (bool): This is part of the normal evennia --init
            operation. If false, this function will copy a fresh
            template file in (asking if it already exists).
    """
    settings_path = os.path.join(GAMEDIR, "server", "conf", "settings.py")
    if not init:
        # if not --init mode, settings file may already exist from before
        if os.path.exists(settings_path):
            # NOTE(review): raw_input is Python2-only; other prompts in this
            # file use input() - confirm which Python version is targeted.
            inp = raw_input("server/conf/settings.py already exists. "
                            "Do you want to reset it? y/[N]> ")
            if not inp.lower() == 'y':
                print ("Aborted.")
                sys.exit()
            else:
                print ("Reset the settings file.")
        # overwrite with a pristine copy of the template settings file
        default_settings_path = os.path.join(EVENNIA_TEMPLATE, "server", "conf", "settings.py")
        shutil.copy(default_settings_path, settings_path)
    with open(settings_path, 'r') as f:
        settings_string = f.read()
    # tweak the settings: fill the {placeholders} in the template with
    # the defaults path, a server name derived from the dir name, and a key
    setting_dict = {
        "settings_default": os.path.join(EVENNIA_LIB, "settings_default.py"),
        "servername": "\"%s\"" % GAMEDIR.rsplit(os.path.sep, 1)[1].capitalize(),
        "secret_key": "\'%s\'" % create_secret_key()}
    settings_string = settings_string.format(**setting_dict)
    with open(settings_path, 'w') as f:
        f.write(settings_string)
def create_game_directory(dirname):
    """
    Initialize a new game directory named dirname at the current path,
    by copying the template directory shipped with evennia and then
    pre-building its settings file.

    Args:
        dirname (str): The directory name to create.
    """
    global GAMEDIR
    GAMEDIR = os.path.abspath(os.path.join(CURRENT_DIR, dirname))
    # refuse to clobber an existing directory
    if os.path.exists(GAMEDIR):
        print("Cannot create new Evennia game dir: '%s' already exists." % dirname)
        sys.exit()
    shutil.copytree(EVENNIA_TEMPLATE, GAMEDIR)
    # pre-build settings file in the new GAMEDIR
    create_settings_file()
def create_superuser():
    """
    Create the superuser player (Player #1) by delegating to Django's
    interactive createsuperuser management command.
    """
    print(
        "\nCreate a superuser below. The superuser is Player #1, the 'owner' "
        "account of the server.\n")
    django.core.management.call_command("createsuperuser", interactive=True)
def check_database():
    """
    Check so the database exists, is set up and has a superuser; create
    the superuser (or move an existing one to id=1) when missing.

    Returns:
        exists (bool): `True` once the database checks pass (the function
            calls sys.exit() on unrecoverable errors instead of returning
            False).
    """
    # Check so a database exists and is accessible
    from django.db import connection
    tables = connection.introspection.get_table_list(connection.cursor())
    # NOTE(review): basestring is Python2-only - confirm target interpreter.
    if not tables or not isinstance(tables[0], basestring): # django 1.8+
        tables = [tableinfo.name for tableinfo in tables]
    if tables and u'players_playerdb' in tables:
        # database exists and seems set up. Initialize evennia.
        import evennia
        evennia._init()
        # Try to get Player#1
        from evennia.players.models import PlayerDB
        try:
            PlayerDB.objects.get(id=1)
        except django.db.utils.OperationalError as e:
            print(ERROR_DATABASE.format(traceback=e))
            sys.exit()
        except PlayerDB.DoesNotExist:
            # no superuser yet. We need to create it.
            other_superuser = PlayerDB.objects.filter(is_superuser=True)
            if other_superuser:
                # Another superuser was found, but not with id=1. This may
                # happen if using flush (the auto-id starts at a higher
                # value). We copy this superuser into id=1. To do
                # this we must deepcopy it, delete it then save the copy
                # with the new id. This allows us to avoid the UNIQUE
                # constraint on usernames.
                other = other_superuser[0]
                other_id = other.id
                other_key = other.username
                print(WARNING_MOVING_SUPERUSER.format(
                    other_key=other_key, other_id=other_id))
                res = ""
                while res.upper() != "Y":
                    # ask for permission
                    res = input("Continue [Y]/N: ")
                    if res.upper() == "N":
                        sys.exit()
                    elif not res:
                        break
                # continue with the move: clone, delete original, re-save as id=1
                from copy import deepcopy
                new = deepcopy(other)
                other.delete()
                new.id = 1
                new.save()
            else:
                create_superuser()
                # re-run the checks now that a superuser should exist
                check_database()
    return True
def getenv():
    """
    Get a copy of the current environment with PYTHONPATH extended to the
    full sys.path, for passing to child processes.

    Returns:
        env (dict): Environment global dict.
    """
    env = os.environ.copy()
    # os.pathsep is ";" on Windows and ":" elsewhere - no need to hand-roll it
    env['PYTHONPATH'] = os.pathsep.join(sys.path)
    return env
def get_pid(pidfile):
    """
    Get the PID (Process ID) by trying to access an PID file.

    Args:
        pidfile (str): The path of the pid file.

    Returns:
        pid (str or None): The process id read from the file, or None if
            the file does not exist.
    """
    pid = None
    if os.path.exists(pidfile):
        # use a context manager so the handle is always closed
        # (the original leaked the open file object)
        with open(pidfile, 'r') as f:
            pid = f.read()
    return pid
def del_pid(pidfile):
    """
    The pidfile should normally be removed after a process has
    finished, but when sending certain signals they remain, so we need
    to clean them manually.

    Args:
        pidfile (str): The path of the pid file.
    """
    # silently ignore an already-missing pidfile
    if not os.path.exists(pidfile):
        return
    os.remove(pidfile)
def kill(pidfile, signal=SIG, succmsg="", errmsg="",
         restart_file=SERVER_RESTART, restart=False):
    """
    Send a kill signal to a process based on PID. A customized
    success/error message will be printed. The restart file is written
    with "reload" or "shutdown" so the runner knows whether to bring the
    service back up after the signal.

    Args:
        pidfile (str): The path of the pidfile to get the PID from.
        signal (int, optional): Signal identifier.
        succmsg (str, optional): Message to log on success.
        errmsg (str, optional): Message to log on failure.
        restart_file (str, optional): Restart file location.
        restart (bool, optional): Are we in restart mode or not.
    """
    pid = get_pid(pidfile)
    if pid:
        if os.name == 'nt':
            # on Windows the signal won't clean this up for us
            os.remove(pidfile)
        # set restart/norestart flag
        if restart:
            django.core.management.call_command(
                'collectstatic', interactive=False, verbosity=0)
            with open(restart_file, 'w') as f:
                f.write("reload")
        else:
            with open(restart_file, 'w') as f:
                f.write("shutdown")
        try:
            os.kill(int(pid), signal)
        except OSError:
            # the process is gone but the pidfile remained
            print("Process %(pid)s cannot be stopped. "\
                "The PID file 'server/%(pidfile)s' seems stale. "\
                "Try removing it." % {'pid': pid, 'pidfile': pidfile})
            return
        print("Evennia:", succmsg)
        return
    print("Evennia:", errmsg)
def show_version_info(about=False):
    """
    Display version info.

    Args:
        about (bool): Include ABOUT info as well as version numbers.

    Returns:
        version_info (str): A complete version info string.
    """
    # imports are local so merely loading this module stays cheap
    import os
    import sys
    import twisted
    import django
    return VERSION_INFO.format(
        version=EVENNIA_VERSION, about=ABOUT_INFO if about else "",
        os=os.name, python=sys.version.split()[0],
        twisted=twisted.version.short(),
        django=django.get_version())
def error_check_python_modules():
    """
    Import settings modules in settings. This will raise exceptions on
    pure python-syntax issues which are hard to catch gracefully with
    exceptions in the engine (since they are formatting errors in the
    python source files themselves). Best they fail already here
    before we get any further.

    Raises:
        DeprecationWarning: For trying to access various modules
            (usually in `settings.py`) which are no longer supported.
    """
    from django.conf import settings
    def imp(path, split=True):
        # import dotted `path`; with split=True the last component is
        # imported from its parent module via __import__'s fromlist
        mod, fromlist = path, "None"
        if split:
            mod, fromlist = path.rsplit('.', 1)
        __import__(mod, fromlist=[fromlist])
    # core modules
    imp(settings.COMMAND_PARSER)
    imp(settings.SEARCH_AT_RESULT)
    imp(settings.CONNECTION_SCREEN_MODULE)
    #imp(settings.AT_INITIAL_SETUP_HOOK_MODULE, split=False)
    for path in settings.LOCK_FUNC_MODULES:
        imp(path, split=False)
    # cmdsets - raise on settings keys that were renamed in newer versions
    deprstring = ("settings.%s should be renamed to %s. If defaults are used, "
                  "their path/classname must be updated "
                  "(see evennia/settings_default.py).")
    if hasattr(settings, "CMDSET_DEFAULT"):
        raise DeprecationWarning(deprstring % (
            "CMDSET_DEFAULT", "CMDSET_CHARACTER"))
    if hasattr(settings, "CMDSET_OOC"):
        raise DeprecationWarning(deprstring % ("CMDSET_OOC", "CMDSET_PLAYER"))
    if settings.WEBSERVER_ENABLED and not isinstance(settings.WEBSERVER_PORTS[0], tuple):
        raise DeprecationWarning(
            "settings.WEBSERVER_PORTS must be on the form "
            "[(proxyport, serverport), ...]")
    if hasattr(settings, "BASE_COMM_TYPECLASS"):
        raise DeprecationWarning(deprstring % (
            "BASE_COMM_TYPECLASS", "BASE_CHANNEL_TYPECLASS"))
    if hasattr(settings, "COMM_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % (
            "COMM_TYPECLASS_PATHS", "CHANNEL_TYPECLASS_PATHS"))
    if hasattr(settings, "CHARACTER_DEFAULT_HOME"):
        raise DeprecationWarning(
            "settings.CHARACTER_DEFAULT_HOME should be renamed to "
            "DEFAULT_HOME. See also settings.START_LOCATION "
            "(see evennia/settings_default.py).")
    deprstring = ("settings.%s is now merged into settings.TYPECLASS_PATHS. "
                  "Update your settings file.")
    if hasattr(settings, "OBJECT_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "OBJECT_TYPECLASS_PATHS")
    if hasattr(settings, "SCRIPT_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "SCRIPT_TYPECLASS_PATHS")
    if hasattr(settings, "PLAYER_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "PLAYER_TYPECLASS_PATHS")
    if hasattr(settings, "CHANNEL_TYPECLASS_PATHS"):
        raise DeprecationWarning(deprstring % "CHANNEL_TYPECLASS_PATHS")
    # cmdset load checks are warnings only, not fatal
    from evennia.commands import cmdsethandler
    if not cmdsethandler.import_cmdset(settings.CMDSET_UNLOGGEDIN, None):
        print("Warning: CMDSET_UNLOGGED failed to load!")
    if not cmdsethandler.import_cmdset(settings.CMDSET_CHARACTER, None):
        print("Warning: CMDSET_CHARACTER failed to load")
    if not cmdsethandler.import_cmdset(settings.CMDSET_PLAYER, None):
        print("Warning: CMDSET_PLAYER failed to load")
    # typeclasses
    imp(settings.BASE_PLAYER_TYPECLASS)
    imp(settings.BASE_OBJECT_TYPECLASS)
    imp(settings.BASE_CHARACTER_TYPECLASS)
    imp(settings.BASE_ROOM_TYPECLASS)
    imp(settings.BASE_EXIT_TYPECLASS)
    imp(settings.BASE_SCRIPT_TYPECLASS)
def init_game_directory(path, check_db=True):
    """
    Try to analyze the given path to find settings.py - this defines
    the game directory and also sets PYTHONPATH as well as the django
    path.

    Args:
        path (str): Path to new game directory, including its name.
        check_db (bool, optional): Check if the database exists.
    """
    # set the GAMEDIR path
    set_gamedir(path)
    # Add gamedir to python path
    sys.path.insert(0, GAMEDIR)
    # NOTE(review): assumes at least one command-line argument is present;
    # running bare would raise IndexError here - confirm callers.
    if sys.argv[1] == 'test':
        os.environ['DJANGO_SETTINGS_MODULE'] = 'evennia.settings_default'
    else:
        os.environ['DJANGO_SETTINGS_MODULE'] = SETTINGS_DOTPATH
    # required since django1.7
    django.setup()
    # test existence of the settings module
    try:
        from django.conf import settings
    except Exception as ex:
        if not str(ex).startswith("No module named"):
            import traceback
            print(traceback.format_exc().strip())
        print(ERROR_SETTINGS)
        sys.exit()
    # this will both check the database and initialize the evennia dir.
    if check_db:
        check_database()
    # set up the Evennia executables and log file locations
    global SERVER_PY_FILE, PORTAL_PY_FILE
    global SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE
    global SERVER_PIDFILE, PORTAL_PIDFILE
    global SERVER_RESTART, PORTAL_RESTART
    global EVENNIA_VERSION
    SERVER_PY_FILE = os.path.join(EVENNIA_LIB, "server", "server.py")
    PORTAL_PY_FILE = os.path.join(EVENNIA_LIB, "portal", "portal", "portal.py")
    SERVER_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "server.pid")
    PORTAL_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, "portal.pid")
    SERVER_RESTART = os.path.join(GAMEDIR, SERVERDIR, "server.restart")
    PORTAL_RESTART = os.path.join(GAMEDIR, SERVERDIR, "portal.restart")
    SERVER_LOGFILE = settings.SERVER_LOG_FILE
    PORTAL_LOGFILE = settings.PORTAL_LOG_FILE
    HTTP_LOGFILE = settings.HTTP_LOG_FILE
    # verify existence of log file dir (this can be missing e.g.
    # if the game dir itself was cloned since log files are in .gitignore)
    # NOTE(review): assumes every log path contains a separator; a bare
    # filename would make rsplit return a 1-element list and break below.
    logdirs = [logfile.rsplit(os.path.sep, 1)
               for logfile in (SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE)]
    if not all(os.path.isdir(pathtup[0]) for pathtup in logdirs):
        errstr = "\n ".join("%s (log file %s)" % (pathtup[0], pathtup[1]) for pathtup in logdirs
                            if not os.path.isdir(pathtup[0]))
        print(ERROR_LOGDIR_MISSING.format(logfiles=errstr))
        sys.exit()
    if os.name == 'nt':
        # We need to handle Windows twisted separately. We create a
        # batchfile in game/server, linking to the actual binary
        global TWISTED_BINARY
        # Windows requires us to use the absolute path for the bat file.
        server_path = os.path.dirname(os.path.abspath(__file__))
        TWISTED_BINARY = os.path.join(server_path, "twistd.bat")
        # add path so system can find the batfile
        sys.path.insert(1, os.path.join(GAMEDIR, SERVERDIR))
        try:
            importlib.import_module("win32api")
        except ImportError:
            print(ERROR_WINDOWS_WIN32API)
            sys.exit()
        batpath = os.path.join(EVENNIA_SERVER, TWISTED_BINARY)
        if not os.path.exists(batpath):
            # Test for executable twisted batch file. This calls the
            # twistd.py executable that is usually not found on the
            # path in Windows. It's not enough to locate
            # scripts.twistd, what we want is the executable script
            # C:\PythonXX/Scripts/twistd.py. Alas we cannot hardcode
            # this location since we don't know if user has Python in
            # a non-standard location. So we try to figure it out.
            twistd = importlib.import_module("twisted.scripts.twistd")
            twistd_dir = os.path.dirname(twistd.__file__)
            # note that we hope the twistd package won't change here, since we
            # try to get to the executable by relative path.
            twistd_path = os.path.abspath(
                os.path.join(twistd_dir, os.pardir, os.pardir, os.pardir,
                             os.pardir, 'scripts', 'twistd.py'))
            with open(batpath, 'w') as bat_file:
                # build a custom bat file for windows
                bat_file.write("@\"%s\" \"%s\" %%*" % (
                    sys.executable, twistd_path))
            print(INFO_WINDOWS_BATFILE.format(twistd_path=twistd_path))
def run_dummyrunner(number_of_dummies):
    """
    Start an instance of the dummyrunner

    Args:
        number_of_dummies (int): The number of dummy players to start.

    Notes:
        The dummy players' behavior can be customized by adding a
        `dummyrunner_settings.py` config file in the game's conf/
        directory.
    """
    # subprocess argument lists must contain strings; the original fell
    # back to the integer 1 here, which would crash the call() below.
    number_of_dummies = str(int(number_of_dummies)) if number_of_dummies else "1"
    cmdstr = [sys.executable, EVENNIA_DUMMYRUNNER, "-N", number_of_dummies]
    config_file = os.path.join(SETTINGS_PATH, "dummyrunner_settings.py")
    if os.path.exists(config_file):
        cmdstr.extend(["--config", config_file])
    try:
        call(cmdstr, env=getenv())
    except KeyboardInterrupt:
        # user aborted the run; nothing to clean up
        pass
def list_settings(keys):
    """
    Display the server settings. We only display the Evennia specific
    settings here. The result will be printed to the terminal.

    Args:
        keys (str or list): Setting key or keys to inspect.
    """
    from importlib import import_module
    from evennia.utils import evtable
    evsettings = import_module(SETTINGS_DOTPATH)
    if len(keys) == 1 and keys[0].upper() == "ALL":
        # show a list of all keys
        # laid out four key names per table row
        table = evtable.EvTable()
        confs = [key for key in sorted(evsettings.__dict__) if key.isupper()]
        for i in range(0, len(confs), 4):
            table.add_row(*confs[i:i+4])
    else:
        # a specific key
        table = evtable.EvTable(width=131)
        keys = [key.upper() for key in keys]
        confs = dict((key, var) for key, var in evsettings.__dict__.items()
                     if key in keys)
        for key, val in confs.items():
            table.add_row(key, str(val))
    print(table)
def run_menu():
    """
    This launches an interactive menu. Loops until the user quits ('q')
    or picks a numbered operation, which is dispatched to
    server_operation().

    NOTE(review): under Python 2, input() evaluates the typed text as an
    expression; raw_input is used elsewhere in this file - confirm the
    supported Python version.
    """
    while True:
        # menu loop
        print(MENU)
        inp = input(" option > ")
        # quitting and help
        if inp.lower() == 'q':
            return
        elif inp.lower() == 'h':
            print(HELP_ENTRY)
            input("press <return> to continue ...")
            continue
        elif inp.lower() in ('v', 'i', 'a'):
            print(show_version_info(about=True))
            input("press <return> to continue ...")
            continue
        # options
        try:
            inp = int(inp)
        except ValueError:
            print("Not a valid option.")
            continue
        if inp == 1:
            # start everything, log to log files
            server_operation("start", "all", False, False)
        elif inp == 2:
            # start everything, server interactive start
            server_operation("start", "all", True, False)
        elif inp == 3:
            # start everything, portal interactive start
            server_operation("start", "server", False, False)
            server_operation("start", "portal", True, False)
        elif inp == 4:
            # start both server and portal interactively
            server_operation("start", "server", True, False)
            server_operation("start", "portal", True, False)
        elif inp == 5:
            # reload the server
            server_operation("reload", "server", None, None)
        elif inp == 6:
            # reload the portal
            server_operation("reload", "portal", None, None)
        elif inp == 7:
            # stop server and portal
            server_operation("stop", "all", None, None)
        elif inp == 8:
            # stop server
            server_operation("stop", "server", None, None)
        elif inp == 9:
            # stop portal
            server_operation("stop", "portal", None, None)
        else:
            print("Not a valid option.")
            continue
        return
def server_operation(mode, service, interactive, profiler, logserver=False):
    """
    Handle argument options given on the command line.

    Args:
        mode (str): Start/stop/restart and so on.
        service (str): "server", "portal" or "all".
        interactive (bool). Use interactive mode or daemon.
        profiler (bool): Run the service under the profiler.
        logserver (bool, optional): Log Server data to logfile
            specified by settings.SERVER_LOG_FILE.
    """
    cmdstr = [sys.executable, EVENNIA_RUNNER]
    errmsg = "The %s does not seem to be running."
    if mode == 'start':
        # launch the error checker. Best to catch the errors already here.
        error_check_python_modules()
        # starting one or many services
        if service == 'server':
            if profiler:
                cmdstr.append('--pserver')
            if interactive:
                cmdstr.append('--iserver')
            if logserver:
                cmdstr.append('--logserver')
            cmdstr.append('--noportal')
        elif service == 'portal':
            if profiler:
                cmdstr.append('--pportal')
            if interactive:
                cmdstr.append('--iportal')
            cmdstr.append('--noserver')
            django.core.management.call_command(
                'collectstatic', verbosity=1, interactive=False)
        else:
            # all
            # for convenience we don't start logging of
            # portal, only of server with this command.
            if profiler:
                # this is the common case
                cmdstr.append('--pserver')
            if interactive:
                cmdstr.append('--iserver')
            if logserver:
                cmdstr.append('--logserver')
            django.core.management.call_command(
                'collectstatic', verbosity=1, interactive=False)
        cmdstr.extend([
            GAMEDIR, TWISTED_BINARY, SERVER_LOGFILE,
            PORTAL_LOGFILE, HTTP_LOGFILE])
        # start the server
        process = Popen(cmdstr, env=getenv())
        if interactive:
            try:
                process.wait()
            except KeyboardInterrupt:
                server_operation("stop", "portal", False, False)
                return
            finally:
                # NOTE(review): this finally-block also runs when wait()
                # returns normally, so the interrupt notice is printed
                # unconditionally - confirm this is intended.
                print(NOTE_KEYBOARDINTERRUPT)
    elif mode == 'reload':
        # restarting services
        if os.name == 'nt':
            print(
                "Restarting from command line is not supported under Windows. "
                "Log into the game to restart.")
            return
        if service == 'server':
            kill(SERVER_PIDFILE, SIG, "Server reloaded.",
                 errmsg % 'Server', SERVER_RESTART, restart=True)
        elif service == 'portal':
            print(
                "Note: Portal usually doesnt't need to be reloaded unless you "
                "are debugging in interactive mode. If Portal was running in "
                "default Daemon mode, it cannot be restarted. In that case "
                "you have to restart it manually with 'evennia.py "
                "start portal'")
            kill(PORTAL_PIDFILE, SIG,
                 "Portal reloaded (or stopped, if it was in daemon mode).",
                 errmsg % 'Portal', PORTAL_RESTART, restart=True)
        else:
            # all
            # default mode, only restart server
            kill(SERVER_PIDFILE, SIG,
                 "Server reload.",
                 errmsg % 'Server', SERVER_RESTART, restart=True)
    elif mode == 'stop':
        # stop processes, avoiding reload
        if service == 'server':
            kill(SERVER_PIDFILE, SIG,
                 "Server stopped.", errmsg % 'Server', SERVER_RESTART)
        elif service == 'portal':
            kill(PORTAL_PIDFILE, SIG,
                 "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
        else:
            # stop both, portal first
            kill(PORTAL_PIDFILE, SIG,
                 "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART)
            kill(SERVER_PIDFILE, SIG,
                 "Server stopped.", errmsg % 'Server', SERVER_RESTART)
def main():
    """
    Run the evennia launcher main program: parse the command line, then
    dispatch to init/version/settings handling, the interactive menu,
    server operations or the django management passthrough.
    """
    # set up argument parser
    parser = ArgumentParser(description=CMDLINE_HELP)
    parser.add_argument(
        '-v', '--version', action='store_true',
        dest='show_version', default=False,
        help="Show version info.")
    parser.add_argument(
        '-i', '--interactive', action='store_true',
        dest='interactive', default=False,
        help="Start given processes in interactive mode.")
    parser.add_argument(
        '-l', '--log', action='store_true',
        dest="logserver", default=False,
        help="Log Server data to log file.")
    parser.add_argument(
        '--init', action='store', dest="init", metavar="name",
        help="Creates a new game directory 'name' at the current location.")
    parser.add_argument(
        '--list', nargs='+', action='store', dest='listsetting', metavar="key",
        help=("List values for server settings. Use 'all' to list all "
              "available keys."))
    parser.add_argument(
        '--profiler', action='store_true', dest='profiler', default=False,
        help="Start given server component under the Python profiler.")
    parser.add_argument(
        '--dummyrunner', nargs=1, action='store', dest='dummyrunner',
        metavar="N",
        help="Tests a running server by connecting N dummy players to it.")
    parser.add_argument(
        '--settings', nargs=1, action='store', dest='altsettings',
        default=None, metavar="filename.py",
        help=("Start evennia with alternative settings file in "
              "gamedir/server/conf/."))
    parser.add_argument(
        '--initsettings', action='store_true', dest="initsettings",
        default=False,
        help="Creates a new, empty settings file as gamedir/server/conf/settings.py.")
    parser.add_argument(
        "option", nargs='?', default="noop",
        help="Operational mode: 'start', 'stop', 'restart' or 'menu'.")
    parser.add_argument(
        "service", metavar="component", nargs='?', default="all",
        help=("Server component to operate on: "
              "'server', 'portal' or 'all' (default)."))
    parser.epilog = (
        "Example django-admin commands: "
        "'migrate', 'flush', 'shell' and 'dbshell'. "
        "See the django documentation for more django-admin commands.")
    args, unknown_args = parser.parse_known_args()
    # handle arguments
    option, service = args.option, args.service
    # make sure we have everything
    check_main_evennia_dependencies()
    # NOTE(review): args is an argparse Namespace and always truthy, so
    # this help branch looks unreachable - confirm intent.
    if not args:
        # show help pane
        print(CMDLINE_HELP)
        sys.exit()
    elif args.init:
        # initialization of game directory
        create_game_directory(args.init)
        print(CREATED_NEW_GAMEDIR.format(
            gamedir=args.init,
            settings_path=os.path.join(args.init, SETTINGS_PATH)))
        sys.exit()
    if args.show_version:
        # show the version info
        print(show_version_info(option == "help"))
        sys.exit()
    if args.altsettings:
        # use alternative settings file
        sfile = args.altsettings[0]
        global SETTINGSFILE, SETTINGS_DOTPATH
        SETTINGSFILE = sfile
        # NOTE(review): rstrip(".py") strips any trailing '.', 'p' or 'y'
        # characters, not just the suffix (e.g. 'mypy.py' -> 'm') - confirm.
        SETTINGS_DOTPATH = "server.conf.%s" % sfile.rstrip(".py")
        print("Using settings file '%s' (%s)." % (
            SETTINGSFILE, SETTINGS_DOTPATH))
    if args.initsettings:
        # create new settings file
        global GAMEDIR
        GAMEDIR = os.getcwd()
        try:
            create_settings_file(init=False)
            print(RECREATED_SETTINGS)
        except IOError:
            print(ERROR_INITSETTINGS)
        sys.exit()
    if args.dummyrunner:
        # launch the dummy runner
        init_game_directory(CURRENT_DIR, check_db=True)
        run_dummyrunner(args.dummyrunner[0])
    elif args.listsetting:
        # display all current server settings
        init_game_directory(CURRENT_DIR, check_db=False)
        list_settings(args.listsetting)
    elif option == 'menu':
        # launch menu for operation
        init_game_directory(CURRENT_DIR, check_db=True)
        run_menu()
    elif option in ('start', 'reload', 'stop'):
        # operate the server directly
        init_game_directory(CURRENT_DIR, check_db=True)
        server_operation(option, service, args.interactive, args.profiler, args.logserver)
    elif option != "noop":
        # pass-through to django manager
        check_db = False
        if option in ('runserver', 'testserver'):
            print(WARNING_RUNSERVER)
        if option == "shell":
            # to use the shell we need to initialize it first,
            # and this only works if the database is set up
            check_db = True
        init_game_directory(CURRENT_DIR, check_db=check_db)
        args = [option]
        kwargs = {}
        if service not in ("all", "server", "portal"):
            args.append(service)
        if unknown_args:
            # translate leftover "--key[=value]" flags into call_command kwargs
            for arg in unknown_args:
                if arg.startswith("--"):
                    print("arg:", arg)
                    if "=" in arg:
                        arg, value = [p.strip() for p in arg.split("=", 1)]
                    else:
                        value = True
                    kwargs[arg.lstrip("--")] = [value]
                else:
                    args.append(arg)
        try:
            django.core.management.call_command(*args, **kwargs)
        except django.core.management.base.CommandError as exc:
            args = ", ".join(args)
            kwargs = ", ".join(["--%s" % kw for kw in kwargs])
            print(ERROR_INPUT.format(traceback=exc, args=args, kwargs=kwargs))
    else:
        # no input; print evennia info
        print(ABOUT_INFO)
if __name__ == '__main__':
    # start Evennia from the command line (script entry point)
    main()
| |
from datetime import datetime
import json
import logging
import os
import os.path
from dask.utils import format_bytes
from tornado import escape
from tornado.websocket import WebSocketHandler
from tlz import first, merge
from ..utils import RequestHandler, redirect
from ...diagnostics.websocket import WebsocketPlugin
from ...metrics import time
from ...utils import log_errors, format_time
# Formatting helpers exposed by name to the templates rendered below.
ns = {
    func.__name__: func
    for func in [format_bytes, format_time, datetime.fromtimestamp, time]
}
# Relative path from the rendered info pages back to the static file root.
rel_path_statics = {"rel_path_statics": "../../.."}
logger = logging.getLogger(__name__)
class Workers(RequestHandler):
    """Render workers.html, the overview page of all scheduler workers."""

    def get(self):
        with log_errors():
            self.render(
                "workers.html",
                title="Workers",
                scheduler=self.server,
                **merge(self.server.__dict__, ns, self.extra, rel_path_statics),
            )
class Worker(RequestHandler):
    """Render worker.html, the detail page for one worker.

    Responds 404 when the requested worker is unknown to the scheduler.
    """

    def get(self, worker):
        worker = escape.url_unescape(worker)
        if worker not in self.server.workers:
            self.send_error(404)
            return
        with log_errors():
            self.render(
                "worker.html",
                title="Worker: " + worker,
                scheduler=self.server,
                Worker=worker,
                **merge(self.server.__dict__, ns, self.extra, rel_path_statics),
            )
class Task(RequestHandler):
    """Render task.html, the detail page for one task.

    Responds 404 when the requested task is unknown to the scheduler.
    """

    def get(self, task):
        task = escape.url_unescape(task)
        if task not in self.server.tasks:
            self.send_error(404)
            return
        with log_errors():
            self.render(
                "task.html",
                title="Task: " + task,
                Task=task,
                scheduler=self.server,
                **merge(self.server.__dict__, ns, self.extra, rel_path_statics),
            )
class Logs(RequestHandler):
    """Render logs.html with the scheduler's own log records."""

    def get(self):
        with log_errors():
            logs = self.server.get_logs()
            self.render(
                "logs.html",
                title="Logs",
                logs=logs,
                **merge(self.extra, rel_path_statics),
            )
class WorkerLogs(RequestHandler):
    """Render logs.html with the log records fetched from one worker."""

    async def get(self, worker):
        with log_errors():
            worker = escape.url_unescape(worker)
            # get_worker_logs returns a mapping {worker: logs}
            logs = await self.server.get_worker_logs(workers=[worker])
            logs = logs[worker]
            self.render(
                "logs.html",
                title="Logs: " + worker,
                logs=logs,
                **merge(self.extra, rel_path_statics),
            )
class WorkerCallStacks(RequestHandler):
    """Render call-stack.html for all keys currently processing on a worker."""

    async def get(self, worker):
        with log_errors():
            worker = escape.url_unescape(worker)
            keys = self.server.processing[worker]
            call_stack = await self.server.get_call_stack(keys=keys)
            self.render(
                "call-stack.html",
                title="Call Stacks: " + worker,
                call_stack=call_stack,
                **merge(self.extra, rel_path_statics),
            )
class TaskCallStack(RequestHandler):
    """Render call-stack.html for a single task key, if it is running."""

    async def get(self, key):
        with log_errors():
            key = escape.url_unescape(key)
            call_stack = await self.server.get_call_stack(keys=[key])
            if not call_stack:
                # task is not executing right now; nothing to show
                self.write(
                    "<p>Task not actively running. "
                    "It may be finished or not yet started</p>"
                )
            else:
                self.render(
                    "call-stack.html",
                    title="Call Stack: " + key,
                    call_stack=call_stack,
                    **merge(self.extra, rel_path_statics),
                )
class IndividualPlots(RequestHandler):
    """Serve individual-plots.json: a name -> URI map of available plots.

    Combines the bokeh-served "individual-*" routes with any
    "individual-*.html" files shipped in the static directory. Writes an
    empty mapping when bokeh is missing or no bokeh app is registered.
    """

    def get(self):
        try:
            from bokeh.server.tornado import BokehTornado
            # find the embedded bokeh application among the HTTP apps
            bokeh_application = first(
                app
                for app in self.server.http_application.applications
                if isinstance(app, BokehTornado)
            )
            # bokeh-backed plots: route path -> human-readable title
            individual_bokeh = {
                uri.strip("/").replace("-", " ").title(): uri
                for uri in bokeh_application.app_paths
                if uri.lstrip("/").startswith("individual-")
                and not uri.endswith(".json")
            }
            # static HTML plots shipped alongside this module
            individual_static = {
                uri.strip("/")
                .replace(".html", "")
                .replace("-", " ")
                .title(): "/statics/"
                + uri
                for uri in os.listdir(
                    os.path.join(os.path.dirname(__file__), "..", "static")
                )
                if uri.lstrip("/").startswith("individual-") and uri.endswith(".html")
            }
            result = {**individual_bokeh, **individual_static}
            self.write(result)
        except (ImportError, StopIteration):
            # bokeh absent, or no BokehTornado app found by first()
            self.write({})
class EventstreamHandler(WebSocketHandler):
    """Websocket endpoint forwarding scheduler events to a client."""

    def initialize(self, dask_server=None, extra=None):
        # register a scheduler plugin that relays events to this socket
        self.server = dask_server
        self.extra = extra or {}
        self.plugin = WebsocketPlugin(self, self.server)
        self.server.add_plugin(self.plugin)

    def send(self, name, data):
        """Send `data` over the socket, tagged with the event `name`."""
        data["name"] = name
        for k in list(data):
            # Drop bytes objects for now
            if isinstance(data[k], bytes):
                del data[k]
        self.write_message(data)

    def open(self):
        # replay the currently-known workers to the new client
        for worker in self.server.workers:
            self.plugin.add_worker(self.server, worker)

    def on_message(self, message):
        # answer client keepalive pings with a timestamped pong
        message = json.loads(message)
        if message["name"] == "ping":
            self.send("pong", {"timestamp": str(datetime.now())})

    def on_close(self):
        # stop receiving scheduler events once the socket is gone
        self.server.remove_plugin(self.plugin)
# Tornado routing table: (url pattern, handler class, handler kwargs).
routes = [
    (r"info", redirect("info/main/workers.html"), {}),
    (r"info/main/workers.html", Workers, {}),
    (r"info/worker/(.*).html", Worker, {}),
    (r"info/task/(.*).html", Task, {}),
    (r"info/main/logs.html", Logs, {}),
    (r"info/call-stacks/(.*).html", WorkerCallStacks, {}),
    (r"info/call-stack/(.*).html", TaskCallStack, {}),
    (r"info/logs/(.*).html", WorkerLogs, {}),
    (r"individual-plots.json", IndividualPlots, {}),
    (r"eventstream", EventstreamHandler, {}),
]
| |
#!/usr/bin/env python
import subprocess
from collections import namedtuple
import os
import sys
import chess
# The seven counters reported by the engine's perft command, in output order.
PerftResult = namedtuple("PerftResult", [
    "nodes",
    "captures",
    "enpassants",
    "castles",
    "promotions",
    "checks",
    "mates"])
# Standard chess starting position.
start_position_fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
# Engine binary name; `target` is filled in by find_executable().
engine = "lesschess"
target = ""
def iwrite(text):
    """Write text to stdout with no newline, flushing immediately."""
    stream = sys.stdout
    stream.write(text)
    stream.flush()
def find_executable():
    """Locate the engine binary and store its path in the global `target`.

    Raises:
        Exception: If the engine is found in none of the known locations.
    """
    global target
    # try the working directory first, then the build output directory
    for loc in (".", "./build"):
        target = loc + "/" + engine
        if os.path.isfile(target):
            return
    raise Exception("Unable to find engine!")
def run_perft_test(fen, depth):
    """Run the engine's perft command and parse its seven counters.

    Args:
        fen: Position to search, as a FEN string.
        depth: Perft search depth.

    Returns:
        PerftResult: The parsed counters from the engine's stdout (expected
            to be seven whitespace-separated integers).
    """
    cmd = '{} perft "{fen}" {depth}'.format(target, fen=fen, depth=depth)
    output = subprocess.check_output(cmd, shell=True)
    nodes, captures, eps, castles, promos, checks, mates = output.split()
    return PerftResult(nodes=int(nodes), captures=int(captures),
                       enpassants=int(eps), castles=int(castles),
                       promotions=int(promos), checks=int(checks),
                       mates=int(mates))
def run_perft_test_suite(name, fen, expected, max_depth=None,
nodes_only=False):
iwrite("Perft Test: {}".format(name))
max_depth = max_depth or 1000
for depth, e in expected:
if depth > max_depth:
break
run_perft_test(fen, depth)
res = run_perft_test(fen, depth)
if (nodes_only and res.nodes != e) or (not nodes_only and res != e):
print ""
print "Failed perft test case!!"
print "FEN: ", fen
print "Depth: ", depth
print "Expected: ", str(e)
print "Actual : ", str(res)
return
iwrite('.')
print "Passed."
# NOTE(review): this flag appears unused - the debug guard below is
# hard-coded to `if False:`; confirm whether it should read `if print_cmd:`.
print_cmd = True
def run_tactics_test(name, fen, expected_move=None, expected_score=None):
    """Run the engine's tactics command on a position and check its answer.

    Args:
        name: Label printed for this test.
        fen: Position to analyze, as a FEN string.
        expected_move: Expected best move in UCI notation (optional).
        expected_score: Expected evaluation score (optional).

    Raises:
        Exception: If neither expected_move nor expected_score is given.
    """
    import datetime
    print "Tactics Test: {}".format(name)
    board = chess.Board(fen)
    cmd = '{exe} tactics "{fen}"'.format(exe=target, fen=fen)
    # debug output, currently disabled (see print_cmd note above)
    if False:
        print "{!s}".format(board)
        print "Running command '{}'".format(cmd)
    begin = datetime.datetime.now()
    output = subprocess.check_output(cmd, shell=True)
    end = datetime.datetime.now()
    print "Test took {}.".format(end - begin)
    # engine prints: <move> <score> <depth>
    actual_move, actual_score, actual_depth = output.split()
    actual_score = int(actual_score)
    actual_depth = int(actual_depth)
    if actual_move == "mated":
        print "Failed!"
        print "Engine thinks it is mated!"
    elif actual_move == "none":
        print "Failed!"
        print "Engine thinks it has no legal moves!"
    elif expected_move is not None or expected_score is not None:
        actual_move_san = board.san(chess.Move.from_uci(actual_move))
        # an omitted expectation defaults to whatever the engine produced
        if expected_move is None:
            expected_move = actual_move
        expected_move_san = board.san(chess.Move.from_uci(expected_move))
        if expected_score is None:
            expected_score = actual_score
        if actual_move != expected_move or expected_score != actual_score:
            print "Failed!"
            print "Expected Move : {} ({})".format(
                expected_move_san, expected_move)
            print "Actual Move   : {} ({})".format(
                actual_move_san, actual_move)
            print "Expected Score: {}".format(expected_score)
            print "Actual Score  : {}".format(actual_score)
            print "Actual Depth  : {}".format(actual_depth)
        else:
            print "Move : {} ({})".format(actual_move_san, actual_move)
            print "Score: {}".format(actual_score)
            print "Depth: {}".format(actual_depth)
            print "Passed."
    else:
        raise Exception("Must provide either expected move or score!")
    print ""
def starting_position_perft_test(max_depth=None):
    """Perft suite for the standard starting position."""
    cases = (
        (0, PerftResult(1, 0, 0, 0, 0, 0, 0)),
        (1, PerftResult(20, 0, 0, 0, 0, 0, 0)),
        (2, PerftResult(400, 0, 0, 0, 0, 0, 0)),
        (3, PerftResult(8902, 34, 0, 0, 0, 12, 0)),
        (4, PerftResult(197281, 1576, 0, 0, 0, 469, 8)),
        (5, PerftResult(4865609, 82719, 258, 0, 0, 27351, 347)),
        (6, PerftResult(119060324, 2812008, 5248, 0, 0, 809099, 10828)),
    )
    run_perft_test_suite("start position", start_position_fen, cases,
                         max_depth)
def kiwipete_perft_test(max_depth=None):
    """Perft suite for the well-known 'Kiwipete' test position."""
    kiwipete_fen = "r3k2r/p1ppqpb1/bn2pnp1/3PN3/1p2P3/2N2Q1p/PPPBBPPP/R3K2R w KQkq -"
    cases = (
        (1, PerftResult(48, 8, 0, 2, 0, 0, 0)),
        (2, PerftResult(2039, 351, 1, 91, 0, 3, 0)),
        (3, PerftResult(97862, 17102, 45, 3162, 0, 993, 1)),
        (4, PerftResult(4085603, 757163, 1929, 128013, 15172, 25523, 43)),
        (5, PerftResult(193690690, 35043416, 73365, 4993637, 8392, 3309887, 30171)),
    )
    run_perft_test_suite("kiwi pete", kiwipete_fen, cases, max_depth)
def position3_perft_test(max_depth=None):
    """Perft suite for 'position 3' (rook/pawn endgame with en passant)."""
    position3_fen = "8/2p5/3p4/KP5r/1R3p1k/8/4P1P1/8 w - -"
    cases = (
        (1, PerftResult(14, 1, 0, 0, 0, 2, 0)),
        (2, PerftResult(191, 14, 0, 0, 0, 10, 0)),
        (3, PerftResult(2812, 209, 2, 0, 0, 267, 0)),
        (4, PerftResult(43238, 3348, 123, 0, 0, 1680, 17)),
        (5, PerftResult(674624, 52051, 1165, 0, 0, 52950, 0)),
        (6, PerftResult(11030083, 940350, 33325, 0, 7552, 452473, 2733)),
        (7, PerftResult(178633661, 14519036, 294874, 0, 140024, 12797406, 87)),
    )
    run_perft_test_suite("position3", position3_fen, cases, max_depth)
def position4_white_perft_test(max_depth=None):
    """Perft suite for 'position 4', white to move."""
    position4w_fen = "r3k2r/Pppp1ppp/1b3nbN/nP6/BBP1P3/q4N2/Pp1P2PP/R2Q1RK1 w kq - 0 1"
    cases = (
        (1, PerftResult(6, 0, 0, 0, 0, 0, 0)),
        (2, PerftResult(264, 87, 0, 6, 48, 10, 0)),
        (3, PerftResult(9467, 1021, 4, 0, 120, 38, 22)),
        (4, PerftResult(422333, 131393, 0, 7795, 60032, 15492, 5)),
        (5, PerftResult(15833292, 2046173, 6512, 0, 329464, 200568, 50562)),
        (6, PerftResult(706045033, 210369132, 212, 10882006, 81102984, 26973664, 81076)),
    )
    run_perft_test_suite("position4 white", position4w_fen, cases, max_depth)
def position4_black_perft_test(max_depth=None):
    """Perft suite for the mirrored 'position 4', black to move.

    The expected counts are identical to the white version by symmetry.
    """
    position4b_fen = "r2q1rk1/pP1p2pp/Q4n2/bbp1p3/Np6/1B3NBn/pPPP1PPP/R3K2R b KQ - 0 1"
    cases = (
        (1, PerftResult(6, 0, 0, 0, 0, 0, 0)),
        (2, PerftResult(264, 87, 0, 6, 48, 10, 0)),
        (3, PerftResult(9467, 1021, 4, 0, 120, 38, 22)),
        (4, PerftResult(422333, 131393, 0, 7795, 60032, 15492, 5)),
        (5, PerftResult(15833292, 2046173, 6512, 0, 329464, 200568, 50562)),
        (6, PerftResult(706045033, 210369132, 212, 10882006, 81102984, 26973664, 81076)),
    )
    run_perft_test_suite("position4 black", position4b_fen, cases, max_depth)
def talkchess_perft_test(max_depth=None):
    """Perft suite for the 'talkchess' position (node counts only)."""
    talkchess_fen = "rnbq1k1r/pp1Pbppp/2p5/8/2B5/8/PPP1NnPP/RNBQK2R w KQ - 1 8"
    # Only total node counts are known for this position.
    node_counts = (
        (1, 44),
        (2, 1486),
        (3, 62379),
        (4, 2103487),
        (5, 89941194),
    )
    run_perft_test_suite("talkchess", talkchess_fen, node_counts, max_depth,
                         nodes_only=True)
def position6_perft_test(max_depth=None):
    """Perft suite for 'position 6' (node counts only)."""
    position6_fen = "r4rk1/1pp1qppp/p1np1n2/2b1p1B1/2B1P1b1/P1NP1N2/1PP1QPPP/R4RK1 w - - 0 10"
    # Only total node counts are known; the deeper entries are far beyond
    # practical test runtimes and rely on max_depth to cut off.
    node_counts = (
        (0, 1),
        (1, 46),
        (2, 2079),
        (3, 89890),
        (4, 3894594),
        (5, 164075551),
        (6, 6923051137),
        (7, 287188994746),
        (8, 11923589843526),
        (9, 490154852788714),
    )
    run_perft_test_suite("position6", position6_fen, node_counts, max_depth,
                         nodes_only=True)
def tactics_froms_gambit_mate():
    """Engine must find the mating move d6g3 in a From's Gambit position."""
    run_tactics_test(
        "From's Gambit Mate in 1",
        "rnbqk1nr/ppp2p1p/3b4/6p1/8/5N1P/PPPPP1P1/RNBQKB1R b KQkq - 0 1",
        "d6g3")
def tactics_knight_sack_mate():
    """Engine must find the knight sacrifice e5f7."""
    run_tactics_test(
        "Knight Sack Mate",
        "rn3r1k/6pp/1pN2p2/p3N3/1P5q/PQ2PPp1/5n2/3R2K1 w - - 0 1",
        "e5f7")
def tactics_queen_sack_mate():
    """Engine must find the queen sacrifice b3g8."""
    run_tactics_test(
        "Queen Sack Mate",
        "rn3r1k/6pp/1pN2p2/p3N3/1P5q/PQ2PPp1/5n2/2R3K1 w - - 0 1",
        "b3g8")
def tactics_double_check_mate():
    """Engine must find the double-check move f7h6."""
    run_tactics_test(
        "Double Check For Mate",
        "rn2r1k1/5Npp/1pN2p2/p7/1P5q/PQ2PPp1/5n2/3R2K1 w - - 0 1",
        "f7h6")
def tactics_queen_sack_smothered_mate():
    """Engine must find b3g8 leading to a smothered mate."""
    run_tactics_test(
        "Queen Sack Smothered Mate",
        "rn2r2k/6pp/1pN2p1N/p7/1P5q/PQ2PPp1/5n2/3R2K1 w - - 0 1",
        "b3g8")
def tactics_caro_kann_mate():
    """Engine must find d3d8 in the classic Caro-Kann mating trap."""
    run_tactics_test(
        "Caro Kann Mate",
        "rnb1kb1r/pp3ppp/2p5/4q3/4n3/3Q4/PPPB1PPP/2KR1BNR w kq - 0 1",
        "d3d8")
def tactics_win_queen():
    """Engine must find e4f2, winning material in the opening."""
    run_tactics_test(
        "Early Game Win Queen",
        "rnbqk2r/ppp2ppp/3b4/8/2P1n3/5NP1/PP2PP1P/RNBQKB1R b KQkq - 0 1",
        "e4f2")
def tactics_50_move_rule_draw():
    """Engine must score this position as a draw (halfmove clock at 98)."""
    run_tactics_test(
        "50-move Rule Draw",
        "8/8/8/8/3k4/3P4/3K4/8 w - - 98 1",
        expected_score=0.)
# EPD tactics records.  Each record is '<position> bm <best move in SAN>;
# id "<name>";' -- `bm` (best move) and `id` are standard EPD opcodes.
# The BK.* ids suggest these are the Bratko-Kopec test positions (not
# verified here).
epd_records = (
    '1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - bm Qd1+; id "BK.01";',
    '3r1k2/4npp1/1ppr3p/p6P/P2PPPP1/1NR5/5K2/2R5 w - - bm d5; id "BK.02";',
    '2q1rr1k/3bbnnp/p2p1pp1/2pPp3/PpP1P1P1/1P2BNNP/2BQ1PRK/7R b - - bm f5; id "BK.03";',
    'rnbqkb1r/p3pppp/1p6/2ppP3/3N4/2P5/PPP1QPPP/R1B1KB1R w KQkq - bm e6; id "BK.04";',
    'r1b2rk1/2q1b1pp/p2ppn2/1p6/3QP3/1BN1B3/PPP3PP/R4RK1 w - - bm Nd5 a4; id "BK.05";',
    '2r3k1/pppR1pp1/4p3/4P1P1/5P2/1P4K1/P1P5/8 w - - bm g6; id "BK.06";',
    '1nk1r1r1/pp2n1pp/4p3/q2pPp1N/b1pP1P2/B1P2R2/2P1B1PP/R2Q2K1 w - - bm Nf6; id "BK.07";',
    '4b3/p3kp2/6p1/3pP2p/2pP1P2/4K1P1/P3N2P/8 w - - bm f5; id "BK.08";',
    '2kr1bnr/pbpq4/2n1pp2/3p3p/3P1P1B/2N2N1Q/PPP3PP/2KR1B1R w - - bm f5; id "BK.09";',
    '3rr1k1/pp3pp1/1qn2np1/8/3p4/PP1R1P2/2P1NQPP/R1B3K1 b - - bm Ne5; id "BK.10";',
    '2r1nrk1/p2q1ppp/bp1p4/n1pPp3/P1P1P3/2PBB1N1/4QPPP/R4RK1 w - - bm f4; id "BK.11";',
    'r3r1k1/ppqb1ppp/8/4p1NQ/8/2P5/PP3PPP/R3R1K1 b - - bm Bf5; id "BK.12";',
    'r2q1rk1/4bppp/p2p4/2pP4/3pP3/3Q4/PP1B1PPP/R3R1K1 w - - bm b4; id "BK.13";',
    'rnb2r1k/pp2p2p/2pp2p1/q2P1p2/8/1Pb2NP1/PB2PPBP/R2Q1RK1 w - - bm Qd2 Qe1; id "BK.14";', # TODO(plesslie): fix this test to accept either move
    '2r3k1/1p2q1pp/2b1pr2/p1pp4/6Q1/1P1PP1R1/P1PN2PP/5RK1 w - - bm Qxg7+; id "BK.15";',
    'r1bqkb1r/4npp1/p1p4p/1p1pP1B1/8/1B6/PPPN1PPP/R2Q1RK1 w kq - bm Ne4; id "BK.16";',
    'r2q1rk1/1ppnbppp/p2p1nb1/3Pp3/2P1P1P1/2N2N1P/PPB1QP2/R1B2RK1 b - - bm h5; id "BK.17";',
    'r1bq1rk1/pp2ppbp/2np2p1/2n5/P3PP2/N1P2N2/1PB3PP/R1B1QRK1 b - - bm Nb3; id "BK.18";',
    '3rr3/2pq2pk/p2p1pnp/8/2QBPP2/1P6/P5PP/4RRK1 b - - bm Rxe4; id "BK.19";',
    'r4k2/pb2bp1r/1p1qp2p/3pNp2/3P1P2/2N3P1/PPP1Q2P/2KRR3 w - - bm g4; id "BK.20";',
    '3rn2k/ppb2rpp/2ppqp2/5N2/2P1P3/1P5Q/PB3PPP/3RR1K1 w - - bm Nh6; id "BK.21";',
    '2r2rk1/1bqnbpp1/1p1ppn1p/pP6/N1P1P3/P2B1N1P/1B2QPP1/R2R2K1 b - - bm Bxe4; id "BK.22";',
    'r1bqk2r/pp2bppp/2p5/3pP3/P2Q1P2/2N1B3/1PP3PP/R4RK1 b kq - bm f6; id "BK.23";',
    'r2qnrnk/p2b2b1/1p1p2pp/2pPpp2/1PP1P3/PRNBB3/3QNPPP/5RK1 w - - bm f4; id "BK.24";',
)
def run_epd_test(name, fen, expected_move):
    """Run the engine's 'tactics' mode on one EPD position and compare the
    SAN of the engine's move against `expected_move` (already in SAN).

    NOTE(review): this largely duplicates run_tactics_test() above --
    consider merging the two.
    """
    import datetime
    print "Tactics Test: {}".format(name)
    board = chess.Board(fen)
    print "{!s}".format(board)
    cmd = '{exe} tactics "{fen}"'.format(exe=target, fen=fen)
    print "Running command '{}'".format(cmd)
    begin = datetime.datetime.now()
    output = subprocess.check_output(cmd, shell=True)
    end = datetime.datetime.now()
    print "Test took {}.".format(end - begin)
    # Engine output format: "<uci-move> <score> <depth>".
    actual_move, actual_score, actual_depth = output.split()
    actual_score = int(actual_score)
    actual_depth = int(actual_depth)
    if actual_move == "mated":
        print "Failed!"
        print "Engine thinks it is mated!"
    elif actual_move == "none":
        print "Failed!"
        print "Engine thinks it has no legal moves!"
    else:
        # Convert the engine's UCI move to SAN so it can be compared with
        # the EPD "bm" (best move) field, which is SAN.
        actual_move_san = board.san(chess.Move.from_uci(actual_move))
        expected_move_san = expected_move
        if actual_move_san != expected_move_san:
            print "Failed!"
            print "Expected Move : {} ({})".format(
                expected_move_san, expected_move)
            print "Actual Move : {} ({})".format(
                actual_move_san, actual_move)
            print "Actual Score : {}".format(actual_score)
            print "Actual Depth : {}".format(actual_depth)
        else:
            # print "Move : {} ({})".format(actual_move_san, actual_move)
            # print "Score: {}".format(actual_score)
            # print "Depth: {}".format(actual_depth)
            print "Passed."
    print ""
def tactics_epd_records():
    """Run run_epd_test() over the records in `epd_records`.

    Each record looks like '<position> bm <move>; id "<name>";'.
    """
    for record in epd_records:
        # Split out the position, best move, and id fields.
        fen, rest = record.split('bm', 1)
        expected_move, rest = rest.split(';', 1)
        expected_move = expected_move.strip()
        # rest is ' id "<name>";' -- slice out the quoted name.
        name = rest[5:-2]
        # Append move counters -- presumably to turn the counter-less EPD
        # position into a full FEN the engine accepts; confirm format.
        fen += "0 0"
        # print "Name=",name
        # print "FEN=",fen
        run_epd_test(name, fen, expected_move)
        break # for now, just the first test
if __name__ == '__main__':
    # Registry of perft suites: (name, function, default max depth).
    perft_suites = (
        ("start", starting_position_perft_test, 4),
        ("kiwi", kiwipete_perft_test, 3),
        ("position3", position3_perft_test, 5),
        ("white_position4", position4_white_perft_test, 4),
        ("black_position4", position4_black_perft_test, 4),
        ("talkchess", talkchess_perft_test, 4,),
        ("position6", position6_perft_test, 4,),
    )
    # Registry of tactics suites: (name, function).
    tactics_suites = (
        ("froms_gambit", tactics_froms_gambit_mate),
        ("knight_sack", tactics_knight_sack_mate),
        # Too hard right now
        # ("queen_sack", tactics_queen_sack_mate),
        ("double_check", tactics_double_check_mate),
        ("queen_sack_smothered", tactics_queen_sack_smothered_mate),
        ("caro_kann", tactics_caro_kann_mate),
        ("win_queen", tactics_win_queen),
        ("50_move_rule", tactics_50_move_rule_draw),
        ("edp_tactics", tactics_epd_records),
    )
    # Map suite name -> (function, argument tuple).
    available_suites = {}
    for name, suite, depth in perft_suites:
        available_suites[name] = (suite, (depth, ))
    for name, suite in tactics_suites:
        available_suites[name] = (suite, tuple())
    suites = set()
    # Bug fix: fast_mode used to be assigned True twice (before and after
    # the suite tables); one assignment suffices.
    fast_mode = True  # NOTE(review): never read below -- confirm intended use
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg == "slow":
                fast_mode = False
            elif arg == "tactics":
                for s in tactics_suites:
                    suites.add(available_suites[s[0]])
            elif arg == "perft":
                for s in perft_suites:
                    suites.add(available_suites[s[0]])
            elif arg in available_suites:
                suites.add(available_suites[arg])
            else:
                # Robustness: report unknown suite names instead of raising
                # a bare KeyError.
                sys.exit("Unknown test suite: '{}'. Available: {}".format(
                    arg, ", ".join(sorted(available_suites))))
    suites = tuple(sorted(suites))
    if not suites:
        # No selection given: run everything.
        suites = tuple(sorted(available_suites.itervalues()))
    find_executable()
    for func, args in suites:
        func(*args)
| |
# Module wordnet.py
#
# Author: Oliver Steele <steele@cs.brandeis.edu>
# Project Page: http://sourceforge.net/projects/pywordnet
#
# Copyright (c) 1998-2001 by Oliver Steele. Use is permitted under
# the Artistic License
# <http://www.opensource.org/licenses/artistic-license.html>
"""Utility functions to use with the wordnet module.
Usage
-----
>>> dog = N['dog'][0]
# (First 10) adjectives that are transitively SIMILAR to the main sense of "red"
>>> closure(ADJ['red'][0], SIMILAR)[:10]
['red' in {adjective: red, reddish, ruddy, blood-red, carmine, cerise, cherry, cherry-red, crimson, ruby, ruby-red, scarlet}, {adjective: chromatic}, {adjective: amber, brownish-yellow, yellow-brown}, {adjective: amethyst}, {adjective: aureate, gilded, gilt, gold, golden}, {adjective: azure, cerulean, sky-blue, bright blue}, {adjective: blue, bluish, blueish, light-blue, dark-blue}, {adjective: bluish green, blue-green, cyan, teal}, {adjective: blushful, rosy}, {adjective: bottle-green}]
>>> # Adjectives that are transitively SIMILAR to any of the senses of "red"
>>> #flatten1(map(lambda sense:closure(sense, SIMILAR), ADJ['red'])) # too verbose
>>> # Hyponyms of the main sense of "dog" (n.) that are homophonous with verbs
>>> filter(lambda sense:V.get(sense.form), flatten1(map(lambda e:e.senses(), hyponyms(N['dog'][0]))))
['dog' in {noun: dog, domestic dog, Canis familiaris}, 'pooch' in {noun: pooch, doggie, doggy, barker, bow-wow}, 'toy' in {noun: toy dog, toy}, 'hound' in {noun: hound, hound dog}, 'basset' in {noun: basset, basset hound}, 'cocker' in {noun: cocker spaniel, English cocker spaniel, cocker}, 'bulldog' in {noun: bulldog, English bulldog}]
>>> # Find the senses of "raise"(v.) and "lower"(v.) that are antonyms
>>> filter(lambda p:p[0] in p[1].pointerTargets(ANTONYM), product(V['raise'].senses(), V['lower'].senses()))
[('raise' in {verb: raise, lift, elevate, get up, bring up}, 'lower' in {verb: lower, take down, let down, get down, bring down})]
"""
__author__ = "Oliver Steele <steele@cs.brandeis.edu>"
__version__ = "1.4"
from nltk_contrib.pywordnet import *
from nltk_contrib.pywordnet import _normalizePOS, _dictionaryFor
#
# Domain utilities
#
def _requireSource(entity):
    """Raise TypeError unless `entity` can act as a pointer source,
    i.e. it has a `pointers` attribute (as Senses and Synsets do).
    A Word gets a hint to index it first ("try entity[0]")."""
    if not hasattr(entity, 'pointers'):
        if isinstance(entity, Word):
            raise TypeError, `entity` + " is not a Sense or Synset. Try " + `entity` + "[0] instead."
        else:
            raise TypeError, `entity` + " is not a Sense or Synset"
def tree(source, pointerType):
    """Return a nested-list tree rooted at source, expanded along
    pointerType.  For a Word, return one tree per sense.

    >>> dog = N['dog'][0]
    >>> from pprint import pprint
    >>> pprint(tree(dog, HYPERNYM))
    ['dog' in {noun: dog, domestic dog, Canis familiaris},
     [{noun: canine, canid},
      [{noun: carnivore},
       [{noun: placental, placental mammal, eutherian, eutherian mammal},
        [{noun: mammal},
         [{noun: vertebrate, craniate},
          [{noun: chordate},
           [{noun: animal, animate being, beast, brute, creature, fauna},
            [{noun: organism, being, living thing}, [{noun: entity}]]]]]]]]]]
    >>> #pprint(tree(dog, HYPONYM)) # too verbose to include here
    """
    if isinstance(source, Word):
        return [tree(sense, pointerType) for sense in source.senses()]
    _requireSource(source)
    subtrees = [tree(target, pointerType)
                for target in source.pointerTargets(pointerType)]
    return [source] + subtrees
def closure(source, pointerType, accumulator=None):
    """Return the transitive closure of source under the pointerType
    relationship. If source is a Word, return the union of the
    closures of its senses.

    >>> dog = N['dog'][0]
    >>> closure(dog, HYPERNYM)
    ['dog' in {noun: dog, domestic dog, Canis familiaris}, {noun: canine, canid}, {noun: carnivore}, {noun: placental, placental mammal, eutherian, eutherian mammal}, {noun: mammal}, {noun: vertebrate, craniate}, {noun: chordate}, {noun: animal, animate being, beast, brute, creature, fauna}, {noun: organism, being, living thing}, {noun: entity}]
    """
    if isinstance(source, Word):
        # NOTE(review): despite the docstring, this unions tree() results
        # (nested lists), not closure() results -- confirm this is intended.
        return reduce(union, map(lambda s, t=pointerType:tree(s,t), source.senses()))
    _requireSource(source)
    # `accumulator` doubles as the visited list, so shared ancestors and
    # cycles are only expanded once.
    if accumulator is None:
        accumulator = []
    if source not in accumulator:
        accumulator.append(source)
        for target in source.pointerTargets(pointerType):
            closure(target, pointerType, accumulator)
    return accumulator
def hyponyms(source):
    """Return a list containing source together with its transitive
    hyponyms.  For a Word, the union over its senses is returned."""
    return closure(source, HYPONYM)
def hypernyms(source):
    """Return a list containing source together with its transitive
    hypernyms.  For a Word, the union over its senses is returned."""
    return closure(source, HYPERNYM)
def meet(a, b, pointerType=HYPERNYM):
    """Return the meet of a and b under the pointerType relationship,
    or None if the closures share no element.

    >>> meet(N['dog'][0], N['cat'][0])
    {noun: carnivore}
    >>> meet(N['dog'][0], N['person'][0])
    {noun: organism, being, living thing}
    >>> meet(N['thought'][0], N['belief'][0])
    {noun: content, cognitive content, mental object}
    """
    common = intersection(closure(a, pointerType), closure(b, pointerType))
    if common:
        return common[0]
    return None
#
# String Utility Functions
#
def startsWith(str, prefix):
    """Return true iff _str_ starts with _prefix_.

    >>> startsWith('unclear', 'un')
    True
    """
    # Delegate to the built-in string method instead of slicing by hand.
    return str.startswith(prefix)
def endsWith(str, suffix):
    """Return true iff _str_ ends with _suffix_.

    >>> endsWith('clearly', 'ly')
    True

    Bug fix: the old slice-based test used ``str[-len(suffix):]``, so for
    an empty suffix ``str[-0:]`` was the whole string and every non-empty
    string wrongly failed; every string ends with ''.
    """
    return str.endswith(suffix)
def equalsIgnoreCase(a, b):
    """Return true iff a and b have the same lowercase representation.

    >>> equalsIgnoreCase('dog', 'Dog')
    True
    >>> equalsIgnoreCase('dOg', 'DOG')
    True
    """
    # Test a == b first as an optimization for the common equal case.
    # Use the string method rather than the long-deprecated string-module
    # function (string.lower); behavior is unchanged.
    return a == b or a.lower() == b.lower()
#
# Sequence Utility Functions
#
def issequence(item):
    """Return true iff _item_ is a Sequence (a List, String, or Tuple).

    >>> issequence((1,2))
    1
    >>> issequence([1,2])
    1
    >>> issequence('12')
    1
    >>> issequence(1)
    0
    """
    # Exact type check (not isinstance), matching the original semantics.
    sequence_types = (ListType, StringType, TupleType)
    return type(item) in sequence_types
def intersection(u, v):
    """Return the intersection of _u_ and _v_ as a list, preserving the
    order (and any duplicates) of _u_.

    >>> intersection((1,2,3), (2,3,4))
    [2, 3]
    """
    # Membership uses `in` (equality), so elements need not be hashable.
    return [element for element in u if element in v]
def union(u, v):
    """Return the union of _u_ and _v_: a new list holding the elements
    of _u_ followed by those elements of _v_ not already present.
    Neither argument is mutated.

    >>> union((1,2,3), (2,3,4))
    [1, 2, 3, 4]
    """
    # list() always constructs a fresh list, so the old "if w is u: copy"
    # guard was dead code and has been removed.
    w = list(u)
    for e in v:
        if e not in w:
            w.append(e)
    return w
def product(u, v):
    """Return the Cartesian product of u and v as a list of pairs,
    varying the second coordinate fastest.

    >>> product("123", "abc")
    [('1', 'a'), ('1', 'b'), ('1', 'c'), ('2', 'a'), ('2', 'b'), ('2', 'c'), ('3', 'a'), ('3', 'b'), ('3', 'c')]
    """
    # A nested comprehension replaces the original flatten1/map/lambda chain.
    return [(a, b) for a in u for b in v]
def removeDuplicates(sequence):
    """Return a copy of _sequence_ with equal items removed, keeping the
    first occurrence of each.  Uses `in` (equality), so items need not be
    hashable.

    >>> removeDuplicates("this is a test")
    ['t', 'h', 'i', 's', ' ', 'a', 'e']
    >>> removeDuplicates(map(lambda tuple:apply(meet, tuple), product(N['story'].senses(), N['joke'].senses())))
    [None, {noun: act, human action, human activity}, {noun: communication}, {noun: message, content, subject matter, substance}]
    """
    unique = []
    for element in sequence:
        if element in unique:
            continue
        unique.append(element)
    return unique
#
# Tree Utility Functions
#
def flatten1(sequence):
    """Flatten one level: splice list and tuple items of _sequence_ into
    the result; leave everything else as a single element."""
    flat = []
    for element in sequence:
        kind = type(element)
        if kind == TupleType:
            flat.extend(list(element))
        elif kind == ListType:
            flat.extend(element)
        else:
            flat.append(element)
    return flat
#
# WordNet utilities
#
GET_INDEX_SUBSTITUTIONS = ((' ', '-'), ('-', ' '), ('-', ''), (' ', ''), ('.', ''))  # (old, new) spelling variants tried in order by getIndex()
def getIndex(form, pos='noun'):
    """Search for _form_ in the index file corresponding to
    _pos_. getIndex applies to _form_ an algorithm that replaces
    underscores with hyphens, hyphens with underscores, removes
    hyphens and underscores, and removes periods in an attempt to find
    a form of the string that is an exact match for an entry in the
    index file corresponding to _pos_. getWord() is called on each
    transformed string until a match is found or all the different
    strings have been tried. It returns a Word or None."""
    # trySubstitutions receives itself as its first argument -- a workaround
    # for the lack of nested lexical scoping in Python < 2.1.  The `pos`
    # dictionary is likewise captured via a default argument.
    def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=_dictionaryFor(pos)):
        # First try the form as given (skipped when lookup=0 on the
        # recursive call that has already tried it).
        if lookup and dictionary.has_key(form):
            return dictionary[form]
        elif substitutions:
            (old, new) = substitutions[0]
            substitute = string.replace(form, old, new) #and substitute != form
            if substitute and dictionary.has_key(substitute):
                return dictionary[substitute]
            # Otherwise try the remaining substitutions on the original
            # form first, then on the substituted form.
            return trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions[1:]))
    #return trySubstitutions(returnMatch, form, GET_INDEX_SUBSTITUTIONS)
    return trySubstitutions(trySubstitutions, form, GET_INDEX_SUBSTITUTIONS)
# (suffix, replacement) detachment rules, per part of speech, tried in
# order by morphy() below.  These mirror WordNet's morphy rules.
MORPHOLOGICAL_SUBSTITUTIONS = {
    NOUN: (('s', ''), ('ses', 's'), ('xes', 'x'), ('zes', 'z'), ('ches', 'ch'), ('shes', 'sh')),
    # Bug fix: the last verb rule was a duplicate of ('ing', 'e'); per the
    # WordNet morphy detachment rules it should strip a bare '-ing'
    # (e.g. 'walking' -> 'walk').
    VERB: (('s', ''), ('ies', 'y'), ('es', 'e'), ('ed', 'e'), ('ed', ''), ('ing', 'e'), ('ing', '')),
    ADJECTIVE: (('er', ''), ('er', 'est'), ('er', 'e'), ('est', 'e')),
    ADVERB: None}
def morphy(form, pos='noun', collect=0):
    """Recursively uninflect _form_, and return the first form found
    in the dictionary. If _collect_ is true, a sequence of all forms
    is returned, instead of just the first one.

    >>> morphy('dogs')
    'dog'
    >>> morphy('churches')
    'church'
    >>> morphy('aardwolves')
    'aardwolf'
    >>> morphy('abaci')
    'abacus'
    >>> morphy('hardrock', 'adv')
    """
    pos = _normalizePOS(pos)
    # "<pos>.exc" maps irregular inflections to base forms.
    # NOTE(review): the file handle is never closed explicitly.
    excfile = open(os.path.join(WNSEARCHDIR, {NOUN: 'noun', VERB: 'verb', ADJECTIVE: 'adj', ADVERB: 'adv'}[pos] + '.exc'))
    substitutions = MORPHOLOGICAL_SUBSTITUTIONS[pos]
    def trySubstitutions(trySubstitutions, # workaround for lack of nested closures in Python < 2.1
                         form, # reduced form
                         substitutions, # remaining substitutions
                         lookup=1,
                         dictionary=_dictionaryFor(pos),
                         excfile=excfile,
                         collect=collect,
                         collection=[]):
        import string
        # Irregular form?  Exception file lines are "<inflected> <base>\n".
        exceptions = binarySearchFile(excfile, form)
        if exceptions:
            form = exceptions[string.find(exceptions, ' ')+1:-1]
        if lookup and dictionary.has_key(form):
            if collect:
                collection.append(form)
            else:
                return form
        elif substitutions:
            # Apply the next suffix rule, then recurse on both the
            # unreduced and (if the rule applied) the reduced form.
            old, new = substitutions[0]
            substitutions = substitutions[1:]
            substitute = None
            if endsWith(form, old):
                substitute = form[:-len(old)] + new
                #if dictionary.has_key(substitute):
                #    return substitute
            form = trySubstitutions(trySubstitutions, form, substitutions) or \
                   (substitute and trySubstitutions(trySubstitutions, substitute, substitutions))
            return (collect and collection) or form
        elif collect:
            return collection
    return trySubstitutions(trySubstitutions, form, substitutions)
#
# Testing
#
def _test(reset=0):
    """Run the wntools doctests; with reset true, clear doctest's memory
    of earlier runs first (useful after a reload)."""
    import doctest, wntools
    if reset:
        # Keeps doctest from complaining after a reload.
        doctest.master = None
    return doctest.testmod(wntools)
| |
#! /usr/bin/env python
import numpy as np
from landlab.components import LinearDiffuser
_TINY_DIFFUSIVITY = 1.0e-20  # added at subaerial nodes so the base class never sees a zero diffusivity (avoids divide-by-zero)
class SimpleSubmarineDiffuser(LinearDiffuser):
    r"""
    Transport marine sediment using a water-depth-dependent diffusion model.

    This component models sediment transport as a diffusion process with a
    coefficient that depends on water depth :math:`h` as follows:

    .. math::
        D(h) = D_0 f_1(h) f_2(h)

    Here :math:`D_0` is the maximum value, corresponding to the input
    parameter :code:`shallow_water_diffusivity`.

    The function :math:`f_1(h)` describes the decrease in transport efficiency
    below the wave base depth :math:`h_w`. It is defined as unity for depth
    above the wave base, and as

    .. math::
        f_1(h) = \exp( -(h - h_w) / h_w)

    for :math:`h > h_w`.

    The function :math:`f_2(h)` handles the transition in transport efficiency
    around the shoreline. If :code:`tidal_range`, :math:`R_t`, is zero, then
    :math:`f_2` is set to unity underwater (:math:`h \ge 0`), and a tiny value
    above water (not zero, because that would cause a divide-by-zero error in
    the base class).

    If :math:`R_t > 0`, then a :math:`tanh` function is used to model
    a smooth decrease in :math:`D` from the low to high tide level:

    .. math::
        f_2(h) = (\tanh ( -h / R_t) + 1) / 2

    with an addition tiny value added to locations above water to avoid
    division by zero.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import SimpleSubmarineDiffuser
    >>> grid = RasterModelGrid((3, 7), xy_spacing=100.0)
    >>> grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
    >>> topo = grid.add_zeros('topographic__elevation', at='node')
    >>> topo[:] = -10.0
    >>> topo[9:14] = [0., 10., 10., 5., 5.]
    >>> ssd = SimpleSubmarineDiffuser(grid, tidal_range=0.0)
    >>> ssd.run_one_step(dt=5.0)
    >>> topo[8:13]
    array([ -9.5, 0. , 9.5, 10. , 5. ])
    >>> grid.at_node["sediment_deposit__thickness"][8:13]
    array([ 0.5, 0. , -0.5, 0. , 0. ])
    """

    _name = "SimpleSubmarineDiffuser"

    _time_units = "y"

    _info = {
        "sea_level__elevation": {
            "dtype": "float",
            "intent": "in",
            "optional": False,
            "units": "m",
            "mapping": "grid",
            "doc": "Sea level elevation",
        },
        "topographic__elevation": {
            "dtype": "float",
            "intent": "inout",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Land surface topographic elevation",  # and seafloor
        },
        "water__depth": {
            "dtype": "float",
            "intent": "out",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "depth of water under current sea level",
        },
        "sediment_deposit__thickness": {
            "dtype": "float",
            "intent": "out",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "Thickness of deposition or erosion in latest time step",
        },
    }

    def __init__(
        self,
        grid,
        sea_level=0.0,
        wave_base=60.0,
        shallow_water_diffusivity=100.0,
        tidal_range=2.0,
        **kwds
    ):
        """
        Parameters
        ----------
        grid: ModelGrid (RasterModelGrid, HexModelGrid, etc.)
            A landlab grid.
        sea_level: float, optional
            The current sea level (m) (default 0)
        wave_base: float, optional
            Wave base (m) (default 60)
        shallow_water_diffusivity: float, optional
            Diffusivity coefficient for shallow water (m2 / y) (default 100)
        tidal_range: float, optional
            Tidal range (m) (default 2)
        """
        self._wave_base = float(wave_base)
        # The authoritative sea level lives in the grid's at-grid field;
        # keep a reference to it.  (A redundant ``self._sea_level =
        # sea_level`` dead store, overwritten two lines later, was removed.)
        grid.at_grid["sea_level__elevation"] = sea_level
        self._sea_level = grid.at_grid["sea_level__elevation"]
        self._shallow_water_diffusivity = shallow_water_diffusivity
        self._tidal_range = tidal_range
        if tidal_range > 0.0:
            self._inverse_tidal_range = 1.0 / tidal_range
        # "kd" holds the spatially variable diffusivity consumed by the
        # LinearDiffuser base class (see kwds.setdefault below).
        if "kd" not in grid.at_node:
            grid.add_zeros("kd", at="node")
        if "sediment_deposit__thickness" not in grid.at_node:
            grid.add_zeros("sediment_deposit__thickness", at="node")
        if "water__depth" in grid.at_node:
            self._depth = grid.at_node["water__depth"]
        else:
            self._depth = grid.add_zeros("water__depth", at="node")
        self._time = 0.0
        kwds.setdefault("linear_diffusivity", "kd")
        super(SimpleSubmarineDiffuser, self).__init__(grid, **kwds)

    @property
    def wave_base(self):
        """Wave base depth (m)."""
        return self._wave_base

    @wave_base.setter
    def wave_base(self, value):
        self._wave_base = float(value)

    @property
    def shallow_water_diffusivity(self):
        """Maximum (shallow-water) diffusivity (m2 / y)."""
        return self._shallow_water_diffusivity

    @shallow_water_diffusivity.setter
    def shallow_water_diffusivity(self, value):
        self._shallow_water_diffusivity = float(value)

    @property
    def time(self):
        """Total elapsed run time (y)."""
        return self._time

    @property
    def sea_level(self):
        """Current sea level elevation (m), stored as an at-grid field."""
        return self.grid.at_grid["sea_level__elevation"]

    @sea_level.setter
    def sea_level(self, sea_level):
        self.grid.at_grid["sea_level__elevation"] = sea_level

    def depth_function(self, water_depth):
        """
        Return weighting factor for transport.

        If there is no tidal range, then the weight factor is 1 if at or
        below sea level, and 0 if above it. If there is a tidal range, then
        a tanh function is used to weight transport across mean sea level, so
        that there is some degree of transport for water depths within the
        tidal range (less above, more below). The nature of the tanh function
        is such that the transport is about 95% of its maximum value at a depth
        of 1.5x the mean tidal range, and 5% of its maximum value at a height
        of 1.5x the mean tidal range above mean sea level.

        Parameters
        ----------
        water_depth : float array
            Depth of water relative to mean sea level (m) (can be negative)

        Returns
        -------
        df : float array
            Weight factor ranging from 0 to 1.
        """
        if self._tidal_range > 0.0:
            df = (np.tanh(self._inverse_tidal_range * water_depth) + 1.0) / 2.0
        else:
            # Boolean array promoted to float: 1.0 underwater, 0.0 on land.
            df = 1.0 * (water_depth >= 0.0)
        return df

    def calc_diffusion_coef(self):
        """
        Calculate and store diffusion coefficient values.

        Returns
        -------
        k : float array
            Diffusion coefficient, m2/y
        """
        sea_level = self.grid.at_grid["sea_level__elevation"]
        self._depth[:] = sea_level - self._grid.at_node["topographic__elevation"]
        deep_water = self._depth > self._wave_base
        land = self._depth < 0.0
        k = self.grid.at_node["kd"]
        # Start from the shoreline weighting, then damp below the wave base.
        k[:] = self._shallow_water_diffusivity * self.depth_function(self._depth)
        k[deep_water] *= np.exp(
            -(self._depth[deep_water] - self._wave_base) / self._wave_base
        )
        # Keep land nodes strictly positive to avoid divide-by-zero in the
        # base class.
        k[land] += _TINY_DIFFUSIVITY
        return k

    def run_one_step(self, dt):
        """
        Advance by one time step.

        Parameters
        ----------
        dt : float
            Time-step duration (y)
        """
        z_before = self.grid.at_node["topographic__elevation"].copy()
        self.calc_diffusion_coef()
        super(SimpleSubmarineDiffuser, self).run_one_step(dt)
        # Record the elevation change produced by this step.
        depo = self.grid.at_node["sediment_deposit__thickness"]
        depo[:] = self.grid.at_node["topographic__elevation"] - z_before
        self._time += dt
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Help document markdown helpers."""
import argparse
import re
import StringIO
import textwrap
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import usage_text
class Error(Exception):
  """Base exception for errors raised by the markdown module."""
# Layout constants used when wrapping example command lines
# (see ExampleCommandLineSplitter).
_SPLIT = 78  # Split lines longer than this.
_SECTION_INDENT = 8  # Section or list within section indent.
_FIRST_INDENT = 2  # First line indent.
_SUBSEQUENT_INDENT = 6  # Subsequent line indent.
class ExampleCommandLineSplitter(object):
  """Example command line splitter.

  Attributes:
    max_index: int, The max index to check in line.
    quote_char: str, The current quote char for quotes split across lines.
    quote_index: int, The index of quote_char in line or 0 if in previous line.
  """

  def __init__(self):
    self._max_index = _SPLIT - _SECTION_INDENT - _FIRST_INDENT
    self._quote_char = None
    self._quote_index = 0

  def _SplitInTwo(self, line):
    """Splits line into before and after, len(before) < self._max_index.

    Args:
      line: str, The line to split.

    Returns:
      (before, after)
        The line split into two parts. <before> is a list of strings that forms
        the first line of the split and <after> is a string containing the
        remainder of the line to split. The display width of <before> is
        < self._max_index. <before> contains the separator chars, including a
        newline.
    """
    punct_index = 0
    quoted_space_index = 0
    quoted_space_quote = None
    space_index = 0
    space_flag = False
    i = 0
    # Scan the first max_index chars, remembering candidate split points:
    # the last unquoted space (space_index), the last space inside quotes
    # (quoted_space_index), and the last punctuation char (punct_index).
    # Quote state (self._quote_char) persists across calls so a quoted
    # string split onto a previous line is tracked correctly.
    while i < self._max_index:
      c = line[i]
      i += 1
      if c == self._quote_char:
        # Matching close quote: leave quoted mode.
        self._quote_char = None
      elif self._quote_char:
        if c == ' ':
          quoted_space_index = i - 1
          quoted_space_quote = self._quote_char
      elif c in ('"', "'"):
        self._quote_char = c
        self._quote_index = i
        quoted_space_index = 0
      elif c == '\\':
        # Skip the escaped character.
        i += 1
      elif i < self._max_index:
        if c == ' ':
          # Split before a flag instead of the next arg; it could be the flag
          # value.
          if line[i] == '-':
            space_flag = True
            space_index = i
          elif space_flag:
            space_flag = False
          else:
            space_index = i
        elif c in (',', ';', '/', '|'):
          punct_index = i
        elif c == '=':
          space_flag = False
    separator = '\\\n'
    indent = _FIRST_INDENT
    # Prefer an unquoted space, then a quoted space, then punctuation;
    # otherwise split hard at max_index.
    if space_index:
      split_index = space_index
      indent = _SUBSEQUENT_INDENT
    elif quoted_space_index:
      split_index = quoted_space_index
      if quoted_space_quote == "'":
        # No backslash continuation inside single quotes.
        separator = '\n'
    elif punct_index:
      split_index = punct_index
    else:
      split_index = self._max_index
    if split_index <= self._quote_index:
      # The opening quote falls in the part being split off; reset state.
      self._quote_char = None
    else:
      self._quote_index = 0
    # Subsequent lines get less room because of the continuation indent.
    self._max_index = _SPLIT - _SECTION_INDENT - indent
    return [line[:split_index], separator, ' ' * indent], line[split_index:]

  def Split(self, line):
    """Splits a long example command line by inserting newlines.

    Args:
      line: str, The command line to split.

    Returns:
      str, The command line with newlines inserted.
    """
    lines = []
    while len(line) > self._max_index:
      before, line = self._SplitInTwo(line)
      lines.extend(before)
    lines.append(line)
    return ''.join(lines)
class MarkdownGenerator(object):
"""Command help markdown document generator.
Attributes:
_buf: Output document stream.
_command: The CommandCommon instance for command.
_command_name: The command name string.
_command_path: Command path.
_description: The long_help description markdown.
_detailed_help: Command detailed help dict indexed by SECTION name.
_doc: The output markdown document string.
_file_name: The command path name (used to name documents).
_is_top_element: True if command is the top CLI element.
_is_topic: True if the command is a help topic.
_out: Output writer.
_printed_sections: The set of already printed sections.
_top_element: The top CLI element.
_track: The Command release track prefix.
_subcommand: The list of subcommand instances or None.
_subgroup: The list of subgroup instances or None.
"""
def __init__(self, command):
  """Constructor.

  Args:
    command: calliope._CommandCommon, Help extracted from this calliope
      command, group or topic.
  """
  command.LoadAllSubElements()
  self._command = command
  self._buf = StringIO.StringIO()
  # All output goes through self._out into the in-memory buffer.
  self._out = self._buf.write
  self._description = ''
  self._detailed_help = getattr(command, 'detailed_help', {})
  self._command_path = command.GetPath()
  self._command_name = ' '.join(self._command_path)
  self._file_name = '_'.join(self._command_path)
  self._track = command.ReleaseTrack().prefix
  # If the release track prefix appears as the second path component the
  # meaningful command name starts one component later.
  command_index = (2 if self._track and len(self._command_path) >= 3 and
                   self._command_path[1] == self._track else 1)
  self._is_topic = (len(self._command_path) >= (command_index + 1) and
                    self._command_path[command_index] == 'topic')
  # pylint: disable=protected-access
  self._top_element = command._TopCLIElement()
  self._is_top_element = command.IsRoot()
  self._printed_sections = set()
  self._subcommands = command.GetSubCommandHelps()
  self._subgroups = command.GetSubGroupHelps()
def _SplitCommandFromArgs(self, cmd):
  """Splits cmd into command and args lists.

  The command list part is a valid command and the args list part is the
  trailing args.

  Args:
    cmd: [str], A command + args list.

  Returns:
    (command, args): The command and args lists.
  """
  # The bare top level command always works.
  if len(cmd) <= 1:
    return cmd, []
  # Skip the top level command name.
  prefix = 1
  # Greedily extend the command path one word at a time; back up one word
  # at the first invalid sub-path.
  i = prefix + 1
  while i <= len(cmd):
    if not self._top_element.IsValidSubPath(cmd[prefix:i]):
      i -= 1
      break
    i += 1
  return cmd[:i], cmd[i:]
def _UserInput(self, msg):
"""Returns msg with user input markdown.
Args:
msg: str, The user input string.
Returns:
The msg string with embedded user input markdown.
"""
return (base.MARKDOWN_CODE + base.MARKDOWN_ITALIC +
msg +
base.MARKDOWN_ITALIC + base.MARKDOWN_CODE)
def _Section(self, name, sep=True):
"""Prints the section header markdown for name.
Args:
name: str, The manpage section name.
sep: boolean, Add trailing newline.
"""
self._printed_sections.add(name)
self._out('\n\n## {name}\n'.format(name=name))
if sep:
self._out('\n')
  def _PrintSynopsisSection(self, sections, has_global_flags):
    """Prints the command line synopsis section.

    Emits the command name, leading positionals, GROUP/COMMAND placeholders,
    flags grouped per section, an optional GLOBAL-FLAG placeholder, and any
    trailing REMAINDER/'--' positionals, in that order.
    """
    # MARKDOWN_CODE is the default SYNOPSIS font style.
    code = base.MARKDOWN_CODE
    em = base.MARKDOWN_ITALIC
    self._Section('SYNOPSIS')
    self._out('{code}{command}{code}'.format(code=code,
                                             command=self._command_name))
    # Output the positional args up to the first REMAINDER or '-- *' args. The
    # rest will be picked up after the flag args are output. argparse does not
    # have an explicit '--' arg intercept, so we use the metavar value as a '--'
    # sentinel. Any suppressed args are ignored by a pre-pass.
    positional_args = usage_text.FilterOutSuppressed(
        self._command.ai.positional_args)
    while positional_args:
      arg = positional_args[0]
      if arg.nargs == argparse.REMAINDER or arg.metavar.startswith('-- '):
        break
      positional_args.pop(0)
      self._out(usage_text.PositionalDisplayString(arg, markdown=True))
    if self._subcommands and self._subgroups:
      self._out(' ' + em + 'GROUP' + em + ' | ' + em + 'COMMAND' + em)
    elif self._subcommands:
      self._out(' ' + em + 'COMMAND' + em)
    elif self._subgroups:
      self._out(' ' + em + 'GROUP' + em)
    # Generate the flag usage string with flags in section order.
    for _, _, groups, attrs in sections:
      for group_id, group in sorted(
          groups.iteritems(), key=lambda x: usage_text.FlagGroupSortKey(x[1])):
        flag = group[0]
        if len(group) == 1:
          # A Boolean flag with an auto-generated inverted form is shown
          # via its show_inverted counterpart.
          show_inverted = getattr(flag, 'show_inverted', None)
          if show_inverted:
            flag = show_inverted
          msg = usage_text.FlagDisplayString(flag, markdown=True)
          if not msg:
            continue
          if flag.required:
            self._out(' {msg}'.format(msg=msg))
          else:
            self._out(' [{msg}]'.format(msg=msg))
        else:
          group.sort(key=lambda f: f.option_strings)
          attr = attrs.get(group_id)
          if not attr or not attr.is_mutex:
            # Independent flags: each gets its own optional [...] item.
            for flag in group:
              self._out(' [{0}]'.format(
                  usage_text.FlagDisplayString(flag, markdown=True)))
          else:
            # Mutually exclusive group: (a | b) if required, [a | b] if not.
            msg = ' | '.join(usage_text.FlagDisplayString(flag, markdown=True)
                             for flag in group)
            if not msg:
              continue
            if attr.is_required:
              self._out(' ({msg})'.format(msg=msg))
            else:
              self._out(' [{msg}]'.format(msg=msg))
    if has_global_flags:
      self._out(' [' + em + 'GLOBAL-FLAG ...' + em + ']')
    # positional_args will only be non-empty if we had -- ... or REMAINDER left.
    for arg in usage_text.FilterOutSuppressed(positional_args):
      self._out(usage_text.PositionalDisplayString(arg, markdown=True))
    self._out('\n')
def _PrintFlagDefinition(self, flag):
"""Prints a flags definition list item."""
self._out('\n{0}::\n'.format(
usage_text.FlagDisplayString(flag, markdown=True)))
self._out('\n{arghelp}\n'.format(arghelp=self._Details(flag)))
def _PrintFlagSection(self, heading, groups, attrs):
"""Prints a flag section."""
self._Section(heading, sep=False)
for group_id, group in sorted(
groups.iteritems(), key=lambda x: usage_text.FlagGroupSortKey(x[1])):
if len(group) == 1 or any([getattr(f, 'show_inverted', None)
for f in group]):
self._PrintFlagDefinition(group[0])
else:
if len(group) > 1:
attr = attrs.get(group_id)
if attr and attr.description:
self._out('\n' + attr.description + '\n')
for flag in sorted(group, key=lambda f: f.option_strings):
self._PrintFlagDefinition(flag)
def _PrintPositionalsAndFlagsSections(self, sections, has_global_flags):
"""Prints the positionals and flags sections."""
visible_positionals = usage_text.FilterOutSuppressed(
self._command.ai.positional_args)
if visible_positionals:
self._Section('POSITIONAL ARGUMENTS', sep=False)
for arg in visible_positionals:
self._out('\n{0}::\n'.format(
usage_text.PositionalDisplayString(arg, markdown=True).lstrip()))
self._out('\n{arghelp}\n'.format(arghelp=self._Details(arg)))
# List the sections in order.
for heading, _, groups, attrs in sections:
self._PrintFlagSection(heading, groups, attrs)
if has_global_flags:
self._Section('GLOBAL FLAGS', sep=False)
self._out('\nRun *$ gcloud help* for a description of flags available to'
'\nall commands.\n')
def _PrintSectionIfExists(self, name, default=None):
"""Print a section of the .help file, from a part of the detailed_help.
Args:
name: str, The manpage section name.
default: str, Default help_stuff if section name is not defined.
"""
if name in self._printed_sections:
return
help_stuff = self._detailed_help.get(name, default)
if not help_stuff:
return
if callable(help_stuff):
help_message = help_stuff()
else:
help_message = help_stuff
self._Section(name)
self._out('{message}\n'.format(
message=textwrap.dedent(help_message).strip()))
def _PrintAllExtraSections(self, excluded_sections):
"""Print all extra man page sections.
Args:
excluded_sections: A list of section names to exclude. These will be
printed later.
Extra sections are _detailed_help sections that have not been printed yet.
_PrintSectionIfExists() skips sections that have already been printed.
"""
for section in sorted(self._detailed_help):
if section.isupper() and section not in excluded_sections:
self._PrintSectionIfExists(section)
def _PrintCommandSection(self, name, subcommands, is_topic=False):
"""Prints a group or command section.
Args:
name: str, The section name singular form.
subcommands: dict, The subcommand dict.
is_topic: bool, True if this is a TOPIC subsection.
"""
# Determine if the section has any content.
content = ''
for subcommand, help_info in sorted(subcommands.iteritems()):
if self._command.IsHidden() or not help_info.is_hidden:
# If this group is already hidden, we can safely include hidden
# sub-items. Else, only include them if they are not hidden.
content += '\n*link:{ref}[{cmd}]*::\n\n{txt}\n'.format(
ref='/'.join(self._command_path + [subcommand]),
cmd=subcommand,
txt=help_info.help_text)
if content:
self._Section(name + 'S')
if is_topic:
self._out('The supplementary help topics are:\n')
else:
self._out('{cmd} is one of the following:\n'.format(
cmd=self._UserInput(name)))
self._out(content)
def _PrintNotesSection(self):
"""Prints the NOTES section if needed."""
if (self._command.IsHidden() or
self._command.ReleaseTrack().help_note):
self._Section('NOTES')
if self._command.IsHidden():
self._out('This command is an internal implementation detail and may'
' change or disappear without notice.\n\n')
if self._command.ReleaseTrack().help_note:
self._out(self._command.ReleaseTrack().help_note + '\n\n')
def _Details(self, arg):
"""Returns the detailed help message for the given arg."""
help_stuff = getattr(arg, 'detailed_help', (arg.help or '') + '\n')
help_message = help_stuff() if callable(help_stuff) else help_stuff
help_message = textwrap.dedent(help_message)
if (not arg.option_strings or
not arg.option_strings[0].startswith('-') or
arg.metavar == ' '):
choices = None
elif arg.choices:
choices = arg.choices
else:
try:
choices = arg.type.choices
except AttributeError:
choices = None
if choices:
metavar = arg.metavar or arg.dest.upper()
choices = getattr(arg, 'choices_help', choices)
if len(choices) > 1:
one_of = 'one of'
else:
# TBD I guess?
one_of = '(currenly only one value is supported)'
if isinstance(choices, dict):
extra_help = ' _{metavar}_ must be {one_of}:\n\n{choices}\n\n'.format(
metavar=metavar,
one_of=one_of,
choices='\n'.join(
['*{name}*::: {desc}'.format(name=name, desc=desc)
for name, desc in sorted(choices.iteritems())]))
else:
extra_help = ' _{metavar}_ must be {one_of}: {choices}.'.format(
metavar=metavar,
one_of=one_of,
choices=', '.join(['*{0}*'.format(x) for x in choices]))
else:
# calliope.backend.ArgumentInterceptor.add_argument() sets
# arg.inverted_help for Boolean flags with auto-generated --no-FLAG
# inverted counterparts.
extra_help = getattr(arg, 'inverted_help', None)
if extra_help:
help_message = help_message.rstrip()
if help_message:
newline_index = help_message.rfind('\n')
if newline_index >= 0 and help_message[newline_index + 1] == ' ':
# Preserve example markdown at end of help_message.
help_message += '\n\n' + extra_help.strip() + '\n'
else:
if not help_message.endswith('.'):
help_message += '.'
if help_message.rfind('\n\n') > 0:
# help_message has multiple paragraphs. Put extra_help in a new
# paragraph.
help_message += '\n\n\n'
help_message += extra_help + '\n'
return help_message.replace('\n\n', '\n+\n').strip()
  def _ExpandFormatReferences(self, doc):
    """Expand {...} references in doc and split over-long example lines."""
    doc = usage_text.ExpandHelpText(self._command, doc)
    # Split long $ ... example lines.
    pat = re.compile(r'^ *(\$ .{%d,})$' % (
        _SPLIT - _FIRST_INDENT - _SECTION_INDENT), re.M)
    # Scan-and-rebuild: copy unmatched text verbatim into rep, replacing
    # each over-long example command line with its split form.
    pos = 0
    rep = ''
    while True:
      match = pat.search(doc, pos)
      if not match:
        break
      rep += (doc[pos:match.start(1)] + ExampleCommandLineSplitter().Split(
          doc[match.start(1):match.end(1)]))
      pos = match.end(1)
    if rep:
      doc = rep + doc[pos:]
    return doc
  def _AddCommandLinkMarkdown(self, doc):
    r"""Add ([`*])command ...\1 link markdown to doc."""
    top = self._command_path[0]
    # This pattern matches "([`*]){top} {arg}*\1" where {top}...{arg} is a
    # known command. The negative lookbehind prefix prevents hyperlinks in
    # SYNOPSIS sections and as the first line in a paragraph.
    pat = re.compile(r'(?<!\n\n)(?<!\*\(ALPHA\)\* )(?<!\*\(BETA\)\* )'
                     r'([`*])(?P<command>{top}( [a-z][-a-z0-9]*)*)\1'.format(
                         top=top))
    # Scan-and-rebuild: unmatched text is copied verbatim into rep.
    pos = 0
    rep = ''
    while True:
      match = pat.search(doc, pos)
      if not match:
        break
      cmd, args = self._SplitCommandFromArgs(match.group('command').split(' '))
      if args:
        # Skip invalid commands.
        rep += doc[pos:match.end(0)]
      else:
        # Replace the command text with a link: markdown reference.
        ref = '/'.join(cmd)
        lnk = 'link:' + ref + '[' + ' '.join(cmd) + ']'
        rep += (doc[pos:match.start('command')] + lnk +
                doc[match.end('command'):match.end(0)])
      pos = match.end(0)
    if rep:
      doc = rep + doc[pos:]
    return doc
  def _AddCommandLineLinkMarkdown(self, doc):
    """Add $ command ... link markdown to doc."""
    top = self._command_path[0]
    # This pattern matches "$ {top} {arg}*" where each arg is lower case and
    # does not start with example-, my-, or sample-. This follows the style
    # guide rule that user-supplied args to example commands contain upper case
    # chars or start with example-, my-, or sample-. The trailing .? allows for
    # an optional punctuation character before end of line. This handles cases
    # like ``... run $ gcloud foo bar.'' at the end of a sentence.
    pat = re.compile(r'\$ (' + top +
                     '((?: (?!(example|my|sample)-)[a-z][-a-z0-9]*)*)).?[ `\n]')
    # Scan-and-rebuild: unmatched text is copied verbatim into rep.
    pos = 0
    rep = ''
    while True:
      match = pat.search(doc, pos)
      if not match:
        break
      # Only the valid command prefix becomes the link; trailing args are
      # appended as plain text.
      cmd, args = self._SplitCommandFromArgs(match.group(1).split(' '))
      ref = '/'.join(cmd)
      lnk = 'link:' + ref + '[' + ' '.join(cmd) + ']'
      if args:
        lnk += ' ' + ' '.join(args)
      rep += doc[pos:match.start(1)] + lnk
      pos = match.end(1)
    if rep:
      doc = rep + doc[pos:]
    return doc
  def _AddManPageLinkMarkdown(self, doc):
    """Add gcloud ...(1) man page link markdown to doc."""
    top = self._command_path[0]
    # Matches optionally-bold man page references like "gcloud_foo(1)".
    pat = re.compile(r'(\*?(' + top + r'(?:[-_ a-z])*)\*?)\(1\)')
    # Scan-and-rebuild: unmatched text is copied verbatim into rep.
    pos = 0
    rep = ''
    while True:
      match = pat.search(doc, pos)
      if not match:
        break
      # Man page names use '_' where the command path uses ' '.
      cmd = match.group(2).replace('_', ' ')
      ref = cmd.replace(' ', '/')
      lnk = '*link:' + ref + '[' + cmd + ']*'
      rep += doc[pos:match.start(2)] + lnk
      pos = match.end(1)
    if rep:
      doc = rep + doc[pos:]
    return doc
  def _FixAirQuotesMarkdown(self, doc):
    """Change ``.*[[:alnum:]]{2,}.*'' quotes => UserInput(*) in doc."""
    # Double ``air quotes'' on strings with no identifier chars or groups of
    # singleton identifier chars are literal. All other double air quote forms
    # are converted to unquoted strings with the _UserInput() font
    # embellishment. This is a subjective choice for aesthetically pleasing
    # renderings.
    pat = re.compile(r"[^`](``([^`]*\w{2,}[^`']*)'')")
    # Scan-and-rebuild: unmatched text is copied verbatim into rep.
    pos = 0
    rep = ''
    while True:
      match = pat.search(doc, pos)
      if not match:
        break
      rep += doc[pos:match.start(1)] + self._UserInput(match.group(2))
      pos = match.end(1)
    if rep:
      doc = rep + doc[pos:]
    return doc
def _SetDetailedHelpSection(self, name, lines):
"""Sets a _detailed_help name or _description section composed of lines.
Args:
name: The section name or None for the DESCRIPTION section.
lines: The list of lines in the section.
"""
# Strip leading empty lines.
while lines and not lines[0]:
lines = lines[1:]
# Strip trailing empty lines.
while lines and not lines[-1]:
lines = lines[:-1]
if lines:
if name:
self._detailed_help[name] = '\n'.join(lines)
else:
self._description = '\n'.join(lines)
def _ExtractDetailedHelp(self):
"""Extracts _detailed_help sections from the command long_help string."""
name = None # DESRIPTION
lines = []
for line in textwrap.dedent(self._command.long_help).strip().splitlines():
# '## \n' is not section markdown.
if len(line) >= 4 and line.startswith('## '):
self._SetDetailedHelpSection(name, lines)
name = line[3:]
lines = []
else:
lines.append(line)
self._SetDetailedHelpSection(name, lines)
def _Edit(self, doc):
"""Applies edits to a copy of the generated markdown in doc.
The sub-edit method call order might be significant. This method allows
the combined edits to be tested without relying on the order.
Args:
doc: The markdown document to edit.
Returns:
An edited copy of the generated markdown.
"""
doc = self._ExpandFormatReferences(doc)
doc = self._AddCommandLineLinkMarkdown(doc)
doc = self._AddCommandLinkMarkdown(doc)
doc = self._AddManPageLinkMarkdown(doc)
doc = self._FixAirQuotesMarkdown(doc)
return doc
  def Generate(self):
    """Generates markdown for the command, group or topic, into a string.

    Emission order mirrors man page conventions: NAME, SYNOPSIS,
    DESCRIPTION, positionals/flags, GROUP/COMMAND (or TOPIC) sections,
    extra detailed_help sections, then EXAMPLES, SEE ALSO and NOTES last.
    """
    self._out('# {0}(1)\n'.format(self._file_name.upper()))
    self._Section('NAME')
    self._out('{{command}} - {index}\n'.format(index=self._command.index_help))
    if not self._is_topic:
      sections, has_global_flags = usage_text.GetFlagSections(
          self._command, self._command.ai)
      self._PrintSynopsisSection(sections, has_global_flags)
    self._ExtractDetailedHelp()
    self._PrintSectionIfExists(
        'DESCRIPTION',
        default=usage_text.ExpandHelpText(self._command, self._description))
    if not self._is_topic:
      self._PrintPositionalsAndFlagsSections(sections, has_global_flags)
    if self._subgroups:
      self._PrintCommandSection('GROUP', self._subgroups)
    if self._subcommands:
      if self._is_topic:
        self._PrintCommandSection('TOPIC', self._subcommands, is_topic=True)
      else:
        self._PrintCommandSection('COMMAND', self._subcommands)
    # EXAMPLES and SEE ALSO are excluded here so they land after the other
    # extra sections; NOTES is always emitted last.
    final_sections = ['EXAMPLES', 'SEE ALSO']
    self._PrintAllExtraSections(excluded_sections=final_sections + ['NOTES'])
    for section in final_sections:
      self._PrintSectionIfExists(section)
    self._PrintNotesSection()
    return self._Edit(self._buf.getvalue())
def Markdown(command):
  """Generates and returns the help markdown document for command.

  Args:
    command: The CommandCommon command instance.

  Returns:
    The markdown document string.
  """
  generator = MarkdownGenerator(command)
  return generator.Generate()
| |
#!/usr/bin/python3
import subprocess
import argparse
import difflib
import filecmp
import fnmatch
import json
import sys
import re
import os
# Command-line interface: global options plus the 'run', 'new', 'list'
# and 'update' subcommands.
fmtr_class = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(prog = 'nasm-t.py',
                                 formatter_class=fmtr_class)
parser.add_argument('-d', '--directory',
                    dest = 'dir', default = './travis/test',
                    help = 'Directory with tests')
parser.add_argument('--nasm',
                    dest = 'nasm', default = './nasm',
                    help = 'Nasm executable to use')
parser.add_argument('--hexdump',
                    dest = 'hexdump', default = '/usr/bin/hexdump',
                    help = 'Hexdump executable to use')
sp = parser.add_subparsers(dest = 'cmd')
for cmd in ['run']:
    spp = sp.add_parser(cmd, help = 'Run test cases')
    spp.add_argument('-t', '--test',
                     dest = 'test',
                     help = 'Run the selected test only',
                     required = False)
for cmd in ['new']:
    spp = sp.add_parser(cmd, help = 'Add a new test case')
    spp.add_argument('--description',
                     dest = 'description', default = "Description of a test",
                     help = 'Description of a test',
                     required = False)
    spp.add_argument('--id',
                     dest = 'id',
                     help = 'Test identifier/name',
                     required = True)
    spp.add_argument('--format',
                     dest = 'format', default = 'bin',
                     help = 'Output format',
                     required = False)
    spp.add_argument('--source',
                     dest = 'source',
                     help = 'Source file',
                     required = False)
    spp.add_argument('--option',
                     dest = 'option',
                     default = '-Ox',
                     help = 'NASM options',
                     required = False)
    spp.add_argument('--ref',
                     dest = 'ref',
                     help = 'Test reference',
                     required = False)
    spp.add_argument('--error',
                     dest = 'error',
                     help = 'Set to "y" if test is supposed to fail',
                     required = False)
    spp.add_argument('--output',
                     dest = 'output', default = 'y',
                     help = 'Output (compiled) file name (or "y")',
                     required = False)
    spp.add_argument('--stdout',
                     dest = 'stdout', default = 'y',
                     help = 'Filename of stdout file (or "y")',
                     required = False)
    spp.add_argument('--stderr',
                     dest = 'stderr', default = 'y',
                     help = 'Filename of stderr file (or "y")',
                     required = False)
for cmd in ['list']:
    spp = sp.add_parser(cmd, help = 'List test cases')
for cmd in ['update']:
    spp = sp.add_parser(cmd, help = 'Update test cases with new compiler')
    spp.add_argument('-t', '--test',
                     dest = 'test',
                     help = 'Update the selected test only',
                     required = False)
# Default output-file extension for each nasm output format; used by the
# 'new' command when --output is left as "y".
map_fmt_ext = {
        'bin':      '.bin',
        'elf':      '.o',
        'elf64':    '.o',
        'elf32':    '.o',
        'elfx32':   '.o',
        'ith':      '.ith',
        'srec':     '.srec',
        'obj':      '.obj',
        'win32':    '.obj',
        'win64':    '.obj',
        'coff':     '.obj',
        'macho':    '.o',
        'macho32':  '.o',
        'macho64':  '.o',
        'aout':     '.out',
        'aoutb':    '.out',
        'as86':     '.o',
        'rdf':      '.rdf',
}
args = parser.parse_args()
# A subcommand is mandatory; bail out with usage otherwise.
if args.cmd == None:
    parser.print_help()
    sys.exit(1)
def read_stdfile(path):
    """Read a reference stdout/stderr file.

    Args:
        path: Path of the file to read.

    Returns:
        The file content decoded as UTF-8 with leading/trailing newlines
        stripped.
    """
    # The redundant f.close() inside the 'with' block was removed; the
    # context manager already closes the file.
    with open(path, "rb") as f:
        return f.read().decode("utf-8").strip("\n")
#
# Check if descriptor has mandatory fields
def is_valid_desc(desc):
    """Return True if the test descriptor has a non-empty description.

    Args:
        desc: Parsed descriptor dict, or None on parse failure.
    """
    # 'is None' instead of '== None' (identity check is the correct idiom).
    if desc is None:
        return False
    if 'description' not in desc:
        return False
    if desc['description'] == "":
        return False
    return True
#
# Expand ref/id in descriptors array
def expand_templates(desc_array):
    """Resolve 'ref' entries by merging fields from the referenced 'id' entry.

    An entry's own fields win over the referenced template's fields; the
    inherited 'id' key is dropped from the merged result. The list is
    mutated in place and also returned.
    """
    by_id = {}
    for entry in desc_array:
        if 'id' in entry:
            by_id[entry['id']] = entry
    for idx, entry in enumerate(desc_array):
        if 'ref' not in entry or entry['ref'] not in by_id:
            continue
        merged = dict(by_id[entry['ref']])
        merged.update(entry)
        merged.pop('id', None)
        desc_array[idx] = merged
    return desc_array
def prepare_desc(desc, basedir, name, path):
    """Validate a descriptor and fill in the private runtime fields.

    Args:
        desc: Descriptor dict to prepare (mutated in place).
        basedir: Directory containing the test.
        name: JSON file name (ends with '.json').
        path: Full path of the JSON file.

    Returns:
        True if the descriptor is valid and ready to run, False otherwise.
    """
    if not is_valid_desc(desc):
        return False
    #
    # Put private fields
    desc['_base-dir'] = basedir
    desc['_json-file'] = name
    desc['_json-path'] = path
    # Strip the '.json' suffix to form the test name.
    desc['_test-name'] = basedir + os.sep + name[:-5]
    #
    # If no target provided never update
    if 'target' not in desc:
        desc['target'] = []
        desc['update'] = 'false'
    #
    # Which code to expect when nasm finishes
    desc['_wait'] = 0
    if 'error' in desc:
        if desc['error'] == 'expected':
            desc['_wait'] = 1
    #
    # Walk over targets and generate match templates
    # if were not provided yet ('x not in d' is the idiomatic form).
    for d in desc['target']:
        if 'output' in d and 'match' not in d:
            d['match'] = d['output'] + ".t"
    return True
def read_json(path):
    """Read and parse a JSON file.

    Args:
        path: Path of the JSON file.

    Returns:
        The decoded JSON value, or None on any I/O, decoding or parse
        error.
    """
    # The original used bare 'except:' clauses which also swallowed
    # KeyboardInterrupt/SystemExit; catch only the expected failures.
    try:
        with open(path, "rb") as f:
            raw = f.read().decode("utf-8").strip("\n")
    except (OSError, UnicodeDecodeError):
        return None
    try:
        # json.JSONDecodeError is a subclass of ValueError.
        return json.loads(raw)
    except ValueError:
        return None
def read_desc(basedir, name):
    """Load and prepare test descriptor(s) from one JSON file.

    Args:
        basedir: Directory containing the JSON file.
        name: JSON file name.

    Returns:
        A list of prepared descriptor dicts (possibly empty).
    """
    path = basedir + os.sep + name
    desc = read_json(path)
    desc_array = []
    # isinstance() instead of 'type(x) == T' for type checks.
    if isinstance(desc, dict):
        # A single descriptor object.
        if prepare_desc(desc, basedir, name, path):
            desc_array += [desc]
    elif isinstance(desc, list):
        # An array of descriptors, possibly with 'ref' templates.
        expand_templates(desc)
        for de in desc:
            if prepare_desc(de, basedir, name, path):
                desc_array += [de]
    return desc_array
def collect_test_desc_from_file(path):
    """Load descriptors from a single JSON file ('.json' added if missing)."""
    if not fnmatch.fnmatch(path, '*.json'):
        path += '.json'
    return read_desc(os.path.dirname(path), os.path.basename(path))
def collect_test_desc_from_dir(basedir):
    """Recursively collect test descriptors from a directory tree.

    Args:
        basedir: Root directory to scan for *.json descriptor files.

    Returns:
        Descriptor dicts sorted by test name.
    """
    desc_array = []
    if os.path.isdir(basedir):
        for filename in os.listdir(basedir):
            subpath = basedir + os.sep + filename
            if os.path.isdir(subpath):
                desc_array += collect_test_desc_from_dir(subpath)
            elif fnmatch.fnmatch(filename, '*.json'):
                # read_desc returns a (possibly empty) list, never None;
                # the old '== None' check could never fire.
                desc = read_desc(basedir, filename)
                if not desc:
                    continue
                desc_array += desc
    desc_array.sort(key=lambda x: x['_test-name'])
    return desc_array
# 'list' command: print a two-column table of test names and descriptions.
if args.cmd == 'list':
    fmt_entry = '%-32s %s'
    desc_array = collect_test_desc_from_dir(args.dir)
    print(fmt_entry % ('Name', 'Description'))
    for desc in desc_array:
        print(fmt_entry % (desc['_test-name'], desc['description']))
def test_abort(test, message):
    """Print an ABORT banner for a test and terminate the whole runner."""
    print("\t%s: %s" % (test, message))
    print("=== Test %s ABORT ===" % (test))
    # sys.exit() raises SystemExit, so the old 'return False' after it was
    # unreachable and has been removed.
    sys.exit(1)
def test_fail(test, message):
    """Report a test failure; returns False for use as the caller's result."""
    print("\t%s: %s" % (test, message))
    print("=== Test %s FAIL ===" % (test))
    return False
def test_skip(test, message):
    """Report a skipped test; returns True so the run continues."""
    print("\t%s: %s" % (test, message))
    print("=== Test %s SKIP ===" % (test))
    return True
def test_over(test):
    """Report a failure that is explicitly tolerated ('error': 'over')."""
    print("=== Test %s ERROR OVER ===" % (test))
    return True
def test_pass(test):
    """Report a passing test."""
    print("=== Test %s PASS ===" % (test))
    return True
def test_updated(test):
    """Report that a test's reference outputs were regenerated."""
    print("=== Test %s UPDATED ===" % (test))
    return True
def run_hexdump(path):
    """Run 'hexdump -C' on path.

    Returns:
        The Popen handle (stdout pipe still open) on success, None if
        hexdump exited non-zero.
    """
    proc = subprocess.Popen([args.hexdump, "-C", path],
                            stdout = subprocess.PIPE,
                            close_fds = True)
    return proc if proc.wait() == 0 else None
def show_std(stdname, data):
    """Print captured stream data line by line between banners."""
    print("\t--- %s" % (stdname))
    for line in data.split("\n"):
        print("\t%s" % line)
    print("\t---")
def cmp_std(from_name, from_data, match_name, match_data):
    """Compare two captured streams.

    Returns:
        True when identical; otherwise prints both streams plus a unified
        diff and returns False.
    """
    if from_data == match_data:
        return True
    for name, data in ((from_name, from_data), (match_name, match_data)):
        print("\t--- %s" % (name))
        for line in data.split("\n"):
            print("\t%s" % line)
    diff = difflib.unified_diff(from_data.split("\n"), match_data.split("\n"),
                                fromfile = from_name, tofile = match_name)
    for line in diff:
        print("\t%s" % line.strip("\n"))
    print("\t---")
    return False
def show_diff(test, patha, pathb):
    """Hexdump both files, print the dumps and a unified diff between them.

    Returns:
        True if both dumps were produced, otherwise the test_fail() result
        (False).
    """
    pa = run_hexdump(patha)
    pb = run_hexdump(pathb)
    if pa == None or pb == None:
        return test_fail(test, "Can't create dumps")
    # The processes have already exited (run_hexdump waits), so reading the
    # whole pipe here cannot block.
    sa = pa.stdout.read().decode("utf-8").strip("\n")
    sb = pb.stdout.read().decode("utf-8").strip("\n")
    print("\t--- hexdump %s" % (patha))
    for i in sa.split("\n"):
        print("\t%s" % i)
    print("\t--- hexdump %s" % (pathb))
    for i in sb.split("\n"):
        print("\t%s" % i)
    pa.stdout.close()
    pb.stdout.close()
    diff = difflib.unified_diff(sa.split("\n"), sb.split("\n"),
                                fromfile = patha, tofile = pathb)
    for i in diff:
        print("\t%s" % i.strip("\n"))
    print("\t---")
    return True
def prepare_run_opts(desc):
    """Build the nasm command-line option list from a test descriptor."""
    base = desc['_base-dir'] + os.sep
    cmdline = []
    if 'format' in desc:
        cmdline += ['-f', desc['format']]
    if 'option' in desc:
        cmdline += desc['option'].split(" ")
    for tgt in desc['target']:
        # NOTE: these two checks are intentionally independent ifs -- a
        # target may carry both 'output' and 'stdout'/'stderr' keys.
        if 'output' in tgt:
            if 'option' in tgt:
                cmdline += tgt['option'].split(" ") + [base + tgt['output']]
            else:
                cmdline += ['-o', base + tgt['output']]
        if 'stdout' in tgt or 'stderr' in tgt:
            if 'option' in tgt:
                cmdline += tgt['option'].split(" ")
    if 'source' in desc:
        cmdline.append(base + desc['source'])
    return cmdline
def exec_nasm(desc):
    """Execute nasm with options built from the test descriptor.

    Returns:
        (proc, stdout, stderr) when nasm exits with the expected code;
        (None, None, None) on an unexpected exit code; plain None if the
        process could not be created (see note below).
    """
    print("\tProcessing %s" % (desc['_test-name']))
    opts = [args.nasm] + prepare_run_opts(desc)
    # Force reproducible output so byte-compares against references work.
    nasm_env = os.environ.copy()
    nasm_env['NASMENV'] = '--reproducible'
    desc_env = desc.get('environ')
    if desc_env:
        # Descriptor-supplied environment entries of the form NAME=VALUE.
        for i in desc_env:
            v = i.split('=')
            if len(v) == 2:
                nasm_env[v[0]] = v[1]
            else:
                # NOTE(review): env values must be strings; None here would
                # make Popen raise. Presumably this meant to *unset* the
                # variable -- confirm intended behavior.
                nasm_env[v[0]] = None
    print("\tExecuting %s" % (" ".join(opts)))
    pnasm = subprocess.Popen(opts,
                             stdout = subprocess.PIPE,
                             stderr = subprocess.PIPE,
                             close_fds = True,
                             env = nasm_env)
    # NOTE(review): Popen never returns None (it raises on failure), so
    # this branch is effectively dead.
    if pnasm == None:
        test_fail(desc['_test-name'], "Unable to execute test")
        return None
    # Cap reads at 4 MiB to bound memory use on runaway output.
    stderr = pnasm.stderr.read(4194304).decode("utf-8").strip("\n")
    stdout = pnasm.stdout.read(4194304).decode("utf-8").strip("\n")
    pnasm.stdout.close()
    pnasm.stderr.close()
    wait_rc = pnasm.wait();
    if desc['_wait'] != wait_rc:
        if stdout != "":
            show_std("stdout", stdout)
        if stderr != "":
            show_std("stderr", stderr)
        test_fail(desc['_test-name'],
                  "Unexpected ret code: " + str(wait_rc))
        return None, None, None
    return pnasm, stdout, stderr
def test_run(desc):
    """Run one test descriptor and compare its outputs against references.

    Returns:
        True on pass or skip, False on failure.
    """
    print("=== Running %s ===" % (desc['_test-name']))
    if 'disable' in desc:
        return test_skip(desc['_test-name'], desc["disable"])
    pnasm, stdout, stderr = exec_nasm(desc)
    if pnasm == None:
        return False
    for t in desc['target']:
        if 'output' in t:
            output = desc['_base-dir'] + os.sep + t['output']
            match = desc['_base-dir'] + os.sep + t['match']
            if desc['_wait'] == 1:
                # Assembly was expected to fail; nothing to compare.
                continue
            print("\tComparing %s %s" % (output, match))
            if filecmp.cmp(match, output) == False:
                show_diff(desc['_test-name'], match, output)
                return test_fail(desc['_test-name'], match + " and " + output + " files are different")
        elif 'stdout' in t:
            print("\tComparing stdout")
            match = desc['_base-dir'] + os.sep + t['stdout']
            match_data = read_stdfile(match)
            if match_data == None:
                # BUG FIX: this previously passed the undefined name 'test'
                # and raised NameError instead of reporting the failure.
                return test_fail(desc['_test-name'], "Can't read " + match)
            if cmp_std(match, match_data, 'stdout', stdout) == False:
                return test_fail(desc['_test-name'], "Stdout mismatch")
            else:
                stdout = ""
        elif 'stderr' in t:
            print("\tComparing stderr")
            match = desc['_base-dir'] + os.sep + t['stderr']
            match_data = read_stdfile(match)
            if match_data == None:
                # BUG FIX: same undefined-name problem as the stdout branch.
                return test_fail(desc['_test-name'], "Can't read " + match)
            if cmp_std(match, match_data, 'stderr', stderr) == False:
                return test_fail(desc['_test-name'], "Stderr mismatch")
            else:
                stderr = ""
    # Any unconsumed output at this point is unexpected.
    if stdout != "":
        show_std("stdout", stdout)
        return test_fail(desc['_test-name'], "Stdout is not empty")
    if stderr != "":
        show_std("stderr", stderr)
        return test_fail(desc['_test-name'], "Stderr is not empty")
    return test_pass(desc['_test-name'])
#
# Compile sources and generate new targets
def test_update(desc):
    """Re-run nasm and overwrite the reference outputs for a descriptor.

    Returns:
        True on success or skip, False if nasm could not be executed.
    """
    print("=== Updating %s ===" % (desc['_test-name']))
    if 'update' in desc and desc['update'] == 'false':
        return test_skip(desc['_test-name'], "No output provided")
    if 'disable' in desc:
        return test_skip(desc['_test-name'], desc["disable"])
    pnasm, stdout, stderr = exec_nasm(desc)
    if pnasm == None:
        return False
    for t in desc['target']:
        if 'output' in t:
            output = desc['_base-dir'] + os.sep + t['output']
            match = desc['_base-dir'] + os.sep + t['match']
            print("\tMoving %s to %s" % (output, match))
            os.rename(output, match)
        if 'stdout' in t:
            match = desc['_base-dir'] + os.sep + t['stdout']
            print("\tMoving %s to %s" % ('stdout', match))
            # 'with' closes the file; the explicit close() was redundant.
            with open(match, "wb") as f:
                f.write(stdout.encode("utf-8"))
        if 'stderr' in t:
            match = desc['_base-dir'] + os.sep + t['stderr']
            print("\tMoving %s to %s" % ('stderr', match))
            with open(match, "wb") as f:
                f.write(stderr.encode("utf-8"))
    return test_updated(desc['_test-name'])
#
# Create a new empty test case
if args.cmd == 'new':
    #
    # If no source provided create one
    # from (ID which is required)
    if not args.source:
        args.source = args.id + ".asm"
    #
    # Emulate "touch" on source file
    path_asm = args.dir + os.sep + args.source
    print("\tCreating %s" % (path_asm))
    open(path_asm, 'a').close()
    #
    # Fill the test descriptor
    #
    # FIXME: We should probably use Jinja
    path_json = args.dir + os.sep + args.id + ".json"
    print("\tFilling descriptor %s" % (path_json))
    with open(path_json, 'wb') as f:
        # Hand-built JSON: a one-element array holding the descriptor.
        f.write("[\n\t{\n".encode("utf-8"))
        acc = []
        if args.description:
            acc.append("\t\t\"description\": \"{}\"".format(args.description))
        acc.append("\t\t\"id\": \"{}\"".format(args.id))
        if args.format:
            acc.append("\t\t\"format\": \"{}\"".format(args.format))
        acc.append("\t\t\"source\": \"{}\"".format(args.source))
        if args.option:
            acc.append("\t\t\"option\": \"{}\"".format(args.option))
        if args.ref:
            acc.append("\t\t\"ref\": \"{}\"".format(args.ref))
        if args.error == 'y':
            acc.append("\t\t\"error\": \"true\"")
        f.write(",\n".join(acc).encode("utf-8"))
        # Optional "target" array: output / stdout / stderr entries.
        # A bare 'y' means "derive the file name from the test id".
        if args.output or args.stdout or args.stderr:
            acc = []
            if args.output:
                if args.output == 'y':
                    if args.format in map_fmt_ext:
                        args.output = args.id + map_fmt_ext[args.format]
                acc.append("\t\t\t{{ \"output\": \"{}\" }}".format(args.output))
            if args.stdout:
                if args.stdout == 'y':
                    args.stdout = args.id + '.stdout'
                acc.append("\t\t\t{{ \"stdout\": \"{}\" }}".format(args.stdout))
            if args.stderr:
                if args.stderr == 'y':
                    args.stderr = args.id + '.stderr'
                acc.append("\t\t\t{{ \"stderr\": \"{}\" }}".format(args.stderr))
            f.write(",\n".encode("utf-8"))
            f.write("\t\t\"target\": [\n".encode("utf-8"))
            f.write(",\n".join(acc).encode("utf-8"))
            f.write("\n\t\t]".encode("utf-8"))
        f.write("\n\t}\n]\n".encode("utf-8"))
        f.close()
# 'run' command: execute all tests (or just the selected one) and abort on
# the first failure unless the descriptor marks the error as tolerated.
if args.cmd == 'run':
    desc_array = []
    if args.test == None:
        desc_array = collect_test_desc_from_dir(args.dir)
    else:
        desc_array = collect_test_desc_from_file(args.test)
    if len(desc_array) == 0:
        test_abort(args.test, "Can't obtain test descriptors")
    for desc in desc_array:
        if test_run(desc) == False:
            if 'error' in desc and desc['error'] == 'over':
                test_over(desc['_test-name'])
            else:
                test_abort(desc['_test-name'], "Error detected")
# 'update' command: regenerate reference outputs with the current compiler.
if args.cmd == 'update':
    desc_array = []
    if args.test is None:
        desc_array = collect_test_desc_from_dir(args.dir)
    else:
        desc_array = collect_test_desc_from_file(args.test)
    if len(desc_array) == 0:
        # Message grammar fixed: was "Can't obtain a test descriptors".
        test_abort(args.test, "Can't obtain test descriptors")
    for desc in desc_array:
        if test_update(desc) == False:
            if 'error' in desc and desc['error'] == 'over':
                test_over(desc['_test-name'])
            else:
                test_abort(desc['_test-name'], "Error detected")
| |
from __future__ import unicode_literals
import json
import unittest
from mopidy.models import (
Album, Artist, ModelJSONEncoder, Playlist, Ref, SearchResult, TlTrack,
Track, model_json_decoder)
class GenericCopyTest(unittest.TestCase):
    """Tests for the generic model copy() behavior."""

    def compare(self, orig, other):
        # Copies must compare equal but be distinct objects.
        self.assertEqual(orig, other)
        self.assertNotEqual(id(orig), id(other))

    def test_copying_track(self):
        track = Track()
        self.compare(track, track.copy())

    def test_copying_artist(self):
        artist = Artist()
        self.compare(artist, artist.copy())

    def test_copying_album(self):
        album = Album()
        self.compare(album, album.copy())

    def test_copying_playlist(self):
        playlist = Playlist()
        self.compare(playlist, playlist.copy())

    def test_copying_track_with_basic_values(self):
        track = Track(name='foo', uri='bar')
        copy = track.copy(name='baz')
        self.assertEqual('baz', copy.name)
        self.assertEqual('bar', copy.uri)

    def test_copying_track_with_missing_values(self):
        track = Track(uri='bar')
        copy = track.copy(name='baz')
        self.assertEqual('baz', copy.name)
        self.assertEqual('bar', copy.uri)

    def test_copying_track_with_private_internal_value(self):
        artist1 = Artist(name='foo')
        artist2 = Artist(name='bar')
        track = Track(artists=[artist1])
        copy = track.copy(artists=[artist2])
        self.assertIn(artist2, copy.artists)

    def test_copying_track_with_invalid_key(self):
        # Pass the callable directly to assertRaises instead of assigning
        # a lambda to a name (PEP 8 / E731).
        self.assertRaises(TypeError, Track().copy, invalid_key=True)

    def test_copying_track_to_remove(self):
        # Copying with name=None must reset the field to its default.
        track = Track(name='foo').copy(name=None)
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(track.__dict__, Track().__dict__)
class RefTest(unittest.TestCase):
    """Tests for the Ref model: fields, constants and constructors."""

    def test_uri(self):
        uri = 'an_uri'
        ref = Ref(uri=uri)
        self.assertEqual(ref.uri, uri)
        # Model fields are immutable once set.
        self.assertRaises(AttributeError, setattr, ref, 'uri', None)

    def test_name(self):
        name = 'a name'
        ref = Ref(name=name)
        self.assertEqual(ref.name, name)
        self.assertRaises(AttributeError, setattr, ref, 'name', None)

    def test_invalid_kwarg(self):
        # Pass the callable directly to assertRaises instead of assigning
        # a lambda to a name (PEP 8 / E731).
        self.assertRaises(TypeError, SearchResult, foo='baz')

    def test_repr_without_results(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "Ref(name=u'foo', type=u'artist', uri=u'uri')",
            repr(Ref(uri='uri', name='foo', type='artist')))

    def test_serialize_without_results(self):
        self.assertDictEqual(
            {'__model__': 'Ref', 'uri': 'uri'},
            Ref(uri='uri').serialize())

    def test_to_json_and_back(self):
        ref1 = Ref(uri='uri')
        serialized = json.dumps(ref1, cls=ModelJSONEncoder)
        ref2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(ref1, ref2)

    def test_type_constants(self):
        self.assertEqual(Ref.ALBUM, 'album')
        self.assertEqual(Ref.ARTIST, 'artist')
        self.assertEqual(Ref.DIRECTORY, 'directory')
        self.assertEqual(Ref.PLAYLIST, 'playlist')
        self.assertEqual(Ref.TRACK, 'track')

    def test_album_constructor(self):
        ref = Ref.album(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.ALBUM)

    def test_artist_constructor(self):
        ref = Ref.artist(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.ARTIST)

    def test_directory_constructor(self):
        ref = Ref.directory(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.DIRECTORY)

    def test_playlist_constructor(self):
        ref = Ref.playlist(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.PLAYLIST)

    def test_track_constructor(self):
        ref = Ref.track(uri='foo', name='bar')
        self.assertEqual(ref.uri, 'foo')
        self.assertEqual(ref.name, 'bar')
        self.assertEqual(ref.type, Ref.TRACK)
class ArtistTest(unittest.TestCase):
    """Tests for the Artist model: immutability, serialization, JSON
    round-tripping, and equality/hash semantics."""
    def test_uri(self):
        uri = 'an_uri'
        artist = Artist(uri=uri)
        self.assertEqual(artist.uri, uri)
        self.assertRaises(AttributeError, setattr, artist, 'uri', None)
    def test_name(self):
        name = 'a name'
        artist = Artist(name=name)
        self.assertEqual(artist.name, name)
        self.assertRaises(AttributeError, setattr, artist, 'name', None)
    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        artist = Artist(musicbrainz_id=mb_id)
        self.assertEqual(artist.musicbrainz_id, mb_id)
        self.assertRaises(
            AttributeError, setattr, artist, 'musicbrainz_id', None)
    def test_invalid_kwarg(self):
        # assertRaises takes the callable and kwargs directly; the lambda
        # assignments (flake8 E731) were unnecessary.
        self.assertRaises(TypeError, Artist, foo='baz')
    def test_invalid_kwarg_with_name_matching_method(self):
        self.assertRaises(TypeError, Artist, copy='baz')
        self.assertRaises(TypeError, Artist, serialize='baz')
    def test_repr(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "Artist(name=u'name', uri=u'uri')",
            repr(Artist(uri='uri', name='name')))
    def test_serialize(self):
        self.assertDictEqual(
            {'__model__': 'Artist', 'uri': 'uri', 'name': 'name'},
            Artist(uri='uri', name='name').serialize())
    def test_serialize_falsy_values(self):
        self.assertDictEqual(
            {'__model__': 'Artist', 'uri': '', 'name': None},
            Artist(uri='', name=None).serialize())
    def test_to_json_and_back(self):
        artist1 = Artist(uri='uri', name='name')
        serialized = json.dumps(artist1, cls=ModelJSONEncoder)
        artist2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(artist1, artist2)
    def test_to_json_and_back_with_unknown_field(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['foo'] = 'foo'
        serialized = json.dumps(artist)
        self.assertRaises(TypeError, json.loads, serialized,
                          object_hook=model_json_decoder)
    def test_to_json_and_back_with_field_matching_method(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['copy'] = 'foo'
        serialized = json.dumps(artist)
        self.assertRaises(TypeError, json.loads, serialized,
                          object_hook=model_json_decoder)
    def test_to_json_and_back_with_field_matching_internal_field(self):
        artist = Artist(uri='uri', name='name').serialize()
        artist['__mro__'] = 'foo'
        serialized = json.dumps(artist)
        self.assertRaises(TypeError, json.loads, serialized,
                          object_hook=model_json_decoder)
    def test_eq_name(self):
        artist1 = Artist(name='name')
        artist2 = Artist(name='name')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))
    def test_eq_uri(self):
        artist1 = Artist(uri='uri')
        artist2 = Artist(uri='uri')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))
    def test_eq_musibrainz_id(self):
        artist1 = Artist(musicbrainz_id='id')
        artist2 = Artist(musicbrainz_id='id')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))
    def test_eq(self):
        artist1 = Artist(uri='uri', name='name', musicbrainz_id='id')
        artist2 = Artist(uri='uri', name='name', musicbrainz_id='id')
        self.assertEqual(artist1, artist2)
        self.assertEqual(hash(artist1), hash(artist2))
    def test_eq_none(self):
        self.assertNotEqual(Artist(), None)
    def test_eq_other(self):
        self.assertNotEqual(Artist(), 'other')
    def test_ne_name(self):
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))
    def test_ne_uri(self):
        artist1 = Artist(uri='uri1')
        artist2 = Artist(uri='uri2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))
    def test_ne_musicbrainz_id(self):
        artist1 = Artist(musicbrainz_id='id1')
        artist2 = Artist(musicbrainz_id='id2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))
    def test_ne(self):
        artist1 = Artist(uri='uri1', name='name1', musicbrainz_id='id1')
        artist2 = Artist(uri='uri2', name='name2', musicbrainz_id='id2')
        self.assertNotEqual(artist1, artist2)
        self.assertNotEqual(hash(artist1), hash(artist2))
class AlbumTest(unittest.TestCase):
    """Tests for the Album model: immutability, serialization, JSON
    round-tripping, and equality/hash semantics."""
    def test_uri(self):
        uri = 'an_uri'
        album = Album(uri=uri)
        self.assertEqual(album.uri, uri)
        self.assertRaises(AttributeError, setattr, album, 'uri', None)
    def test_name(self):
        name = 'a name'
        album = Album(name=name)
        self.assertEqual(album.name, name)
        self.assertRaises(AttributeError, setattr, album, 'name', None)
    def test_artists(self):
        artist = Artist()
        album = Album(artists=[artist])
        self.assertIn(artist, album.artists)
        self.assertRaises(AttributeError, setattr, album, 'artists', None)
    def test_artists_none(self):
        self.assertEqual(set(), Album(artists=None).artists)
    def test_num_tracks(self):
        num_tracks = 11
        album = Album(num_tracks=num_tracks)
        self.assertEqual(album.num_tracks, num_tracks)
        self.assertRaises(AttributeError, setattr, album, 'num_tracks', None)
    def test_num_discs(self):
        num_discs = 2
        album = Album(num_discs=num_discs)
        self.assertEqual(album.num_discs, num_discs)
        self.assertRaises(AttributeError, setattr, album, 'num_discs', None)
    def test_date(self):
        date = '1977-01-01'
        album = Album(date=date)
        self.assertEqual(album.date, date)
        self.assertRaises(AttributeError, setattr, album, 'date', None)
    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        album = Album(musicbrainz_id=mb_id)
        self.assertEqual(album.musicbrainz_id, mb_id)
        self.assertRaises(
            AttributeError, setattr, album, 'musicbrainz_id', None)
    def test_images(self):
        image = 'data:foobar'
        album = Album(images=[image])
        self.assertIn(image, album.images)
        self.assertRaises(AttributeError, setattr, album, 'images', None)
    def test_images_none(self):
        self.assertEqual(set(), Album(images=None).images)
    def test_invalid_kwarg(self):
        # Call assertRaises with the callable and kwargs directly instead of
        # assigning a lambda (flake8 E731).
        self.assertRaises(TypeError, Album, foo='baz')
    def test_repr_without_artists(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "Album(artists=[], images=[], name=u'name', uri=u'uri')",
            repr(Album(uri='uri', name='name')))
    def test_repr_with_artists(self):
        self.assertEqual(
            "Album(artists=[Artist(name=u'foo')], images=[], name=u'name', "
            "uri=u'uri')",
            repr(Album(uri='uri', name='name', artists=[Artist(name='foo')])))
    def test_serialize_without_artists(self):
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name'},
            Album(uri='uri', name='name').serialize())
    def test_serialize_with_artists(self):
        artist = Artist(name='foo')
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name',
             'artists': [artist.serialize()]},
            Album(uri='uri', name='name', artists=[artist]).serialize())
    def test_serialize_with_images(self):
        image = 'data:foobar'
        self.assertDictEqual(
            {'__model__': 'Album', 'uri': 'uri', 'name': 'name',
             'images': [image]},
            Album(uri='uri', name='name', images=[image]).serialize())
    def test_to_json_and_back(self):
        album1 = Album(uri='uri', name='name', artists=[Artist(name='foo')])
        serialized = json.dumps(album1, cls=ModelJSONEncoder)
        album2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(album1, album2)
    def test_eq_name(self):
        album1 = Album(name='name')
        album2 = Album(name='name')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_uri(self):
        album1 = Album(uri='uri')
        album2 = Album(uri='uri')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_artists(self):
        artists = [Artist()]
        album1 = Album(artists=artists)
        album2 = Album(artists=artists)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_artists_order(self):
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        album1 = Album(artists=[artist1, artist2])
        album2 = Album(artists=[artist2, artist1])
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_num_tracks(self):
        album1 = Album(num_tracks=2)
        album2 = Album(num_tracks=2)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_date(self):
        date = '1977-01-01'
        album1 = Album(date=date)
        album2 = Album(date=date)
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_musibrainz_id(self):
        album1 = Album(musicbrainz_id='id')
        album2 = Album(musicbrainz_id='id')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq(self):
        artists = [Artist()]
        album1 = Album(
            name='name', uri='uri', artists=artists, num_tracks=2,
            musicbrainz_id='id')
        album2 = Album(
            name='name', uri='uri', artists=artists, num_tracks=2,
            musicbrainz_id='id')
        self.assertEqual(album1, album2)
        self.assertEqual(hash(album1), hash(album2))
    def test_eq_none(self):
        self.assertNotEqual(Album(), None)
    def test_eq_other(self):
        self.assertNotEqual(Album(), 'other')
    def test_ne_name(self):
        album1 = Album(name='name1')
        album2 = Album(name='name2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne_uri(self):
        album1 = Album(uri='uri1')
        album2 = Album(uri='uri2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne_artists(self):
        album1 = Album(artists=[Artist(name='name1')])
        album2 = Album(artists=[Artist(name='name2')])
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne_num_tracks(self):
        album1 = Album(num_tracks=1)
        album2 = Album(num_tracks=2)
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne_date(self):
        album1 = Album(date='1977-01-01')
        album2 = Album(date='1977-01-02')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne_musicbrainz_id(self):
        album1 = Album(musicbrainz_id='id1')
        album2 = Album(musicbrainz_id='id2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
    def test_ne(self):
        album1 = Album(
            name='name1', uri='uri1', artists=[Artist(name='name1')],
            num_tracks=1, musicbrainz_id='id1')
        album2 = Album(
            name='name2', uri='uri2', artists=[Artist(name='name2')],
            num_tracks=2, musicbrainz_id='id2')
        self.assertNotEqual(album1, album2)
        self.assertNotEqual(hash(album1), hash(album2))
class TrackTest(unittest.TestCase):
    """Tests for the Track model: immutability, serialization, JSON
    round-tripping, and equality/hash semantics."""
    def test_uri(self):
        uri = 'an_uri'
        track = Track(uri=uri)
        self.assertEqual(track.uri, uri)
        self.assertRaises(AttributeError, setattr, track, 'uri', None)
    def test_name(self):
        name = 'a name'
        track = Track(name=name)
        self.assertEqual(track.name, name)
        self.assertRaises(AttributeError, setattr, track, 'name', None)
    def test_artists(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(artists=artists)
        self.assertEqual(set(track.artists), set(artists))
        self.assertRaises(AttributeError, setattr, track, 'artists', None)
    def test_artists_none(self):
        self.assertEqual(set(), Track(artists=None).artists)
    def test_composers(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(composers=artists)
        self.assertEqual(set(track.composers), set(artists))
        self.assertRaises(AttributeError, setattr, track, 'composers', None)
    def test_composers_none(self):
        self.assertEqual(set(), Track(composers=None).composers)
    def test_performers(self):
        artists = [Artist(name='name1'), Artist(name='name2')]
        track = Track(performers=artists)
        self.assertEqual(set(track.performers), set(artists))
        self.assertRaises(AttributeError, setattr, track, 'performers', None)
    def test_performers_none(self):
        self.assertEqual(set(), Track(performers=None).performers)
    def test_album(self):
        album = Album()
        track = Track(album=album)
        self.assertEqual(track.album, album)
        self.assertRaises(AttributeError, setattr, track, 'album', None)
    def test_track_no(self):
        track_no = 7
        track = Track(track_no=track_no)
        self.assertEqual(track.track_no, track_no)
        self.assertRaises(AttributeError, setattr, track, 'track_no', None)
    def test_disc_no(self):
        disc_no = 2
        track = Track(disc_no=disc_no)
        self.assertEqual(track.disc_no, disc_no)
        self.assertRaises(AttributeError, setattr, track, 'disc_no', None)
    def test_date(self):
        date = '1977-01-01'
        track = Track(date=date)
        self.assertEqual(track.date, date)
        self.assertRaises(AttributeError, setattr, track, 'date', None)
    def test_length(self):
        length = 137000
        track = Track(length=length)
        self.assertEqual(track.length, length)
        self.assertRaises(AttributeError, setattr, track, 'length', None)
    def test_bitrate(self):
        bitrate = 160
        track = Track(bitrate=bitrate)
        self.assertEqual(track.bitrate, bitrate)
        self.assertRaises(AttributeError, setattr, track, 'bitrate', None)
    def test_musicbrainz_id(self):
        mb_id = 'mb-id'
        track = Track(musicbrainz_id=mb_id)
        self.assertEqual(track.musicbrainz_id, mb_id)
        self.assertRaises(
            AttributeError, setattr, track, 'musicbrainz_id', None)
    def test_invalid_kwarg(self):
        # Call assertRaises with the callable and kwargs directly instead of
        # assigning a lambda (flake8 E731).
        self.assertRaises(TypeError, Track, foo='baz')
    def test_repr_without_artists(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "Track(artists=[], composers=[], name=u'name', "
            "performers=[], uri=u'uri')",
            repr(Track(uri='uri', name='name')))
    def test_repr_with_artists(self):
        self.assertEqual(
            "Track(artists=[Artist(name=u'foo')], composers=[], name=u'name', "
            "performers=[], uri=u'uri')",
            repr(Track(uri='uri', name='name', artists=[Artist(name='foo')])))
    def test_serialize_without_artists(self):
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name'},
            Track(uri='uri', name='name').serialize())
    def test_serialize_with_artists(self):
        artist = Artist(name='foo')
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name',
             'artists': [artist.serialize()]},
            Track(uri='uri', name='name', artists=[artist]).serialize())
    def test_serialize_with_album(self):
        album = Album(name='foo')
        self.assertDictEqual(
            {'__model__': 'Track', 'uri': 'uri', 'name': 'name',
             'album': album.serialize()},
            Track(uri='uri', name='name', album=album).serialize())
    def test_to_json_and_back(self):
        track1 = Track(
            uri='uri', name='name', album=Album(name='foo'),
            artists=[Artist(name='foo')])
        serialized = json.dumps(track1, cls=ModelJSONEncoder)
        track2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(track1, track2)
    def test_eq_uri(self):
        track1 = Track(uri='uri1')
        track2 = Track(uri='uri1')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_name(self):
        track1 = Track(name='name1')
        track2 = Track(name='name1')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_artists(self):
        artists = [Artist()]
        track1 = Track(artists=artists)
        track2 = Track(artists=artists)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_artists_order(self):
        artist1 = Artist(name='name1')
        artist2 = Artist(name='name2')
        track1 = Track(artists=[artist1, artist2])
        track2 = Track(artists=[artist2, artist1])
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_album(self):
        album = Album()
        track1 = Track(album=album)
        track2 = Track(album=album)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_track_no(self):
        track1 = Track(track_no=1)
        track2 = Track(track_no=1)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_date(self):
        date = '1977-01-01'
        track1 = Track(date=date)
        track2 = Track(date=date)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_length(self):
        track1 = Track(length=100)
        track2 = Track(length=100)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_bitrate(self):
        track1 = Track(bitrate=100)
        track2 = Track(bitrate=100)
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_musibrainz_id(self):
        track1 = Track(musicbrainz_id='id')
        track2 = Track(musicbrainz_id='id')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq(self):
        date = '1977-01-01'
        artists = [Artist()]
        album = Album()
        track1 = Track(
            uri='uri', name='name', artists=artists, album=album, track_no=1,
            date=date, length=100, bitrate=100, musicbrainz_id='id')
        track2 = Track(
            uri='uri', name='name', artists=artists, album=album, track_no=1,
            date=date, length=100, bitrate=100, musicbrainz_id='id')
        self.assertEqual(track1, track2)
        self.assertEqual(hash(track1), hash(track2))
    def test_eq_none(self):
        self.assertNotEqual(Track(), None)
    def test_eq_other(self):
        self.assertNotEqual(Track(), 'other')
    def test_ne_uri(self):
        track1 = Track(uri='uri1')
        track2 = Track(uri='uri2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_name(self):
        track1 = Track(name='name1')
        track2 = Track(name='name2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_artists(self):
        track1 = Track(artists=[Artist(name='name1')])
        track2 = Track(artists=[Artist(name='name2')])
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_album(self):
        track1 = Track(album=Album(name='name1'))
        track2 = Track(album=Album(name='name2'))
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_track_no(self):
        track1 = Track(track_no=1)
        track2 = Track(track_no=2)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_date(self):
        track1 = Track(date='1977-01-01')
        track2 = Track(date='1977-01-02')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_length(self):
        track1 = Track(length=100)
        track2 = Track(length=200)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_bitrate(self):
        track1 = Track(bitrate=100)
        track2 = Track(bitrate=200)
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne_musicbrainz_id(self):
        track1 = Track(musicbrainz_id='id1')
        track2 = Track(musicbrainz_id='id2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
    def test_ne(self):
        track1 = Track(
            uri='uri1', name='name1', artists=[Artist(name='name1')],
            album=Album(name='name1'), track_no=1, date='1977-01-01',
            length=100, bitrate=100, musicbrainz_id='id1')
        track2 = Track(
            uri='uri2', name='name2', artists=[Artist(name='name2')],
            album=Album(name='name2'), track_no=2, date='1977-01-02',
            length=200, bitrate=200, musicbrainz_id='id2')
        self.assertNotEqual(track1, track2)
        self.assertNotEqual(hash(track1), hash(track2))
class TlTrackTest(unittest.TestCase):
    """Tests for the TlTrack model: positional construction, tuple-style
    iteration, serialization and equality."""
    def test_tlid(self):
        tlid = 123
        tl_track = TlTrack(tlid=tlid)
        self.assertEqual(tl_track.tlid, tlid)
        self.assertRaises(AttributeError, setattr, tl_track, 'tlid', None)
    def test_track(self):
        track = Track()
        tl_track = TlTrack(track=track)
        self.assertEqual(tl_track.track, track)
        self.assertRaises(AttributeError, setattr, tl_track, 'track', None)
    def test_invalid_kwarg(self):
        # Call assertRaises with the callable and kwargs directly instead of
        # assigning a lambda (flake8 E731).
        self.assertRaises(TypeError, TlTrack, foo='baz')
    def test_positional_args(self):
        tlid = 123
        track = Track()
        tl_track = TlTrack(tlid, track)
        self.assertEqual(tl_track.tlid, tlid)
        self.assertEqual(tl_track.track, track)
    def test_iteration(self):
        tlid = 123
        track = Track()
        tl_track = TlTrack(tlid, track)
        (tlid2, track2) = tl_track
        self.assertEqual(tlid2, tlid)
        self.assertEqual(track2, track)
    def test_repr(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "TlTrack(tlid=123, track=Track(artists=[], composers=[], "
            "performers=[], uri=u'uri'))",
            repr(TlTrack(tlid=123, track=Track(uri='uri'))))
    def test_serialize(self):
        track = Track(uri='uri', name='name')
        self.assertDictEqual(
            {'__model__': 'TlTrack', 'tlid': 123, 'track': track.serialize()},
            TlTrack(tlid=123, track=track).serialize())
    def test_to_json_and_back(self):
        tl_track1 = TlTrack(tlid=123, track=Track(uri='uri', name='name'))
        serialized = json.dumps(tl_track1, cls=ModelJSONEncoder)
        tl_track2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(tl_track1, tl_track2)
    def test_eq(self):
        tlid = 123
        track = Track()
        tl_track1 = TlTrack(tlid=tlid, track=track)
        tl_track2 = TlTrack(tlid=tlid, track=track)
        self.assertEqual(tl_track1, tl_track2)
        self.assertEqual(hash(tl_track1), hash(tl_track2))
    def test_eq_none(self):
        self.assertNotEqual(TlTrack(), None)
    def test_eq_other(self):
        self.assertNotEqual(TlTrack(), 'other')
    def test_ne_tlid(self):
        tl_track1 = TlTrack(tlid=123)
        tl_track2 = TlTrack(tlid=321)
        self.assertNotEqual(tl_track1, tl_track2)
        self.assertNotEqual(hash(tl_track1), hash(tl_track2))
    def test_ne_track(self):
        tl_track1 = TlTrack(track=Track(uri='a'))
        tl_track2 = TlTrack(track=Track(uri='b'))
        self.assertNotEqual(tl_track1, tl_track2)
        self.assertNotEqual(hash(tl_track1), hash(tl_track2))
class PlaylistTest(unittest.TestCase):
    """Tests for the Playlist model: immutability, copy() variants,
    serialization and equality."""
    def test_uri(self):
        uri = 'an_uri'
        playlist = Playlist(uri=uri)
        self.assertEqual(playlist.uri, uri)
        self.assertRaises(AttributeError, setattr, playlist, 'uri', None)
    def test_name(self):
        name = 'a name'
        playlist = Playlist(name=name)
        self.assertEqual(playlist.name, name)
        self.assertRaises(AttributeError, setattr, playlist, 'name', None)
    def test_tracks(self):
        tracks = [Track(), Track(), Track()]
        playlist = Playlist(tracks=tracks)
        self.assertEqual(list(playlist.tracks), tracks)
        self.assertRaises(AttributeError, setattr, playlist, 'tracks', None)
    def test_length(self):
        tracks = [Track(), Track(), Track()]
        playlist = Playlist(tracks=tracks)
        self.assertEqual(playlist.length, 3)
    def test_last_modified(self):
        last_modified = 1390942873000
        playlist = Playlist(last_modified=last_modified)
        self.assertEqual(playlist.last_modified, last_modified)
        self.assertRaises(
            AttributeError, setattr, playlist, 'last_modified', None)
    def test_with_new_uri(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(uri='another uri')
        self.assertEqual(new_playlist.uri, 'another uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_name(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(name='another name')
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'another name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_tracks(self):
        tracks = [Track()]
        last_modified = 1390942873000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_tracks = [Track(), Track()]
        new_playlist = playlist.copy(tracks=new_tracks)
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), new_tracks)
        self.assertEqual(new_playlist.last_modified, last_modified)
    def test_with_new_last_modified(self):
        tracks = [Track()]
        last_modified = 1390942873000
        new_last_modified = last_modified + 1000
        playlist = Playlist(
            uri='an uri', name='a name', tracks=tracks,
            last_modified=last_modified)
        new_playlist = playlist.copy(last_modified=new_last_modified)
        self.assertEqual(new_playlist.uri, 'an uri')
        self.assertEqual(new_playlist.name, 'a name')
        self.assertEqual(list(new_playlist.tracks), tracks)
        self.assertEqual(new_playlist.last_modified, new_last_modified)
    def test_invalid_kwarg(self):
        # Call assertRaises with the callable and kwargs directly instead of
        # assigning a lambda (flake8 E731).
        self.assertRaises(TypeError, Playlist, foo='baz')
    def test_repr_without_tracks(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "Playlist(name=u'name', tracks=[], uri=u'uri')",
            repr(Playlist(uri='uri', name='name')))
    def test_repr_with_tracks(self):
        self.assertEqual(
            "Playlist(name=u'name', tracks=[Track(artists=[], composers=[], "
            "name=u'foo', performers=[])], uri=u'uri')",
            repr(Playlist(uri='uri', name='name', tracks=[Track(name='foo')])))
    def test_serialize_without_tracks(self):
        self.assertDictEqual(
            {'__model__': 'Playlist', 'uri': 'uri', 'name': 'name'},
            Playlist(uri='uri', name='name').serialize())
    def test_serialize_with_tracks(self):
        track = Track(name='foo')
        self.assertDictEqual(
            {'__model__': 'Playlist', 'uri': 'uri', 'name': 'name',
             'tracks': [track.serialize()]},
            Playlist(uri='uri', name='name', tracks=[track]).serialize())
    def test_to_json_and_back(self):
        playlist1 = Playlist(uri='uri', name='name')
        serialized = json.dumps(playlist1, cls=ModelJSONEncoder)
        playlist2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(playlist1, playlist2)
    def test_eq_name(self):
        playlist1 = Playlist(name='name')
        playlist2 = Playlist(name='name')
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_uri(self):
        playlist1 = Playlist(uri='uri')
        playlist2 = Playlist(uri='uri')
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_tracks(self):
        tracks = [Track()]
        playlist1 = Playlist(tracks=tracks)
        playlist2 = Playlist(tracks=tracks)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_last_modified(self):
        playlist1 = Playlist(last_modified=1)
        playlist2 = Playlist(last_modified=1)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq(self):
        tracks = [Track()]
        playlist1 = Playlist(
            uri='uri', name='name', tracks=tracks, last_modified=1)
        playlist2 = Playlist(
            uri='uri', name='name', tracks=tracks, last_modified=1)
        self.assertEqual(playlist1, playlist2)
        self.assertEqual(hash(playlist1), hash(playlist2))
    def test_eq_none(self):
        self.assertNotEqual(Playlist(), None)
    def test_eq_other(self):
        self.assertNotEqual(Playlist(), 'other')
    def test_ne_name(self):
        playlist1 = Playlist(name='name1')
        playlist2 = Playlist(name='name2')
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_uri(self):
        playlist1 = Playlist(uri='uri1')
        playlist2 = Playlist(uri='uri2')
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_tracks(self):
        playlist1 = Playlist(tracks=[Track(uri='uri1')])
        playlist2 = Playlist(tracks=[Track(uri='uri2')])
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne_last_modified(self):
        playlist1 = Playlist(last_modified=1)
        playlist2 = Playlist(last_modified=2)
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
    def test_ne(self):
        playlist1 = Playlist(
            uri='uri1', name='name1', tracks=[Track(uri='uri1')],
            last_modified=1)
        playlist2 = Playlist(
            uri='uri2', name='name2', tracks=[Track(uri='uri2')],
            last_modified=2)
        self.assertNotEqual(playlist1, playlist2)
        self.assertNotEqual(hash(playlist1), hash(playlist2))
class SearchResultTest(unittest.TestCase):
    """Tests for the SearchResult model: immutability, serialization and
    JSON round-tripping."""
    def test_uri(self):
        uri = 'an_uri'
        result = SearchResult(uri=uri)
        self.assertEqual(result.uri, uri)
        self.assertRaises(AttributeError, setattr, result, 'uri', None)
    def test_tracks(self):
        tracks = [Track(), Track(), Track()]
        result = SearchResult(tracks=tracks)
        self.assertEqual(list(result.tracks), tracks)
        self.assertRaises(AttributeError, setattr, result, 'tracks', None)
    def test_artists(self):
        artists = [Artist(), Artist(), Artist()]
        result = SearchResult(artists=artists)
        self.assertEqual(list(result.artists), artists)
        self.assertRaises(AttributeError, setattr, result, 'artists', None)
    def test_albums(self):
        albums = [Album(), Album(), Album()]
        result = SearchResult(albums=albums)
        self.assertEqual(list(result.albums), albums)
        self.assertRaises(AttributeError, setattr, result, 'albums', None)
    def test_invalid_kwarg(self):
        # Call assertRaises with the callable and kwargs directly instead of
        # assigning a lambda (flake8 E731).
        self.assertRaises(TypeError, SearchResult, foo='baz')
    def test_repr_without_results(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            "SearchResult(albums=[], artists=[], tracks=[], uri=u'uri')",
            repr(SearchResult(uri='uri')))
    def test_serialize_without_results(self):
        self.assertDictEqual(
            {'__model__': 'SearchResult', 'uri': 'uri'},
            SearchResult(uri='uri').serialize())
    def test_to_json_and_back(self):
        result1 = SearchResult(uri='uri')
        serialized = json.dumps(result1, cls=ModelJSONEncoder)
        result2 = json.loads(serialized, object_hook=model_json_decoder)
        self.assertEqual(result1, result2)
| |
# -*- coding: utf-8 -*-
import json
import mimetypes
import os
from datetime import datetime
from zipfile import ZipFile
from django import forms
from django.conf import settings
from django.core.validators import URLValidator
from django.forms import widgets
from django.forms.extras.widgets import SelectDateWidget
from django.forms.models import modelformset_factory
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real as translation
import commonware
import happyforms
import waffle
from jinja2 import escape as jinja2_escape
from jinja2.filters import do_dictsort
from mpconstants import regions as mpconstants_regions
from quieter_formset.formset import BaseModelFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import lib.iarc
import mkt
from lib.video import tasks as vtasks
from mkt import get_user
from mkt.access import acl
from mkt.api.models import Access
from mkt.constants import (CATEGORY_CHOICES, MAX_PACKAGED_APP_SIZE,
ratingsbodies)
from mkt.developers.utils import prioritize_app
from mkt.files.models import FileUpload
from mkt.files.utils import WebAppParser
from mkt.regions import REGIONS_CHOICES_SORTED_BY_NAME
from mkt.regions.utils import parse_region
from mkt.reviewers.models import RereviewQueue
from mkt.site.fields import SeparatedValuesField
from mkt.site.forms import AddonChoiceField
from mkt.site.utils import remove_icons, slug_validator, slugify
from mkt.tags.models import Tag
from mkt.tags.utils import can_edit_restricted_tags, clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.models import Translation
from mkt.translations.widgets import TranslationTextarea, TransTextarea
from mkt.versions.models import Version
from mkt.webapps.models import (AddonUser, BlockedSlug, IARCInfo, Preview,
Webapp)
from mkt.webapps.tasks import (index_webapps, set_storefront_data,
update_manifests)
from . import tasks
# Module-level logger for the developers app.
log = commonware.log.getLogger('mkt.developers')
def region_error(region):
    """Build a ValidationError saying the given region is not selectable.

    `region` may be a region id or slug; it is resolved via parse_region()
    so the message shows the human-readable region name.
    """
    region_name = unicode(parse_region(region).name)
    message = _('You cannot select {region}.').format(region=region_name)
    return forms.ValidationError(message)
def toggle_app_for_special_regions(request, app, enabled_regions=None):
    """Toggle for special regions (e.g., China).

    For each region in SPECIAL_REGIONS, syncs the app's per-region geodata
    status with the developer's selection (`enabled_regions`, an iterable of
    region ids, or None to leave statuses untouched) and keeps the app's
    region-exclusion rows consistent with the resulting status.
    No-op unless the 'special-regions' waffle flag is active for the request.
    """
    if not waffle.flag_is_active(request, 'special-regions'):
        return
    for region in mkt.regions.SPECIAL_REGIONS:
        status = app.geodata.get_status(region)
        if enabled_regions is not None:
            if region.id in enabled_regions:
                # If it's not already enabled, mark as pending.
                if status != mkt.STATUS_PUBLIC:
                    # Developer requested for it to be in China.
                    status = mkt.STATUS_PENDING
                    value, changed = app.geodata.set_status(region, status)
                    if changed:
                        log.info(u'[Webapp:%s] App marked as pending '
                                 u'special region (%s).' % (app, region.slug))
                        # Stamp the nomination date only when the status
                        # actually flipped to pending (save=True persists).
                        value, changed = app.geodata.set_nominated_date(
                            region, save=True)
                        log.info(u'[Webapp:%s] Setting nomination date to '
                                 u'now for region (%s).' % (app, region.slug))
            else:
                # Developer cancelled request for approval.
                status = mkt.STATUS_NULL
                value, changed = app.geodata.set_status(
                    region, status, save=True)
                if changed:
                    log.info(u'[Webapp:%s] App marked as null special '
                             u'region (%s).' % (app, region.slug))
        # Exclusion rows are driven by the (possibly just-updated) status:
        # public apps must not be excluded; everything else must be.
        if status == mkt.STATUS_PUBLIC:
            # Reviewer approved for it to be in China.
            aer = app.addonexcludedregion.filter(region=region.id)
            if aer.exists():
                aer.delete()
                log.info(u'[Webapp:%s] App included in new special '
                         u'region (%s).' % (app, region.slug))
        else:
            # Developer requested for it to be in China.
            aer, created = app.addonexcludedregion.get_or_create(
                region=region.id)
            if created:
                log.info(u'[Webapp:%s] App excluded from new special '
                         u'region (%s).' % (app, region.slug))
class AuthorForm(happyforms.ModelForm):
    """ModelForm for one AddonUser (team member) row of an app."""

    def clean_user(self):
        member = self.cleaned_data['user']
        if member.read_dev_agreement:
            return member
        # Members who never accepted the dev agreement may not be added.
        raise forms.ValidationError(
            _('All team members must have read and agreed to the '
              'developer agreement.'))

    class Meta:
        model = AddonUser
        exclude = ('addon',)
class BaseModelFormSet(BaseModelFormSet):
    """
    Override the parent's is_valid to prevent deleting all forms.

    NOTE: this deliberately shadows the imported ``BaseModelFormSet`` name,
    so the formset classes defined below inherit the stricter behaviour.
    """
    def is_valid(self):
        # clean() won't get called in is_valid() if all the rows are getting
        # deleted. We can't allow deleting everything.
        rv = super(BaseModelFormSet, self).is_valid()
        return rv and not any(self.errors) and not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
    """Formset-level validation for the team-member (AddonUser) rows."""

    def clean(self):
        if any(self.errors):
            return
        # cleaned_data could be None if it's the empty extra form.
        # Rows flagged for deletion are ignored for all checks below.
        data = filter(None, [f.cleaned_data for f in self.forms
                             if not f.cleaned_data.get('DELETE', False)])
        if not any(d['role'] == mkt.AUTHOR_ROLE_OWNER for d in data):
            raise forms.ValidationError(_('Must have at least one owner.'))
        if not any(d['listed'] for d in data):
            raise forms.ValidationError(
                _('At least one team member must be listed.'))
        users = [d['user'] for d in data]
        # Duplicate detection: the sorted list differs from its sorted
        # de-duplicated form iff some user appears more than once.
        if sorted(users) != sorted(set(users)):
            raise forms.ValidationError(
                _('A team member can only be listed once.'))
# Formset used on the "team members" page; extra=0 means no blank row is
# rendered by default.
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
                                     form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
    """Confirmation form for deleting an app; the reason is optional."""
    reason = forms.CharField(required=False)

    def __init__(self, request):
        # Always bound to POST data — this form is only used on submission.
        super(DeleteForm, self).__init__(request.POST)
def trap_duplicate(request, manifest_url):
    """Detect a prior submission of the same manifest by this user.

    Returns an HTML message (with an edit/resume link for the existing app)
    when a duplicate is found, otherwise None.
    """
    # See if this user has any other apps with the same manifest.
    owned = (request.user.addonuser_set
             .filter(addon__manifest_url=manifest_url))
    if not owned:
        return
    try:
        app = owned[0].addon
    except Webapp.DoesNotExist:
        return
    error_url = app.get_dev_url()

    # An app has exactly one status, so a lookup table is equivalent to
    # the original if/elif chain over statuses.
    msg_for_status = {
        mkt.STATUS_PUBLIC: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently public. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_PENDING: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently pending. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_NULL: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently incomplete. '
            '<a href="%s">Resume app</a>'),
        mkt.STATUS_REJECTED: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently rejected. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_DISABLED: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently banned on Marketplace. '
            '<a href="%s">Edit app</a>'),
    }
    msg = msg_for_status.get(app.status)
    if msg is None and app.disabled_by_user:
        # Only consulted when no status above matched (as in the original
        # elif ordering).
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently disabled. '
                '<a href="%s">Edit app</a>')
    if msg:
        return msg % (jinja2_escape(app.name), error_url)
def verify_app_domain(manifest_url, exclude=None, packaged=False):
    """Raise ValidationError if another Webapp already claims this domain.

    The uniqueness check always runs for packaged apps; for hosted apps it
    only runs when the 'webapps-unique-by-domain' waffle switch is on.
    ``exclude`` lets an existing app avoid matching itself.
    """
    if not packaged and not waffle.switch_is_active('webapps-unique-by-domain'):
        return
    domain = Webapp.domain_from_url(manifest_url)
    existing = Webapp.objects.filter(app_domain=domain)
    if exclude:
        existing = existing.exclude(pk=exclude.pk)
    if existing.exists():
        raise forms.ValidationError(
            _('An app already exists on this domain; '
              'only one app per domain is allowed.'))
class PreviewForm(happyforms.ModelForm):
    """Form for a single screenshot/video Preview of an app.

    The media file itself is uploaded out-of-band; this form receives the
    ``upload_hash`` pointing at the temp file and kicks off an async
    resize/transcode task on save.
    """
    file_upload = forms.FileField(required=False)
    upload_hash = forms.CharField(required=False)
    # This lets us POST the data URIs of the unsaved previews so we can still
    # show them if there were form errors.
    unsaved_image_data = forms.CharField(required=False,
                                         widget=forms.HiddenInput)
    unsaved_image_type = forms.CharField(required=False,
                                         widget=forms.HiddenInput)

    def save(self, addon, commit=True):
        # Empty cleaned_data means the blank "extra" formset row: no-op.
        if self.cleaned_data:
            self.instance.addon = addon
            if self.cleaned_data.get('DELETE'):
                # Existing preview.
                if self.instance.id:
                    self.instance.delete()
                # User has no desire to save this preview.
                return

            super(PreviewForm, self).save(commit=commit)
            if self.cleaned_data['upload_hash']:
                upload_hash = self.cleaned_data['upload_hash']
                upload_path = os.path.join(settings.TMP_PATH, 'preview',
                                           upload_hash)
                # The hash's extension encodes the mimetype, with '-'
                # standing in for '/' (e.g. '.video-webm' -> 'video/webm').
                filetype = (os.path.splitext(upload_hash)[1][1:]
                            .replace('-', '/'))
                if filetype in mkt.VIDEO_TYPES:
                    self.instance.update(filetype=filetype)
                    vtasks.resize_video.delay(upload_path, self.instance.pk,
                                              user_pk=mkt.get_user().pk,
                                              set_modified_on=[self.instance])
                else:
                    # Non-video uploads are recorded as PNG (presumably the
                    # resize task outputs PNG — confirm in tasks module).
                    self.instance.update(filetype='image/png')
                    tasks.resize_preview.delay(upload_path, self.instance.pk,
                                               set_modified_on=[self.instance])

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'id', 'position')
class JSONField(forms.Field):
    """Form field that accepts a JSON string and cleans it to Python data.

    An empty string cleans to None. Non-string values and strings that are
    not valid JSON pass through unchanged.
    """

    def to_python(self, value):
        if value == '':
            return None
        if isinstance(value, basestring):
            try:
                return json.loads(value)
            except ValueError:
                # Not valid JSON — fall through and return the raw value.
                pass
        return value
class JSONMultipleChoiceField(forms.MultipleChoiceField, JSONField):
    """MultipleChoiceField whose submitted value may arrive JSON-encoded
    (decoded via JSONField.to_python); rendered as checkboxes."""
    widget = forms.CheckboxSelectMultiple
class AdminSettingsForm(PreviewForm):
    """Admin-only app settings: promo preview, contacts, flags, banners."""
    DELETE = forms.BooleanField(required=False)
    mozilla_contact = SeparatedValuesField(forms.EmailField, separator=',',
                                           required=False)
    vip_app = forms.BooleanField(required=False)
    priority_review = forms.BooleanField(required=False)

    # Per-region geodata banner (regions list + translated message).
    banner_regions = JSONMultipleChoiceField(
        required=False, choices=mkt.regions.REGIONS_CHOICES_NAME)
    banner_message = TransField(required=False)

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'position')

    def __init__(self, *args, **kw):
        # Note that this form is not inheriting from AddonFormBase, so we have
        # to get rid of 'version' ourselves instead of letting the parent class
        # do it.
        kw.pop('version', None)

        # Get the object for the app's promo `Preview` and pass it to the form.
        if kw.get('instance'):
            addon = kw.pop('instance')
            self.instance = addon
            self.promo = addon.get_promo()

        self.request = kw.pop('request', None)

        # Note: After calling `super`, `self.instance` becomes the `Preview`
        # object.
        super(AdminSettingsForm, self).__init__(*args, **kw)

        self.initial['vip_app'] = addon.vip_app
        self.initial['priority_review'] = addon.priority_review

        if self.instance:
            self.initial['mozilla_contact'] = addon.mozilla_contact
            self.initial['banner_regions'] = addon.geodata.banner_regions or []
            self.initial['banner_message'] = addon.geodata.banner_message_id

    @property
    def regions_by_id(self):
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    def clean_position(self):
        # Promo previews are pinned to position -1 regardless of input.
        return -1

    def clean_banner_regions(self):
        try:
            regions = map(int, self.cleaned_data.get('banner_regions'))
        except (TypeError, ValueError):
            # input data is not a list or data contains non-integers.
            raise forms.ValidationError(_('Invalid region(s) selected.'))
        return list(regions)

    def clean_mozilla_contact(self):
        contact = self.cleaned_data.get('mozilla_contact')
        # Normalize a missing value to the empty string.
        if self.cleaned_data.get('mozilla_contact') is None:
            return u''
        return contact

    def save(self, addon, commit=True):
        # Promo preview handling: delete on explicit DELETE (when no new
        # upload), replace when a new upload_hash arrived.
        if (self.cleaned_data.get('DELETE') and
                'upload_hash' not in self.changed_data and self.promo.id):
            self.promo.delete()
        elif self.promo and 'upload_hash' in self.changed_data:
            self.promo.delete()
        elif self.cleaned_data.get('upload_hash'):
            super(AdminSettingsForm, self).save(addon, True)

        updates = {
            'vip_app': self.cleaned_data.get('vip_app'),
        }
        contact = self.cleaned_data.get('mozilla_contact')
        if contact is not None:
            updates['mozilla_contact'] = contact
        if (self.cleaned_data.get('priority_review') and
                not addon.priority_review):
            # addon.priority_review gets updated within prioritize_app().
            prioritize_app(addon, self.request.user)
        else:
            updates['priority_review'] = self.cleaned_data.get(
                'priority_review')
        addon.update(**updates)

        geodata = addon.geodata
        geodata.banner_regions = self.cleaned_data.get('banner_regions')
        geodata.banner_message = self.cleaned_data.get('banner_message')
        geodata.save()

        # NOTE(review): this form declares no 'flash' field, so
        # cleaned_data.get('flash') appears to always be None here — confirm
        # whether this block is dead code or relies on a field added
        # elsewhere before removing it.
        uses_flash = self.cleaned_data.get('flash')
        af = addon.get_latest_file()
        if af is not None:
            af.update(uses_flash=bool(uses_flash))

        index_webapps.delay([addon.id])

        return addon
class BasePreviewFormSet(BaseModelFormSet):
    """Formset validation for previews: require at least one screenshot or
    video that is not marked for deletion."""

    def clean(self):
        if any(self.errors):
            return
        has_upload = any(
            not form.cleaned_data.get('DELETE') and
            form.cleaned_data.get('upload_hash') is not None
            for form in self.forms)
        if not has_upload:
            raise forms.ValidationError(
                _('You must upload at least one screenshot or video.'))
# Preview management formset: one blank extra row so a new screenshot/video
# can always be added.
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
                                      form=PreviewForm, can_delete=True,
                                      extra=1)
class NewManifestForm(happyforms.Form):
    """Form for submitting a new hosted app by its manifest URL."""
    manifest = forms.URLField()

    def __init__(self, *args, **kwargs):
        self.is_standalone = kwargs.pop('is_standalone', False)
        super(NewManifestForm, self).__init__(*args, **kwargs)

    def clean_manifest(self):
        manifest_url = self.cleaned_data['manifest']
        if self.is_standalone:
            # The standalone validator skips the domain-uniqueness check.
            return manifest_url
        verify_app_domain(manifest_url)
        return manifest_url
class NewPackagedAppForm(happyforms.Form):
    """Form for submitting a new packaged app as a zip upload.

    On success, ``self.file_upload`` holds the created FileUpload. On
    failure, the validation messages are persisted to a FileUpload and a
    ValidationError is raised for the view.
    """
    upload = forms.FileField()

    def __init__(self, *args, **kwargs):
        self.max_size = kwargs.pop('max_size', MAX_PACKAGED_APP_SIZE)
        self.user = kwargs.pop('user', get_user())
        self.addon = kwargs.pop('addon', None)
        self.file_upload = None
        super(NewPackagedAppForm, self).__init__(*args, **kwargs)

    def clean_upload(self):
        upload = self.cleaned_data['upload']
        errors = []

        if upload.size > self.max_size:
            errors.append({
                'type': 'error',
                'message': _('Packaged app too large for submission. Packages '
                             'must be smaller than %s.' % filesizeformat(
                                 self.max_size)),
                'tier': 1,
            })
            # Immediately raise an error, do not process the rest of the view,
            # which would read the file.
            raise self.persist_errors(errors, upload)

        manifest = None
        try:
            # Be careful to keep this as in-memory zip reading.
            manifest = ZipFile(upload, 'r').read('manifest.webapp')
        except Exception:
            # The exception object was never used; don't bind it.
            errors.append({
                'type': 'error',
                'message': _('Error extracting manifest from zip file.'),
                'tier': 1,
            })

        origin = None
        if manifest:
            try:
                origin = WebAppParser.decode_manifest(manifest).get('origin')
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if origin:
            try:
                verify_app_domain(origin, packaged=True, exclude=self.addon)
            # Modern `as` syntax (the comma form is removed in Python 3),
            # matching the other handlers in this method.
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if errors:
            raise self.persist_errors(errors, upload)

        # Everything passed validation.
        self.file_upload = FileUpload.from_post(
            upload, upload.name, upload.size, user=self.user)

    def persist_errors(self, errors, upload):
        """
        Persist the error with this into FileUpload (but do not persist
        the file contents, which are too large) and return a ValidationError.
        """
        validation = {
            'errors': len(errors),
            'success': False,
            'messages': errors,
        }

        self.file_upload = FileUpload.objects.create(
            user=self.user, name=getattr(upload, 'name', ''),
            validation=json.dumps(validation))

        # Return a ValidationError to be raised by the view.
        return forms.ValidationError(' '.join(e['message'] for e in errors))
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
    """Common base for the app-edit ModelForms: stashes request/version."""

    def __init__(self, *args, **kw):
        # 'request' is mandatory; 'version' is optional (some forms don't
        # operate on a specific version).
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(AddonFormBase, self).__init__(*args, **kw)

    class Meta:
        # NOTE(review): 'models' looks like a typo for 'model', which makes
        # this Meta effectively inert — every subclass in this file declares
        # its own Meta with `model`. Left as-is: "fixing" it would change
        # field generation for any subclass relying on the current
        # behaviour (Webapp uses `app_slug`, not `slug`) — confirm before
        # changing.
        models = Webapp
        fields = ('name', 'slug')
class AppFormBasic(AddonFormBase):
    """Form to edit basic app info (slug, manifest URL, description, tags)."""
    slug = forms.CharField(max_length=30, widget=forms.TextInput)
    manifest_url = forms.URLField()
    hosted_url = forms.CharField(
        label=_lazy(u'Hosted URL:'), required=False,
        help_text=_lazy(
            u'A URL to where your app is hosted on the web, if it exists. This'
            u' allows users to try out your app before installing it.'))
    description = TransField(
        required=True,
        label=_lazy(u'Provide a detailed description of your app'),
        help_text=_lazy(u'This description will appear on the details page.'),
        widget=TransTextarea)
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))

    class Meta:
        model = Webapp
        fields = ('slug', 'manifest_url', 'hosted_url', 'description', 'tags')

    def __init__(self, *args, **kw):
        # Force the form to use app_slug. We want to keep
        # this under "slug" so all the js continues to work.
        kw.setdefault('initial', {})['slug'] = kw['instance'].app_slug
        super(AppFormBasic, self).__init__(*args, **kw)

        # Remembered so save() can log/flag a manifest URL change.
        self.old_manifest_url = self.instance.manifest_url

        if self.instance.is_packaged:
            # Manifest URL cannot be changed for packaged apps.
            del self.fields['manifest_url']

        self.initial['tags'] = ', '.join(self.get_tags(self.instance))

    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])

    def get_tags(self, addon):
        # Restricted tags are only visible to privileged users.
        if can_edit_restricted_tags(self.request):
            return list(addon.tags.values_list('tag_text', flat=True))
        else:
            return list(addon.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))

    def _post_clean(self):
        # Switch slug to app_slug in cleaned_data and self._meta.fields so
        # we can update the app_slug field for webapps.
        try:
            self._meta.fields = list(self._meta.fields)
            slug_idx = self._meta.fields.index('slug')
            data = self.cleaned_data
            if 'slug' in data:
                data['app_slug'] = data.pop('slug')
            self._meta.fields[slug_idx] = 'app_slug'
            super(AppFormBasic, self)._post_clean()
        finally:
            # Always restore the field name, even if _post_clean raised,
            # so the shared Meta stays consistent.
            self._meta.fields[slug_idx] = 'slug'

    def clean_slug(self):
        slug = self.cleaned_data['slug']
        slug_validator(slug, lower=False)

        # Uniqueness/blocklist checks only apply when the slug changed.
        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))

            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(_('The slug cannot be "%s". '
                                              'Please choose another.' % slug))

        return slug.lower()

    def clean_manifest_url(self):
        manifest_url = self.cleaned_data['manifest_url']
        # Only verify if manifest changed.
        if 'manifest_url' in self.changed_data:
            verify_app_domain(manifest_url, exclude=self.instance)
        return manifest_url

    def save(self, addon, commit=False):
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        addonform = super(AppFormBasic, self).save(commit=False)
        addonform.save()

        if 'manifest_url' in self.changed_data:
            before_url = self.old_manifest_url
            after_url = self.cleaned_data['manifest_url']

            # If a non-admin edited the manifest URL, add to Re-review Queue.
            if not acl.action_allowed(self.request, 'Admin', '%'):
                log.info(u'[Webapp:%s] (Re-review) Manifest URL changed '
                         u'from %s to %s'
                         % (self.instance, before_url, after_url))

                msg = (_(u'Manifest URL changed from {before_url} to '
                         u'{after_url}')
                       .format(before_url=before_url, after_url=after_url))

                RereviewQueue.flag(self.instance,
                                   mkt.LOG.REREVIEW_MANIFEST_URL_CHANGE, msg)

            # Refetch the new manifest.
            log.info('Manifest %s refreshed for %s'
                     % (addon.manifest_url, addon))
            update_manifests.delay([self.instance.id])

        # Diff old vs. new tag sets and apply only the changes.
        tags_new = self.cleaned_data['tags']
        tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]

        add_tags = set(tags_new) - set(tags_old)
        del_tags = set(tags_old) - set(tags_new)

        # Add new tags.
        for t in add_tags:
            Tag(tag_text=t).save_tag(addon)

        # Remove old tags.
        for t in del_tags:
            Tag(tag_text=t).remove_tag(addon)

        return addonform
class AppFormDetails(AddonFormBase):
    """Form for app details: default locale, homepage, privacy policy."""
    # Locale codes normalized to BCP47-style dashes (e.g. 'pt-BR').
    LOCALES = [(translation.to_locale(k).replace('_', '-'), v)
               for k, v in do_dictsort(settings.LANGUAGES)]

    default_locale = forms.TypedChoiceField(required=False, choices=LOCALES)
    homepage = TransField.adapt(forms.URLField)(required=False)
    privacy_policy = TransField(
        widget=TransTextarea(), required=True,
        label=_lazy(u"Please specify your app's Privacy Policy"))

    class Meta:
        model = Webapp
        fields = ('default_locale', 'homepage', 'privacy_policy')

    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = ['name', 'description']
        data = self.cleaned_data
        if not self.errors and 'default_locale' in self.changed_data:
            # Translation ids of the required fields on the instance...
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = data['default_locale']
            ids = filter(None, fields.values())
            # ...checked against the translations present in the new locale.
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(
                    _('Before changing your default locale you must have a '
                      'name and description in that locale. '
                      'You are missing %s.') % ', '.join(map(repr, missing)))
        return data
class AppFormMedia(AddonFormBase):
    """Form for editing an app's icon (upload handled out-of-band)."""
    icon_upload_hash = forms.CharField(required=False)
    unsaved_icon_data = forms.CharField(required=False,
                                        widget=forms.HiddenInput)

    class Meta:
        model = Webapp
        fields = ('icon_upload_hash', 'icon_type')

    def save(self, addon, commit=True):
        upload_hash = self.cleaned_data['icon_upload_hash']
        if upload_hash:
            source_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            icon_dir = addon.get_icon_dir()
            destination = os.path.join(icon_dir, '%s' % addon.id)
            # Drop previously-generated icon files before resizing anew.
            remove_icons(destination)
            tasks.resize_icon.delay(source_path, destination,
                                    mkt.CONTENT_ICON_SIZES,
                                    set_modified_on=[addon])

        return super(AppFormMedia, self).save(commit)
class AppSupportFormMixin(object):
    """Mixin enforcing that an app provides a support email and/or URL."""

    def get_default_translation_for(self, field_name):
        """
        Return the cleaned_data for the specified field_name, using the
        field's default_locale.
        """
        locale = self.fields[field_name].default_locale
        return self.cleaned_data.get(field_name, {}).get(locale, '')

    def clean_support_fields(self):
        """
        Make sure either support email or support url are present.
        """
        already_invalid = ('support_email' in self._errors or
                           'support_url' in self._errors)
        if already_invalid:
            # There are already errors on those fields, so at least one of
            # them was filled in — the user just needs to correct it.
            return

        email = self.get_default_translation_for('support_email')
        url = self.get_default_translation_for('support_url')
        if email or url:
            return

        # Mark the fields as invalid, add an error message on a special
        # 'support' field that the template will use if necessary, not on
        # both fields individually.
        self._errors['support'] = self.error_class(
            [_('You must provide either a website, an email, or both.')])
        self._errors['support_email'] = self.error_class([''])
        self._errors['support_url'] = self.error_class([''])

    def clean(self):
        cleaned_data = super(AppSupportFormMixin, self).clean()
        self.clean_support_fields()
        return cleaned_data
class AppFormSupport(AppSupportFormMixin, AddonFormBase):
    """Form for an app's support contacts; the mixin requires at least one
    of the two fields to be filled in."""
    support_url = TransField.adapt(forms.URLField)(required=False)
    support_email = TransField.adapt(forms.EmailField)(required=False)

    class Meta:
        model = Webapp
        fields = ('support_email', 'support_url')
class AppAppealForm(happyforms.Form):
    """
    If a developer's app is rejected he can make changes and request
    another review.
    """
    notes = forms.CharField(
        label=_lazy(u'Your comments'),
        required=False, widget=forms.Textarea(attrs={'rows': 2}))

    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        super(AppAppealForm, self).__init__(*args, **kw)

    def save(self):
        version = self.product.versions.latest()
        notes = self.cleaned_data['notes']
        # Log the resubmission, attaching the comments only when provided.
        if notes:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, version,
                    details={'comments': notes})
        else:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, version)
        # Mark app and file as pending again.
        self.product.update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        version.all_files[0].update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        return version
class PublishForm(happyforms.Form):
    """Form controlling an app's visibility (published/unlisted/private)."""
    # Publish choice wording is slightly different here than with the
    # submission flow because the app may have already been published.
    mark_safe_lazy = lazy(mark_safe, six.text_type)
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         mark_safe_lazy(_lazy(
             u'<b>Published</b>: Visible to everyone in the Marketplace and '
             u'included in search results and listing pages.'))),
        (mkt.PUBLISH_HIDDEN,
         mark_safe_lazy(_lazy(
             u'<b>Unlisted</b>: Visible to only people with the URL and '
             u'does not appear in search results and listing pages.'))),
    )
    # Used for setting initial form values.
    PUBLISH_MAPPING = {
        mkt.STATUS_PUBLIC: mkt.PUBLISH_IMMEDIATE,
        mkt.STATUS_UNLISTED: mkt.PUBLISH_HIDDEN,
        mkt.STATUS_APPROVED: mkt.PUBLISH_PRIVATE,
    }
    # Use in form processing to set status.
    STATUS_MAPPING = dict((v, k) for k, v in PUBLISH_MAPPING.items())

    publish_type = forms.TypedChoiceField(
        required=False, choices=PUBLISH_CHOICES, widget=forms.RadioSelect(),
        initial=0, coerce=int, label=_lazy('App Visibility:'))
    # There is no radio choice for PUBLISH_PRIVATE: it is represented as
    # "Unlisted" plus this checkbox (see save()).
    limited = forms.BooleanField(
        required=False, label=_lazy(
            u'<b>Limit to my team</b>: Visible to only Team Members.'))

    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon')
        super(PublishForm, self).__init__(*args, **kwargs)

        limited = False
        publish = self.PUBLISH_MAPPING.get(self.addon.status,
                                           mkt.PUBLISH_IMMEDIATE)
        if self.addon.status == mkt.STATUS_APPROVED:
            # Special case if app is currently private.
            limited = True
            publish = mkt.PUBLISH_HIDDEN

        # Determine the current selection via STATUS to publish choice mapping.
        self.fields['publish_type'].initial = publish
        self.fields['limited'].initial = limited

        # Make the limited label safe so we can display the HTML.
        self.fields['limited'].label = mark_safe(self.fields['limited'].label)

    def save(self):
        publish = self.cleaned_data['publish_type']
        limited = self.cleaned_data['limited']

        # "Unlisted" + "limit to my team" collapses to private.
        if publish == mkt.PUBLISH_HIDDEN and limited:
            publish = mkt.PUBLISH_PRIVATE

        status = self.STATUS_MAPPING[publish]
        self.addon.update(status=status)

        mkt.log(mkt.LOG.CHANGE_STATUS, self.addon.get_status_display(),
                self.addon)

        # Call update_version, so various other bits of data update.
        self.addon.update_version()

        # Call to update names and locales if changed.
        self.addon.update_name_from_package_manifest()
        self.addon.update_supported_locales()

        set_storefront_data.delay(self.addon.pk)
class RegionForm(forms.Form):
    """Form controlling which regions an app is listed in, including the
    restricted/unrestricted toggle and special regions (e.g., China)."""
    regions = forms.MultipleChoiceField(
        required=False, choices=[], widget=forms.CheckboxSelectMultiple,
        label=_lazy(u'Choose the regions your app will be listed in:'),
        error_messages={'required':
                        _lazy(u'You must select at least one region.')})
    special_regions = forms.MultipleChoiceField(
        required=False, widget=forms.CheckboxSelectMultiple,
        choices=[(x.id, x.name) for x in mkt.regions.SPECIAL_REGIONS])
    enable_new_regions = forms.BooleanField(
        required=False, label=_lazy(u'Enable new regions'))
    restricted = forms.TypedChoiceField(
        required=False, initial=0, coerce=int,
        choices=[(0, _lazy('Make my app available in most regions')),
                 (1, _lazy('Choose where my app is made available'))],
        widget=forms.RadioSelect(attrs={'class': 'choices'}))

    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        self.request = kw.pop('request', None)
        super(RegionForm, self).__init__(*args, **kw)

        self.fields['regions'].choices = REGIONS_CHOICES_SORTED_BY_NAME()

        # This is the list of the user's exclusions as we don't
        # want the user's choices to be altered by external
        # exclusions e.g. payments availability.
        user_exclusions = list(
            self.product.addonexcludedregion.values_list('region', flat=True)
        )

        # If we have excluded regions, uncheck those.
        # Otherwise, default to everything checked.
        self.regions_before = self.product.get_region_ids(
            restofworld=True,
            excluded=user_exclusions
        )

        self.initial = {
            'regions': sorted(self.regions_before),
            'restricted': int(self.product.geodata.restricted),
            'enable_new_regions': self.product.enable_new_regions,
        }

        # The checkboxes for special regions are
        #
        # - checked ... if an app has not been requested for approval in
        #   China or the app has been rejected in China.
        #
        # - unchecked ... if an app has been requested for approval in
        #   China or the app has been approved in China.
        unchecked_statuses = (mkt.STATUS_NULL, mkt.STATUS_REJECTED)

        for region in self.special_region_objs:
            if self.product.geodata.get_status(region) in unchecked_statuses:
                # If it's rejected in this region, uncheck its checkbox.
                if region.id in self.initial['regions']:
                    self.initial['regions'].remove(region.id)
            elif region.id not in self.initial['regions']:
                # If it's pending/public, check its checkbox.
                self.initial['regions'].append(region.id)

    @property
    def regions_by_id(self):
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    @property
    def special_region_objs(self):
        return mkt.regions.SPECIAL_REGIONS

    @property
    def special_region_ids(self):
        return mkt.regions.SPECIAL_REGION_IDS

    @property
    def low_memory_regions(self):
        # True if any known region at all is flagged low-memory (not just
        # the currently selected ones).
        return any(region.low_memory for region in self.regions_by_id.values())

    @property
    def special_region_statuses(self):
        """Returns the null/pending/public status for each region."""
        statuses = {}
        for region in self.special_region_objs:
            statuses[region.id] = self.product.geodata.get_status_slug(region)
        return statuses

    @property
    def special_region_messages(self):
        """Returns the L10n messages for each region's status."""
        return self.product.geodata.get_status_messages()

    def is_toggling(self):
        # Returns 'free'/'paid' when this POST is a paid-state toggle rather
        # than a regular region save, else False.
        if not self.request or not hasattr(self.request, 'POST'):
            return False
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False

    def _product_is_paid(self):
        return (self.product.premium_type in mkt.ADDON_PREMIUMS or
                self.product.premium_type == mkt.ADDON_FREE_INAPP)

    def clean_regions(self):
        regions = self.cleaned_data['regions']
        if not self.is_toggling():
            if not regions:
                raise forms.ValidationError(
                    _('You must select at least one region.'))
        return regions

    def save(self):
        # Don't save regions if we are toggling.
        if self.is_toggling():
            return

        regions = [int(x) for x in self.cleaned_data['regions']]
        special_regions = [
            int(x) for x in self.cleaned_data['special_regions']
        ]
        restricted = int(self.cleaned_data['restricted'] or 0)

        if restricted:
            before = set(self.regions_before)
            after = set(regions)

            log.info(u'[Webapp:%s] App marked as restricted.' % self.product)

            # Add new region exclusions.
            to_add = before - after
            for region in to_add:
                aer, created = self.product.addonexcludedregion.get_or_create(
                    region=region)
                if created:
                    log.info(u'[Webapp:%s] Excluded from new region (%s).'
                             % (self.product, region))

            # Remove old region exclusions.
            to_remove = after - before
            for region in to_remove:
                self.product.addonexcludedregion.filter(
                    region=region).delete()
                log.info(u'[Webapp:%s] No longer excluded from region (%s).'
                         % (self.product, region))

            # If restricted, check how we should handle new regions.
            if self.cleaned_data['enable_new_regions']:
                self.product.update(enable_new_regions=True)
                log.info(u'[Webapp:%s] will be added to future regions.'
                         % self.product)
            else:
                self.product.update(enable_new_regions=False)
                log.info(u'[Webapp:%s] will not be added to future regions.'
                         % self.product)
        else:
            # If not restricted, set `enable_new_regions` to True and remove
            # currently excluded regions.
            self.product.update(enable_new_regions=True)
            self.product.addonexcludedregion.all().delete()
            log.info(u'[Webapp:%s] App marked as unrestricted.' % self.product)

        self.product.geodata.update(restricted=restricted)

        # Toggle region exclusions/statuses for special regions (e.g., China).
        toggle_app_for_special_regions(self.request, self.product,
                                       special_regions)
class CategoryForm(happyforms.Form):
    """Form for choosing an app's categories (bounded by MAX_CATEGORIES)."""
    categories = forms.MultipleChoiceField(label=_lazy(u'Categories'),
                                           choices=CATEGORY_CHOICES,
                                           widget=forms.CheckboxSelectMultiple)

    def __init__(self, *args, **kw):
        self.request = kw.pop('request', None)
        self.product = kw.pop('product', None)
        super(CategoryForm, self).__init__(*args, **kw)

        current_cats = self.product.categories
        self.cats_before = list(current_cats) if current_cats else []
        self.initial['categories'] = self.cats_before

    def max_categories(self):
        return mkt.MAX_CATEGORIES

    def clean_categories(self):
        categories = self.cleaned_data['categories']
        limit = mkt.MAX_CATEGORIES
        if len(set(categories)) > limit:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ngettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                limit).format(limit))
        return categories

    def save(self):
        chosen = list(self.cleaned_data['categories'])
        self.product.update(categories=chosen)

        toggle_app_for_special_regions(self.request, self.product)
class DevAgreementForm(happyforms.Form):
    """Form recording a developer's acceptance of the dev agreement."""
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree'),
                                            widget=forms.HiddenInput)

    def __init__(self, *args, **kw):
        self.instance = kw.pop('instance')
        super(DevAgreementForm, self).__init__(*args, **kw)

    def save(self):
        user = self.instance
        # Acceptance is stored as the timestamp at which it happened.
        user.read_dev_agreement = datetime.now()
        user.save()
class DevNewsletterForm(happyforms.Form):
    """Devhub newsletter subscription form."""
    email = forms.EmailField(
        error_messages={'required':
                        _lazy(u'Please enter a valid email address.')},
        widget=forms.TextInput(attrs={'required': '',
                                      'placeholder':
                                      _lazy(u'Your email address')}))
    email_format = forms.ChoiceField(
        widget=forms.RadioSelect(),
        choices=(('H', 'HTML'), ('T', _lazy(u'Text'))),
        initial='H')
    privacy = forms.BooleanField(
        error_messages={'required':
                        _lazy(u'You must agree to the Privacy Policy.')})
    country = forms.ChoiceField(label=_lazy(u'Country'))

    def __init__(self, locale, *args, **kw):
        # Country choices come from mpconstants for the given locale,
        # sorted by the localized region name.
        regions = mpconstants_regions.get_region(locale).REGIONS
        regions = sorted(regions.iteritems(), key=lambda x: x[1])

        super(DevNewsletterForm, self).__init__(*args, **kw)

        self.fields['country'].choices = regions
        self.fields['country'].initial = 'us'
class AppFormTechnical(AddonFormBase):
    """Form for technical flags: Flash usage and offline capability."""
    flash = forms.BooleanField(required=False)
    is_offline = forms.BooleanField(required=False)

    class Meta:
        model = Webapp
        fields = ('is_offline', 'public_stats',)

    def __init__(self, *args, **kw):
        super(AppFormTechnical, self).__init__(*args, **kw)
        # The flash flag lives on the version's file, not on the webapp.
        if self.version.all_files:
            self.initial['flash'] = self.version.all_files[0].uses_flash

    def save(self, addon, commit=False):
        uses_flash = self.cleaned_data.get('flash')
        # NOTE(review): the `commit` argument is ignored — the instance is
        # always saved with commit=True. Confirm whether any caller relies
        # on commit=False semantics.
        self.instance = super(AppFormTechnical, self).save(commit=True)

        if self.version.all_files:
            self.version.all_files[0].update(uses_flash=bool(uses_flash))

        return self.instance
class TransactionFilterForm(happyforms.Form):
    """Filters for the transactions listing (app, type, id, date range)."""
    app = AddonChoiceField(queryset=None, required=False, label=_lazy(u'App'))
    transaction_type = forms.ChoiceField(
        required=False, label=_lazy(u'Transaction Type'),
        choices=[(None, '')] + mkt.MKT_TRANSACTION_CONTRIB_TYPES.items())
    transaction_id = forms.CharField(
        required=False, label=_lazy(u'Transaction ID'))

    # NOTE(review): evaluated once at import time, so the selectable year
    # range (2012..current) goes stale in a process that runs across a year
    # boundary.
    current_year = datetime.today().year
    years = [current_year - x for x in range(current_year - 2012)]
    date_from = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'From'))
    date_to = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'To'))

    def __init__(self, *args, **kwargs):
        self.apps = kwargs.pop('apps', [])
        super(TransactionFilterForm, self).__init__(*args, **kwargs)
        # Limit the app choices to the caller-provided queryset.
        self.fields['app'].queryset = self.apps
class APIConsumerForm(happyforms.ModelForm):
    """Form for creating an API OAuth consumer (website or command line)."""
    app_name = forms.CharField(required=False)
    oauth_leg = forms.ChoiceField(choices=(
        ('website', _lazy('Web site')),
        ('command', _lazy('Command line')))
    )
    redirect_uri = forms.CharField(validators=[URLValidator()], required=False)

    class Meta:
        model = Access
        fields = ('app_name', 'redirect_uri')

    def __init__(self, *args, **kwargs):
        super(APIConsumerForm, self).__init__(*args, **kwargs)
        # Website consumers must supply an app name and a redirect URI.
        if self.data.get('oauth_leg') == 'website':
            self.fields['app_name'].required = True
            self.fields['redirect_uri'].required = True
class AppVersionForm(happyforms.ModelForm):
    """Form for a version's release/approval notes plus whether the app
    should go live automatically once this version is approved."""
    releasenotes = TransField(widget=TransTextarea(), required=False)
    approvalnotes = forms.CharField(
        widget=TranslationTextarea(attrs={'rows': 4}), required=False)
    publish_immediately = forms.BooleanField(
        required=False,
        label=_lazy(u'Make this the Active version of my app as soon as it '
                    u'has been reviewed and approved.'))

    class Meta:
        model = Version
        fields = ('releasenotes', 'approvalnotes')

    def __init__(self, *args, **kwargs):
        super(AppVersionForm, self).__init__(*args, **kwargs)
        self.fields['publish_immediately'].initial = (
            self.instance.addon.publish_type == mkt.PUBLISH_IMMEDIATE)

    def save(self, *args, **kwargs):
        rval = super(AppVersionForm, self).save(*args, **kwargs)
        if self.instance.all_files[0].status == mkt.STATUS_PENDING:
            # If version is pending, allow changes to publish_type.
            if self.cleaned_data.get('publish_immediately'):
                publish_type = mkt.PUBLISH_IMMEDIATE
            else:
                publish_type = mkt.PUBLISH_PRIVATE
            self.instance.addon.update(publish_type=publish_type)
        return rval
class PreloadTestPlanForm(happyforms.Form):
    """Submit an app as a pre-load candidate together with a test plan.

    ``test_plan`` accepts PDF/Excel documents up to 2.5MB. Note the type
    check is based on the uploaded file's *name* (``mimetypes``), not its
    contents.
    """
    agree = forms.BooleanField(
        widget=forms.CheckboxInput,
        label=_lazy(
            u'Please consider my app as a candidate to be pre-loaded on a '
            u'Firefox OS device. I agree to the terms and conditions outlined '
            u'above. I understand that this document is not a commitment to '
            u'pre-load my app.'
        ))
    test_plan = forms.FileField(
        label=_lazy(u'Upload Your Test Plan (.pdf, .xls under 2.5MB)'),
        widget=forms.FileInput(attrs={'class': 'button'}))

    def clean(self):
        """Validate test_plan file."""
        content_types = [
            'application/pdf',
            'application/vnd.pdf',
            'application/ms-excel',
            'application/vnd.ms-excel',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.'
            'sheet'
        ]
        max_upload_size = 2621440  # 2.5MB
        if 'test_plan' not in self.files:
            raise forms.ValidationError(_('Test plan required.'))
        # Renamed from `file` so the builtin is not shadowed.
        uploaded = self.files['test_plan']
        # Guessed from the filename extension only, not the file contents.
        content_type = mimetypes.guess_type(uploaded.name)[0]
        # Guard clauses replace the original nested if/else; same messages,
        # same error paths.
        if content_type not in content_types:
            msg = (_('Invalid file type {0}. Only {1} files are supported.')
                   .format(content_type, ', '.join(content_types)))
            self._errors['test_plan'] = self.error_class([msg])
            raise forms.ValidationError(msg)
        if uploaded._size > max_upload_size:
            msg = _('File too large. Keep size under %s. Current size %s.')
            msg = msg % (filesizeformat(max_upload_size),
                         filesizeformat(uploaded._size))
            self._errors['test_plan'] = self.error_class([msg])
            raise forms.ValidationError(msg)
        return self.cleaned_data
class IARCGetAppInfoForm(happyforms.Form):
    """Look up an existing IARC ratings certificate by submission id and
    security code, then copy its ratings onto ``self.app``.

    NOTE(review): ``save()`` calls the remote IARC service and may itself
    raise ``ValidationError`` — validation effectively continues at save
    time; callers must be prepared for that.
    """
    submission_id = forms.CharField()
    security_code = forms.CharField(max_length=10)
    def __init__(self, app, *args, **kwargs):
        # The app whose IARC info / content ratings will be updated.
        self.app = app
        super(IARCGetAppInfoForm, self).__init__(*args, **kwargs)
    def clean_submission_id(self):
        """Normalize the submission id to an int; accepts 'subm-1234'."""
        submission_id = (
            # Also allow "subm-1234" since that's what IARC tool displays.
            self.cleaned_data['submission_id'].lower().replace('subm-', ''))
        if submission_id.isdigit():
            return int(submission_id)
        raise forms.ValidationError(_('Please enter a valid submission ID.'))
    def clean(self):
        cleaned_data = super(IARCGetAppInfoForm, self).clean()
        app = self.app
        iarc_id = cleaned_data.get('submission_id')
        if not app or not iarc_id:
            return cleaned_data
        # Unless reuse is explicitly allowed, refuse a certificate that is
        # already attached to a different app.
        if (not settings.IARC_ALLOW_CERT_REUSE and
                IARCInfo.objects.filter(submission_id=iarc_id)
                .exclude(addon=app).exists()):
            del cleaned_data['submission_id']
            raise forms.ValidationError(
                _('This IARC certificate is already being used for another '
                  'app. Please create a new IARC Ratings Certificate.'))
        return cleaned_data
    def save(self, *args, **kwargs):
        """Fetch the rating from IARC and store it on the app.

        Raises ValidationError when IARC returns no matching record.
        """
        app = self.app
        iarc_id = self.cleaned_data['submission_id']
        iarc_code = self.cleaned_data['security_code']
        if settings.DEBUG and iarc_id == 0:
            # A local developer is being lazy. Skip the hard work.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors([])
            app.set_interactives([])
            app.set_content_ratings({ratingsbodies.ESRB: ratingsbodies.ESRB_E})
            return
        # Generate XML.
        xml = lib.iarc.utils.render_xml(
            'get_app_info.xml',
            {'submission_id': iarc_id, 'security_code': iarc_code})
        # Process that shizzle.
        client = lib.iarc.client.get_iarc_client('services')
        resp = client.Get_App_Info(XMLString=xml)
        # Handle response.
        data = lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
        if data.get('rows'):
            row = data['rows'][0]
            if 'submission_id' not in row:
                # [{'ActionStatus': 'No records found. Please try another
                # 'criteria.', 'rowId: 1}].
                msg = _('Invalid submission ID or security code.')
                self._errors['submission_id'] = self.error_class([msg])
                log.info('[IARC] Bad GetAppInfo: %s' % row)
                raise forms.ValidationError(msg)
            # We found a rating, so store the id and code for future use.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors(row.get('descriptors', []))
            app.set_interactives(row.get('interactives', []))
            app.set_content_ratings(row.get('ratings', {}))
        else:
            msg = _('Invalid submission ID or security code.')
            self._errors['submission_id'] = self.error_class([msg])
            log.info('[IARC] Bad GetAppInfo. No rows: %s' % data)
            raise forms.ValidationError(msg)
class ContentRatingForm(happyforms.Form):
    """Filter content ratings modified since the given datetime."""
    since = forms.DateTimeField()
class MOTDForm(happyforms.Form):
    """Edit the site-wide message of the day."""
    motd = forms.CharField(widget=widgets.Textarea())
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import json
import mock
import six
import unittest
from swift.cli import container_deleter
from swift.common import internal_client
from swift.common import swob
from swift.common import utils
# Record of one request captured by FakeInternalClient.make_request.
AppCall = collections.namedtuple('AppCall', [
    'method', 'path', 'query', 'headers', 'body'])
class FakeInternalClient(internal_client.InternalClient):
    """Test double for InternalClient that replays canned responses.

    Deliberately does not call the parent __init__ (no real WSGI pipeline
    is built). Every request is recorded as an AppCall; responses are
    served in order from ``responses``, and an Exception entry is raised
    instead of returned. Used as a context manager so leftover responses
    fail the test.
    """

    def __init__(self, responses):
        self.resp_iter = iter(responses)
        self.calls = []

    def make_request(self, method, path, headers, acceptable_statuses,
                     body_file=None, params=None):
        body = None if body_file is None else body_file.read()
        path, _, query = path.partition('?')
        self.calls.append(AppCall(method, path, query, headers, body))
        canned = next(self.resp_iter)
        if isinstance(canned, Exception):
            raise canned
        return canned

    def __enter__(self):
        return self

    def __exit__(self, *args):
        leftovers = list(self.resp_iter)
        if leftovers:
            raise Exception('Unused responses: %r' % leftovers)
class TestContainerDeleter(unittest.TestCase):
    """Unit tests for swift.cli.container_deleter.

    All requests go through FakeInternalClient, so no real cluster is
    needed; time.time is patched to an incrementing counter for
    deterministic X-Timestamp values.
    """
    def setUp(self):
        # Freeze time as an incrementing counter.
        patcher = mock.patch.object(container_deleter.time, 'time',
                                    side_effect=itertools.count())
        patcher.__enter__()
        self.addCleanup(patcher.__exit__)
        # Shrink the batch size so a six-object listing spans two updates.
        patcher = mock.patch.object(container_deleter, 'OBJECTS_PER_UPDATE', 5)
        patcher.__enter__()
        self.addCleanup(patcher.__exit__)
    def test_make_delete_jobs(self):
        ts = '1558463777.42739'
        self.assertEqual(
            container_deleter.make_delete_jobs(
                'acct', 'cont', ['obj1', 'obj2'],
                utils.Timestamp(ts)),
            [{'name': ts + '-acct/cont/obj1',
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': ts + '-acct/cont/obj2',
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])
    def test_make_delete_jobs_native_utf8(self):
        # Native strings: bytes on py2, unicode on py3 — output must be
        # the same unicode job names either way.
        ts = '1558463777.42739'
        uacct = acct = u'acct-\U0001f334'
        ucont = cont = u'cont-\N{SNOWMAN}'
        uobj1 = obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
        uobj2 = obj2 = u'/obj-\N{GREEK CAPITAL LETTER OMEGA}'
        if six.PY2:
            acct = acct.encode('utf8')
            cont = cont.encode('utf8')
            obj1 = obj1.encode('utf8')
            obj2 = obj2.encode('utf8')
        self.assertEqual(
            container_deleter.make_delete_jobs(
                acct, cont, [obj1, obj2], utils.Timestamp(ts)),
            [{'name': u'%s-%s/%s/%s' % (ts, uacct, ucont, uobj1),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': u'%s-%s/%s/%s' % (ts, uacct, ucont, uobj2),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])
    def test_make_delete_jobs_unicode_utf8(self):
        ts = '1558463777.42739'
        acct = u'acct-\U0001f334'
        cont = u'cont-\N{SNOWMAN}'
        obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
        obj2 = u'obj-\N{GREEK CAPITAL LETTER OMEGA}'
        self.assertEqual(
            container_deleter.make_delete_jobs(
                acct, cont, [obj1, obj2], utils.Timestamp(ts)),
            [{'name': u'%s-%s/%s/%s' % (ts, acct, cont, obj1),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': u'%s-%s/%s/%s' % (ts, acct, cont, obj2),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])
    def test_mark_for_deletion_empty_no_yield(self):
        # yield_time=None: returns a plain count instead of a generator.
        with FakeInternalClient([
            swob.Response(json.dumps([
            ])),
        ]) as swift:
            self.assertEqual(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                'marker',
                'end',
                'prefix',
                timestamp=None,
                yield_time=None,
            ), 0)
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=marker&end_marker=end&prefix=prefix',
                 {}, None),
            ])
    def test_mark_for_deletion_empty_with_yield(self):
        # yield_time set: result is a generator of (count, last_obj) pairs.
        with FakeInternalClient([
            swob.Response(json.dumps([
            ])),
        ]) as swift:
            self.assertEqual(list(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                'marker',
                'end',
                'prefix',
                timestamp=None,
                yield_time=0.5,
            )), [(0, None)])
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=marker&end_marker=end&prefix=prefix',
                 {}, None),
            ])
    def test_mark_for_deletion_one_update_no_yield(self):
        ts = '1558463777.42739'
        with FakeInternalClient([
            swob.Response(json.dumps([
                {'name': '/obj1'},
                {'name': 'obj2'},
                {'name': 'obj3'},
            ])),
            swob.Response(json.dumps([
            ])),
            swob.Response(status=202),
        ]) as swift:
            self.assertEqual(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                '',
                '',
                '',
                timestamp=utils.Timestamp(ts),
                yield_time=None,
            ), 3)
            # Listing is paged until empty, then one UPDATE to the
            # .expiring_objects queue container for the timestamp.
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=&end_marker=&prefix=', {}, None),
                ('GET', '/v1/account/container',
                 'format=json&marker=obj3&end_marker=&prefix=', {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
            ])
            self.assertEqual(
                json.loads(swift.calls[-1].body),
                container_deleter.make_delete_jobs(
                    'account', 'container', ['/obj1', 'obj2', 'obj3'],
                    utils.Timestamp(ts)
                )
            )
    def test_mark_for_deletion_two_updates_with_yield(self):
        # Six objects with OBJECTS_PER_UPDATE patched to 5 => two UPDATEs.
        ts = '1558463777.42739'
        with FakeInternalClient([
            swob.Response(json.dumps([
                {'name': 'obj1'},
                {'name': 'obj2'},
                {'name': 'obj3'},
                {'name': u'obj4-\N{SNOWMAN}'},
                {'name': 'obj5'},
                {'name': 'obj6'},
            ])),
            swob.Response(status=202),
            swob.Response(json.dumps([
            ])),
            swob.Response(status=202),
        ]) as swift:
            self.assertEqual(list(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                '',
                'end',
                'pre',
                timestamp=utils.Timestamp(ts),
                yield_time=0,
            )), [(5, 'obj5'), (6, 'obj6'), (6, None)])
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=&end_marker=end&prefix=pre', {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
                ('GET', '/v1/account/container',
                 'format=json&marker=obj6&end_marker=end&prefix=pre',
                 {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
            ])
            self.assertEqual(
                json.loads(swift.calls[-3].body),
                container_deleter.make_delete_jobs(
                    'account', 'container',
                    ['obj1', 'obj2', 'obj3', u'obj4-\N{SNOWMAN}', 'obj5'],
                    utils.Timestamp(ts)
                )
            )
            self.assertEqual(
                json.loads(swift.calls[-1].body),
                container_deleter.make_delete_jobs(
                    'account', 'container', ['obj6'],
                    utils.Timestamp(ts)
                )
            )
| |
# -*- coding: utf-8 -*-
#
# tree.py
#
# (c) D.C.-G. 2014
#
# Tree widget for albow
#
from albow.widget import Widget
from albow.menu import Menu
from albow.fields import IntField, FloatField, TextFieldWrapped
from albow.controls import CheckBox, AttrRef, Label, Button
from albow.dialogs import ask, alert, input_text_buttons
from albow.translate import _
from extended_widgets import ChoiceButton
from theme import ThemeProperty
from layout import Column, Row
from dialogs import Dialog
from palette_view import PaletteView
from scrollpanel import ScrollRow
from utils import blit_in_rect
from pygame import image, Surface, Rect, SRCALPHA, draw, event
import copy
#-----------------------------------------------------------------------------
# Maps a python type to (display name, editor widget class, default value)
# used when creating new tree items. A None widget marks a container type.
# (The `unicode` key confirms this module targets Python 2.)
item_types_map = {dict: ("Compound", None, {}),
                  int: ("Integer", IntField, 0),
                  float: ("Floating point", FloatField, 0.0),
                  unicode: ("Text", TextFieldWrapped, ""),
                  bool: ("Boolean", CheckBox, True),
                  }
def setup_map_types_item(mp=None):
    """Build a {display label: (type, widget, default)} mapping from `mp`
    (defaults to `item_types_map`, which is keyed by type instead).

    When two types share a display name, the existing entry is re-keyed
    and both are stored as "Label (typename)" to keep them distinct.
    """
    if not mp:
        mp = item_types_map
    map_types_item = {}
    for k, v in mp.items():
        if v[0] in map_types_item.keys():
            # Name collision: disambiguate both entries with type names.
            _v = map_types_item.pop(v[0])
            map_types_item[u"%s (%s)"%(_(v[0]), _v[0].__name__)] = _v
            map_types_item[u"%s (%s)"%(_(v[0]), k.__name__)] = (k, v[1], v[2])
        else:
            map_types_item[v[0]] = (k, v[1], v[2])
    return map_types_item
# Module-level default mapping used by the dialogs below.
map_types_item = setup_map_types_item()
#-----------------------------------------------------------------------------
# Tree item builder methods
def create_base_item(self, i_type, i_name, i_value):
    """Return a (name, value) pair for a new tree item.

    `i_type` is the target type itself (e.g. int, float, unicode, dict),
    as stored in `map_types_item`; `i_value` is the raw default to convert.

    Bug fix: the previous code returned ``type(i_type)(i_value)`` — since
    `i_type` is already a type, ``type(i_type)`` is the metaclass, so the
    call collapsed to ``type(i_value)`` and returned the value's *class*
    instead of the converted value.
    """
    return i_name, i_type(i_value)
# All concrete creators share the generic implementation; Tree.__init__
# looks them up dynamically by name ('create_<typename>').
create_dict = create_int = create_float = create_unicode = create_bool = create_base_item
#-----------------------------------------------------------------------------
class SetupNewItemPanel(Dialog):
    """Dialog collecting the name and default value for a new tree item of
    the given type string (a key of `types`)."""
    def __init__(self, type_string, types=map_types_item, ok_action=None):
        self.type_string = type_string
        self.ok_action = ok_action
        title = Label("Choose default data")
        # (type, editor widget class, default value) for this type string.
        self.t, widget, self.v = types[type_string]
        self.n = u""
        w_name = TextFieldWrapped(ref=AttrRef(self, 'n'))
        self.w_value = self.get_widget(widget)
        col = Column([Column([title,]), Label(_("Item Type: %s")%type_string, doNotTranslate=True), Row([Label("Name"), w_name], margin=0), Row([Label("Value"), self.w_value], margin=0), Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
        Dialog.__init__(self, client=col)
    def dismiss_ok(self):
        # Dismiss with (type, name, value); fall back to the type's default
        # when the editor widget exposes no 'value' attribute.
        self.dismiss((self.t, self.n, getattr(self.w_value, 'value', map_types_item.get(self.type_string, [None,] * 3)[2])))
    def get_widget(self, widget):
        """Instantiate the editor widget for the default value.

        Container types (widget is None) get an informational Label; an
        unusable widget class is reported and replaced by a Label too.
        """
        if hasattr(widget, 'value'):
            value = widget(value=self.v)
        elif hasattr(widget, 'text'):
            value = widget(text=self.v)
        elif widget is None:
            value = Label("This item type is a container. Add chlidren later.")
        else:
            msg = "*** Error in SelectItemTypePanel.__init__():\n Widget <%s> has no 'text' or 'value' member."%widget
            print msg
            value = Label(msg)
        return value
#-----------------------------------------------------------------------------
class SelectItemTypePanel(Dialog):
    """Dialog asking the user to pick one item type among `responses`.

    `dismiss_ok` dismisses with the selected choice; cancelling dismisses
    with the Dialog default.
    """
    def __init__(self, title, responses, default=None, ok_action=None):
        self.response = responses[0]
        self.ok_action = ok_action
        title = Label(title)
        self.w_type = ChoiceButton(responses)
        # Bug fix: "Cancel" was wired to `ok_action or self.dismiss`, so a
        # custom ok_action also fired when cancelling. Cancel must always
        # just dismiss (matching SetupNewItemPanel above).
        col = Column([title, self.w_type, Row([Button("OK", action=ok_action or self.dismiss_ok), Button("Cancel", action=self.dismiss)], margin=0)], margin=0, spacing=2)
        Dialog.__init__(self, client=col)
    def dismiss_ok(self):
        self.dismiss(self.w_type.selectedChoice)
#-----------------------------------------------------------------------------
def select_item_type(ok_action, types=map_types_item):
    """Ask the user to pick an item type (when several are available), then
    open the name/value dialog for it.

    Returns the (type, name, value) tuple produced by SetupNewItemPanel, or
    None if the type-selection dialog was cancelled.
    """
    if len(types) > 1:
        # Python 2: dict.keys() returns a list, sorted in place here.
        choices = types.keys()
        choices.sort()
        result = SelectItemTypePanel("Choose item type", responses=choices, default=None).present()
    else:
        result = types.keys()[0]
    # A string result is a chosen type label; anything else means cancel.
    if type(result) in (str, unicode):
        return SetupNewItemPanel(result, types, ok_action).present()
    return None
#-----------------------------------------------------------------------------
class TreeRow(ScrollRow):
    """Scrollable row area of a Tree; forwards clicks to the owning Tree
    and pops the context menu on right-click."""

    def click_item(self, n, e):
        self.parent.click_item(n, e.local)

    def mouse_down(self, e):
        if e.button != 3:
            ScrollRow.mouse_down(self, e)
            return
        # Right click: synthesize the equivalent left-click so the row
        # underneath gets selected first, then show the context menu.
        fake_click = event.Event(e.type, {'alt': e.alt, 'meta': e.meta,
                                          'ctrl': e.ctrl, 'shift': e.shift,
                                          'button': 1, 'cmd': e.cmd,
                                          'local': e.local, 'pos': e.pos,
                                          'num_clicks': e.num_clicks})
        ScrollRow.mouse_down(self, fake_click)
        self.parent.show_menu(e.local)
#-----------------------------------------------------------------------------
class Tree(Column):
    """Tree widget displaying nested dict-like data with a context menu.

    Each visible row (built by `build_layout`) is a list laid out as:
      [0] head surface (bullet + name), [1] field values, [2] field widths,
      [3] name/key, [4] parent row id, [5] child row ids, [6] row id,
      [7] value type, [8] nesting level, [9] value.
    Indices 3..9 are what the item-manipulation methods below rely on.
    """
    # NOTE(review): class-level mutable attribute; instances rebind
    # self.rows in build_layout(), so it is only shared before the first
    # layout pass.
    rows = []
    row_margin = 2
    column_margin = 2
    bullet_size = ThemeProperty('bullet_size')
    bullet_color_active = ThemeProperty('bullet_color_active')
    bullet_color_inactive = ThemeProperty('bullet_color_inactive')
    def __init__(self, *args, **kwargs):
        # Recognised kwargs (all popped before delegating to Column):
        # copyBuffer, _parent, styles, compound_types, item_types,
        # show_fields, data, draw_zebra, inner_width.
        self.menu = [("Add", "add_item"),
                     ("Delete", "delete_item"),
                     ("New child", "add_child"),
                     ("Rename", "rename_item"),
                     ("", ""),
                     ("Cut", "cut_item"),
                     ("Copy", "copy_item"),
                     ("Paste", "paste_item"),
                     ("Paste as child", "paste_child"),
                     ]
        if not hasattr(self, 'map_types_item'):
            global map_types_item
            self.map_types_item = setup_map_types_item()
        self.selected_item_index = None
        self.selected_item = None
        self.clicked_item = None
        self.copyBuffer = kwargs.pop('copyBuffer', None)
        self._parent = kwargs.pop('_parent', None)
        self.styles = kwargs.pop('styles', {})
        self.compound_types = [dict,] + kwargs.pop('compound_types', [])
        self.item_types = self.compound_types + kwargs.pop('item_types', [a[0] for a in self.map_types_item.values()] or [int, float, unicode, bool])
        # Bind the module-level create_<type> helpers on the instance so
        # add_item()/add_child() can look them up with getattr.
        for t in self.item_types:
            if 'create_%s'%t.__name__ in globals().keys():
                setattr(self, 'create_%s'%t.__name__, globals()['create_%s'%t.__name__])
        self.show_fields = kwargs.pop('show_fields', False)
        self.deployed = []
        self.data = data = kwargs.pop("data", {})
        self.draw_zebra = draw_zebra = kwargs.pop('draw_zebra', True)
        # self.inner_width = kwargs.pop('inner_width', 'auto')
        self.inner_width = kwargs.pop('inner_width', 500)
        self.__num_rows = len(data.keys())
        self.build_layout()
        # row_height = self.font.size(' ')[1]
        row_height = self.font.get_linesize()
        self.treeRow = treeRow = TreeRow((self.inner_width, row_height), 10, draw_zebra=draw_zebra)
        Column.__init__(self, [treeRow,], **kwargs)
    def dispatch_key(self, name, evt):
        # Keyboard navigation: Up/Down/Page up/Page down move the selection;
        # Return selects and (for container rows) expands/collapses.
        if not hasattr(evt, 'key'):
            return
        if name == "key_down":
            keyname = self.root.getKey(evt)
            if keyname == "Up" and self.selected_item_index > 0:
                if self.selected_item_index == None:
                    self.selected_item_index = -1
                self.selected_item_index = max(self.selected_item_index - 1, 0)
            elif keyname == "Down" and self.selected_item_index < len(self.rows) - 1:
                if self.selected_item_index == None:
                    self.selected_item_index = -1
                self.selected_item_index += 1
            elif keyname == 'Page down':
                if self.selected_item_index == None:
                    self.selected_item_index = -1
                self.selected_item_index = min(len(self.rows) - 1, self.selected_item_index + self.treeRow.num_rows())
            elif keyname == 'Page up':
                if self.selected_item_index == None:
                    self.selected_item_index = -1
                self.selected_item_index = max(0, self.selected_item_index - self.treeRow.num_rows())
            # Scroll so the selected row stays visible.
            if self.treeRow.cell_to_item_no(0, 0) != None and (self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 > self.selected_item_index or self.treeRow.cell_to_item_no(0, 0) + self.treeRow.num_rows() -1 < self.selected_item_index):
                self.treeRow.scroll_to_item(self.selected_item_index)
            if keyname == 'Return' and self.selected_item_index != None:
                self.select_item(self.selected_item_index)
                if self.selected_item[7] in self.compound_types:
                    self.deploy(self.selected_item[6])
    def cut_item(self):
        # copyBuffer flag 1 == cut (paste keeps the original name).
        self.copyBuffer = ([] + self.selected_item, 1)
        self.delete_item()
    def copy_item(self):
        # copyBuffer flag 0 == copy (paste asks for a new name).
        self.copyBuffer = ([] + self.selected_item, 0)
    def paste_item(self):
        """Paste the buffered item as a sibling of the selection."""
        parent = self.get_item_parent(self.selected_item)
        name = self.copyBuffer[0][3]
        old_name = u"%s"%self.copyBuffer[0][3]
        if self.copyBuffer[1] == 0:
            name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
        else:
            old_name = ""
        if name and type(name) in (str, unicode) and name != old_name:
            new_item = copy.deepcopy(self.copyBuffer[0][9])
            if hasattr(new_item, 'name'):
                new_item.name = name
            self.add_item_to(parent, (name, new_item))
    def paste_child(self):
        """Paste the buffered item as a child of the selection."""
        name = self.copyBuffer[0][3]
        old_name = u"%s"%self.copyBuffer[0][3]
        names = []
        children = self.get_item_children(self.selected_item)
        if children:
            names = [a[3] for a in children]
        # Only prompt for a new name when it would clash with a sibling.
        if name in names:
            name = input_text_buttons("Choose a name", 300, self.copyBuffer[0][3])
        else:
            old_name = ""
        if name and type(name) in (str, unicode) and name != old_name:
            new_item = copy.deepcopy(self.copyBuffer[0][9])
            if hasattr(new_item, 'name'):
                new_item.name = name
            self.add_item_to(self.selected_item, (name, new_item))
    @staticmethod
    def add_item_to_dict(parent, name, item):
        # Type-specific insertion hook; resolved by name in add_item_to().
        parent[name] = item
    def add_item_to(self, parent, (name, item)):
        """Insert (name, item) under `parent` (a row list, or None for the
        tree root) via the matching add_item_to_<typename> hook."""
        if parent is None:
            tp = 'dict'
            parent = self.data
        else:
            tp = parent[7].__name__
            parent = parent[9]
        if not name:
            # Generate a unique placeholder name.
            i = 0
            name = 'Item %03d'%i
            while name in self.data.keys():
                i += 1
                name = 'Item %03d'%i
        meth = getattr(self, 'add_item_to_%s'%tp, None)
        if meth:
            meth(parent, name, item)
            self.build_layout()
        else:
            alert(_("No function implemented to add items to %s type.")%type(parent).__name__, doNotTranslate=True)
    def add_item(self, types_item=None):
        """Prompt for a new item and add it as a sibling of the selection."""
        r = select_item_type(None, types_item or self.map_types_item)
        if type(r) in (list, tuple):
            t, n, v = r
            meth = getattr(self, 'create_%s'%t.__name__, None)
            if meth:
                new_item = meth(self, t, n, v)
                self.add_item_to(self.get_item_parent(self.selected_item), new_item)
    def add_child(self, types_item=None):
        """Prompt for a new item and add it as a child of the selection."""
        r = select_item_type(None, types_item or self.map_types_item)
        if type(r) in (list, tuple):
            t, n, v = r
            meth = getattr(self, 'create_%s'%t.__name__, None)
            if meth:
                new_item = meth(self, t, n, v)
                self.add_item_to(self.selected_item, new_item)
    def delete_item(self):
        # NOTE(review): `parent` is either a row list or self.data, and the
        # selected row (a list) is used as the deletion key — verify this
        # works for non-root parents; it looks like it assumes dict parents
        # keyed by row, not by name.
        parent = self.get_item_parent(self.selected_item) or self.data
        del parent[self.selected_item]
        self.selected_item_index = None
        self.selected_item = None
        self.build_layout()
    def rename_item(self):
        result = input_text_buttons("Choose a name", 300, self.selected_item[3])
        if type(result) in (str, unicode):
            self.selected_item[3] = result
            self.build_layout()
    def get_item_parent(self, item):
        """Return the row whose id matches `item`'s parent id, or None."""
        if item:
            pid = item[4]
            for itm in self.rows:
                if pid == itm[6]:
                    return itm
    def get_item_children(self, item):
        """Return child rows of `item`.

        For deployed (expanded) items the children are read from
        self.rows; otherwise they are built on the fly from the value.
        """
        children = []
        if item:
            if item[6] in self.deployed:
                cIds = item[5]
                idx = self.rows.index(item)
                for child in self.rows[idx:]:
                    if child[8] == item[8] + 1 and child[4] == item[6]:
                        children.append(child)
            else:
                k = item[3]
                v = item[9]
                lvl = item[8]
                id = item[6]
                aId = len(self.rows) + 1
                # Optional parse_<typename> hook converts the value to a
                # dict-like object first.
                meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
                if meth is not None:
                    _v = meth(k, v)
                else:
                    _v = v
                ks = _v.keys()
                ks.sort()
                ks.reverse()
                for a in ks:
                    b = _v[a]
                    # NOTE(review): the first `itm` assignment is dead —
                    # it is overwritten immediately by the row-shaped one.
                    itm = [lvl + 1, a, b, id, [], aId]
                    itm = [None, None, None, a, id, [], aId, type(b), lvl + 1, b]
                    children.insert(0, itm)
                    aId += 1
        return children
    def show_menu(self, pos):
        """Pop up the context menu and invoke the chosen handler method."""
        if self.menu:
            m = Menu("Menu", self.menu, handler=self)
            i = m.present(self, pos)
            if i > -1:
                meth = getattr(self, self.menu[i][1], None)
                if meth:
                    meth()
    # *_enabled() methods are queried by the Menu to grey out entries.
    def cut_item_enabled(self):
        return self.selected_item is not None
    def copy_item_enabled(self):
        return self.cut_item_enabled()
    def paste_item_enabled(self):
        return self.copyBuffer is not None
    def paste_child_enabled(self):
        if not self.selected_item:
            return False
        return self.paste_item_enabled() and self.selected_item[7] in self.compound_types
    def add_item_enabled(self):
        return True
    def add_child_enabled(self):
        if not self.selected_item:
            return False
        return self.selected_item[7] in self.compound_types
    def delete_item_enabled(self):
        return self.selected_item is not None
    def rename_item_enabled(self):
        return self.selected_item is not None
    def build_layout(self):
        """Rebuild self.rows from self.data.

        Walks the data depth-first (children of deployed rows are pushed
        to the front of the work list), renders each row's bullet+name
        surface, and stores rows in the schema documented on the class.
        """
        data = self.data
        parent = 0
        children = []
        keys = data.keys()
        keys.sort()
        items = [[0, a, data[a], parent, children, keys.index(a) + 1] for a in keys]
        rows = []
        w = 50
        aId = len(items) + 1
        while items:
            lvl, k, v, p, c, id = items.pop(0)
            _c = False
            fields = []
            c = [] + c
            if type(v) in self.compound_types:
                # Optional parse_<typename> hook to view the value as a dict.
                meth = getattr(self, 'parse_%s'%v.__class__.__name__, None)
                if meth is not None:
                    _v = meth(k, v)
                else:
                    _v = v
                ks = _v.keys()
                ks.sort()
                ks.reverse()
                for a in ks:
                    b = _v[a]
                    if id in self.deployed:
                        itm = [lvl + 1, a, b, id, [], aId]
                        items.insert(0, itm)
                        c.append(aId)
                    _c = True
                    aId += 1
            else:
                if type(v) in (list, tuple):
                    fields = v
                elif type(v) not in self.compound_types or hasattr(self._parent, 'build_%s'%k.lower()):
                    fields = [v,]
            head = Surface((self.bullet_size * (lvl + 1) + self.font.size(k)[0], self.bullet_size), SRCALPHA)
            if _c:
                meth = getattr(self, 'draw_%s_bullet'%{False: 'closed', True: 'opened'}[id in self.deployed])
            else:
                meth = getattr(self, 'draw_%s_bullet'%v.__class__.__name__, None)
            if not meth:
                meth = self.draw_deadend_bullet
            bg, fg, shape, text = self.styles.get(type(v),
                                                  ({True: self.bullet_color_active, False: self.bullet_color_inactive}[_c],
                                                   self.fg_color, 'square', ''),
                                                  )
            # NOTE(review): bare except silently swallows any drawing error
            # for this row — confirm that is intentional.
            try:
                meth(head, bg, fg, shape, text, k, lvl)
            except:
                pass
            rows.append([head, fields, [w] * len(fields), k, p, c, id, type(v), lvl, v])
        self.rows = rows
        return rows
    def deploy(self, id):
        """Toggle expansion of the row with the given id and re-layout."""
        if id in self.deployed:
            self.deployed.remove(id)
        else:
            self.deployed.append(id)
        self.build_layout()
    def click_item(self, n, pos):
        """Handle a click on row `n`: toggle expansion when the bullet was
        hit, otherwise select the row."""
        self.clicked_item = row = self.rows[n]
        r = self.get_bullet_rect(row[0], row[8])
        x = pos[0]
        if self.margin + r.left - self.treeRow.hscroll <= x <= self.margin + self.treeRow.margin + r.right - self.treeRow.hscroll:
            id = row[6]
            self.deploy(id)
        else:
            self.select_item(n)
    def select_item(self, n):
        self.selected_item_index = n
        self.selected_item = self.rows[n]
    def get_bullet_rect(self, surf, lvl):
        # Bullet square indented by one bullet width per nesting level.
        r = Rect(0, 0, self.bullet_size, self.bullet_size)
        r.left = self.bullet_size * lvl
        r.inflate_ip(-4, -4)
        return r
    def draw_item_text(self, surf, r, text):
        buf = self.font.render(unicode(text), True, self.fg_color)
        blit_in_rect(surf, buf, Rect(r.right, r.top, surf.get_width() - r.right, r.height), 'c')
    def draw_deadend_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Diamond: leaf item with no type-specific bullet.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.midtop, r.midright, r.midbottom, r.midleft])
        self.draw_item_text(surf, r, item_text)
    def draw_closed_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Right-pointing triangle: collapsed container.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.topleft, r.midright, r.bottomleft])
        self.draw_item_text(surf, r, item_text)
    def draw_opened_bullet(self, surf, bg, fg, shape, text, item_text, lvl):
        # Down-pointing triangle: expanded container.
        r = self.get_bullet_rect(surf, lvl)
        draw.polygon(surf, bg, [r.topleft, r.midbottom, r.topright])
        self.draw_item_text(surf, r, item_text)
    def draw_tree_cell(self, surf, i, data, cell_rect, column):
        """Draw one cell: text for strings, blitted surface otherwise."""
        if type(data) in (str, unicode):
            self.draw_text_cell(surf, i, data, cell_rect, 'l', self.font)
        else:
            self.draw_image_cell(surf, i, data, cell_rect, column)
    @staticmethod
    def draw_image_cell(surf, i, data, cell_rect, column):
        """Blit an already-rendered surface into the cell."""
        blit_in_rect(surf, data, cell_rect, 'l')
    def draw_text_cell(self, surf, i, data, cell_rect, align, font):
        buf = font.render(unicode(data), True, self.fg_color)
        blit_in_rect(surf, buf, cell_rect, align)
    def num_rows(self):
        return len(self.rows)
    def row_data(self, row):
        return self.rows[row]
    def column_info(self, row_data):
        """Yield (index, x, width, None, data) for each displayed column of
        a row: the head surface first, then (optionally) the field values."""
        m = self.column_margin
        d = 2 * m
        x = 0
        for i in range(0,2):
            if i < 1:
                width = self.width
                data = row_data[i]
                yield i, x + m, width - d, None, data
                x += width
        if self.show_fields:
            for i in range(len(row_data[2])):
                width = 50 * (i + 1)
                data = row_data[2][i]
                # NOTE(review): `type(data) != (str, unicode)` compares a
                # type to a tuple and is always True, so every field is
                # repr()'d — probably meant `not in`; behavior kept as-is.
                if type(data) != (str, unicode):
                    data = repr(data)
                yield i, x + m, width - d, None, data
                x += width
| |
import pkg_resources
try:
pkg_resources.get_distribution('numpy')
except pkg_resources.DistributionNotFound:
numpyPresent = False
print("Error: Numpy package not available.")
else:
numpyPresent = True
import numpy as np
try:
pkg_resources.get_distribution('pandas')
except pkg_resources.DistributionNotFound:
pandasPresent = False
print("Error: Pandas package not available.")
else:
pandasPresent = True
import pandas as pd
import collections
import inspect
def phjRemoveUnwantedRows(phjDF,
                          phjColumnNamesList,
                          phjPrintResults = False):
    """Restrict a dataframe to rows where the listed columns are binary.

    Cleans phjDF in three passes:
      1. drop rows containing any NaN value;
      2. coerce the listed columns to numeric (strings become NaN) and
         drop the rows that produced NaN;
      3. cast the listed columns to int and drop rows holding values
         other than 0 or 1.

    Parameters
    ----------
    phjDF : pandas.DataFrame
        Input data; only the listed columns are returned.
    phjColumnNamesList : list of str
        Columns that must contain only 0/1 values.
    phjPrintResults : bool
        If True, report how many rows each pass removed.

    Returns
    -------
    pandas.DataFrame
        Cleaned dataframe (listed columns only, index reset).
    """
    # Remove any rows with one or more NaN values
    phjNumberRowsPreNaN = len(phjDF.index)
    phjDF = phjDF.dropna(how = 'any').reset_index(drop = True)
    phjNumberRowsPostNaN = len(phjDF.index)
    if phjPrintResults == True:
        print('Number of rows removed with NaN values = ', phjNumberRowsPreNaN - phjNumberRowsPostNaN)
        print('\n')
        print('Dataframe with NaN values removed')
        print(phjDF)
        print('\n')
    # Convert each column to numeric values - strings will be converted to NaN and removed
    phjNumberRowsPreStrings = len(phjDF.index)
    for c in phjColumnNamesList:
        phjDF[c] = pd.to_numeric(phjDF[c],errors = 'coerce')
    phjDF = phjDF.dropna(how = 'any').reset_index(drop = True)
    phjNumberRowsPostStrings = len(phjDF.index)
    if phjPrintResults == True:
        print('Number of rows removed due to containing string values = ', phjNumberRowsPreStrings - phjNumberRowsPostStrings)
        print('\n')
        print('Dataframe with strings values removed')
        print(phjDF)
        print('\n')
    # Convert all columns to integers
    for c in phjColumnNamesList:
        phjDF[c] = phjDF[c].astype(int)
    # Remove rows that contain values that are not zero or 1.
    # Vectorised mask replaces the old per-column temporary 'isin' column,
    # which also relied on DataFrame.drop's positional axis argument that
    # was removed in pandas 2.0 (drop('isin', 1) raises TypeError there).
    phjNumberRowsPreBinaryRange = len(phjDF.index)
    phjBinaryMask = phjDF[phjColumnNamesList].isin([0,1]).all(axis = 1)
    phjDF = phjDF.loc[phjBinaryMask,:].reset_index(drop = True)
    phjNumberRowsPostBinaryRange = len(phjDF.index)
    if phjPrintResults == True:
        print('Number of rows removed due to values being out of range = ', phjNumberRowsPreBinaryRange - phjNumberRowsPostBinaryRange)
        print('\n')
        print('Dataframe containing zero and 1 values only')
        print(phjDF)
        print('\n')
    return phjDF[phjColumnNamesList].reset_index(drop = True)
def phjBinaryVarsToSquareMatrix(phjDataDF,
                                phjColumnNamesList,
                                phjOutputFormat = 'arr',
                                phjPrintResults = False):
    """Cross-tabulate binary indicator columns into a square matrix.

    The diagonal cell [c, c] counts rows where only variable c was set;
    off-diagonal cell [r, c] counts rows where r and c were set together
    (with at least one other variable set besides r alone).

    Parameters
    ----------
    phjDataDF : pandas.DataFrame
        Input data containing the listed binary columns.
    phjColumnNamesList : list of str
        Binary (0/1) columns to cross-tabulate; rows are pre-cleaned via
        phjRemoveUnwantedRows().
    phjOutputFormat : str
        'arr' returns a numpy array, 'df' a dataframe; anything else
        returns None.
    phjPrintResults : bool
        If True, print intermediate dataframes and the final matrix.

    Returns
    -------
    numpy array, pandas.DataFrame or None
    """
    try:
        phjDF = phjDataDF[phjColumnNamesList]
    except KeyError as e:
        print('A KeyError has occurred ({0}). Check that the column names provided exist in the dataframe.'.format(e))
        return None
    phjNumberRowsOriginal = len(phjDF.index)
    if phjPrintResults == True:
        print('Number of rows in original database = ', phjNumberRowsOriginal)
        print('\n')
        print('Original dataframe')
        print(phjDF)
        print('\n')
    # Remove rows where any values are missing, strings, or not a zero or 1
    phjDF = phjRemoveUnwantedRows(phjDF = phjDF,
                                  phjColumnNamesList = phjColumnNamesList,
                                  phjPrintResults = phjPrintResults)
    # Per-row count of set variables; 1 => the diagonal, >1 => co-occurrence.
    phjDF['rowSum'] = phjDF[phjColumnNamesList].sum(axis=1)
    # Create a blank square matrix (in dataframe form) with column and row indices the same
    phjTempMatrixDF = pd.DataFrame(columns=phjColumnNamesList,index=phjColumnNamesList)
    # Start by completing the diagonal
    # ================================
    # Use just those rows with only a single entry (i.e. only one variable is entered).
    # Create a series containing the name of the variable and the sum of entries.
    # (For some reason, if the dataframe contains one or more rows where rowSum equals 1 then
    # the series contains integers but, if there are no rowSum values equal to 1 (and, therefore, the values
    # sum of the columns equal zero), then the series contains floats. Use astype(int) to avoid issues.)
    phjTempSer = phjDF.loc[phjDF['rowSum']==1,phjColumnNamesList].sum(axis=0).astype(int)
    # Step through each diagonal cell in the matrix and enter tbe sum value
    for c in phjColumnNamesList:
        phjTempMatrixDF.loc[c,c]=phjTempSer[c]
    # Next fill in the rest of the matrix
    # ===================================
    # Step through each variable in the list and create a series consisting
    # of all OTHER variables and the number of entries or those variables
    for c in phjColumnNamesList:
        phjOtherCols = [i for i in phjColumnNamesList if i!=c]
        phjTempSer = phjDF.loc[(phjDF['rowSum']>1) & (phjDF[c]==1),phjOtherCols].sum(axis=0).astype(int)
        # For each row index, step through each column and add the data
        for oc in phjOtherCols:
            phjTempMatrixDF.loc[c,oc] = phjTempSer[oc]
    if phjPrintResults == True:
        print('Square matrix')
        print(phjTempMatrixDF)
        print('\n')
    if phjOutputFormat == 'arr':
        return phjTempMatrixDF.values
    elif phjOutputFormat == 'df':
        return phjTempMatrixDF
    else:
        print('The phjOutputFormat parammeter was set to an unknown value (\'{0}\'). The return value was set to None.'.format(phjOutputFormat))
        print('\n')
        return None
def phjLongToWideBinary(phjDF,
                        phjGroupbyVarName,
                        phjVariablesVarName,
                        phjValuesDict = {0:0,1:1},
                        phjPrintResults = False):
    """Convert a long dataframe of group/factor pairs to a wide binary dataframe.

    Each unique value of the grouping column becomes one output row and each
    unique value of the factor column becomes one binary column indicating
    whether that factor was present in the group.  For example, it converts:

         X  Y
      0  1  a
      1  1  b
      2  1  d
      3  2  b
      4  2  c
      5  3  d
      6  3  e
      7  3  a
      8  3  f
      9  4  b

    to:

         X  a  b  d  c  e  f
      0  1  1  1  1  0  0  0
      1  2  0  1  0  1  0  0
      2  3  1  0  1  0  1  1
      3  4  0  1  0  0  0  0

    Parameters
    ----------
    phjDF : pd.DataFrame
        Long-format dataframe containing the two named columns.
    phjGroupbyVarName : str
        Name of the grouping column (becomes the first output column).
    phjVariablesVarName : str
        Name of the factor column (its unique values become output columns).
    phjValuesDict : dict
        Optional mapping replacing the default 0/1 cell values (keys must be
        0 and 1).  The default is a shared mutable dict but is never mutated
        here; it is retained unchanged for interface compatibility.
    phjPrintResults : bool
        Validated but otherwise unused; kept for interface compatibility.

    Returns
    -------
    pd.DataFrame or None
        The wide binary dataframe, or None if validation fails when the
        function is called directly from module level.

    Raises
    ------
    AssertionError
        If validation fails and the function was called by another function.
    """
    # Imported locally so the Mapping check works even if the enclosing
    # module only did `import collections`: the `collections.Mapping` alias
    # used previously was removed in Python 3.10.
    import collections.abc

    try:
        # Check whether required parameters have been set to correct type
        assert isinstance(phjDF,pd.DataFrame), "Parameter, 'phjDF' needs to be a Pandas dataframe."
        assert isinstance(phjGroupbyVarName,str), "Parameter 'phjGroupbyVarName' needs to be a string."
        assert isinstance(phjVariablesVarName,str), "Parameter 'phjVariablesVarName' needs to be a string."
        # collections.abc.Mapping matches dict(), collections.OrderedDict()
        # and collections.UserDict() (see comment by Alexander Ryzhov at
        # https://stackoverflow.com/questions/25231989/how-to-check-if-a-variable-is-a-dictionary-in-python).
        assert isinstance(phjValuesDict,collections.abc.Mapping), "Parameter 'phjValuesDict' needs to be a dict."

        # Check whether arguments are set to allowable values
        for k in phjValuesDict:
            assert k in [0,1], "The key values in phjValuesDict need to be either 0 or 1."

        assert isinstance(phjPrintResults,bool), "Parameter 'phjPrintResults' needs to be a boolean (True, False) value."

        # Check that referenced columns exist in the dataframe
        assert phjGroupbyVarName in phjDF.columns, "The column name 'phjGroupbyVarName' does not exist in dataframe."
        assert phjVariablesVarName in phjDF.columns, "The column name 'phjVariablesVarName' does not exist in dataframe."

    except AssertionError as e:
        # If function has been called directly, present message and return None.
        if inspect.stack()[1][3] == '<module>':
            print("An AssertionError occurred in {fname}() function. ({msg})\n".format(msg = e,
                                                                                       fname = inspect.stack()[0][3]))
            return None

        # If function has been called by another function then modify message
        # and re-raise the exception.  (Previously the re-raise was silently
        # swallowed by a `return` statement inside a `finally` block, so the
        # caller received None instead of the documented exception.)
        else:
            print("An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\n".format(msg = e,
                                                                                                                             fname = inspect.stack()[0][3],
                                                                                                                             callfname = inspect.stack()[1][3]))
            raise

    # Create a scratch DF with appropriate rows and columns, filled with zero
    phjScratchDF = pd.DataFrame(index = pd.Series(phjDF[phjGroupbyVarName].unique()),
                                columns = list(phjDF[phjVariablesVarName].unique())).fillna(0)

    phjScratchDF.index.name = phjGroupbyVarName

    # Within each group, collect all the factor values into a single list
    phjGroup = phjDF[[phjGroupbyVarName,phjVariablesVarName]].groupby(phjGroupbyVarName).agg(lambda phjRow: list(phjRow))

    # Step through each group and mark each factor present in the group with a 1
    for g in phjGroup.index.values.tolist():
        phjScratchDF.loc[g,phjGroup.loc[g,phjVariablesVarName]] = 1

    # Replace the default 0 and 1 cell values with user-defined values, but
    # only if phjValuesDict differs from the default.  Values are compared as
    # strings so that e.g. {0:False,1:True} is treated as different from
    # {0:0,1:1} even though the dicts compare equal.
    if {k:str(v) for k,v in phjValuesDict.items()} != {k:str(v) for k,v in {0:0,1:1}.items()}:
        phjScratchDF = phjScratchDF.replace(phjValuesDict)

    phjScratchDF = phjScratchDF.reset_index(drop = False)

    return phjScratchDF
# Script entry point: run main() only when executed directly, not on import.
# (main() is presumably defined earlier in this module — not visible here.)
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import subprocess
import textwrap
from cinder.volume import configuration
from cinder.compute import nova
# Short alias; OrderedDict is used repeatedly below to keep deterministic
# ordering in the generated opts.py.
OrderedDict = collections.OrderedDict

# Repository root, derived from this script's own location on disk.
BASEDIR = os.path.split(os.path.realpath(__file__))[0] + "/../../"
# Everything below runs only when the script is executed directly: it scans
# the cinder tree for CONF.register_opt(s) calls and regenerates
# cinder/opts.py, the aggregated list_opts() module used by genconfig.
if __name__ == "__main__":
    os.chdir(BASEDIR)
    opt_file = open("cinder/opts.py", 'w')
    opt_dict = OrderedDict()
    dir_trees_list = []
    # Grep markers for files that register options.
    REGISTER_OPTS_STR = "CONF.register_opts("
    REGISTER_OPT_STR = "CONF.register_opt("

    license_str = textwrap.dedent(
        """
        # Licensed under the Apache License, Version 2.0 (the "License");
        # you may not use this file except in compliance with the License.
        # You may obtain a copy of the License at
        #
        # http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing, software
        # distributed under the License is distributed on an "AS IS" BASIS,
        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
        # implied.
        # See the License for the specific language governing permissions and
        # limitations under the License.\n
        """)
    opt_file.write(license_str)

    edit_header = textwrap.dedent(
        """
        ###################################################################
        # WARNING!
        #
        # Do not edit this file directly. This file should be generated by
        # running the command "tox -e genopts" any time a config option
        # has been added, changed, or removed.
        ###################################################################\n
        """)
    opt_file.write(edit_header)

    opt_file.write("import itertools\n\n")

    # NOTE(geguileo): We need to register all OVOs before importing any other
    # cinder files, otherwise any decorator that uses cinder.objects.YYY will
    # fail with exception AttributeError: 'module' object has no attribute
    # 'YYY' when running tox -egenconfig
    opt_file.write("from cinder import objects\nobjects.register_all()\n\n")

    targetdir = 'cinder'
    # Shell pipeline: find every non-test .py file containing the marker,
    # strip the absolute prefix and sort uniquely.
    common_string = ('find ' + targetdir + ' -type f -name "*.py" ! '
                     '-path "*/tests/*" -exec grep -l "%s" {} '
                     '+ | sed -e "s|^' + BASEDIR +
                     '|/|g" | sort -u')
    cmd_opts = common_string % REGISTER_OPTS_STR
    output_opts = subprocess.check_output(  # nosec : command is hardcoded
        '{}'.format(cmd_opts), shell=True,
        universal_newlines=True)
    dir_trees_list = output_opts.split()

    cmd_opt = common_string % REGISTER_OPT_STR
    output_opt = subprocess.check_output(  # nosec : command is hardcoded
        '{}'.format(cmd_opt), shell=True,
        universal_newlines=True)
    temp_list = output_opt.split()
    for item in temp_list:
        dir_trees_list.append(item)
    dir_trees_list.sort()
    # `flag` marks that the current file was a package __init__.py and must
    # be keyed in opt_dict by its dotted package path instead.
    flag = False

    def _check_import(aline):
        # Break an over-long generated import line at its ' as ' separator;
        # returns a 3-tuple (head, ' as ', alias) or a 1-element list.
        if len(aline) > 79:
            new_lines = aline.partition(' as ')
            return new_lines
        else:
            return [aline]

    for atree in dir_trees_list:
        # Files that mention the marker but register no real opt lists.
        if atree in ["tools/config/generate_cinder_opts.py",
                     "cinder/hacking/checks.py",
                     "cinder/volume/configuration.py",
                     "cinder/test.py"]:
            continue

        dirs_list = atree.split('/')

        import_module = "from "
        init_import_module = ""
        import_name = ""

        for dir in dirs_list:
            if dir.find(".py") == -1:
                # Still walking directories: accumulate the dotted path and
                # the underscore-joined synthetic alias.
                import_module += dir + "."
                init_import_module += dir + "."
                import_name += dir + "_"
            else:
                if dir[:-3] != "__init__":
                    # Regular module: emit "from pkg import mod as alias".
                    import_name += dir[:-3].replace("_", "")
                    import_module = (import_module[:-1] + " import " +
                                     dir[:-3] + " as " + import_name)
                    lines = _check_import(import_module)
                    if len(lines) > 1:
                        # Over-long import: continue the line after ' as '.
                        opt_file.write(lines[0] + lines[1] + "\\\n")
                        opt_file.write(" " + lines[2] + "\n")
                    else:
                        opt_file.write(lines[0] + "\n")
                else:
                    # Package __init__.py: plain "import package.path".
                    import_name = import_name[:-1].replace('/', '.')
                    init_import = atree[:-12].replace('/', '.')
                    opt_file.write("import " + init_import + "\n")
                    flag = True

        if flag is False:
            opt_dict[import_name] = atree
        else:
            opt_dict[init_import_module.strip(".")] = atree
            flag = False

    # Maps config group name -> list of "<module alias>.<opt list>" strings.
    registered_opts_dict = OrderedDict([('DEFAULT', [])])

    def _write_item(opts):
        # Emit one entry of the generated list_opts() structure.  Entries
        # ending in "opt" come from register_opt() (a single option) and are
        # wrapped in a list so itertools.chain treats them uniformly.
        list_name = opts[-3:]
        if list_name.lower() == "opt":
            line_to_write = " [" + opts.strip("\n") + "],\n"
            opt_line = _check_line_length(line_to_write)
            if len(opt_line) > 1:
                opt_file.write(opt_line[0] + opt_line[1] + "\n")
                opt_file.write(" " + opt_line[2])
            else:
                opt_file.write(opt_line[0])
        else:
            line_to_write = " " + opts.strip("\n") + ",\n"
            opt_line = _check_line_length(line_to_write)
            if len(opt_line) > 1:
                opt_file.write(opt_line[0] + opt_line[1] + "\n")
                opt_file.write(" " + opt_line[2])
            else:
                opt_file.write(opt_line[0])

    def _retrieve_name(aline):
        # Strip the CONF.register_opt(s)( prefix, leaving the opt-list name.
        if REGISTER_OPT_STR in aline:
            str_to_replace = REGISTER_OPT_STR
        else:
            str_to_replace = REGISTER_OPTS_STR
        return aline.replace(str_to_replace, "")

    def _check_line_length(aline):
        # NOTE(review): splits over-long lines on every '.' and re-appends a
        # '.' after every piece, including the last — confirm downstream
        # tooling accepts the resulting formatting.
        if len(aline) > 79:
            temp = aline.split(".")
            lines_to_write = []

            for section in temp:
                lines_to_write.append(section)
                lines_to_write.append('.')

            return lines_to_write
        else:
            return [aline]

    # Scan each collected source file for its registration calls and bucket
    # the registered opt lists by config group.
    for key in opt_dict:
        fd = os.open(opt_dict[key], os.O_RDONLY)
        afile = os.fdopen(fd, "r")

        for aline in afile:
            exists = aline.find("CONF.register_opt")
            if exists != -1:
                # TODO(kjnelson) FIX THIS LATER. These are instances where
                # CONF.register_opts is happening without actually registering
                # real lists of opts

                exists = aline.find('base_san_opts')
                if (exists != -1) or (key == 'cinder_volume_configuration'):
                    continue

                # Anything after ', group=' names the config group; without
                # it the opts belong to DEFAULT.
                group_exists = aline.find(', group=')
                formatted_opt = _retrieve_name(aline[: group_exists])
                formatted_opt = formatted_opt.replace(')', '').strip()
                if group_exists != -1:
                    group_name = aline[group_exists:-1].replace(
                        ', group=\"\'', '').replace(
                        ', group=', '').strip(
                        "\'\")").upper()
                    # NOTE(dulek): Hack to resolve constants manually.
                    if (group_name.endswith('SHARED_CONF_GROUP')
                            or group_name.lower() == 'backend_defaults'):
                        group_name = configuration.SHARED_CONF_GROUP
                    if (group_name == 'NOVA_GROUP'):
                        group_name = nova.NOVA_GROUP
                    if group_name in registered_opts_dict:
                        line = key + "." + formatted_opt
                        registered_opts_dict[group_name].append(line)
                    else:
                        line = key + "." + formatted_opt
                        registered_opts_dict[group_name] = [line]
                else:
                    line = key + "." + formatted_opt
                    registered_opts_dict['DEFAULT'].append(line)

    # Emit the list_opts() entry point consumed by oslo.config generator.
    setup_str = ("\n\n"
                 "def list_opts():\n"
                 " return [\n")
    opt_file.write(setup_str)

    registered_opts_dict = OrderedDict(sorted(registered_opts_dict.items(),
                                              key = lambda x: x[0]))

    for key in registered_opts_dict:
        # NOTE(jsbryant): We need to have 'DEFAULT' in uppercase but any
        # other section using uppercase causes a Sphinx warning.
        if (key == 'DEFAULT'):
            section_start_str = (" ('" + key + "',\n"
                                 " itertools.chain(\n")
        else:
            section_start_str = (" ('" + key.lower() + "',\n"
                                 " itertools.chain(\n")
        opt_file.write(section_start_str)
        for item in registered_opts_dict[key]:
            _write_item(item)
        section_end_str = " )),\n"
        opt_file.write(section_end_str)

    closing_str = (" ]\n")
    opt_file.write(closing_str)
    opt_file.close()
| |
# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import random
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder import ssh_utils
from cinder import utils
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant
from cinder.zonemanager import fc_san_lookup_service as fc_service
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class CiscoFCSanLookupService(fc_service.FCSanLookupService):
    """The SAN lookup service that talks to Cisco switches.

    Version History:
        1.0.0 - Initial version

    """

    VERSION = "1.0.0"

    def __init__(self, **kwargs):
        """Initializing the client."""
        super(CiscoFCSanLookupService, self).__init__(**kwargs)
        self.configuration = kwargs.get('configuration', None)
        self.create_configuration()

        # Per-switch SSH credentials; repopulated for each fabric from
        # self.fabric_configs in get_device_mapping_from_network().
        self.switch_user = ""
        self.switch_port = ""
        self.switch_pwd = ""
        self.switch_ip = ""
        self.sshpool = None

    def create_configuration(self):
        """Configuration specific to SAN context values."""
        config = self.configuration

        fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
        LOG.debug('Fabric Names: %s', fabric_names)

        # There can be more than one SAN in the network and we need to
        # get credentials for each for SAN context lookup later.
        # Cisco Zonesets require VSANs
        if fabric_names:
            self.fabric_configs = fabric_opts.load_fabric_configurations(
                fabric_names)

    def get_device_mapping_from_network(self,
                                        initiator_wwn_list,
                                        target_wwn_list):
        """Provides the initiator/target map for available SAN contexts.

        Looks up fcns database of each fc SAN configured to find logged in
        devices and returns a map of initiator and target port WWNs for each
        fabric.

        :param initiator_wwn_list: List of initiator port WWN
        :param target_wwn_list: List of target port WWN

        :returns List -- device wwn map in following format
            {
                <San name>: {
                    'initiator_port_wwn_list':
                    ('200000051e55a100', '200000051e55a121'..)
                    'target_port_wwn_list':
                    ('100000051e55a100', '100000051e55a121'..)
                }
            }
        :raises Exception when connection to fabric is failed
        """
        device_map = {}
        formatted_target_list = []
        formatted_initiator_list = []
        fabric_map = {}
        fabric_names = self.configuration.fc_fabric_names

        if not fabric_names:
            raise exception.InvalidParameterValue(
                err=_("Missing Fibre Channel SAN configuration "
                      "param - fc_fabric_names"))

        fabrics = [x.strip() for x in fabric_names.split(',')]
        LOG.debug("FC Fabric List: %s", fabrics)
        if fabrics:
            # Normalize all WWNs to the switch's colon-separated format so
            # they can be compared against fcns database entries.
            for t in target_wwn_list:
                formatted_target_list.append(zm_utils.get_formatted_wwn(t))

            for i in initiator_wwn_list:
                formatted_initiator_list.append(zm_utils.get_formatted_wwn(i))

            for fabric_name in fabrics:
                self.switch_ip = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_address')
                self.switch_user = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_user')
                self.switch_pwd = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_password')
                self.switch_port = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_port')
                zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                    'cisco_zoning_vsan')

                # Get name server data from fabric and find the targets
                # logged in
                nsinfo = ''
                LOG.debug("show fcns database for vsan %s", zoning_vsan)
                nsinfo = self.get_nameserver_info(zoning_vsan)

                LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo)
                LOG.debug("Lookup service:initiator list from caller-%s",
                          formatted_initiator_list)
                LOG.debug("Lookup service:target list from caller-%s",
                          formatted_target_list)
                # NOTE(review): the index assignments below assume filter()
                # returns a list (Python 2 semantics); under Python 3 these
                # would need list(filter(...)) — confirm target runtime.
                visible_targets = filter(lambda x: x in formatted_target_list,
                                         nsinfo)
                visible_initiators = filter(lambda x: x in
                                            formatted_initiator_list, nsinfo)

                if visible_targets:
                    LOG.debug("Filtered targets is: %s", visible_targets)
                    # getting rid of the : before returning
                    for idx, elem in enumerate(visible_targets):
                        elem = str(elem).replace(':', '')
                        visible_targets[idx] = elem
                else:
                    LOG.debug("No targets are in the fcns database"
                              " for vsan %s", zoning_vsan)

                if visible_initiators:
                    # getting rid of the : before returning ~sk
                    for idx, elem in enumerate(visible_initiators):
                        elem = str(elem).replace(':', '')
                        visible_initiators[idx] = elem
                else:
                    LOG.debug("No initiators are in the fcns database"
                              " for vsan %s", zoning_vsan)

                fabric_map = {'initiator_port_wwn_list': visible_initiators,
                              'target_port_wwn_list': visible_targets
                              }

                device_map[zoning_vsan] = fabric_map

        LOG.debug("Device map for SAN context: %s", device_map)

        return device_map

    def get_nameserver_info(self, fabric_vsan):
        """Get fcns database info from fabric.

        This method will return the connected node port wwn list(local
        and remote) for the given switch fabric
        """
        cli_output = None
        nsinfo_list = []
        try:
            # "| no-more" disables the switch pager so the full output is
            # returned in one shot.
            cmd = ([zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more'])
            cli_output = self._get_switch_info(cmd)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting show fcns database for"
                              " fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)

        LOG.debug("Connector returning fcns info-%s", nsinfo_list)

        return nsinfo_list

    def _get_switch_info(self, cmd_list):
        # Run the CLI command over SSH and return stdout split into lines,
        # or None if there was no output.
        stdout, stderr, sw_data = None, None, None
        try:
            stdout, stderr = self._run_ssh(cmd_list, True, 1)
            LOG.debug("CLI output from ssh - output:%s", stdout)
            if (stdout):
                sw_data = stdout.splitlines()
            return sw_data
        except processutils.ProcessExecutionError as e:
            msg = _("Error while getting data via ssh: (command=%(cmd)s "
                    "error=%(err)s).") % {'cmd': cmd_list,
                                          'err': six.text_type(e)}
            LOG.error(msg)
            raise exception.CiscoZoningCliException(reason=msg)

    def _parse_ns_output(self, switch_data):
        """Parses name server data.

        Parses nameserver raw data and adds the device port wwns to the list

        :returns list of device port wwn from ns info
        """
        nsinfo_list = []
        for line in switch_data:
            # Only rows flagged " N " (N-ports) carry device WWNs.
            if not(" N " in line):
                continue

            linesplit = line.split()
            if len(linesplit) > 2:
                node_port_wwn = linesplit[2]
                nsinfo_list.append(node_port_wwn)
            else:
                msg = _("Malformed fcns output string: %s") % line
                LOG.error(msg)
                raise exception.InvalidParameterValue(err=msg)
        return nsinfo_list

    def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
        # Execute a single command over a pooled SSH connection, retrying up
        # to `attempts` times with a randomized back-off between tries.
        command = ' '.join(cmd_list)

        # Lazily create the SSH connection pool on first use.
        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             min_size=1,
                                             max_size=5)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        return processutils.ssh_execute(
                            ssh,
                            command,
                            check_exit_code=check_exit_code)
                    except Exception as e:
                        msg = _("Exception: %s") % six.text_type(e)
                        LOG.error(msg)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                # All attempts failed: surface the last error, falling back
                # to a generic ProcessExecutionError when the last exception
                # lacks the expected attributes.
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error running SSH command: %s") % command)

    def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
        """Execute cli with status update.

        Executes CLI commands where status return is expected.

        cmd_list is a list of commands, where each command is itself
        a list of parameters.  We use utils.check_ssh_injection to check each
        command, but then join then with " ; " to form a single command.
        """
        # Check that each command is secure
        for cmd in cmd_list:
            utils.check_ssh_injection(cmd)

        # Combine into a single command.
        command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             min_size=1,
                                             max_size=5)

        stdin, stdout, stderr = None, None, None
        LOG.debug("Executing command via ssh: %s" % command)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        stdin, stdout, stderr = ssh.exec_command(command)
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                        channel = stdout.channel
                        exit_status = channel.recv_exit_status()
                        LOG.debug("Exit Status from ssh:%s", exit_status)
                        # exit_status == -1 if no exit code was returned
                        if exit_status != -1:
                            LOG.debug('Result was %s' % exit_status)
                            if check_exit_code and exit_status != 0:
                                raise processutils.ProcessExecutionError(
                                    exit_code=exit_status,
                                    stdout=stdout,
                                    stderr=stderr,
                                    cmd=command)
                            else:
                                return True
                        else:
                            return True
                    except Exception as e:
                        msg = _("Exception: %s") % six.text_type(e)
                        LOG.error(msg)
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)

                LOG.debug("Handling error case after SSH:%s", last_exception)
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Error executing command via ssh: %s") %
                       six.text_type(e))
                LOG.error(msg)
        finally:
            # Always release the channel streams from the last exec_command.
            if stdin:
                stdin.flush()
                stdin.close()
            if stdout:
                stdout.close()
            if stderr:
                stderr.close()

    def cleanup(self):
        # Drop the pooled SSH connections; the pool is recreated on next use.
        self.sshpool = None
| |
"""Types and helper methods for transitions and trajectories."""
import dataclasses
import logging
import os
import pathlib
import pickle
import sys
from typing import Dict, Mapping, Optional, Sequence, TypeVar, Union, overload
import numpy as np
import torch as th
from torch.utils import data as th_data
from imitation.data import old_types
# Generic type variable; used in slicing overloads so subclasses slice to
# their own type.
T = TypeVar("T")

# A filesystem path in any of the common representations.
AnyPath = Union[str, bytes, os.PathLike]
def dataclass_quick_asdict(dataclass_instance) -> dict:
    """Shallowly convert a dataclass instance to a field-name -> value dict.

    Unlike `dataclasses.asdict`, this performs no deep copy, so large numpy
    array values are shared with the instance rather than duplicated.

    See https://stackoverflow.com/a/52229565/1091722.
    """
    result = {}
    for field in dataclasses.fields(dataclass_instance):
        result[field.name] = getattr(dataclass_instance, field.name)
    return result
@dataclasses.dataclass(frozen=True)
class Trajectory:
    """A trajectory, e.g. a one episode rollout from an expert policy."""

    obs: np.ndarray
    """Observations, shape (trajectory_len + 1, ) + observation_shape."""

    acts: np.ndarray
    """Actions, shape (trajectory_len, ) + action_shape."""

    infos: Optional[np.ndarray]
    """An array of info dicts, length trajectory_len."""

    def __len__(self):
        """Returns number of transitions, `trajectory_len` in attribute docstrings.

        This is equal to the number of actions, and is always positive.
        """
        return len(self.acts)

    def __post_init__(self):
        """Performs input validation: check shapes are as specified in docstring."""
        # One more observation than action: the final entry of `obs` is the
        # observation reached after the last action.
        if len(self.obs) != len(self.acts) + 1:
            raise ValueError(
                "expected one more observations than actions: "
                f"{len(self.obs)} != {len(self.acts)} + 1"
            )
        if self.infos is not None and len(self.infos) != len(self.acts):
            raise ValueError(
                "infos when present must be present for each action: "
                f"{len(self.infos)} != {len(self.acts)}"
            )
        if len(self.acts) == 0:
            raise ValueError("Degenerate trajectory: must have at least one action.")
def _rews_validation(rews: np.ndarray, acts: np.ndarray):
if rews.shape != (len(acts),):
raise ValueError(
"rewards must be 1D array, one entry for each action: "
f"{rews.shape} != ({len(acts)},)"
)
if not np.issubdtype(rews.dtype, np.floating):
raise ValueError(f"rewards dtype {rews.dtype} not a float")
@dataclasses.dataclass(frozen=True)
class TrajectoryWithRew(Trajectory):
    """A `Trajectory` that additionally carries a per-step reward array."""

    rews: np.ndarray
    """Reward, shape (trajectory_len, ). dtype float."""

    def __post_init__(self):
        """Performs input validation, including for rews."""
        super().__post_init__()
        _rews_validation(self.rews, self.acts)
def transitions_collate_fn(
    batch: Sequence[Mapping[str, np.ndarray]],
) -> Dict[str, Union[np.ndarray, th.Tensor]]:
    """Custom `torch.utils.data.DataLoader` collate_fn for `TransitionsMinimal`.

    Use this as the `collate_fn` argument to `DataLoader` if using an instance of
    `TransitionsMinimal` as the `dataset` argument.

    Torch's default collation would recursively merge the per-sample "infos"
    dicts into one dict of stacked values; instead we want the raw info dicts
    side by side in a list, so "infos" is stripped before collation and
    re-attached afterwards.
    """
    stripped = []
    for sample in batch:
        stripped.append({k: v for k, v in sample.items() if k != "infos"})

    result = th_data.dataloader.default_collate(stripped)
    assert isinstance(result, dict)
    result["infos"] = [sample["infos"] for sample in batch]
    return result
@dataclasses.dataclass(frozen=True)
class TransitionsMinimal(th_data.Dataset):
    """A Torch-compatible `Dataset` of obs-act transitions.

    This class and its subclasses are usually instantiated via
    `imitation.data.rollout.flatten_trajectories`.

    Indexing an instance `trans` of TransitionsMinimal with an integer `i`
    returns the `i`th `Dict[str, np.ndarray]` sample, whose keys are the field
    names of each dataclass field and whose values are the ith elements of each field
    value.

    Slicing returns a possibly empty instance of `TransitionsMinimal` where each
    field has been sliced.
    """

    obs: np.ndarray
    """
    Previous observations. Shape: (batch_size, ) + observation_shape.

    The i'th observation `obs[i]` in this array is the observation seen
    by the agent when choosing action `acts[i]`. `obs[i]` is not required to
    be from the timestep preceding `obs[i+1]`.
    """

    acts: np.ndarray
    """Actions. Shape: (batch_size,) + action_shape."""

    infos: np.ndarray
    """Array of info dicts. Shape: (batch_size,)."""

    def __len__(self):
        """Returns number of transitions. Always positive."""
        return len(self.obs)

    def __post_init__(self):
        """Performs input validation: check shapes & dtypes match docstring.

        Also make array values read-only.
        """
        # Freeze the arrays so samples handed out by __getitem__ cannot be
        # mutated behind the dataset's back (they are views, not copies).
        for val in vars(self).values():
            if isinstance(val, np.ndarray):
                val.setflags(write=False)

        if len(self.obs) != len(self.acts):
            raise ValueError(
                "obs and acts must have same number of timesteps: "
                f"{len(self.obs)} != {len(self.acts)}"
            )

        if self.infos is not None and len(self.infos) != len(self.obs):
            raise ValueError(
                "obs and infos must have same number of timesteps: "
                f"{len(self.obs)} != {len(self.infos)}"
            )

    # Overloads document the dual return type: slice -> same dataclass,
    # int -> dict of un-batched field values.
    @overload
    def __getitem__(self: T, key: slice) -> T:
        pass  # pragma: no cover

    @overload
    def __getitem__(self, key: int) -> Dict[str, np.ndarray]:
        pass  # pragma: no cover

    def __getitem__(self, key):
        """See TransitionsMinimal docstring for indexing and slicing semantics."""
        d = dataclass_quick_asdict(self)
        d_item = {k: v[key] for k, v in d.items()}

        if isinstance(key, slice):
            # Return type is the same as this dataclass. Replace field value with
            # slices.
            return dataclasses.replace(self, **d_item)
        else:
            assert isinstance(key, int)
            # Return type is a dictionary. Array values have no batch dimension.
            #
            # Dictionary of np.ndarray values is a convenient
            # torch.util.data.Dataset return type, as a torch.util.data.DataLoader
            # taking in this `Dataset` as its first argument knows how to
            # automatically concatenate several dictionaries together to make
            # a single dictionary batch with `torch.Tensor` values.
            return d_item
@dataclasses.dataclass(frozen=True)
class Transitions(TransitionsMinimal):
    """A batch of obs-act-obs-done transitions."""

    next_obs: np.ndarray
    """New observation. Shape: (batch_size, ) + observation_shape.

    The i'th observation `next_obs[i]` in this array is the observation
    after the agent has taken action `acts[i]`.

    Invariants:
        * `next_obs.dtype == obs.dtype`
        * `len(next_obs) == len(obs)`
    """

    dones: np.ndarray
    """
    Boolean array indicating episode termination. Shape: (batch_size, ).

    `done[i]` is true iff `next_obs[i]` the last observation of an episode.
    """

    def __post_init__(self):
        """Performs input validation: check shapes & dtypes match docstring."""
        super().__post_init__()
        if self.obs.shape != self.next_obs.shape:
            raise ValueError(
                "obs and next_obs must have same shape: "
                f"{self.obs.shape} != {self.next_obs.shape}"
            )
        if self.obs.dtype != self.next_obs.dtype:
            raise ValueError(
                "obs and next_obs must have the same dtype: "
                f"{self.obs.dtype} != {self.next_obs.dtype}"
            )
        if self.dones.shape != (len(self.acts),):
            raise ValueError(
                "dones must be 1D array, one entry for each timestep: "
                f"{self.dones.shape} != ({len(self.acts)},)"
            )
        if self.dones.dtype != bool:
            raise ValueError(f"dones must be boolean, not {self.dones.dtype}")
@dataclasses.dataclass(frozen=True)
class TransitionsWithRew(Transitions):
    """A batch of obs-act-obs-rew-done transitions."""

    rews: np.ndarray
    """
    Reward. Shape: (batch_size, ). dtype float.

    The reward `rew[i]` at the i'th timestep is received after the
    agent has taken action `acts[i]`.
    """

    def __post_init__(self):
        """Performs input validation, including for rews."""
        super().__post_init__()
        _rews_validation(self.rews, self.acts)
def load(path: AnyPath) -> Sequence[TrajectoryWithRew]:
    """Loads a sequence of trajectories saved by `save()` from `path`."""
    # TODO(shwang): In a future version, remove the DeprecationWarning and
    # imitation.data.old_types.Trajectory entirely.
    # Old pickles reference the long-gone module `imitation.util.rollout`;
    # temporarily alias that name to `old_types` so unpickling resolves it.
    try:
        assert "imitation.util.rollout" not in sys.modules
        sys.modules["imitation.util.rollout"] = old_types
        with open(path, "rb") as f:
            trajectories = pickle.load(f)
    finally:
        # NOTE(review): if the assert above fails, this `del` removes a
        # genuinely imported module rather than our temporary alias —
        # confirm that is intended.
        del sys.modules["imitation.util.rollout"]

    if len(trajectories) > 0:
        if isinstance(trajectories[0], old_types.Trajectory):
            import warnings

            warnings.warn(
                (
                    "Your trajectories are saved in an outdated format. Please update "
                    "them to the new format by running:\n"
                    f"python -m imitation.scripts.update_traj_file_in_place.py '{path}'"
                ),
                DeprecationWarning,
            )
            # Old trajectories were namedtuples; re-wrap them field-for-field
            # as the current dataclass type.
            trajectories = [
                TrajectoryWithRew(**traj._asdict()) for traj in trajectories
            ]
    return trajectories
def save(path: AnyPath, trajectories: Sequence[TrajectoryWithRew]) -> None:
    """Save a sequence of Trajectories to disk.

    The data is first pickled to `<path>.tmp` and then renamed into place
    with `os.replace`, so a crash mid-write never leaves a truncated file
    at `path`.

    Args:
        path: Trajectories are saved to this path.
        trajectories: The trajectories to save.
    """
    target = pathlib.Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    scratch = f"{path}.tmp"
    with open(scratch, "wb") as handle:
        pickle.dump(trajectories, handle)
    # Ensure atomic write
    os.replace(scratch, path)
    logging.info(f"Dumped demonstrations to {path}.")
| |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from webob import exc
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
# Canonical fake compute-node / service / server fixtures shared by the fake
# db and objects stubs defined below.
TEST_HYPERS = [
    dict(id=1,
         service_id=1,
         host="compute1",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper1",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info='cpu_info',
         disk_available_least=100,
         host_ip=netaddr.IPAddress('1.1.1.1')),
    dict(id=2,
         service_id=2,
         host="compute2",
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper2",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info='cpu_info',
         disk_available_least=100,
         host_ip=netaddr.IPAddress('2.2.2.2'))]

TEST_SERVICES = [
    objects.Service(id=1,
                    host="compute1",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
    objects.Service(id=2,
                    host="compute2",
                    binary="nova-compute",
                    topic="compute_topic",
                    report_count=5,
                    disabled=False,
                    disabled_reason=None,
                    availability_zone="nova"),
]

# Build the ComputeNode objects BEFORE attaching the 'service' key, since
# ComputeNode does not accept a 'service' field in its constructor.
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
                   for hyper_dct in TEST_HYPERS]

TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})

TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
                dict(name="inst2", uuid="uuid2", host="compute2"),
                dict(name="inst3", uuid="uuid3", host="compute1"),
                dict(name="inst4", uuid="uuid4", host="compute2")]
def fake_compute_node_get_all(context):
    """Stub for HostAPI.compute_node_get_all: every fake hypervisor."""
    return TEST_HYPERS_OBJ
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
    """Stub search: every fake hypervisor "matches", whatever the regex."""
    return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
    """Stub id lookup; raises ComputeHostNotFound for unknown ids.

    Note int() may raise ValueError for non-numeric ids, which the
    controller maps to HTTPNotFound — preserved behaviour.
    """
    matches = (h for h in TEST_HYPERS_OBJ if h.id == int(compute_id))
    try:
        return next(matches)
    except StopIteration:
        raise exception.ComputeHostNotFound(host=compute_id)
@classmethod
def fake_service_get_by_host_and_binary(cls, context, host, binary):
    """Stub for objects.Service.get_by_host_and_binary (hence classmethod).

    Returns the fake service on *host*, or None when there is none; the
    *binary* argument is ignored — every fake service is nova-compute.
    """
    return next((svc for svc in TEST_SERVICES if svc.host == host), None)
def fake_compute_node_statistics(context):
    """Stub for db.compute_node_statistics: sum over the fake hypervisors."""
    summed_keys = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                   'memory_mb_used', 'local_gb_used', 'free_ram_mb',
                   'free_disk_gb', 'current_workload', 'running_vms',
                   'disk_available_least')
    totals = dict((key, 0) for key in summed_keys)
    totals['count'] = 0
    for hyper in TEST_HYPERS_OBJ:
        totals['count'] += 1
        for key in summed_keys:
            totals[key] += hyper[key]
    return totals
def fake_instance_get_all_by_host(context, host):
    """Stub for db.instance_get_all_by_host, filtered from TEST_SERVERS."""
    return [inst for inst in TEST_SERVERS if inst['host'] == host]
class HypervisorsTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-hypervisors API controller.

    Expected REST views are derived from the module-level TEST_* fixtures;
    every host_api/db entry point the controller touches is replaced by the
    fake_* helpers in setUp().  The v2 subclass below reuses all test
    methods against the legacy controller.
    """
    # Detail view: the raw compute-node dicts minus DB-only fields
    # (service_id/host), plus computed state/status and an embedded
    # service sub-dict.
    DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})
    # Index view: id, hostname, state and status only.
    INDEX_HYPER_DICTS = [
        dict(id=1, hypervisor_hostname="hyper1",
             state='up', status='enabled'),
        dict(id=2, hypervisor_hostname="hyper2",
             state='up', status='enabled')]
    def _get_request(self, use_admin_context):
        # Most tests flip admin on/off to exercise the policy checks.
        return fakes.HTTPRequest.blank('', use_admin_context=use_admin_context)
    def _set_up_controller(self):
        # Overridden by the v2 subclass to build the legacy controller.
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)
    def setUp(self):
        super(HypervisorsTestV21, self).setUp()
        self._set_up_controller()
        # Replace every backend call with the canned module-level fakes.
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       fake_compute_node_get_all)
        self.stubs.Set(objects.Service, 'get_by_host_and_binary',
                       fake_service_get_by_host_and_binary)
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor)
        self.stubs.Set(self.controller.host_api, 'compute_node_get',
                       fake_compute_node_get)
        self.stubs.Set(db, 'compute_node_statistics',
                       fake_compute_node_statistics)
        self.stubs.Set(db, 'instance_get_all_by_host',
                       fake_instance_get_all_by_host)
    def test_view_hypervisor_nodetail_noservers(self):
        result = self.controller._view_hypervisor(
            TEST_HYPERS_OBJ[0], TEST_SERVICES[0], False)
        self.assertEqual(result, self.INDEX_HYPER_DICTS[0])
    def test_view_hypervisor_detail_noservers(self):
        result = self.controller._view_hypervisor(
            TEST_HYPERS_OBJ[0], TEST_SERVICES[0], True)
        self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
    def test_view_hypervisor_servers(self):
        result = self.controller._view_hypervisor(TEST_HYPERS_OBJ[0],
                                                  TEST_SERVICES[0],
                                                  False, TEST_SERVERS)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'servers': [
                                  dict(name="inst1", uuid="uuid1"),
                                  dict(name="inst2", uuid="uuid2"),
                                  dict(name="inst3", uuid="uuid3"),
                                  dict(name="inst4", uuid="uuid4")]})
        self.assertEqual(result, expected_dict)
    def test_index(self):
        req = self._get_request(True)
        result = self.controller.index(req)
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
    def test_index_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)
    def test_detail(self):
        req = self._get_request(True)
        result = self.controller.detail(req)
        self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
    def test_detail_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.detail, req)
    def test_show_noid(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
    def test_show_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
    def test_show_withid(self):
        req = self._get_request(True)
        result = self.controller.show(req, '1')
        self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
    def test_show_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req, '1')
    def test_uptime_noid(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')
    def test_uptime_notimplemented(self):
        def fake_get_host_uptime(context, hyp):
            raise exc.HTTPNotImplemented()
        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotImplemented,
                          self.controller.uptime, req, '1')
    def test_uptime_implemented(self):
        def fake_get_host_uptime(context, hyp):
            return "fake uptime"
        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)
        req = self._get_request(True)
        result = self.controller.uptime(req, '1')
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'uptime': "fake uptime"})
        self.assertEqual(result, dict(hypervisor=expected_dict))
    def test_uptime_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
    def test_uptime_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.uptime, req, '1')
    def test_search(self):
        req = self._get_request(True)
        result = self.controller.search(req, 'hyper')
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
    def test_search_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.search, req, '1')
    def test_search_non_exist(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
    def test_servers(self):
        req = self._get_request(True)
        result = self.controller.servers(req, 'hyper')
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
        expected_dict[0].update({'servers': [
                                     dict(name="inst1", uuid="uuid1"),
                                     dict(name="inst3", uuid="uuid3")]})
        expected_dict[1].update({'servers': [
                                     dict(name="inst2", uuid="uuid2"),
                                     dict(name="inst4", uuid="uuid4")]})
        self.assertEqual(result, dict(hypervisors=expected_dict))
    def test_servers_non_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers,
                          req, '115')
    def test_servers_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.servers, req, '1')
    def test_servers_with_non_integer_hypervisor_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                                                                hypervisor_re):
            return []
        self.stubs.Set(self.controller.host_api,
                       'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers, req, 'abc')
    def test_servers_with_no_server(self):
        def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
            return []
        self.stubs.Set(db, 'instance_get_all_by_host',
                       fake_instance_get_all_by_host_return_empty)
        req = self._get_request(True)
        result = self.controller.servers(req, '1')
        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
    def test_statistics(self):
        # Expected values are TEST_HYPERS summed over both nodes.
        req = self._get_request(True)
        result = self.controller.statistics(req)
        self.assertEqual(result, dict(hypervisor_statistics=dict(
                    count=2,
                    vcpus=8,
                    memory_mb=20 * 1024,
                    local_gb=500,
                    vcpus_used=4,
                    memory_mb_used=10 * 1024,
                    local_gb_used=250,
                    free_ram_mb=10 * 1024,
                    free_disk_gb=250,
                    current_workload=4,
                    running_vms=4,
                    disk_available_least=200)))
    def test_statistics_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.statistics, req)
class HypervisorsTestV2(HypervisorsTestV21):
    """Re-run every v2.1 test against the legacy v2 extension.

    The v2 views carry fewer fields, so the expected dicts are the v2.1
    ones with state/status, service.disabled_reason and host_ip removed.
    """
    DETAIL_HYPERS_DICTS = copy.deepcopy(
        HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    del DETAIL_HYPERS_DICTS[0]['state']
    del DETAIL_HYPERS_DICTS[1]['state']
    del DETAIL_HYPERS_DICTS[0]['status']
    del DETAIL_HYPERS_DICTS[1]['status']
    del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[0]['host_ip']
    del DETAIL_HYPERS_DICTS[1]['host_ip']
    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
    del INDEX_HYPER_DICTS[0]['state']
    del INDEX_HYPER_DICTS[1]['state']
    del INDEX_HYPER_DICTS[0]['status']
    del INDEX_HYPER_DICTS[1]['status']
    def _set_up_controller(self):
        # The legacy controller requires an (empty) extension manager.
        self.context = context.get_admin_context()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
# ---- file-concatenation boundary: the fake XenAPI SDK module follows ----
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
import uuid
from xml.sax import saxutils
import zlib
import pprint
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.virt.xenapi.client import session as xenapi_session
# XenAPI object classes the fake database keeps a table for.
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
            'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
# In-memory "database": {class name: {opaque ref: record dict}}.
# Populated by reset(); mutated freely by the helpers below.
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
    """Debug-log the entire fake database, prefixed with *msg* if given."""
    prefix = msg or ""
    dump = pprint.pformat(_db_content)
    LOG.debug(_("%(text)s: _db_content => %(content)s"),
              {'text': prefix, 'content': dump})
def reset():
    """Wipe the fake database and recreate the default host and dom0 VM."""
    for cls_name in _CLASSES:
        _db_content[cls_name] = {}
    host_ref = create_host('fake')
    create_vm('fake dom 0',
              'Running',
              is_a_template=False,
              is_control_domain=True,
              resident_on=host_ref)
def reset_table(table):
    """Clear a single table; unknown table names are silently ignored."""
    if table in _CLASSES:
        _db_content[table] = {}
def _create_pool(name_label):
    """Insert a pool record and return its opaque ref."""
    return _create_object('pool', {'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
    """Create a host record plus its local SRs, PIF and (first time) a pool.

    Returns the new host's opaque ref.  Bug fix: the function previously
    returned None implicitly, although ``reset()`` assigns its result and
    passes it on as the dom0 VM's ``resident_on`` value.  Returning the
    ref is backward-compatible for callers that ignore the result.
    """
    host_ref = _create_object('host',
                              {'name_label': name_label,
                               'hostname': hostname,
                               'address': address})
    host_default_sr_ref = _create_local_srs(host_ref)
    _create_local_pif(host_ref)

    # Create a pool if we don't have one already
    if len(_db_content['pool']) == 0:
        pool_ref = _create_pool('')
        _db_content['pool'][pool_ref]['master'] = host_ref
        _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
        _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
    return host_ref
def create_network(name_label, bridge):
    """Insert a network record and return its ref."""
    return _create_object('network', {'name_label': name_label,
                                      'bridge': bridge})
def create_vm(name_label, status, **kwargs):
    """Insert a VM record; 'Running' VMs get a random domid and a host."""
    if status == 'Running':
        domid = random.randrange(1, 1 << 16)
        # Python 2: .keys()[0] picks the first (only) fake host.
        resident_on = _db_content['host'].keys()[0]
    else:
        domid = -1
        resident_on = ''

    record = dict(kwargs)
    record.update({'name_label': name_label,
                   'domid': domid,
                   'power_state': status,
                   'blocked_operations': {},
                   'resident_on': resident_on})
    vm_ref = _create_object('VM', record)
    after_VM_create(vm_ref, record)
    return vm_ref
def destroy_vm(vm_ref):
    """Remove a VM record, tearing down each of its VBDs first."""
    vbd_refs = _db_content['VM'][vm_ref]['VBDs']
    # Iterate over a copy: destroy_vbd() removes entries from the list.
    for vbd_ref in list(vbd_refs):
        destroy_vbd(vbd_ref)
    del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
    """Remove a VBD record and drop the backrefs held by its VM and VDI."""
    vbd_rec = _db_content['VBD'][vbd_ref]
    _db_content['VM'][vbd_rec['VM']]['VBDs'].remove(vbd_ref)
    _db_content['VDI'][vbd_rec['VDI']]['VBDs'].remove(vbd_ref)
    del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
    """Remove a VDI record, tearing down each attached VBD first."""
    vbd_refs = _db_content['VDI'][vdi_ref]['VBDs']
    # Iterate over a copy: destroy_vbd() removes entries from the list.
    for vbd_ref in list(vbd_refs):
        destroy_vbd(vbd_ref)
    del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
    """Insert a VDI record with sensible defaults, overridable via kwargs."""
    record = {
        'SR': sr_ref,
        'read_only': False,
        'type': '',
        'name_label': name_label,
        'name_description': '',
        'sharable': False,
        'other_config': {},
        'location': '',
        'xenstore_data': {},
        'sm_config': {'vhd-parent': None},
        'physical_utilisation': '123',
        'managed': True,
    }
    record.update(kwargs)
    vdi_ref = _create_object('VDI', record)
    after_VDI_create(vdi_ref, record)
    return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
    """Give a freshly created VDI an (initially empty) VBD backref list."""
    vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0):
    """Insert a VBD linking *vm_ref* to *vdi_ref*; returns the new ref."""
    record = {'VM': vm_ref,
              'VDI': vdi_ref,
              'userdevice': str(userdevice),
              'currently_attached': False}
    vbd_ref = _create_object('VBD', record)
    after_VBD_create(vbd_ref, record)
    return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
    """Fill in a new VBD's read-only fields and register backrefs.

    Adds the VBD to its VM's (and, when present, its VDI's) VBD list and
    caches the VM's name_label on the VBD record.
    """
    vbd_rec['currently_attached'] = False
    vbd_rec['device'] = ''

    vm_rec = _db_content['VM'][vbd_rec['VM']]
    vm_rec['VBDs'].append(vbd_ref)
    vbd_rec['vm_name_label'] = vm_rec['name_label']

    vdi_ref = vbd_rec['VDI']
    # A VBD may represent an empty drive with no VDI behind it.
    if vdi_ref and vdi_ref != "OpaqueRef:NULL":
        _db_content['VDI'][vdi_ref]['VBDs'].append(vbd_ref)
def after_VM_create(vm_ref, vm_rec):
    """Create read-only fields in the VM record (only where unset)."""
    defaults = [('domid', -1),
                ('is_control_domain', False),
                ('is_a_template', False),
                ('memory_static_max', str(8 * units.Gi)),
                ('memory_dynamic_max', str(8 * units.Gi)),
                ('VCPUs_max', str(4)),
                ('VBDs', []),
                ('resident_on', '')]
    for key, value in defaults:
        vm_rec.setdefault(key, value)
def create_pbd(host_ref, sr_ref, attached):
    """Insert a PBD connecting *sr_ref* to *host_ref*; returns the ref."""
    device_config = {'path': '/var/run/sr-mount/%s' % sr_ref}
    return _create_object('PBD',
                          {'device_config': device_config,
                           'host': host_ref,
                           'SR': sr_ref,
                           'currently_attached': attached})
def create_task(name_label):
    """Insert a task record in the 'pending' state; returns the ref."""
    return _create_object('task', {'name_label': name_label,
                                   'status': 'pending'})
def _create_local_srs(host_ref):
    """Fake the SRs the XenServer installer creates on the local disk.

    Creates the ISO SR first, then the default 'ext' SR, and returns the
    ref of the latter (used as the pool's default SR).
    """
    sr_specs = [
        dict(name_label='Local storage ISO',
             type='iso',
             other_config={'i18n-original-value-name_label':
                           'Local storage ISO',
                           'i18n-key': 'local-storage-iso'},
             physical_size=80000,
             physical_utilisation=40000,
             virtual_allocation=80000),
        dict(name_label='Local storage',
             type='ext',
             other_config={'i18n-original-value-name_label':
                           'Local storage',
                           'i18n-key': 'local-storage'},
             physical_size=40000,
             physical_utilisation=20000,
             virtual_allocation=10000),
    ]
    last_sr_ref = None
    for spec in sr_specs:
        last_sr_ref = create_sr(host_ref=host_ref, **spec)
    return last_sr_ref
def create_sr(**kwargs):
    """Insert an SR record plus an attached PBD; returns the SR ref.

    Note content_type deliberately mirrors the driver 'type' (falling
    back to 'user'), matching the original behaviour.
    """
    record = {'name_label': kwargs.get('name_label'),
              'type': kwargs.get('type'),
              'content_type': kwargs.get('type', 'user'),
              'shared': kwargs.get('shared', False),
              'physical_size': kwargs.get('physical_size', str(1 << 30)),
              'physical_utilisation': str(
                  kwargs.get('physical_utilisation', 0)),
              'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
              'other_config': kwargs.get('other_config', {}),
              'VDIs': kwargs.get('VDIs', [])}
    sr_ref = _create_object('SR', record)
    pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
    return sr_ref
def _create_local_pif(host_ref):
    """Insert a fake physical management PIF for *host_ref*.

    The record's 'uuid' field is overwritten with its own ref so lookups
    by uuid and by ref agree.
    """
    pif_rec = {'name-label': 'Fake PIF',
               'MAC': '00:11:22:33:44:55',
               'physical': True,
               'VLAN': -1,
               'device': 'fake0',
               'host_uuid': host_ref,
               'network': '',
               'IP': '10.1.1.1',
               'IPv6': '',
               'uuid': '',
               'management': 'true'}
    pif_ref = _create_object('PIF', pif_rec)
    _db_content['PIF'][pif_ref]['uuid'] = pif_ref
    return pif_ref
def _create_object(table, obj):
    """Store *obj* under a fresh opaque ref in *table*; returns the ref.

    The record also gets a 'uuid' field holding a second, distinct UUID.
    """
    ref = str(uuid.uuid4())
    obj['uuid'] = str(uuid.uuid4())
    _db_content[table][ref] = obj
    return ref
def _create_sr(table, obj):
    # *obj* carries the positional parameters of an SR.create call with the
    # session handle prepended; obj[2] looks like the record/device_config
    # dict and obj[6] the SR type — confirm against the XenAPI SR.create
    # signature.
    sr_type = obj[6]
    # The fake only supports the iscsi and nfs drivers.
    if sr_type != 'iscsi' and sr_type != 'nfs':
        raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
    # NOTE: .keys()[0] is Python 2 only; picks the first (only) fake host.
    host_ref = _db_content['host'].keys()[0]
    sr_ref = _create_object(table, obj[2])
    if sr_type == 'iscsi':
        # iscsi SRs are modelled with a single LUN-backed VDI plus a PBD.
        vdi_ref = create_vdi('', sr_ref)
        pbd_ref = create_pbd(host_ref, sr_ref, True)
        _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
        _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
        _db_content['VDI'][vdi_ref]['SR'] = sr_ref
        _db_content['PBD'][pbd_ref]['SR'] = sr_ref
    return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
    """Create a VLAN PIF on top of *pif_ref* plus the VLAN record itself.

    *network_ref* is accepted for signature compatibility but unused.
    """
    parent_pif = get_record('PIF', pif_ref)
    vlan_pif_ref = _create_object('PIF',
                                  {'name-label': 'Fake VLAN PIF',
                                   'MAC': '00:11:22:33:44:55',
                                   'physical': True,
                                   'VLAN': vlan_num,
                                   'device': parent_pif['device'],
                                   'host_uuid': parent_pif['host_uuid']})
    return _create_object('VLAN',
                          {'tagged-pif': pif_ref,
                           'untagged-pif': vlan_pif_ref,
                           'tag': vlan_num})
def get_all(table):
    """Return every opaque ref in *table* (a list under Python 2)."""
    return _db_content[table].keys()
def get_all_records(table):
    """Return the full {ref: record} map for *table*."""
    return _db_content[table]
def _query_matches(record, query):
    """Evaluate a XenServer query-language expression against one record.

    Simple support for 'and', 'or', 'not ' and single field terms such as
    'field "host"="<uuid>" and field "SR"="<sr uuid>"'.  Exercised by the
    existing tests (e.g. calls to find_network_with_bridge).
    """
    and_clauses = query.split(" and ")
    if len(and_clauses) > 1:
        return all(_query_matches(record, clause) for clause in and_clauses)

    or_clauses = query.split(" or ")
    if len(or_clauses) > 1:
        return any(_query_matches(record, clause) for clause in or_clauses)

    if query.startswith('not '):
        return not _query_matches(record, query[4:])

    # A single term must be a field comparison; bad queries never match.
    if not query.startswith('field'):
        return False

    (field, value) = query[6:].split('=', 1)
    # Some fields (e.g. name_label, memory_overhead) have double
    # underscores in the DB, but only single underscores when querying.
    field = field.replace("__", "_").strip(" \"'")
    value = value.strip(" \"'")

    # Strings compare directly; anything else (notably booleans, which
    # XAPI sanitises) compares case-insensitively via its string form.
    if isinstance(record[field], str):
        return record[field] == value
    return str(record[field]).lower() == value.lower()
def get_all_records_where(table_name, query):
    """Return {ref: record} for every record in *table_name* matching *query*."""
    table = _db_content[table_name]
    return dict((ref, rec) for ref, rec in table.items()
                if _query_matches(rec, query))
def get_record(table, ref):
    """Return the record stored under *ref* in *table*.

    Raises:
        Failure: ['HANDLE_INVALID', table, ref], mirroring XenAPI, when
            the ref is unknown.

    Idiom fix: single EAFP lookup instead of the previous `in` check
    followed by a second `.get()` lookup on the same dict.
    """
    try:
        return _db_content[table][ref]
    except KeyError:
        raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
    """Raise NovaException if any fake sessions were left logged in."""
    leaked = _db_content['session']
    if leaked:
        raise exception.NovaException('Sessions have leaked: %s' % leaked)
def as_value(s):
    """Simulate a XenAPI plugin response: XML-escape *s* and wrap it in a
    <value> element.
    """
    return '<value>{0}</value>'.format(saxutils.escape(s))
def as_json(*args, **kwargs):
    """Simulate a JSON-returning XenAPI plugin response.

    Positional arguments render as a JSON list, keyword arguments as a
    JSON dict (positional wins when both are given).
    """
    payload = args or kwargs
    return jsonutils.dumps(payload)
class Failure(Exception):
    """Fake XenAPI failure carrying the XenAPI error 'details' list."""

    def __init__(self, details):
        self.details = details

    def __str__(self):
        try:
            return str(self.details)
        except Exception:
            return "XenAPI Fake Failure: %s" % str(self.details)

    def _details_map(self):
        # XenAPI represents details as a {index-as-string: detail} map.
        return dict((str(index), detail)
                    for index, detail in enumerate(self.details))
class SessionBase(object):
"""Base class for Fake Sessions."""
    def __init__(self, uri):
        # *uri* is accepted for signature compatibility with the real
        # XenAPI session class but is otherwise ignored by the fake.
        self._session = None
        xenapi_session.apply_session_helpers(self)
    def pool_get_default_SR(self, _1, pool_ref):
        # Return the default SR of the first (only) pool record.
        # NOTE: .values()[0] is Python 2 only.
        return _db_content['pool'].values()[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
    def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
                     shared, sm_config):
        # Re-introduce: an SR already known under this uuid just has its
        # 'forgotten' flag cleared.
        ref = None
        rec = None
        for ref, rec in _db_content['SR'].iteritems():
            if rec.get('uuid') == sr_uuid:
                # make forgotten = 0 and return ref
                _db_content['SR'][ref]['forgotten'] = 0
                return ref
        # SR not found in db, so we create one
        params = {'sr_uuid': sr_uuid,
                  'label': label,
                  'desc': desc,
                  'type': type,
                  'content_type': content_type,
                  'shared': shared,
                  'sm_config': sm_config}
        sr_ref = _create_object('SR', params)
        _db_content['SR'][sr_ref]['uuid'] = sr_uuid
        _db_content['SR'][sr_ref]['forgotten'] = 0
        vdi_per_lun = False
        if type == 'iscsi':
            # Just to be clear
            vdi_per_lun = True
        if vdi_per_lun:
            # we need to create a vdi because this introduce
            # is likely meant for a single vdi
            vdi_ref = create_vdi('', sr_ref)
            _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
            _db_content['VDI'][vdi_ref]['SR'] = sr_ref
        return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
    def _plugin_migration_transfer_vhd(self, method, args):
        # Unpickle the call parameters and verify the referenced VDI
        # exists; the transfer itself is not simulated.
        kwargs = pickle.loads(args['params'])['kwargs']
        vdi_ref = self.xenapi_request('VDI.get_by_uuid',
                                      (kwargs['vdi_uuid'], ))
        assert vdi_ref
        return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_hostname': 'fake-xenhost',
'host_cpu_info': {'cpu_count': 50},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
    def _plugin_console_get_console_log(self, method, args):
        # dom0 (dom_id 0) has no console to read.
        # NOTE(review): plugin args usually arrive as strings, so the
        # integer comparison with 0 may never fire — confirm callers.
        dom_id = args["dom_id"]
        if dom_id == 0:
            raise Failure('Guest does not have a console')
        # NOTE: passing a str to zlib.compress is Python 2 only.
        return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def _plugin_nova_plugin_version_get_version(self, method, args):
return pickle.dumps("1.2")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
    def VDI_resize_online(self, *args):
        # Resizing always "succeeds"; the dummy return value is ignored.
        return 'derp'
    # Offline resize behaves identically in the fake.
    VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = random.randrange(1, 1 << 16)
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
    def VM_hard_shutdown(self, session, vm_ref):
        # Power off: mark the VM Halted and clear its domain id.
        db_ref = _db_content['VM'][vm_ref]
        db_ref['power_state'] = 'Halted'
        db_ref['domid'] = -1
    # Clean shutdown behaves identically in the fake.
    VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
    def xenapi_request(self, methodname, params):
        """Entry point for all fake XenAPI calls.

        login*/logout are handled locally; any other method name is looked
        up on self and invoked with the session handle prepended to params.
        """
        if methodname.startswith('login'):
            self._login(methodname, params)
            return None
        elif methodname == 'logout' or methodname == 'session.logout':
            self._logout()
            return None
        else:
            full_params = (self._session,) + params
            meth = getattr(self, methodname, None)
            if meth is None:
                LOG.debug(_('Raising NotImplemented'))
                raise NotImplementedError(
                    _('xenapi.fake does not have an implementation for %s') %
                    methodname)
            return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': _db_content['host'].keys()[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
    def __getattr__(self, name):
        """Resolve unknown attribute access into fake XenAPI behaviour.

        Order matters: session handle, the .xenapi dispatcher, login/async
        entry points, explicit '<Class>.<method>' implementations, then
        synthesized getter/setter/create/destroy handlers.
        """
        if name == 'handle':
            return self._session
        elif name == 'xenapi':
            return _Dispatcher(self.xenapi_request, None)
        elif name.startswith('login') or name.startswith('slave_local'):
            return lambda *params: self._login(name, params)
        elif name.startswith('Async'):
            return lambda *params: self._async(name, params)
        elif '.' in name:
            # Explicit fake implementations use '_' where the API uses '.'.
            impl = getattr(self, name.replace('.', '_'))
            if impl is not None:
                def callit(*params):
                    LOG.debug(_('Calling %(name)s %(impl)s'),
                              {'name': name, 'impl': impl})
                    self._check_session(params)
                    return impl(*params)
                return callit
        if self._is_gettersetter(name, True):
            LOG.debug(_('Calling getter %s'), name)
            return lambda *params: self._getter(name, params)
        elif self._is_gettersetter(name, False):
            LOG.debug(_('Calling setter %s'), name)
            return lambda *params: self._setter(name, params)
        elif self._is_create(name):
            return lambda *params: self._create(name, params)
        elif self._is_destroy(name):
            return lambda *params: self._destroy(name, params)
        elif name == 'XenAPI':
            return FakeXenAPI()
        else:
            # Unknown names resolve to None rather than raising AttributeError.
            return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
    def _is_create(self, name):
        """True when name is '<KnownClass>.create'."""
        return self._is_method(name, 'create')
    def _is_destroy(self, name):
        """True when name is '<KnownClass>.destroy'."""
        return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
    def _getter(self, name, params):
        """Generic implementation of '<Class>.get_*' calls over _db_content."""
        self._check_session(params)
        (cls, func) = name.split('.')
        if func == 'get_all':
            self._check_arg_count(params, 1)
            return get_all(cls)
        if func == 'get_all_records':
            self._check_arg_count(params, 1)
            return get_all_records(cls)
        if func == 'get_all_records_where':
            self._check_arg_count(params, 2)
            return get_all_records_where(cls, params[1])
        if func == 'get_record':
            self._check_arg_count(params, 2)
            return get_record(cls, params[1])
        if func in ('get_by_name_label', 'get_by_uuid'):
            self._check_arg_count(params, 2)
            return_singleton = (func == 'get_by_uuid')
            return self._get_by_field(
                _db_content[cls], func[len('get_by_'):], params[1],
                return_singleton=return_singleton)
        # Fallback: treat 'get_<field>' as a plain field read on the record.
        if len(params) == 2:
            field = func[len('get_'):]
            ref = params[1]
            if (ref in _db_content[cls]):
                if (field in _db_content[cls][ref]):
                    return _db_content[cls][ref][field]
            else:
                raise Failure(['HANDLE_INVALID', cls, ref])
        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            _('xenapi.fake does not have an implementation for %s or it has '
            'been called with the wrong number of arguments') % name)
    def _setter(self, name, params):
        """Generic implementation of '<Class>.set_<field>' calls.

        Only existing fields of existing records may be set; anything else
        falls through to NotImplementedError.
        """
        self._check_session(params)
        (cls, func) = name.split('.')
        if len(params) == 3:
            field = func[len('set_'):]
            ref = params[1]
            val = params[2]
            if (ref in _db_content[cls] and
                field in _db_content[cls][ref]):
                _db_content[cls][ref][field] = val
                return
        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            'xenapi.fake does not have an implementation for %s or it has '
            'been called with the wrong number of arguments or the database '
            'is missing that field' % name)
    def _create(self, name, params):
        """Generic implementation of '<Class>.create' calls.

        SR.create and VLAN.create have their own argument shapes; all other
        classes take (session, record) and get a plain object inserted.
        """
        self._check_session(params)
        is_sr_create = name == 'SR.create'
        is_vlan_create = name == 'VLAN.create'
        # Storage Repositories have a different API
        expected = is_sr_create and 10 or is_vlan_create and 4 or 2
        self._check_arg_count(params, expected)
        # NOTE(review): this rebinds '_' locally, shadowing the gettext alias
        # for the remainder of the method (unused afterwards, so harmless).
        (cls, _) = name.split('.')
        ref = (is_sr_create and
               _create_sr(cls, params) or
               is_vlan_create and
               _create_vlan(params[1], params[2], params[3]) or
               _create_object(cls, params[1]))
        # Call hook to provide any fixups needed (ex. creating backrefs)
        after_hook = 'after_%s_create' % cls
        if after_hook in globals():
            globals()[after_hook](ref, params[1])
        obj = get_record(cls, ref)
        # Add RO fields
        if cls == 'VM':
            obj['power_state'] = 'Halted'
        return ref
    def _destroy(self, name, params):
        """Generic implementation of '<Class>.destroy' calls."""
        self._check_session(params)
        self._check_arg_count(params, 2)
        table = name.split('.')[0]
        ref = params[1]
        if ref not in _db_content[table]:
            raise Failure(['HANDLE_INVALID', table, ref])
        # Call destroy function (if exists)
        destroy_func = globals().get('destroy_%s' % table.lower())
        if destroy_func:
            destroy_func(ref)
        else:
            del _db_content[table][ref]
    def _async(self, name, params):
        """Run an 'Async.*' call synchronously, recording it as a fake task."""
        task_ref = create_task(name)
        task = _db_content['task'][task_ref]
        func = name[len('Async.'):]
        try:
            result = self.xenapi_request(func, params[1:])
            if result:
                result = as_value(result)
            task['result'] = result
            task['status'] = 'success'
        except Failure as exc:
            # XenAPI failures mark the task failed instead of propagating.
            task['error_info'] = exc.details
            task['status'] = 'failed'
        task['finished'] = timeutils.utcnow()
        return task_ref
    def _check_session(self, params):
        """Validate that params carry the current, registered session handle."""
        if (self._session is None or
                self._session not in _db_content['session']):
            raise Failure(['HANDLE_INVALID', 'session', self._session])
        if len(params) == 0 or params[0] != self._session:
            LOG.debug(_('Raising NotImplemented'))
            raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
    """Stand-in for the XenAPI module object; only exposes Failure."""
    def __init__(self):
        self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.tensorflow import inception_utils
slim = tf.contrib.slim


def trunc_normal(stddev):
    """Truncated-normal initializer with mean 0.0 and the given stddev.

    PEP 8 (E731) prefers a named def over assigning a lambda; callers are
    unaffected.
    """
    return tf.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception v2 (6a2).
  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # depth() scales a nominal channel count but never drops below min_depth.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  with tf.variable_scope(scope, 'InceptionV2', [inputs]):
    # Every conv/pool below defaults to stride 1 with SAME padding unless
    # overridden at the call site.
    with slim.arg_scope(
        [slim.conv2d, slim.max_pool2d, slim.avg_pool2d, slim.separable_conv2d],
        stride=1, padding='SAME'):
      # Note that sizes in the comments below assume an input spatial size of
      # 224x224, however, the inputs can be of any size greater 32x32.
      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'
      # depthwise_multiplier here is different from depth_multiplier.
      # depthwise_multiplier determines the output channels of the initial
      # depthwise conv (see docs for tf.nn.separable_conv2d), while
      # depth_multiplier controls the # channels of the subsequent 1x1
      # convolution. Must have
      # in_channels * depthwise_multiplier <= out_channels
      # so that the separable convolution is not overparameterized.
      depthwise_multiplier = min(int(depth(64) / 3), 8)
      net = slim.separable_conv2d(
          inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier,
          stride=2, weights_initializer=trunc_normal(1.0),
          scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = slim.conv2d(net, depth(64), [1, 1], scope=end_point,
                        weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 192
      # Inception module.
      end_point = 'Mixed_3b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(32), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 320
      end_point = 'Mixed_4a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_5a'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(
              net, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with tf.variable_scope(end_point):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
                                 scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(
              net, depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
                                 scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          # NOTE: Mixed_5c uses max pooling here where the earlier modules
          # use average pooling.
          branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = slim.conv2d(
              branch_3, depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if end_point == final_endpoint: return net, end_points
    # Reached only when final_endpoint matched no known layer name.
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.
  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.
  The default image size used to train this network is 224x224.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  # Final pooling and prediction
  with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes],
                         reuse=reuse) as scope:
    # batch_norm and dropout need to know the training/inference mode.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v2_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with tf.variable_scope('Logits'):
        # Shrink the 7x7 pool window for inputs smaller than 224x224.
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                              scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Canonical training resolution for this architecture.
inception_v2.default_image_size = 224
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.pack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
inception_v2_arg_scope = inception_utils.inception_arg_scope
| |
# -*- coding: utf-8 -*-
"""
THis script is to manage experiment data,
including record infos for new data pack,
fetching data for experiments, etc.
"""
__author__ = "Xu Fangzhou"
__email__ = "kevin.xu.fangzhou@gmail.com"
import csv
import json
import os
import re
from json import *

from pandas import *
from pymongo import *
class Data:
    """A single MongoDB-backed data set to manipulate."""

    def __init__(self, Database, name):
        self.Database = Database
        self.db = Database.DB[name]
        self.name = name

    def record(self, _id, _dict):
        """Update the document whose 'id' field equals _id with _dict.

        parameters:
            _id: string, the identifying id of the document.
            _dict: dict, the fields to $set on the document.
        """
        self.db.update({'id': _id}, {'$set': _dict})

    def show(self, _id):
        """Return the document with the given 'id' as a dict (or None)."""
        return self.db.find_one({'id': _id})

    def show_all(self):
        """Return every document in the collection, minus the Mongo '_id'."""
        documents = []
        for doc in self.db.find():
            doc.pop('_id')
            documents.append(doc)
        return documents
class DSData(Data):
    """Data set with lineage (parent/root) and commit-diff helpers."""

    def find_parent(self):
        """Return the parent data set name, or None for a root data set."""
        info = self.db.find_one({'_id': 'info'})
        # dict.has_key() was removed in Python 3; 'in' works on 2 and 3.
        if 'parent' in info and info['parent'] != "":
            return info['parent']
        return None

    def find_root(self):
        """Follow parent links up to the name of the root data set."""
        info = self.db.find_one({'_id': 'info'})
        if 'parent' in info and info['parent'] != "":
            return self.Database.get_data(info['parent']).find_root()
        return self.name

    def show_info(self):
        """Return the 'info' document as a one-row DataFrame."""
        return DataFrame([self.db.find_one({'_id': 'info'})])

    def show_data(self):
        """Return all non-info documents as a DataFrame."""
        rtn = []
        for i in self.db.find():
            if i.pop('_id') != 'info':
                rtn.append(i)
        return DataFrame(rtn)

    def diff(self, commit_id1="", commit_id2=""):
        """Diff field values between two commits across all documents.

        With no arguments, the first two entries of the info document's
        'commit_ids' list are compared.
        """
        if commit_id1 == "" and commit_id2 == "":
            commit_ids = self.show_info()['commit_ids'][0]
            commit_id1, commit_id2 = commit_ids[0], commit_ids[1]
        diffs = []
        for i in self.db.find():
            if i.pop('_id') != 'info':
                diffs1 = {}
                diffs2 = {}
                for k in i:
                    if k not in ['_id', 'id']:
                        if commit_id1 not in i[k]:
                            if commit_id2 in i[k]:
                                diffs1[k] = ''
                                diffs2[k] = i[k][commit_id2]
                        else:
                            if commit_id2 not in i[k]:
                                diffs2[k] = ''
                                diffs1[k] = i[k][commit_id1]
                            elif i[k][commit_id1] != i[k][commit_id2]:
                                diffs1[k] = i[k][commit_id1]
                                diffs2[k] = i[k][commit_id2]
                if len(diffs1) > 0:
                    diffs.append({commit_id1: diffs1, commit_id2: diffs2,
                                  'id': i['id']})
        return DataFrame(diffs)
class ExpData(Data):
    """Experiment-record data set with per-experiment diff helpers."""

    def show_exp_names(self):
        """Return the names of all experiments as a DataFrame."""
        rtn = []
        for i in self.db.find():
            rtn.append(i['exp_name'])
        return DataFrame(rtn)

    def show_exp(self, name):
        """Return the records of one experiment as a DataFrame."""
        exp = self.db.find_one({'exp_name': name})
        return DataFrame(exp['exp_records'])

    def diff(self, exp_name, commit_id1="", commit_id2="", show=None):
        """Diff two commits of an experiment, optionally limited to keys in show.

        With empty commit ids, the two most recent commits are compared.
        NOTE: 'show' previously defaulted to a mutable list that was
        append()ed, leaking 'commit_id' between calls and mutating the
        caller's list; None now stands for "all keys" and caller lists are
        copied before extending.
        """
        if commit_id1 == "" and commit_id2 == "":
            commit_ids = self.show_exp(exp_name)['commit_id']
            commit_id1 = commit_ids[commit_ids.size - 1]
            commit_id2 = commit_ids[commit_ids.size - 2]
        for i in self.db.find_one({'exp_name': exp_name})['exp_records']:
            if i['commit_id'] == commit_id1:
                c1 = i
            if i['commit_id'] == commit_id2:
                c2 = i
        flag = True
        if show is None or show == []:
            show = c1.keys()
            flag = False
        else:
            show = list(show) + ['commit_id']
        c1r, c2r = {}, {}
        for key in show:
            if key not in c1.keys() and key in c2.keys():
                c2r[key] = c2[key]
            elif key not in c2.keys() and key in c1.keys():
                c1r[key] = c1[key]
            elif c1[key] != c2[key] or flag:
                c1r[key], c2r[key] = c1[key], c2[key]
        return DataFrame([c1r, c2r])

    def diff_result(self, exp_name, commit_id1='', commit_id2=''):
        """Diff only the 'result' fields of two commits of an experiment."""
        if commit_id1 == "" and commit_id2 == "":
            commit_ids = self.show_exp(exp_name)['commit_id']
            commit_id1 = commit_ids[commit_ids.size - 1]
            commit_id2 = commit_ids[commit_ids.size - 2]
        for i in self.db.find_one({'exp_name': exp_name})['exp_records']:
            # BUG FIX: the commit_id labels were previously assigned outside
            # these ifs, so both r1 and r2 ended up tagged with the LAST
            # record's commit id (and crashed when the first record matched
            # neither commit).
            if i['commit_id'] == commit_id1:
                r1 = {} if 'result' not in i else i['result']
                r1['commit_id'] = i['commit_id']
            if i['commit_id'] == commit_id2:
                r2 = {} if 'result' not in i else i['result']
                r2['commit_id'] = i['commit_id']
        # Iterate over a snapshot: mutating a dict while iterating its keys
        # raises RuntimeError on Python 3.
        for k in list(r1.keys()):
            if k in r2 and r1[k] == r2[k]:
                r1.pop(k)
                r2.pop(k)
        return DataFrame([r1, r2])
class Database:
    """A MongoDB database of data sets ('datas') or experiments ('users')."""

    def __init__(self, db="datas", address=""):
        # Fall back to the mongodb_url recorded in the sandbox config file.
        if address == "":
            with open(os.environ.get("HOME") + "/sandbox/config.json") as fp:
                config = json.load(fp)
            address = config['mongodb_url']
        client = MongoClient(address)
        self.DB = client[db]
        self.name = db

    def _insert_info(self, coll, name, _type, path, description, parent):
        """Insert the collection's 'info' document, validating the parent."""
        if parent != "" and parent not in self.DB.collection_names():
            raise Exception("parent data set not in DB!")
        coll.insert({'_id': 'info',
                     'name': name,
                     'type': _type,
                     'path': path,
                     'description': description,
                     'parent': parent,
                     'commit_ids': []})

    def import_data(self, name, description="", parent="", ignore=[], it=None, _type='', **kwargs):
        """Import a data pack into the 'datas' DB.

        name is either an arbitrary collection name (with `it` an iterator
        factory yielding documents) or a '<base>.<csv|tsv|json>' file name.
        Columns listed in `ignore` are skipped for csv/tsv input.
        """
        if self.DB.name != 'datas':
            # print() works on both Python 2 and 3; the bare print statement
            # is a syntax error on 3.
            print("should not import data into db other than 'datas'!")
            return
        t = re.split(r'\.', name)
        if it:  # self-defined input iterator
            try:
                coll = self.DB.create_collection(name)
                self._insert_info(coll, name, _type,
                                  '~/sandbox/data/' + name,
                                  description, parent)
                for i in it(**kwargs):
                    if 'id' not in i.keys():
                        raise Exception("no id attribute!")
                    coll.insert(i)
            except Exception as e:
                print(e)
                print('Aborting')
        elif len(t) == 2 and t[-1] in ['csv', 'tsv']:
            # it's csv or tsv file
            try:
                coll = self.DB.create_collection(t[0])
                self._insert_info(coll, t[0], t[-1],
                                  '~/sandbox/data/' + name,
                                  description, parent)
                # 'with' closes the file even on error (it leaked before).
                with open(name) as fp:
                    if t[-1] == 'csv':
                        r = csv.reader(fp)
                    else:
                        r = csv.reader(fp, delimiter='\t',
                                       quoting=csv.QUOTE_ALL)
                    # reader.next() is Python-2 only; next() works on both.
                    title = next(r)
                    if 'id' not in title:
                        raise Exception("no id attribute!")
                    for v in r:
                        if len(v) == 0:
                            break
                        f = {}
                        for i in range(len(title)):
                            if title[i] not in ignore:
                                f[title[i]] = v[i]
                        coll.insert(f)
            except Exception as e:
                print(e)
                print('Aborting...')
        elif len(t) == 2 and t[-1] == 'json':
            try:
                coll = self.DB.create_collection(t[0])
                self._insert_info(coll, t[0], t[-1],
                                  '~/sandbox/data/' + name,
                                  description, parent)
                with open(name) as fp:
                    l = json.load(fp)
                for d in l:
                    coll.insert(d)
            except Exception as e:
                print(e)
                print("Aborting...")

    def generate_data(self, name, description="", parent="", ignore=[]):
        """Copy a local file into ~/sandbox/data and register it."""
        if name not in os.listdir('.'):
            print("no such data set to register: " + name)
            return
        os.system("cp %s ~/sandbox/data/" % name)
        self.import_data(name, description, parent, ignore)

    def join(self, name, name_list=[], key='_id'):
        """Left-join documents of `name` with matching docs from name_list on key."""
        rtn = self.get_data(name).show_all()
        for i in range(0, len(rtn)):
            try:
                value = rtn[i][key]
                for n in name_list:
                    b = self.get_data(n).db.find_one({key: value})
                    if b:
                        for p in b.items():
                            # has_key() was removed in Python 3.
                            if p[0] not in rtn[i]:
                                rtn[i][p[0]] = p[1]
            except Exception as e:
                print(e)
        return rtn

    def get_data(self, name):
        """Wrap a collection in the Data subclass appropriate for this DB."""
        if self.name == 'datas':
            return DSData(self, name)
        elif self.name == 'users':
            return ExpData(self, name)
class View:
    """
    Read-only joined view over data sets in one database.

    A primary data set (`name`) is joined against the data sets listed in
    `name_list`: records are matched on `key` and missing fields are
    copied from the secondary sets without overwriting existing ones.
    """
    def __init__(self, database, name, name_list=[], key="_id"):
        # NOTE(review): name_list uses a mutable default; it is only read.
        self.database = database
        self.prim_ds = self.database.get_data(name)
        self.name = name
        self.name_list = name_list
        self.key = key
    def get(self, pair):
        """Fetch one record matching `pair` from the primary data set and
        enrich it with fields from the secondary data sets.

        NOTE(review): assumes a matching record exists -- find_one()
        returning None would raise a TypeError below; confirm callers.
        """
        rtn = self.prim_ds.db.find_one(pair)
        for name in self.name_list:
            b = self.database.get_data(name).db.find_one({self.key: rtn[self.key]})
            if b:
                # dict.has_key() was removed in Python 3; `in` is portable.
                for k, v in b.items():
                    if k not in rtn:
                        rtn[k] = v
        return rtn
    def dump(self):
        """Return the full joined record list (delegates to Database.join)."""
        return self.database.join(self.name, self.name_list, self.key)
    def dump_df(self):
        """Return the joined records as a pandas DataFrame.

        The first record is skipped -- presumably the '_id'=='info'
        metadata document; verify against the collection layout.
        """
        return DataFrame(self.dump()[1:])
| |
from testConfig import TestConfig
from Tkinter import *
import os
from sahanaTest import SahanaTest
from subprocess import call
from subprocess import Popen
import time
from selectTestsWindow import SelectTestWindow
class TestWindow(Frame):
    """ TK GUI to set the Test Settings"""
    def __init__(self, parent=None):
        # Build the control panel: a title row plus a 2x2 grid of panels
        # (Sahana options, Selenium server, test modules, browser choice).
        self.seleniumServer = 0  # Popen handle of a locally started server; 0 when none
        Frame.__init__(self, parent=parent)
        self.winfo_toplevel().title("Sahana Eden regression testing helper program")
        self.pack(fill=BOTH)
        title = Frame(self)
        title.pack(side=TOP)
        detail = Frame(self)
        detail.pack(side=TOP, fill=BOTH)
        Label(title, text="Sahana Eden Regression Tests - Control Panel").pack(side=LEFT)
        sahanaPanel = Frame(detail, borderwidth=2, relief=SUNKEN)
        sahanaPanel.grid(row=0, column=0, sticky=NSEW)
        self.sahanaPanel(sahanaPanel)
        serverPanel = Frame(detail, borderwidth=2, relief=SUNKEN)
        serverPanel.grid(row=0, column=1, sticky=NSEW)
        self.serverPanel(serverPanel)
        testModulesPanel = Frame(detail, borderwidth=2, relief=SUNKEN)
        testModulesPanel.grid(row=1, column=0, sticky=NSEW)
        self.testModulepanel(testModulesPanel)
        browserPanel = Frame(detail, borderwidth=2, relief=SUNKEN)
        browserPanel.grid(row=1, column=1, sticky=NSEW)
        self.browser(browserPanel)
        # Make all four panels stretch with the window.
        detail.rowconfigure(0, weight=1)
        detail.rowconfigure(1, weight=1)
        detail.columnconfigure(0, weight=1)
        detail.columnconfigure(1, weight=1)
    # def run(self):
    # self.runTestSuite()
    # ##thread.start_new(self.runTestSuite, ())
    def runTestSuite(self):
        """Configure Selenium from the GUI fields, run the selected test
        modules and open the HTML results page in Firefox."""
        # call static method of the base class for all Sahana test case classes
        # this method will ensure that one Selenium instance exists and can be shared
        SahanaTest.setUpHierarchy(self.radioB.get(),
                                  self.browserPath.get(),
                                  self.ipAddr.get(),
                                  self.ipPort.get(),
                                  self.URL.get() + self.app.get()
                                  )
    # SahanaTest.useSahanaAccount(self.adminUser.get(),
    # self.adminPassword.get(),
    # )
        self.clean = False
        testConfig = TestConfig()
        testModuleList = self.getTestCasesToRun()
        testConfig.test_main(testModuleList, self.radioB.get())
        call(["firefox", os.path.join("..", "results", testConfig.fileName)])
        if (not self.keepOpen):
            SahanaTest.selenium.stop() # This will close the Selenium/Browser window
        self.clean = True
    def getTestCasesToRun(self):
        """ Read the status of the checkBoxes & use this to work out which tests to run """
        i = 0
        testModuleList = []
        for module in self.checkboxModules:
            testModule = {}
            if module.get() == 1:
                # Module is checked: collect the names of its enabled tests.
                testDetail = []
                for test in self.moduleList[i]["tests"]:
                    if test["state"] == True:
                        testDetail.append(test["name"])
                testModule["class"] = self.moduleList[i]["class"]
                testModule["tests"] = testDetail
                testModuleList.append(testModule)
            i += 1
        return tuple(testModuleList)
    def __del__(self):
        # NOTE(review): SahanaTestSuite is not imported in this module, and
        # self.clean is only assigned inside runTestSuite() -- this
        # destructor can raise NameError/AttributeError; verify the
        # intended cleanup path (SahanaTest.selenium.stop()?).
        if (not self.clean):
            SahanaTestSuite.stopSelenium()
    def isSeleniumRunning(self):
        """Best-effort check whether a Selenium server is listening on the
        configured port (linux: netstat scan; otherwise: local handle)."""
        if sys.platform[:5] == "linux":
            # NOTE(review): `sys` is not among this module's visible
            # imports -- presumably imported elsewhere; confirm.
            # Need to find if a service is running on the Selenium port
            sockets = os.popen("netstat -lnt").read()
            # look for match on IPAddr and port
            service = ":%s" % (self.ipPort.get())
            if (service in sockets):
                return True
            else:
                return False
        if self.seleniumServer != 0:
            return True
        # Implicitly returns None (falsy) when no server was started here.
    def sahanaPanel(self, panel):
        """Populate the 'Sahana options' panel (URL, application name and
        the keep-browser-open checkbox)."""
        Label(panel, text="Sahana options").pack(side=TOP)
        Label(panel,
              text="To run the tests a user with admin rights needs to be provided.").pack(side=TOP,
                                                                                           anchor=W)
        Label(panel,
              text="If this is left blank then it is assumed that there is a blank database\n & so this can be created by registering the user.").pack(side=TOP,
                                                                                                                                                      anchor=W)
        detailPanel = Frame(panel)
        detailPanel.pack(side=TOP, anchor=W, fill=X)
        # Label(detailPanel, text="User name:").grid(row=0, column=0, sticky=NW)
        # self.adminUser = Entry(detailPanel, width=30)
        # self.adminUser.grid(row=0, column=1, sticky=NW)
        # self.adminUser.insert(0, "")
        # Label(detailPanel, text="Password:").grid(row=1, column=0, sticky=NW)
        # self.adminPassword = Entry(detailPanel, show="*", width=16)
        # self.adminPassword.grid(row=1, column=1, sticky=NW)
        Label(detailPanel, text="Sahana URL:").grid(row=2, column=0, sticky=NW)
        self.URL = Entry(detailPanel, width=40)
        self.URL.grid(row=2, column=1, sticky=NW)
        self.URL.insert(0, "http://127.0.0.1:8000/")
        Label(detailPanel, text="Sahana Application:").grid(row=3, column=0,
                                                            sticky=NW)
        self.app = Entry(detailPanel, width=40)
        self.app.grid(row=3, column=1, sticky=NW)
        self.app.insert(0, "eden/")
        Label(detailPanel, text="Keep browser open:").grid(row=4, column=0, sticky=NW)
        # NOTE(review): variable="keepOpen" passes a Tk variable *name*
        # string, not an IntVar; the real state is tracked manually via
        # toggleKeepOpenButton() on self.keepOpen.
        self.keepOpen = False;
        Checkbutton(detailPanel, variable="keepOpen",
                    command=self.toggleKeepOpenButton).grid(row=4, column=1, sticky=NW)
    def toggleKeepOpenButton(self):
        # Mirror the checkbox state into the plain boolean self.keepOpen.
        self.keepOpen = not self.keepOpen
    def selectTests(self, i, module, details):
        """Open the test-selection dialog for `module`, then refresh every
        module's run-count label and persist the selection to disk."""
        dialog = SelectTestWindow(self, module, details)
        # NOTE(review): the `i` parameter is immediately overwritten here.
        i = 0
        for lbl in self.labelList:
            lbl["text"] = self.testcaseTotals(self.moduleList[i])
            lbl["fg"] = self.testcaseColour
            i += 1
        self.writeTestCasesForClass(module, details)
    def toggleButton(self):
        """Show the 'Select tests' button only for checked modules."""
        i = 0
        for module in self.checkboxModules:
            # Show or hide the button to select the tests
            if module.get() == 1:
                self.buttonList[i].grid()
            else:
                self.buttonList[i].grid_remove()
            i += 1
    def testcaseTotals(self, testList):
        """Return 'run of total' for one module; as a side effect set
        self.testcaseColour to red when not all tests are selected."""
        total = 0
        run = 0
        for test in testList["tests"]:
            total += 1
            if test["state"] == True:
                run += 1
        if total == run:
            self.testcaseColour = "black"
        else:
            self.testcaseColour = "red"
        return "%s of %s" % (run, total)
    def testModulepanel(self, panel):
        """Populate the 'Test Modules' panel: one checkbox, count label and
        a (hidden until checked) 'Select tests' button per test module."""
        self.moduleList = TestConfig().getTestModuleDetails()
        Label(panel, text="Test Modules").pack(side=TOP)
        Label(panel,
              text="Select the test modules that you would like to run.").pack(side=TOP,
                                                                               anchor=W)
        detailPanel = Frame(panel)
        detailPanel.pack(side=TOP, anchor=W, fill=X)
        self.checkboxModules = []
        self.buttonList = []
        self.moduleName = []
        self.labelList = []
        i = 0
        details = {}
        for details in self.moduleList:
            name = details["name"]
            self.moduleName.append(name)
            var = IntVar()
            chk = Checkbutton(detailPanel, text=name, variable=var,
                              command=self.toggleButton)
            self.checkboxModules.append(var)
            btnFrame = Frame(detailPanel)
            # Modules are laid out two per row, three grid columns each.
            chk.grid(row=i//2, column=i%2*3, sticky=NW)
            lbl = Label(detailPanel,
                        text=self.testcaseTotals(self.moduleList[i]))
            lbl["fg"] = self.testcaseColour
            lbl.grid(row=i//2, column=i%2*3+1, sticky=NW)
            self.labelList.append(lbl)
            btn = Button(btnFrame, text="Select tests")
            btn.grid()
            btnFrame.grid(row=i//2, column=i%2*3+2, sticky=NW)
            btnFrame.grid_remove()
            self.buttonList.append(btnFrame)
            # Default arguments freeze the current i/name/details for each
            # button (avoids the late-binding closure pitfall).
            def handler(event, i=i, module=name, details=details):
                return self.selectTests(i, module, details)
            btn.bind(sequence="<ButtonRelease-1>", func=handler)
            i += 1
    def serverStatus(self, event):
        """Refresh the status label and enable/disable Start/Stop buttons
        according to whether a local Selenium server is reachable."""
        if (self.ipAddr.get() != "127.0.0.1"):
            # Remote server: cannot be managed from this GUI.
            self.statusLbl.config(text="Unknown")
            self.stopSelenium.config(state="disabled")
            self.startSelenium.config(state="disabled")
        elif self.isSeleniumRunning():
            self.statusLbl.config(text="Running")
            self.stopSelenium.config(state="active")
            self.startSelenium.config(state="disabled")
        else:
            self.statusLbl.config(text="Stopped")
            self.stopSelenium.config(state="disabled")
            self.startSelenium.config(state="active")
        self.updateServerCommand()
    def serverPanel(self, panel):
        """Populate the 'Selenium server options' panel (status, address,
        port, logging mode and the Start/Stop buttons)."""
        Label(panel, text="Selenium server options").pack(side=TOP)
        detailPanel = Frame(panel)
        detailPanel.columnconfigure(0, weight=0)
        detailPanel.columnconfigure(1, weight=1)
        detailPanel.pack(side=TOP, anchor=W, fill=X)
        Label(detailPanel, text="Status:").grid(row=0, column=0, sticky=NW)
        self.statusLbl = Label(detailPanel, text="Unknown")
        self.statusLbl.grid(row=0, column=1, sticky=NW)
        Label(detailPanel, text="IP Address:").grid(row=1,column=0, sticky=NW)
        self.ipAddr = Entry(detailPanel, width=16)
        self.ipAddr.insert(0, "127.0.0.1")
        self.ipAddr.grid(row=1, column=1, sticky=NW)
        Label(detailPanel, text="Port:").grid(row=2, column=0, sticky=NW)
        self.ipPort = Entry(detailPanel, width=6)
        self.ipPort.insert(0, "4444")
        self.ipPort.grid(row=2, column=1, sticky=NW)
        # Re-evaluate the server status whenever address/port are edited.
        self.ipAddr.bind("<FocusOut>", self.serverStatus)
        self.ipPort.bind("<FocusOut>", self.serverStatus)
        Label(detailPanel, text="Logging:").grid(row=4, column=0, sticky=NW)
        logPanel = Frame(detailPanel)
        logPanel.grid(row=4, column=1, sticky=NSEW)
        self.radioLog = StringVar()
        Radiobutton(logPanel, text="No Logging", value="None",
                    command=self.onPressServerLog,
                    variable=self.radioLog).pack(side=TOP, anchor = W)
        Radiobutton(logPanel, text="Log to file", value="File",
                    command=self.onPressServerLog,
                    variable=self.radioLog).pack(side=TOP, anchor = W)
        self.logFilename = Entry(logPanel, width=40)
        self.logFilename.insert(0, "SahanaEdenRegressionTests.log")
        self.logFilename.config(state="readonly")
        self.logFilename.pack(side=TOP, anchor=W, expand=YES, fill=X)
        self.radioLog.set("None")
        self.serverCommand = Entry(detailPanel, state="readonly", width=50)
        self.serverCommand.grid(row=5, column=0, columnspan=2, sticky=NSEW)
        self.updateServerCommand()
        button = Frame(logPanel)
        button.pack(side=TOP, fill=BOTH)
        # NOTE(review): these assignments rebind self.startSelenium /
        # self.stopSelenium (previously the bound methods) to Button
        # widgets.  command= captured the bound methods *before* the
        # rebinding, so the callbacks still work, but the methods are no
        # longer reachable via the instance -- consider renaming widgets.
        self.startSelenium = Button(button, text="Start",
                                    command=self.startSelenium)
        self.startSelenium.pack(side=RIGHT, anchor=SE)
        self.stopSelenium = Button(button, text="Stop",
                                   command=self.stopSelenium)
        self.stopSelenium.pack(side=RIGHT, anchor=SE)
        self.serverStatus(Event())
    def updateServerCommand(self):
        """Show the current server start command in the read-only entry."""
        args = self.buildServerStartCommand()
        self.serverCommand.config(state="normal")
        self.serverCommand.delete(0, len(self.serverCommand.get()))
        self.serverCommand.insert(0, args)
        self.serverCommand.config(state="readonly")
    def buildServerStartCommand(self):
        """Return the argv tuple used to launch the Selenium server jar,
        honouring JAVA_HOME, the configured port and the logging mode."""
        # NOTE: os.environ.has_key() is Python-2-only syntax.
        if os.environ.has_key("JAVA_HOME"):
            java = os.path.join(os.environ["JAVA_HOME"], "bin", "java")
        else:
            java = "java"
        # http://wiki.openqa.org/display/SIDE/record+and+assert+Ext+JS
        #args = [java, r"-jar", r"selenium-server.jar", r"-userExtensions", r"user-extensions.js", r"-singlewindow", "-port", "%s" % self.ipPort.get()]
        args = [java, r"-jar", r"selenium-server.jar", r"-singlewindow",
                "-port", "%s" % self.ipPort.get()]
        if self.radioLog.get() == "File":
            args.append("-log")
            args.append(self.logFilename.get())
        return tuple(args)
    def startSelenium(self):
        """ Start the Selenium server """
        # The jar lives in ../server relative to this scripts directory.
        os.chdir(r"../server/")
        args = self.buildServerStartCommand()
        self.startSelenium.config(state="disabled")
        self.seleniumServer = Popen(args)
        os.chdir(r"../scripts/")
        # Crude wait to give the server time to start
        time.sleep(5)
        self.serverStatus(Event())
    def stopSelenium(self):
        """ Stop the Selenium server """
        # Prefer terminating the process we started ourselves.
        if self.seleniumServer != 0:
            self.seleniumServer.terminate()
            self.seleniumServer = 0
            self.serverStatus(Event())
            return
        # Otherwise (linux only) look for a java/selenium process to kill.
        if sys.platform[:5] == "linux":
            result = os.popen("ps x").readlines()
            for line in result:
                if "selenium" in line and "java" in line:
                    pid = line.split()[0]
                    os.system("kill %s" % pid)
                    print "Stopping process %s started with command %s" % (pid,
                                                                           line)
            self.serverStatus(Event())
            return
    def onPressServerLog(self):
        # Only allow editing the log file name when file logging is chosen.
        if self.radioLog.get() == "None":
            self.logFilename.config(state="readonly")
        else:
            self.logFilename.config(state="normal")
        self.updateServerCommand()
    # a file with one browser detail on each line
    # The browser name is first followed by the command to pass to selenium to start the browser
    def getBrowserDetails(self):
        """Parse ../data/browser.txt into a list of
        (display name, selenium launch string) tuples."""
        source = open("../data/browser.txt", "r")
        values = source.readlines()
        source.close()
        browserList = []
        for browser in values:
            details = browser.split(",")
            if len(details) == 2:
                browserList.append((details[0].strip(), details[1].strip()))
        return browserList
    def browser(self, panel):
        """Populate the 'Browser' panel: one radio button per browser from
        browser.txt, a custom-path entry and the Run/Quit buttons."""
        # See http://svn.openqa.org/fisheye/browse/~raw,r=2335/selenium-rc/website/src/main/webapp/experimental.html
        browserList = self.getBrowserDetails()
        self.radioB = StringVar()
        Label(panel, text="Browser").pack(side=TOP)
        for browser in browserList:
            Radiobutton(panel, text=browser[0], command=self.onPressBrowser,
                        value=browser[1],
                        variable=self.radioB).pack(side=TOP, anchor=W)
        path = Frame(panel)
        path.pack(side=TOP, fill=X)
        Label(path, text="Path to custom Browser").pack(side=TOP, anchor=W)
        self.browserPath = Entry(path, width=40)
        self.browserPath.insert(0, "<<enter path to executable here>>")
        self.browserPath.config(state="readonly")
        self.browserPath.pack(side=TOP, anchor=W, expand=YES, fill=X)
        self.radioB.set("*chrome")
        button = Frame(panel)
        button.pack(side=TOP, fill=BOTH)
        Button(button, text="Run Test Suite", command=self.runTestSuite).pack(side=RIGHT, anchor=SE)
        Button(button, text="Quit", command=self.quit).pack(side=RIGHT, anchor=SE)
    def onPressBrowser(self):
        # The custom-browser path is only editable for the "*custom" choice.
        if self.radioB.get() == "*custom":
            self.browserPath.config(state="normal")
        else:
            self.browserPath.config(state="readonly")
    def writeTestCasesForClass(self, module, details):
        """ Save details of the tests run """
        # NOTE(review): if open() fails, `source` stays undefined and the
        # loop below raises NameError -- the except branch should probably
        # return after printing.
        try:
            source = open("../tests/%s.txt" % details['class'], "w")
        except:
            print "Failed to write to file ../tests/%s.txt" % details["class"]
        for tests in details["tests"]:
            source.write("%s, %s\n" % (tests["name"], tests["state"]))
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__='ar'
import re
import sys
import os
import glob
import time
import json
import numpy as np
import skimage.io as io
import skimage.color as skcolor
import skimage.transform as sktransform
import matplotlib.pyplot as plt
from keras import backend as K
import keras
from keras.models import Sequential
from keras.layers import Convolution1D, Convolution2D, Convolution3D,\
MaxPooling1D, MaxPooling2D, MaxPooling3D,\
AveragePooling1D,AveragePooling2D, AveragePooling3D,\
InputLayer, Flatten, Merge, Activation, Dense, Dropout
# from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD
from keras.models import model_from_json
from keras.optimizers import Optimizer
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax
from app.backend.core import utils as dlsutils
from batcher_image2d import BatcherImage2DLMDB
# from flow_parser import getKerasOptimizerName
from flow_parser_helper_opt import getOptimizerJson2Keras, getKerasOptimizerName
from cfg import CFG_MODEL_TRAIN, CFG_SOLVER
#########################
def split_list_by_blocks(lst, psiz):
    """Split `lst` into consecutive chunks of size `psiz`.

    The last chunk may be shorter than `psiz` when len(lst) is not a
    multiple of the chunk size.

    :param lst: input list (any sliceable sequence)
    :param psiz: positive chunk size
    :return: list of chunks (slices of `lst`)
    """
    # range() instead of the Python-2-only xrange(): identical result here
    # and keeps the helper usable under Python 3 as well.
    return [lst[x:x + psiz] for x in range(0, len(lst), psiz)]
def findLayerFromEndByType(model, layerType):
    """Return the index (into model.layers) of the *last* layer that is an
    instance of `layerType`, or -1 when no such layer exists."""
    numLayers = len(model.layers)
    for pos in range(numLayers - 1, -1, -1):
        if isinstance(model.layers[pos], layerType):
            return pos
    return -1
def cloneLayerFromLayer(pLayer):
    """Create a fresh (unweighted) copy of `pLayer` from its Keras config.

    Only the layer types listed below are supported; for any other layer
    type None is returned so the caller can detect unsupported layers.

    :param pLayer: a Keras layer instance
    :return: a new layer of the same type, or None if unsupported
    """
    # Convolution layers
    if isinstance(pLayer, Convolution1D):
        return Convolution1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution2D):
        return Convolution2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution3D):
        return Convolution3D.from_config(pLayer.get_config())
    # Max-Pooling:
    elif isinstance(pLayer, MaxPooling1D):
        # FIX: this branch previously returned MaxPooling2D.from_config()
        # (copy/paste bug), producing a 2D pooling layer from a 1D config.
        return MaxPooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling2D):
        return MaxPooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling3D):
        return MaxPooling3D.from_config(pLayer.get_config())
    # Average-Pooling
    elif isinstance(pLayer, AveragePooling1D):
        return AveragePooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling2D):
        return AveragePooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling3D):
        return AveragePooling3D.from_config(pLayer.get_config())
    # Structural / activation layers
    elif isinstance(pLayer, Flatten):
        return Flatten.from_config(pLayer.get_config())
    elif isinstance(pLayer, Merge):
        return Merge.from_config(pLayer.get_config())
    elif isinstance(pLayer, Activation):
        return Activation.from_config(pLayer.get_config())
    elif isinstance(pLayer, Dropout):
        return Dropout.from_config(pLayer.get_config())
    # Fully connected
    elif isinstance(pLayer, Dense):
        return Dense.from_config(pLayer.get_config())
    return None
#########################
class KerasTrainer:
    """Helper that builds, trains, saves/restores and runs inference on a
    Keras Sequential model fed from an LMDB image batcher.

    Progress and errors are printed in 'keras-info#...' / 'keras-error#...'
    wire formats parsed by the frontend from stdout.
    """
    # File-name conventions for persisted training state:
    extModelWeights = 'h5kerasmodel'        # weights file extension
    extJsonTrainConfig = '_trainconfig.json'  # model-config JSON suffix
    extJsonSolverState = '_solverstate.json'  # solver-state JSON suffix
    # Class-level defaults (instance assignments shadow these):
    modelPrefix=''
    batcherLMDB = None        # BatcherImage2DLMDB feeding train/val batches
    pathModelConfig=None      # path of the JSON model config, when loaded from file
    model=None                # compiled keras.models.Sequential
    outputDir=None            # where model/solver/weight files are written
    sizeBatch=32
    numEpoch=1
    numIterPerEpoch=0         # derived from numTrain / sizeBatch
    intervalSaveModel=1       # epochs between model snapshots
    intervalValidation=1      # epochs between validation passes
    currentIter=0
    currentEpoch=0
    printInterval=20          # iterations between progress log lines
    modelName="Unknown"
    deviceType='cpu'
    def __init__(self):
        # Start with empty training logs and zeroed iteration counters.
        self.cleanResults()
    @staticmethod
    def adjustModelInputOutput2DBData(parModel, parLMDB, isFixOutputLayer = True):
        """Rebuild `parModel` as a new Sequential model whose input shape
        matches the LMDB batcher's image shape and, optionally, whose last
        Dense layer is resized to the batcher's number of labels.

        :param parModel: source Keras Sequential model
        :param parLMDB: BatcherImage2DLMDB instance or a path to an LMDB
        :param isFixOutputLayer: when True, resize the last Dense layer's
               output to parLMDB.numLbl
        :return: new Sequential model (layers cloned, weights NOT copied)
        :raises Exception: for unsupported parLMDB types or (when fixing
               the output) a model without any Dense layer
        """
        # (1) check LMDB is object instance or path to DB
        if isinstance(parLMDB, BatcherImage2DLMDB):
            ptrLMDB = parLMDB
        elif (isinstance(parLMDB, str) or isinstance(parLMDB, unicode)):
            ptrLMDB = BatcherImage2DLMDB(parLMDB, 1)
        else:
            raise Exception("Unknown parLMDB instance")
        # (2) Build Sequential model (currently only Sequential models supported)
        retModel = Sequential()
        tmpL0 = parModel.layers[0]
        # (3) if InputLayer is present - skip it
        if isinstance(tmpL0, InputLayer):
            idxStart=1
        else:
            idxStart=0
        # (4) Recreate new InputShape layer with DB input shape
        retModel.add(InputLayer(input_shape=ptrLMDB.shapeImg))
        #FIXME: check this code, do you think, that implicit layer resizing is a good idea?
        # (5) find output Dense layer to automaticaly adjust his output with DB-output
        idxDense = -1
        if isFixOutputLayer:
            idxDense = findLayerFromEndByType(parModel, keras.layers.Dense)
            if idxDense<0:
                raise Exception('Model without Dense layer currently not supported!')
            listLayers = parModel.layers[idxStart:idxDense]
        else:
            listLayers = parModel.layers[idxStart:]
        # (6) Re-create model layers
        for ll in listLayers:
            # clearing inbound_nodes detaches the layer from its old graph
            ll.inbound_nodes = []
            # print ('\tadd [%s]' % (ll.__str__()))
            tmpLayer = cloneLayerFromLayer(ll)
            retModel.add(tmpLayer)
        # (7) fix output dimension
        if isFixOutputLayer and idxDense>0:
            #FIXME: hak for classification model-task
            tmpLayer = parModel.layers[idxDense]
            tmpLayer.inbound_nodes = []
            tmpLayerConfig = tmpLayer.get_config()
            #FIXME: check Keras 'output_dim' paremater
            tmpLayerConfig['output_dim'] = ptrLMDB.numLbl
            retModel.add(Dense.from_config(tmpLayerConfig))
            # re-append any layers after the Dense (e.g. a final Activation)
            for ll in parModel.layers[idxDense+1:]:
                ll.inbound_nodes = []
                tmpLayer = cloneLayerFromLayer(ll)
                retModel.add(tmpLayer)
        #
        # FIXME: check this point (automatic output layer size). SoftMax to config in feature
        # if isFixOutputLayer:
        # retModel.add(Dense(ptrLMDB.numLbl, activation='softmax'))
        return retModel
    def buildModel(self, pathLMDBJob, pathModelConfig,
                   sizeBatch, numEpoch, intervalSaveModel=1, intervalValidation=1,
                   outputDir=None, modelPrefixName='keras_model', isResizeInputLayerToImageShape=True):
        """Load the LMDB data set and a JSON model config, adapt the model
        to the DB shapes and compile it (SGD + categorical crossentropy).

        :param pathLMDBJob: LMDB job id (resolved under the dataset dir)
        :param pathModelConfig: path to a Keras model-config JSON file
        :param sizeBatch: mini-batch size
        :param numEpoch: number of epochs to train
        :param intervalSaveModel: epochs between model snapshots
        :param intervalValidation: epochs between validation passes
        :param outputDir: output directory (default: cwd); must exist
        :param modelPrefixName: prefix for persisted state files
        :param isResizeInputLayerToImageShape: currently unused -- see the
               commented-out resizing logic below
        :raises Exception: when outputDir does not exist
        """
        if self.isOk():
            self.cleanModel()
        self.loadBatcherLMDB(pathLMDBJob, sizeBatch)
        with open(pathModelConfig, 'r') as f:
            modelJSON = f.read()
        modelFromCfg = model_from_json(modelJSON)
        if modelFromCfg is not None:
            self.pathModelConfig = pathModelConfig
            self.sizeBatch = sizeBatch
            self.numEpoch = numEpoch
            # NOTE: Python-2 integer division -> whole iterations per epoch
            self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
            self.intervalSaveModel = intervalSaveModel
            self.intervalValidation = intervalValidation
            self.modelPrefix = modelPrefixName
            self.cleanResults()
            if outputDir is None:
                self.outputDir = os.getcwd()
            else:
                if os.path.isdir(outputDir):
                    self.outputDir = outputDir
                else:
                    strErr = "Directory not found [%s]" % outputDir
                    self.printError(strErr)
                    raise Exception(strErr)
            # FIXME: check this point: need more accurate logic to sync Data-Shape and Model-Input-Shape
            # if isResizeInputLayerToImageShape:
            # tmpL0 = modelFromCfg.layers[0]
            # tmpL0cfg = tmpL0.get_config()
            # if re.match(r'dense_input*', tmpL0.input.name) is not None:
            # tmpShapeImageSize = np.prod(self.lmdbReader.shapeImg)
            # self.model = Sequential()
            # self.model.add(
            # Dense(tmpL0cfg['output_dim'], input_dim=tmpShapeImageSize, init=tmpL0cfg['init']))
            # for ll in modelFromCfg.layers[1:]:
            # self.model.add(ll)
            # else:
            # self.model = modelFromCfg
            # else:
            # self.model = modelFromCfg
            # FIXME: check this point (automatic output layer size). SoftMax to config in feature
            # self.model.add(Dense(self.lmdbReader.numLbl))
            # self.model.add(Activation('softmax'))
            self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB)
            # TODO: make the setting for code below. For optimizer, loss-function, metrics
            sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=sgd,
                               metrics=['accuracy'])
    def buildModelFromConfigs(self, paramBatcherLMDB, modelConfig,
                              sizeBatch, numEpoch,
                              modelOptimizer=None,
                              intervalSaveModel=1, intervalValidation=1,
                              outputDir=None, modelPrefixName='keras_model',
                              isAppendOutputLayer = True):
        """Like buildModel(), but takes an already-open batcher and an
        in-memory model object instead of paths.

        :param paramBatcherLMDB: ready BatcherImage2DLMDB instance
        :param modelConfig: Keras model object to adapt
        :param sizeBatch: mini-batch size
        :param numEpoch: number of epochs to train
        :param modelOptimizer: optional optimizer (default: SGD+momentum)
        :param intervalSaveModel: epochs between model snapshots
        :param intervalValidation: epochs between validation passes
        :param outputDir: output directory (default: cwd); must exist
        :param modelPrefixName: prefix for persisted state files
        :param isAppendOutputLayer: forwarded as isFixOutputLayer
        :raises Exception: when outputDir does not exist
        """
        self.batcherLMDB = paramBatcherLMDB
        modelFromCfg = modelConfig
        if modelFromCfg is not None:
            # pathModelConfig stays None: the model did not come from a file
            self.pathModelConfig = None
            self.sizeBatch = sizeBatch
            self.numEpoch = numEpoch
            # NOTE: Python-2 integer division -> whole iterations per epoch
            self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
            self.intervalSaveModel = intervalSaveModel
            self.intervalValidation = intervalValidation
            self.modelPrefix = modelPrefixName
            self.cleanResults()
            if outputDir is None:
                self.outputDir = os.getcwd()
            else:
                if os.path.isdir(outputDir):
                    self.outputDir = outputDir
                else:
                    strErr = "Directory not found [%s]" % outputDir
                    self.printError(strErr)
                    raise Exception(strErr)
            self.model = KerasTrainer.adjustModelInputOutput2DBData(modelFromCfg, self.batcherLMDB, isFixOutputLayer=isAppendOutputLayer)
            # TODO: make the setting for code below. For optimizer, loss-function, metrics
            if modelOptimizer is None:
                opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            else:
                opt = modelOptimizer
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=opt,
                               metrics=['accuracy'])
def isOk(self):
return ((self.batcherLMDB is not None) and (self.model is not None))
    def loadBatcherLMDB(self, dbJobID, sizeBatch):
        """Open the LMDB data set for job `dbJobID` and attach it as this
        trainer's batcher.

        :param dbJobID: dataset job id, resolved under the dataset directory
        :param sizeBatch: mini-batch size for the batcher
        :raises Exception: when the LMDB data fails the batcher's self-check
        """
        dirDataset=dlsutils.getPathForDatasetDir()
        pathLMDBJob = os.path.join(dirDataset, dbJobID)
        self.batcherLMDB = BatcherImage2DLMDB(pathLMDBJob, sizeBatch)
        self.sizeBatch = sizeBatch
        if not self.batcherLMDB.isOk():
            strErr = "[KERAS-TRAINER] Incorrect LMDB-data in [%s]" % dbJobID
            self.printError(strErr)
            raise Exception(strErr)
def cleanResults(self):
self.trainLog={'epoch':[], 'iter':[], 'lossTrain':[], 'accTrain':[], 'lossVal':[], 'accVal':[]}
self.currentIter=0
self.currentEpoch=0
def cleanModel(self):
if self.isOk():
self.cleanResults()
self.model = None
self.batcherLMDB.close()
self.batcherLMDB = None
self.pathModelConfig = None
def printError(self, strError):
print("keras-error#%s" % strError)
    def trainOneIter(self):
        """Run a single training step on one mini-batch.

        Every `printInterval` iterations a validation mini-batch is also
        evaluated, (loss, accuracy) pairs are appended to self.trainLog and
        a 'keras-info#...' progress line is printed.

        :return: True when log/print output was produced this iteration
        """
        modelInputShape = list(self.model.input_shape)
        dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
        # train_on_batch returns [loss, accuracy] (metrics set at compile)
        tlossTrain = self.model.train_on_batch(dataX, dataY)
        isNeedPrintInfo = False
        if (self.currentIter % self.printInterval == 0):
            dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
            tlossVal = self.model.test_on_batch(dataXval, dataYval)
            self.trainLog['epoch'].append(self.currentEpoch)
            self.trainLog['iter'].append(self.currentIter)
            # float() keeps the log JSON-serializable (no numpy scalars)
            self.trainLog['lossTrain'].append(float(tlossTrain[0]))
            self.trainLog['accTrain'].append(float(tlossTrain[1]))
            self.trainLog['lossVal'].append(float(tlossVal[0]))
            self.trainLog['accVal'].append(float(tlossVal[1]))
            print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
                'I',
                time.strftime('%Y.%m.%d-%H:%M:%S'),
                self.currentEpoch,
                self.currentIter,
                self.trainLog['lossTrain'][-1],
                self.trainLog['accTrain'][-1],
                self.trainLog['lossVal'][-1],
                self.trainLog['accVal'][-1]
            ))
            sys.stdout.flush()
            isNeedPrintInfo = True
        self.currentIter += 1
        return isNeedPrintInfo
    def trainOneEpoch(self):
        """Run one full epoch (numIterPerEpoch mini-batches) of training,
        logging and printing progress every `printInterval` iterations.

        :raises Exception: when the trainer is not initialized
        """
        if not self.isOk():
            strErr='KerasTrainer is not correctly initialized'
            self.printError(strErr)
            raise Exception(strErr)
        modelInputShape = list(self.model.input_shape)
        for ii in xrange(self.numIterPerEpoch):
            dataX, dataY = self.batcherLMDB.getBatchTrain(reshape2Shape=modelInputShape)
            tlossTrain = self.model.train_on_batch(dataX, dataY)
            if (self.currentIter%self.printInterval==0):
                dataXval, dataYval = self.batcherLMDB.getBatchVal(reshape2Shape=modelInputShape)
                tlossVal = self.model.test_on_batch(dataXval, dataYval)
                self.trainLog['epoch'].append(self.currentEpoch)
                self.trainLog['iter'].append(self.currentIter)
                # NOTE(review): unlike trainOneIter(), values are appended
                # here without float() conversion (may be numpy scalars) --
                # confirm whether that difference is intended.
                self.trainLog['lossTrain'].append(tlossTrain[0])
                self.trainLog['accTrain'].append(tlossTrain[1])
                self.trainLog['lossVal'].append(tlossVal[0])
                self.trainLog['accVal'].append(tlossVal[1])
                print(("keras-info#%s#%s#%d|%d|%0.5f|%0.5f|%0.5f|%0.5f") % (
                    'I',
                    time.strftime('%Y.%m.%d-%H:%M:%S'),
                    self.currentEpoch,
                    self.currentIter,
                    self.trainLog['lossTrain'][-1],
                    self.trainLog['accTrain'][-1],
                    self.trainLog['lossVal'][-1],
                    self.trainLog['accVal'][-1]
                ))
                sys.stdout.flush()
            self.currentIter +=1
        self.currentEpoch += 1
    def convertImgUint8ToDBImage(self, pimg):
        """Convert a uint8 image into the float32, channel-first, scaled
        and (optionally) mean-subtracted representation used by the DB.

        :param pimg: uint8 image array (grayscale HxW or color HxWxC)
        :return: float32 array shaped like the batcher's images
        """
        #FIXME: shape we can get from Batcher and from model.layers...
        if len(self.batcherLMDB.shapeImg) < 3:
            numCh = 1
        else:
            # FIXME: check this point, number of channels can be on last element on array...
            numCh = self.batcherLMDB.shapeImg[0]
        # check #channels of input image
        if len(pimg.shape) < 3:
            numChImg = 1
        else:
            numChImg = 3
        # if #channels of input image is not equal to #channels in TrainDatabse, then convert shape inp Image to Database-Shape
        if numCh != numChImg:
            if numCh == 1:
                # FIXME: this is fix potential bug: rgb2gray change automaticaly min/max range from (0,255) to (0,1), headbang!
                pimg = skcolor.rgb2gray(pimg.astype(np.float))
            else:
                pimg = skcolor.gray2rgb(pimg)
        # Rescale values and resize to the DB spatial dims (shapeImg[1:]).
        timg = sktransform.resize(pimg.astype(np.float32) * self.batcherLMDB.scaleFactor, self.batcherLMDB.shapeImg[1:])
        if numCh==1:
            # add a leading singleton channel axis
            timg = timg.reshape([1] + list(timg.shape))
        else:
            # HWC -> CHW (channel-first) layout
            timg = timg.transpose((2, 0, 1))
        if self.batcherLMDB.isRemoveMean:
            # FIXME: check this point: type of the mean-removing from one cofig (for train and inference stages)
            timg -= self.batcherLMDB.meanChImage
        return timg
def inferListImagePath(self, listPathToImages, batchSizeInfer=None):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if batchSizeInfer is None:
batchSizeInfer = self.sizeBatch
splListPathToImages = split_list_by_blocks(listPathToImages, batchSizeInfer)
retProb = None
for idxBatch,lstPath in enumerate(splListPathToImages):
modelInputShape = list(self.model.input_shape)
# Fit batchSize to current number of images in list (lstPath)
tmpBatchSize = len(lstPath)
tdataX=None
for ppi,ppath in enumerate(lstPath):
timg = io.imread(ppath)
if timg is None:
strError = 'Cant read input image [%s], may be image is incorrect' % ppath
self.printError(strError)
raise Exception(strError)
timg = self.convertImgUint8ToDBImage(timg)
# Delayed initialization of Batch of Input-Data
if tdataX is None:
tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
tdataX = np.zeros(tsizeX, np.float32)
tdataX[ppi] = timg
#FIXME: chack this point, this code tested on Fully-Connected NN, need tests for Convolution Neurel Networks
tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
# tprob = self.model.predict(tdataX, batch_size=tmpBatchSize)
tprob = self.model.predict(tdataX)
# Delayed initialization of returned classification probability
if retProb is None:
retProb = tprob
else:
retProb = np.concatenate(retProb, tprob)
idxMax = np.argmax(retProb, axis=1)
retLbl = np.array(self.batcherLMDB.lbl)[idxMax]
retVal = np.max(retProb, axis=1)
ret = {
'prob' : retProb,
'label' : retLbl,
'val' : retVal
}
return ret
    def inferOneImageU8_DebugActivations(self, imgu8):
        """Run one image through the network and return the activation
        arrays of every layer (debug/visualization helper).

        :param imgu8: uint8 image array
        :return: list of per-layer activation arrays for the single image
        """
        # [BEGIN] this code is cloned from self.inferOneImageU8()
        timg = self.convertImgUint8ToDBImage(imgu8)
        tmpBatchSize = 1
        tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
        # FIXME: [1] check data type! [float32/float64]
        tdataX = np.zeros(tsizeX, np.float32)
        tdataX[0] = timg
        modelInputShape = list(self.model.input_shape)
        tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
        # [END] this code is cloned from self.inferOneImageU8()
        lstLayerForK=[]
        for ii in xrange(len(self.model.layers)):
            lstLayerForK.append(self.model.layers[ii].output)
        # One backend function mapping the input tensor to all layer outputs.
        localGetActivations = K.function([self.model.layers[0].input], lstLayerForK)
        dataActivations = localGetActivations([tdataX])
        return dataActivations
    def inferOneImageU8(self, imgu8):
        """Classify a single uint8 image.

        :param imgu8: uint8 image array
        :return: dict with 'prob' (1 x numLbl probabilities), 'label'
                 (best label) and 'val' (its probability)
        """
        timg = self.convertImgUint8ToDBImage(imgu8)
        tmpBatchSize = 1
        tsizeX = [tmpBatchSize, timg.shape[0], timg.shape[1], timg.shape[2]]
        # FIXME: [1] check data type! [float32/float64]
        tdataX = np.zeros(tsizeX, np.float32)
        tdataX[0] = timg
        modelInputShape = list(self.model.input_shape)
        tdataX = tdataX.reshape([tmpBatchSize] + modelInputShape[1:])
        tprob = self.model.predict(tdataX, batch_size=1)
        posMax = np.argmax(tprob[0])
        tlbl = self.batcherLMDB.lbl[posMax]
        tval = tprob[0][posMax]
        tret = {
            'prob': tprob,
            'label': tlbl,
            'val': tval
        }
        return tret
def inferOneImagePath(self, pathToImage):
if not self.isOk():
strError = 'KerasTrainer class is not initialized to call inference()'
self.printError(strError)
raise Exception(strError)
if not os.path.isfile(pathToImage):
strError='Cant find input image [%s]' % pathToImage
self.printError(strError)
raise Exception(strError)
timgu8 = io.imread(pathToImage)
if timgu8 is None:
strError = 'Cant read input image [%s], may be image is incorrect' % pathToImage
self.printError(strError)
raise Exception(strError)
return self.inferOneImageU8(timgu8)
def inferOneImagePathSorted(self, pathToImage):
tret = self.inferOneImagePath(pathToImage)
tarrProb=tret['prob'][0]
sortedIdx = np.argsort(-tarrProb)
sortedLbl = np.array(self.batcherLMDB.lbl)[sortedIdx]
sortedProb = tarrProb[sortedIdx]
tmp = [(ll,pp) for ll,pp in zip(sortedLbl,sortedProb)]
ret = {
'best': {
'label': tret['label'],
'prob': tret['val']
},
'distrib': tmp
}
return ret
    def saveModelState(self, parOutputDir=None, isSaveWeights=True):
        """Persist the model config JSON, a solver-state JSON and
        (optionally) the HDF5 weights to the output directory, then print
        a 'keras-savestate#...' line for the frontend.

        :param parOutputDir: optional override for self.outputDir (must exist)
        :param isSaveWeights: when True also write the weights file
        :raises Exception: when parOutputDir does not exist
        """
        if parOutputDir is not None:
            if not os.path.isdir(parOutputDir):
                strError = "Cant find directory [%s]" % parOutputDir
                self.printError(strError)
                raise Exception(strError)
            self.outputDir = parOutputDir
        # Output file names follow the class-level extension conventions.
        foutModelCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonTrainConfig))
        foutSolverCfg=os.path.join(self.outputDir,"%s%s" % (self.modelPrefix, self.extJsonSolverState))
        foutModelWeights=os.path.join(self.outputDir,'%s_iter_%06d.%s' % (self.modelPrefix,self.currentIter,self.extModelWeights))
        #
        #FIXME: this is temporary solution, fix this in the future!
        tmpOptimizerCfg = self.model.optimizer.get_config()
        tmpOptimizerCfg['name'] = getKerasOptimizerName(self.model.optimizer)
        # NOTE(review): pathModelConfig may be None when the model was built
        # via buildModelFromConfigs() -- os.path.basename(None) would raise.
        jsonSolverState={
            'optimizer' : tmpOptimizerCfg,
            'loss' : self.model.loss,
            'metrics' : self.model.metrics_names,
            'dataset-id' : self.batcherLMDB.cfg.dbId,
            'pathModelConfig' : "%s" % os.path.basename(self.pathModelConfig),
            'sizeBatch' : self.sizeBatch,
            'numEpoch' : self.numEpoch,
            'currentIter' : self.currentIter,
            'intervalSaveModel' : self.intervalSaveModel,
            'intervalValidation': self.intervalValidation,
            'printInterval' : self.printInterval,
            'modelPrefix' : "%s" % self.modelPrefix,
            'modelName' : self.modelName,
            'deviceType' : self.deviceType
        }
        # FIXME: check the necesserity of the item [pathModelConfig]
        txtJsonSolverState = json.dumps(jsonSolverState, indent=4)
        with open(foutSolverCfg, 'w') as fslv:
            fslv.write(txtJsonSolverState)
        #
        with open(foutModelCfg, 'w') as fcfg:
            fcfg.write(self.model.to_json(sort_keys=True, indent=4, separators=(',', ': ')))
        if isSaveWeights:
            self.model.save_weights(foutModelWeights, overwrite=True)
        # Print message when model saved (for Digits)
        print(("keras-savestate#%s#%s#%s|%s|%s") % (
            'I',
            time.strftime('%Y.%m.%d-%H:%M:%S'),
            os.path.abspath(foutModelCfg),
            os.path.abspath(foutSolverCfg),
            os.path.abspath(foutModelWeights)
        ))
def getTrainingStatesInDir(self, pathTrainDir, isReturnAllWeightsPath=False):
"""
explore directory with training-output data, and return path to files
:param pathTrainDir: path to directory with training-output
:return: None or list [pathModelConfigJson, pathSolverStateJson, pathModelWeights]
"""
if not os.path.isdir(pathTrainDir):
strError = "Cant find directory [%s]" % pathTrainDir
self.printError(strError)
return None
lstModelConfig = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonTrainConfig))
lstSolverStates = glob.glob('%s/*%s' % (pathTrainDir, self.extJsonSolverState))
lstModelWeights = glob.glob('%s/*_iter_[0-9]*.%s' % (pathTrainDir, self.extModelWeights))
if len(lstModelConfig)<1:
strError = 'Cant find ModelConfig [%s] files in directory [%s]' % (self.extJsonTrainConfig, pathTrainDir)
self.printError(strError)
return None
if len(lstSolverStates)<1:
strError = 'Cant find Solver-States [%s] files in directory [%s]' % (self.extJsonSolverState, pathTrainDir)
self.printError(strError)
return None
if len(lstModelWeights) < 1:
strError = 'Cant find Model-Weights [%s] files in directory [%s]' % (self.extModelWeights, pathTrainDir)
self.printError(strError)
return None
lstModelConfig = sorted(lstModelConfig)
lstSolverStates = sorted(lstSolverStates)
lstModelWeights = sorted(lstModelWeights)
pathModelConfig = lstModelConfig[-1]
pathSolverState = lstSolverStates[-1]
if not isReturnAllWeightsPath:
pathModelWeight = lstModelWeights[-1]
else:
pathModelWeight = lstModelWeights
return [pathModelConfig, pathSolverState, pathModelWeight]
def loadModelFromTrainingStateInDir(self, pathTrainDir, isLoadLMDBReader=True):
self.cleanModel()
stateConfigs = self.getTrainingStatesInDir(pathTrainDir)
if stateConfigs is None:
strError = 'Cant find Model saved state from directory [%s]' % pathTrainDir
self.printError(strError)
pathModelConfig = stateConfigs[0]
pathSolverState = stateConfigs[1]
pathModelWeight = stateConfigs[2]
self.loadModelFromTrainingState(pathModelConfig=pathModelConfig,
pathSolverState=pathSolverState,
pathModelWeight=pathModelWeight,
isLoadLMDBReader=isLoadLMDBReader)
def loadModelFromTaskModelDir(self, pathTaskDir):
pathConfigModel = os.path.join(pathTaskDir, CFG_MODEL_TRAIN)
pathConfigSolver = os.path.join(pathTaskDir, CFG_SOLVER)
self.loadModelFromTrainingState(pathModelConfig=pathConfigModel,
pathSolverState=pathConfigSolver)
self.outputDir = pathTaskDir
    def loadModelFromTrainingState(self, pathModelConfig, pathSolverState,
                                   pathModelWeight=None, pathLMDBDataset=None, isLoadLMDBReader=True):
        """
        Load Keras Model from Trained state (if present path to model Weights), or
        for initial config
        :param pathModelConfig: path to Model Config in JSON format
        :param pathSolverState: path to SolverState Config in JSON format
        :param pathModelWeight: path to Model Weights as binary Keras dump
        :param pathLMDBDataset: path to LMDB-Dataset, if None -> skip
        :param isLoadLMDBReader: load or not LMDBReader from SolverState Config
        :return: None
        """
        self.cleanModel()
        # (1) Load Model Config from Json:
        with open(pathModelConfig, 'r') as fModelConfig:
            tmpStr = fModelConfig.read()
            self.model = keras.models.model_from_json(tmpStr)
        if self.model is None:
            strError = 'Invalid Model config in file [%s]' % pathModelConfig
            self.printError(strError)
            raise Exception(strError)
        # (2) Load SoverState Config from Json:
        with open(pathSolverState) as fSolverState:
            tmpStr = fSolverState.read()
            configSolverState = json.loads(tmpStr)
        if configSolverState is None:
            strError = 'Invalid SolverState config in file [%s]' % pathSolverState
            self.printError(strError)
            raise Exception(strError)
        # Optional dataset override: replaces the dataset id recorded at save time.
        if pathLMDBDataset is not None:
            configSolverState['dataset-id'] = pathLMDBDataset
        # (3) Load Model Weights:
        if pathModelWeight is not None:
            self.model.load_weights(pathModelWeight)
        # (4) Reconfigure Model State:
        self.intervalSaveModel = configSolverState['intervalSaveModel']
        self.intervalValidation = configSolverState['intervalValidation']
        self.numEpoch = configSolverState['numEpoch']
        self.currentIter = configSolverState['currentIter']
        self.sizeBatch = configSolverState['sizeBatch']
        self.modelPrefix = configSolverState['modelPrefix']
        # Newer solver states carry optional fields; keep defaults when absent.
        if 'modelName' in configSolverState.keys():
            self.modelName = configSolverState['modelName']
        if 'deviceType' in configSolverState.keys():
            self.deviceType = configSolverState['deviceType']
        if isLoadLMDBReader:
            self.loadBatcherLMDB(configSolverState['dataset-id'], self.sizeBatch)
            # NOTE(review): '/' is integer division under Python 2 here; under
            # Python 3 this would yield a float -- confirm intended semantics.
            self.numIterPerEpoch = self.batcherLMDB.numTrain / self.sizeBatch
            self.currentEpoch = np.floor(self.currentIter / self.numIterPerEpoch)
        else:
            self.numIterPerEpoch = 1
            self.currentEpoch = 0
        self.pathModelConfig = pathModelConfig
        # (5) Configure Loss, Solver, Metrics and compile model
        tmpCfgOptimizer = configSolverState['optimizer'].copy()
        parOptimizer = keras.optimizers.get(tmpCfgOptimizer)
        parLoss = configSolverState['loss']
        # parMetrics = configSolverState['metrics']
        #TODO: i think this is a bug or a bad realization in Keras: 'loss' is an unknown metrics, this is temporary fix
        parMetrics = []
        if 'acc' in configSolverState['metrics']:
            parMetrics.append('accuracy')
        self.model.compile(optimizer=parOptimizer, loss=parLoss, metrics=parMetrics)
def runTrain(self, paramNumEpoch=-1):
if not self.isOk():
strErr = 'KerasTrainer is not correctly initialized'
self.printError(strErr)
raise Exception(strErr)
if paramNumEpoch>0:
self.numEpoch = paramNumEpoch
for ei in xrange(self.numEpoch):
self.trainOneEpoch()
if (ei%self.intervalSaveModel)==0:
self.saveModelState()
if (ei%self.intervalValidation)==0:
pass
#########################
if __name__ == '__main__':
    # No standalone behavior: this module is intended to be imported.
    pass
| |
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TR-069 has mandatory attribute names that don't comply with policy
#pylint: disable-msg=C6409
#pylint: disable-msg=W0404
#
"""Implement the TR-069 style request/response protocol over HTTP."""
__author__ = 'apenwarr@google.com (Avery Pennarun)'
import binascii
import collections
import datetime
import os
import random
import socket
import sys
import time
import urllib
from curtain import digest
import tornado.httpclient
import tornado.ioloop
import tornado.util
import tornado.web
import api_soap
import cpe_management_server
import cwmp_session
import helpers
PROC_IF_INET6 = '/proc/net/if_inet6'
MAX_EVENT_QUEUE_SIZE = 64
def _Shorten(s, prefixofs, suffixofs, maxlen):
"""Shorten the given string if its length is >= maxlen.
Note: maxlen should generally be considerably bigger than
prefixofs + suffixofs. It's disconcerting to a reader when
you have a "..." to replace 10 bytes, but it feels fine when the
"..." replaces 500 bytes.
Args:
s: the string to shorten.
prefixofs: the number of chars to keep at the beginning of s.
suffixofs: the number of chars to keep at the end of s.
maxlen: if the string is longer than this, shorten it.
Returns:
A shortened version of the string.
"""
s = str(s)
if len(s) >= maxlen and not os.environ.get('DONT_SHORTEN'):
# When the string exceeds the limit, we deliberately shorten it to
# considerably less than the limit, because it's disconcerting when
# you have a "..." to replace 10 bytes, but it feels right when the
# "..." replaces 500 bytes.
s = s[0:prefixofs] + '\n........\n' + s[-suffixofs:]
return s
class LimitDeque(collections.deque):
  """A deque that enforces a maximum size.

  After every mutation, if the size limit has been exceeded the supplied
  handler is invoked; when no handler is given, the process exits.
  """

  def __init__(self, max_size=None, handler=None):
    collections.deque.__init__(self)
    self.max_size = max_size
    self.handler = handler

  def CheckSize(self):
    """Invoke the overflow handler (or exit) when past max_size."""
    if not self.max_size or len(self) <= self.max_size:
      return
    if self.handler:
      self.handler()
    else:
      print('Maximum length of deque (%d) was exceeded' % (self.max_size))
      sys.exit(1)

  def append(self, *args):
    collections.deque.append(self, *args)
    self.CheckSize()

  def appendleft(self, *args):
    collections.deque.appendleft(self, *args)
    self.CheckSize()

  def extend(self, *args):
    collections.deque.extend(self, *args)
    self.CheckSize()

  def extendleft(self, *args):
    collections.deque.extendleft(self, *args)
    self.CheckSize()
# SPEC3 = TR-069_Amendment-3.pdf
# http://www.broadband-forum.org/technical/download/TR-069_Amendment-3.pdf
def SplitUrl(url):
  """Split a URL into a (method, host, port, path) namedtuple.

  The port defaults to 0 when the URL does not carry one.
  NOTE(review): relies on the legacy urllib.splittype/splithost/splitport
  helpers (Python 2 API); these do not exist under Python 3's urllib --
  confirm before porting.
  """
  Url = collections.namedtuple('Url', ('method host port path'))
  method, rest = urllib.splittype(url)
  hostport, path = urllib.splithost(rest)
  host, port = urllib.splitport(hostport)
  return Url(method, host, int(port or 0), path)
class PingHandler(digest.DigestAuthMixin, tornado.web.RequestHandler):
  """Serves the ConnectionRequestURL, protected by digest authentication.

  Args:
    callback: the function to call when theURL is accessed.
    cpe_ms: the cpe_management_server object, from which to retrieve
      username and password.
  """

  def initialize(self, callback, cpe_ms):
    self.callback = callback
    self.cpe_ms = cpe_ms

  def getcredentials(self, username):
    """Return the credentials dict when username matches, else None."""
    credentials = {'auth_username': self.cpe_ms.ConnectionRequestUsername,
                   'auth_password': self.cpe_ms.ConnectionRequestPassword}
    if username != credentials['auth_username']:
      return None
    return credentials

  def get(self):
    # Digest authentication handler; the callback's return value becomes
    # the HTTP status code.
    if self.get_authenticated_user(self.getcredentials, 'Authusers'):
      return self.set_status(self.callback())
class Handler(tornado.web.RequestHandler):
  """Dispatches POSTed SOAP payloads to the configured soap handler."""

  def initialize(self, soap_handler):
    self.soap_handler = soap_handler

  def get(self):
    self.write('This is the cpe/acs handler. It only takes POST requests.')

  def post(self):
    body = self.request.body
    print('TR-069 server: request received:\n%s' % body)
    # Empty/whitespace-only bodies are silently ignored.
    if body.strip():
      self.write(str(self.soap_handler(body)))
class CPEStateMachine(object):
  """A tr-69 Customer Premises Equipment implementation.
  Args:
    ip: local ip address to bind to. If None, find address automatically.
    cpe: the api_soap.cpe object for this device
    listenport: the port number to listen on for ACS ping requests.
    acs_url: An ACS URL to use. This overrides platform_config.GetAcsUrl()
    ping_path: URL path for the ACS Ping function
    ping_ip6dev: ifname to use for the CPE Ping address.
    fetch_args: kwargs to pass to HTTPClient.fetch
  """
  def __init__(self, ip, cpe, listenport, platform_config, ping_path,
               acs_url=None, ping_ip6dev=None, fetch_args=dict(), ioloop=None,
               restrict_acs_hosts=None):
    self.cpe = cpe
    self.cpe_soap = api_soap.CPE(self.cpe)
    self.encode = api_soap.Encode()
    # The single CPE->ACS request currently in flight (None when idle).
    self.outstanding = None
    self.response_queue = []
    self.request_queue = []
    self.event_queue = LimitDeque(MAX_EVENT_QUEUE_SIZE, self.EventQueueHandler)
    self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
    self.retry_count = 0 # for Inform.RetryCount
    self.start_session_timeout = None # timer for CWMPRetryInterval
    self.session = None
    self.my_configured_ip = ip
    self.ping_ip6dev = ping_ip6dev
    self.fetch_args = fetch_args
    # Minimum spacing between ping-initiated sessions (see _NewPingSession).
    self.rate_limit_seconds = 60
    self.platform_config = platform_config
    self.previous_ping_time = 0
    self.ping_timeout_pending = None
    self._changed_parameters = set()
    self._changed_parameters_sent = set()
    self.cpe_management_server = cpe_management_server.CpeManagementServer(
        acs_url=acs_url, platform_config=platform_config, port=listenport,
        ping_path=ping_path, get_parameter_key=cpe.getParameterKey,
        start_periodic_session=self.NewPeriodicSession, ioloop=self.ioloop,
        restrict_acs_hosts=restrict_acs_hosts)
  def EventQueueHandler(self):
    """Called if the event queue goes beyond the maximum threshold."""
    print 'Event queue has grown beyond the maximum size, restarting...'
    print 'event_queue=%s' % (str(self.event_queue))
    sys.exit(1)
  def GetManagementServer(self):
    """Return the ManagementServer implementation for tr-98/181."""
    return self.cpe_management_server
  def Send(self, req):
    """Queue a CPE->ACS request and kick the session run loop."""
    self.request_queue.append(str(req))
    self.Run()
  def SendResponse(self, req):
    """Queue a response to an ACS request and kick the session run loop."""
    self.response_queue.append(str(req))
    self.Run()
  def LookupDevIP6(self, name):
    """Returns the global IPv6 address for the named interface."""
    with open(PROC_IF_INET6, 'r') as f:
      for line in f:
        fields = line.split()
        if len(fields) < 6:
          continue
        scope = int(fields[3].strip())
        dev = fields[5].strip()
        if dev == name and scope == 0:
          bin_ip = binascii.unhexlify(fields[0])
          return socket.inet_ntop(socket.AF_INET6, bin_ip)
    # NOTE(review): returns 0 (not None) when no global address is found;
    # callers appear to treat any falsy value the same way.
    return 0
  def _GetLocalAddr(self):
    """Return the local IP to advertise to the ACS (0 when unknown)."""
    if self.my_configured_ip is not None:
      return self.my_configured_ip
    if self.ping_ip6dev is not None:
      return self.LookupDevIP6(self.ping_ip6dev)
    acs_url = self.cpe_management_server.URL
    if not acs_url:
      return 0
    # If not configured with an address it gets a bit tricky: we try connecting
    # to the ACS, non-blocking, so we can find out which local IP the kernel
    # uses when connecting to that IP. The local address is returned with
    # getsockname(). Then we can tell the ACS to use that address for
    # connecting to us later. We use a nonblocking socket because we don't
    # care about actually connecting; we just care what the local kernel does
    # in its implicit bind() when we *start* connecting.
    url = SplitUrl(acs_url)
    host = url.host
    port = url.port or 0
    s = socket.socket()
    s.setblocking(0)
    try:
      s.connect((host, port or 1)) # port doesn't matter, but can't be 0
    except socket.error:
      pass
    return s.getsockname()[0]
  def EncodeInform(self):
    """Return an Inform message for this session."""
    if not self.session.my_ip:
      my_ip = self._GetLocalAddr()
      self.session.my_ip = my_ip
      self.cpe_management_server.my_ip = my_ip
    events = []
    for ev in self.event_queue:
      events.append(ev)
    parameter_list = []
    try:
      ms = self.cpe.root.GetExport('InternetGatewayDevice.ManagementServer')
      di = self.cpe.root.GetExport('InternetGatewayDevice.DeviceInfo')
      parameter_list += [
          ('InternetGatewayDevice.ManagementServer.ConnectionRequestURL',
           ms.ConnectionRequestURL),
          ('InternetGatewayDevice.ManagementServer.ParameterKey',
           ms.ParameterKey),
          ('InternetGatewayDevice.DeviceInfo.HardwareVersion',
           di.HardwareVersion),
          ('InternetGatewayDevice.DeviceInfo.SoftwareVersion',
           di.SoftwareVersion),
          ('InternetGatewayDevice.DeviceInfo.SpecVersion', di.SpecVersion),
      ]
      # NOTE(jnewlin): Changed parameters can be set to be sent either
      # explicitly with a value change event, or to be sent with the
      # periodic inform.  So it's not a bug if there is no value change
      # event in the event queue.
      # Take all of the parameters and put union them with the another
      # set that has been previously sent.  When we receive an inform
      # from the ACS we clear the _sent version.  This fixes a bug where
      # we send this list of params to the ACS, followed by a PerioidStat
      # adding itself to the list here, followed by getting an ack from the
      # ACS where we clear the list.  Now we just clear the list of the
      # params that was sent when the ACS acks.
      self._changed_parameters_sent.update(self._changed_parameters)
      self._changed_parameters.clear()
      parameter_list += self._changed_parameters_sent
    except (AttributeError, KeyError):
      pass
    req = self.encode.Inform(root=self.cpe.root, events=events,
                             retry_count=self.retry_count,
                             parameter_list=parameter_list)
    return str(req)
  def SendTransferComplete(self, command_key, faultcode, faultstring,
                           starttime, endtime, event_code):
    """Queue a TransferComplete message (and its events) for the ACS."""
    if not self.session:
      # No active session: record the event so the next Inform reports it.
      tc = ('7 TRANSFER COMPLETE', None)
      if tc not in self.event_queue:
        self.event_queue.appendleft(tc)
    self.event_queue.append((event_code, command_key))
    cmpl = self.encode.TransferComplete(command_key, faultcode, faultstring,
                                        starttime, endtime)
    self.Send(cmpl)
  def GetNext(self):
    """Return the next message to POST.

    Returns the Inform, then queued responses/requests as the session
    allows; '' for an empty POST; None when nothing may be sent yet.
    """
    if not self.session:
      return None
    if self.session.inform_required():
      self.session.state_update(sent_inform=True)
      return self.EncodeInform()
    if self.response_queue and self.session.response_allowed():
      return self.response_queue.pop(0)
    if self.request_queue and self.session.request_allowed():
      return self.request_queue.pop(0)
    return ''
  def Run(self):
    """Drive the CWMP session: close idle sessions, POST the next message."""
    print 'RUN'
    if not self.session:
      print 'No ACS session, returning.'
      return
    if not self.session.acs_url:
      print 'No ACS URL populated, returning.'
      self._ScheduleRetrySession(wait=60)
      return
    if self.session.should_close():
      print 'Idle CWMP session, terminating.'
      self.outstanding = None
      ping_received = self.session.close()
      self.platform_config.AcsAccessSuccess(self.session.acs_url)
      self.session = None
      self.retry_count = 0 # Successful close
      if self._changed_parameters:
        # Some values triggered during the prior session, start a new session
        # with those changed params. This should also satisfy a ping.
        self.NewValueChangeSession()
      elif ping_received:
        # Ping received during session, start another
        self._NewPingSession()
      return
    if self.outstanding is not None:
      # already an outstanding request
      return
    if self.outstanding is None:
      self.outstanding = self.GetNext()
      if self.outstanding is None:
        # We're not allowed to send anything yet, session not fully open.
        return
    headers = {}
    if self.session.cookies:
      headers['Cookie'] = ';'.join(self.session.cookies)
    if self.outstanding:
      headers['Content-Type'] = 'text/xml; charset="utf-8"'
      headers['SOAPAction'] = ''
    else:
      # Empty message
      self.session.state_update(cpe_to_acs_empty=True)
    self.platform_config.AcsAccessAttempt(self.session.acs_url)
    print('CPE POST (at {0!s}):\n'
          'ACS URL: {1!r}\n'
          '{2!s}\n'
          '{3!s}'.format(time.ctime(), self.session.acs_url,
                         headers, _Shorten(self.outstanding, 768, 256, 2048)))
    req = tornado.httpclient.HTTPRequest(
        url=self.session.acs_url, method='POST', headers=headers,
        body=self.outstanding, follow_redirects=True, max_redirects=5,
        request_timeout=30.0, use_gzip=True, allow_ipv6=True,
        **self.fetch_args)
    self.session.http.fetch(req, self.GotResponse)
  def GotResponse(self, response):
    """HTTP callback: hand the ACS reply to SOAP handling, keep running."""
    self.outstanding = None
    print 'CPE RECEIVED (at %s):' % time.ctime()
    if not self.session:
      print 'Session terminated, ignoring ACS message.'
      return
    if not response.error:
      cookies = response.headers.get_list('Set-Cookie')
      if cookies:
        self.session.cookies = cookies
      print _Shorten(response.body, 768, 256, 2048)
      if response.body:
        out = self.cpe_soap.Handle(response.body)
        if out is not None:
          self.SendResponse(out)
        # TODO(dgentry): $SPEC3 3.7.1.6 ACS Fault 8005 == retry same request
      else:
        self.session.state_update(acs_to_cpe_empty=True)
    else:
      print 'HTTP ERROR {0!s}: {1}'.format(response.code, response.error)
      self._ScheduleRetrySession()
    self.Run()
    # NOTE(review): the return value of this fetch callback appears unused
    # by tornado -- confirm before relying on it.
    return 200
  def _ScheduleRetrySession(self, wait=None):
    """Start a timer to retry a CWMP session.
    Args:
      wait: Number of seconds to wait. If wait=None, choose a random wait
        time according to $SPEC3 section 3.2.1
    """
    if self.session:
      self.session.close()
      self.session = None
    if wait is None:
      self.retry_count += 1
      wait = self.cpe_management_server.SessionRetryWait(self.retry_count)
    self.start_session_timeout = self.ioloop.add_timeout(
        datetime.timedelta(seconds=wait), self._SessionWaitTimer)
  def _SessionWaitTimer(self):
    """Handler for the CWMP Retry timer, to start a new session."""
    self.start_session_timeout = None
    self.session = cwmp_session.CwmpSession(
        acs_url=self.cpe_management_server.URL, ioloop=self.ioloop)
    self.Run()
  def _CancelSessionRetries(self):
    """Cancel any pending CWMP session retry."""
    if self.start_session_timeout:
      self.ioloop.remove_timeout(self.start_session_timeout)
      self.start_session_timeout = None
    self.retry_count = 0
  def _NewSession(self, reason):
    """Open a new CWMP session (no-op when one is already active)."""
    if not self.session:
      self._CancelSessionRetries()
      self.event_queue.appendleft((reason, None))
      self.session = cwmp_session.CwmpSession(
          acs_url=self.cpe_management_server.URL, ioloop=self.ioloop)
      self.Run()
  def _NewTimeoutPingSession(self):
    """Deferred ping session start, fired by the rate-limit timer."""
    if self.ping_timeout_pending:
      self.ping_timeout_pending = None
      self._NewPingSession()
  def _NewPingSession(self):
    """Start (or defer) a session in response to an ACS connection request."""
    if self.session:
      # $SPEC3 3.2.2 initiate at most one new session after this one closes.
      self.session.ping_received = True
      return
    # Rate limit how often new sessions can be started with ping to
    # once a minute
    current_time = helpers.monotime()
    elapsed_time = current_time - self.previous_ping_time
    allow_ping = (elapsed_time < 0 or
                  elapsed_time > self.rate_limit_seconds)
    if allow_ping:
      self.ping_timeout_pending = None
      self.previous_ping_time = current_time
      self._NewSession('6 CONNECTION REQUEST')
    elif not self.ping_timeout_pending:
      # Queue up a new session via tornado.
      callback_time = self.rate_limit_seconds - elapsed_time
      if callback_time < 1:
        callback_time = 1
      self.ping_timeout_pending = self.ioloop.add_timeout(
          datetime.timedelta(seconds=callback_time),
          self._NewTimeoutPingSession)
  def NewPeriodicSession(self):
    # If the ACS stops responding for some period of time, it's possible
    # that we'll already have a periodic inform queued up.
    # In this case, don't start the new inform, wait for the session
    # retry.  The retry has a maximum timer of periodic session.
    reason = '2 PERIODIC'
    if not (reason, None) in self.event_queue:
      self._NewSession(reason)
  def SetNotificationParameters(self, parameters):
    """Set the list of parameters that have changed.
    The list of parameters that have triggered and should be sent either
    with the next periodic inform, or the next active active value change
    session.
    Args:
      parameters: An array of the parameters that have changed, these
        need to be sent to the ACS in the parameter list.
    """
    for param in parameters:
      self._changed_parameters.add(param)
  def NewValueChangeSession(self):
    """Start a new session to the ACS for the parameters that have changed."""
    # If all the changed parameters have been reported, or there is already
    # a session running, don't do anything.  The run loop for the session
    # will autmatically kick off a new session if there are new changed
    # parameters.
    if not self._changed_parameters or self.session:
      return
    reason = '4 VALUE CHANGE'
    if not (reason, None) in self.event_queue:
      self._NewSession(reason)
  def PingReceived(self):
    """Entry point for the HTTP ping handler; returns the HTTP status."""
    self._NewPingSession()
    return 204  # No Content
  def _RemoveFromDequeue(self, dq, rmset):
    """Return a new deque which removes events in rmset."""
    newdq = collections.deque()
    for event in dq:
      (reason, unused_command_key) = event
      if reason.lower() not in rmset:
        newdq.append(event)
    return newdq
  def TransferCompleteReceived(self):
    """Called when the ACS sends a TransferCompleteResponse."""
    reasons = frozenset(['7 transfer complete', 'm download',
                         'm scheduledownload', 'm upload'])
    self.event_queue = self._RemoveFromDequeue(self.event_queue, reasons)
  def InformResponseReceived(self):
    """Called when the ACS sends an InformResponse."""
    reasons = frozenset(['0 bootstrap', '1 boot', '2 periodic',
                         '3 scheduled', '4 value change',
                         '6 connection request', '8 diagnostics complete',
                         'm reboot', 'm scheduleinform'])
    self.event_queue = self._RemoveFromDequeue(self.event_queue, reasons)
    self._changed_parameters_sent.clear()
  def Startup(self):
    """Boot-time entry point: replay pending reboots and open a session."""
    rb = self.cpe.download_manager.RestoreReboots()
    if rb:
      self.event_queue.extend(rb)
    # TODO(dgentry) Check whether we have a config, send '1 BOOT' instead
    self._NewSession('0 BOOTSTRAP')
    # This will call SendTransferComplete, so we have to already be in
    # a session.
    self.cpe.startup()
def Listen(ip, port, ping_path, acs, cpe, cpe_listener, platform_config,
           acs_url=None, ping_ip6dev=None, fetch_args=dict(), ioloop=None,
           restrict_acs_hosts=None):
  """Build a CPEStateMachine and register its HTTP handlers on *port*.

  Returns the constructed CPEStateMachine.
  """
  if not ping_path:
    # Random, hard-to-guess default ping path.
    ping_path = '/ping/%x' % random.getrandbits(120)
  ping_path = ping_path.lstrip('/')
  cpe_machine = CPEStateMachine(ip=ip, cpe=cpe, listenport=port,
                                platform_config=platform_config,
                                ping_path=ping_path,
                                restrict_acs_hosts=restrict_acs_hosts,
                                acs_url=acs_url, ping_ip6dev=ping_ip6dev,
                                fetch_args=fetch_args, ioloop=ioloop)
  cpe.setCallbacks(cpe_machine.SendTransferComplete,
                   cpe_machine.TransferCompleteReceived,
                   cpe_machine.InformResponseReceived)
  handlers = []
  if acs:
    acshandler = api_soap.ACS(acs).Handle
    handlers.append(('/acs', Handler, dict(soap_handler=acshandler)))
    print('TR-069 ACS at http://*:%d/acs' % port)
  if cpe and cpe_listener:
    cpehandler = cpe_machine.cpe_soap.Handle
    handlers.append(('/cpe', Handler, dict(soap_handler=cpehandler)))
    print('TR-069 CPE at http://*:%d/cpe' % port)
  if ping_path:
    handlers.append(('/' + ping_path, PingHandler,
                     dict(cpe_ms=cpe_machine.cpe_management_server,
                          callback=cpe_machine.PingReceived)))
    print('TR-069 callback at http://*:%d/%s' % (port, ping_path))
  webapp = tornado.web.Application(handlers)
  webapp.listen(port)
  return cpe_machine
| |
from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from unittest import skipUnless
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.encoding import force_bytes
from django.utils import six
from django.utils.six.moves import xrange
from .. import HAS_GEOS
if HAS_GEOS:
from .. import (GEOSException, GEOSIndexError, GEOSGeometry,
GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing,
LineString, MultiLineString, fromfile, fromstr, geos_version_info)
from ..base import gdal, numpy, GEOSBase
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
    def test_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass

        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)

        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p

        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()

        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None

        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it.  Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)

        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz and geos_version_info()['version'] >= '3.3.0':
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
    def test_hexewkb(self):
        "Testing (HEX)EWKB output."
        # For testing HEX(EWKB).
        ogc_hex = b'01010000000000000000000000000000000000F03F'
        ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)
        # OGC-compliant HEX will not have SRID value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex_3d, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use an
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
        # The 3D HEXEWKB must round-trip with the Z dimension intact.
        self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
        # Same for EWKB.
        self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = six.memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_json(self):
        "Testing GeoJSON input/output (via GDAL)."
        for g in self.geometries.json_geoms:
            geom = GEOSGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                # Loading jsons to prevent decimal differences
                self.assertEqual(json.loads(g.json), json.loads(geom.json))
                # .geojson is an alias for .json and must match too.
                self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
            self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
    def test_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mpnt = fromstr(mp.wkt)
            self.assertEqual(mpnt.geom_type, 'MultiPoint')
            self.assertEqual(mpnt.geom_typeid, 4)
            # Centroid coordinates checked to 9 decimal places.
            self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
            self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
            # Indexing one past the end must raise.
            self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
            self.assertEqual(mp.centroid, mpnt.centroid.tuple)
            self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
            # Every member of the collection is a valid, non-empty Point.
            for p in mpnt:
                self.assertEqual(p.geom_type, 'Point')
                self.assertEqual(p.geom_typeid, 0)
                self.assertEqual(p.empty, False)
                self.assertEqual(p.valid, True)
    def test_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            # Reference data may omit centroid/tuple; only check when present.
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)
            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            # Indexing one past the end must raise.
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls
            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
            # Point individual arguments
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
            if numpy:
                self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array
    def test_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)
            # Centroid coordinates checked to 9 decimal places.
            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml
            # Every member is a non-empty LineString.
            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)
            # Indexing one past the end must raise.
            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            # Rebuilding from cloned members or coordinate tuples round-trips.
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
    def test_polygons(self):
        "Testing Polygon objects."
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.empty, False)
            self.assertEqual(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)
            # Area & Centroid (compared to 9 decimal places)
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
            # Testing the geometry equivalence
            self.assertEqual(True, poly == fromstr(p.wkt))
            self.assertEqual(False, poly == prev)  # Should not be equal to previous geometry
            self.assertEqual(True, poly != prev)
            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            # Reference data may omit the exterior-ring coordinates.
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__
            # Testing __getitem__ and __setitem__ on invalid indices
            self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
            self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
            self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
            # Testing __iter__ -- each ring of the polygon is a LinearRing.
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)
            # Testing polygon construction -- invalid arguments raise TypeError.
            self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
            self.assertRaises(TypeError, Polygon, 'foo')
            # Polygon(shell, (hole1, ... holeN))
            rings = tuple(r for r in poly)
            self.assertEqual(poly, Polygon(rings[0], rings[1:]))
            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))
            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertTrue(p1 > p2)
self.assertFalse(p1 < p2)
self.assertFalse(p2 > p1)
self.assertTrue(p2 < p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertFalse(p4 < p3)
self.assertTrue(p3 < p4)
self.assertTrue(p4 > p3)
self.assertFalse(p3 > p4)
    def test_multipolygons(self):
        "Testing MultiPolygon objects."
        # NOTE(review): the result here is discarded; looks like a leftover
        # `prev` geometry from an earlier version of this test -- confirm.
        fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)
            # Structural checks only make sense for valid geometries.
            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                # Indexing one past the end must raise.
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                # Every member is a valid Polygon.
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                # Rebuilding from cloned members round-trips the WKT.
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
    def test_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        #### Memory issues with rings and polygons
        # These tests are needed to ensure sanity with writable geometries.
        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]
        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]
        # Deleting the polygon -- the rings extracted above must survive it.
        del poly
        # Access to these rings is OK since they are clones.
        # (str() forces a WKT serialization, which would crash on a
        # dangling GEOS pointer.)
        str(ring1)
        str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
    def test_buffer(self):
        "Testing buffer()."
        for bg in self.geometries.buffer_geoms:
            g = fromstr(bg.wkt)
            # The buffer we expect
            exp_buf = fromstr(bg.buffer_wkt)
            quadsegs = bg.quadsegs
            width = bg.width
            # Can't use a floating-point for the number of quadsegs.
            self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
            # Constructing our buffer
            buf = g.buffer(width, quadsegs)
            self.assertEqual(exp_buf.num_coords, buf.num_coords)
            self.assertEqual(len(exp_buf), len(buf))
            # Now assuring that each point in the buffer is almost equal
            for j in xrange(len(exp_buf)):
                exp_ring = exp_buf[j]
                buf_ring = buf[j]
                self.assertEqual(len(exp_ring), len(buf_ring))
                for k in xrange(len(exp_ring)):
                    # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                    self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                    self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
    def test_srid(self):
        "Testing the SRID property and keyword."
        # Testing SRID keyword on Point
        pnt = Point(5, 23, srid=4326)
        self.assertEqual(4326, pnt.srid)
        pnt.srid = 3084
        self.assertEqual(3084, pnt.srid)
        # SRID setter expects an integer; a string must raise.
        self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
        # Testing SRID keyword on fromstr(), and on Polygon rings.
        poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
        self.assertEqual(4269, poly.srid)
        # The SRID propagates to the polygon's rings.
        for ring in poly:
            self.assertEqual(4269, ring.srid)
        poly.srid = 4326
        self.assertEqual(4326, poly.shell.srid)
        # Testing SRID keyword on GeometryCollection
        gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
        self.assertEqual(32021, gc.srid)
        for i in range(len(gc)):
            self.assertEqual(32021, gc[i].srid)
        # GEOS may get the SRID from HEXEWKB
        # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
        # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
        hex = '0101000020E610000000000000000014400000000000003740'
        p1 = fromstr(hex)
        self.assertEqual(4326, p1.srid)
        # Plain (non-EWKB) hex output carries no SRID.
        p2 = fromstr(p1.hex)
        self.assertIsNone(p2.srid)
        p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
        self.assertEqual(-1, p3.srid)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_custom_srid(self):
        """ Test with a srid unknown from GDAL """
        pnt = Point(111200, 220900, srid=999999)
        self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
        self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
        # An unknown SRID yields no spatial reference system.
        self.assertIsNone(pnt.srs)
        # Test conversion from custom to a known srid, via an explicit
        # CoordTransform (the SRID alone cannot be resolved by GDAL).
        c2w = gdal.CoordTransform(
            gdal.SpatialReference(
                '+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
                '+datum=WGS84 +units=m +no_defs'
            ),
            gdal.SpatialReference(4326))
        new_pnt = pnt.transform(c2w, clone=True)
        self.assertEqual(new_pnt.srid, 4326)
        self.assertAlmostEqual(new_pnt.x, 1, 3)
        self.assertAlmostEqual(new_pnt.y, 2, 3)
    def test_mutable_geometries(self):
        "Testing the mutability of Polygons and Geometry Collections."
        ### Testing the mutability of Polygons ###
        for p in self.geometries.polygons:
            poly = fromstr(p.wkt)
            # Should only be able to use __setitem__ with LinearRing geometries.
            self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
            # Constructing the new shell by adding 500 to every point in the old shell.
            shell_tup = poly.shell.tuple
            new_coords = []
            for point in shell_tup:
                new_coords.append((point[0] + 500., point[1] + 500.))
            new_shell = LinearRing(*tuple(new_coords))
            # Assigning polygon's exterior ring w/the new shell
            poly.exterior_ring = new_shell
            str(new_shell)  # new shell is still accessible
            self.assertEqual(poly.exterior_ring, new_shell)
            self.assertEqual(poly[0], new_shell)
        ### Testing the mutability of Geometry Collections
        for tg in self.geometries.multipoints:
            mp = fromstr(tg.wkt)
            for i in range(len(mp)):
                # Creating a random point.
                pnt = mp[i]
                new = Point(random.randint(21, 100), random.randint(21, 100))
                # Testing the assignment
                mp[i] = new
                str(new)  # what was used for the assignment is still accessible
                self.assertEqual(mp[i], new)
                self.assertEqual(mp[i].wkt, new.wkt)
                self.assertNotEqual(pnt, mp[i])
        # MultiPolygons involve much more memory management because each
        # Polygon w/in the collection has its own rings.
        for tg in self.geometries.multipolygons:
            mpoly = fromstr(tg.wkt)
            for i in xrange(len(mpoly)):
                poly = mpoly[i]
                old_poly = mpoly[i]
                # Offsetting the each ring in the polygon by 500.
                for j in xrange(len(poly)):
                    r = poly[j]
                    for k in xrange(len(r)):
                        r[k] = (r[k][0] + 500., r[k][1] + 500.)
                    poly[j] = r
                # The extracted polygon was mutated; the collection member
                # must still hold the original coordinates at this point.
                self.assertNotEqual(mpoly[i], poly)
                # Testing the assignment
                mpoly[i] = poly
                str(poly)  # Still accessible
                self.assertEqual(mpoly[i], poly)
                self.assertNotEqual(mpoly[i], old_poly)
        # Extreme (!!) __setitem__ -- no longer works, have to detect
        # in the first object that __setitem__ is called in the subsequent
        # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
        #mpoly[0][0][0] = (3.14, 2.71)
        #self.assertEqual((3.14, 2.71), mpoly[0][0][0])
        # Doing it more slowly..
        #self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
        #del mpoly
    def test_threed(self):
        "Testing three-dimensional geometries."
        # Testing a 3D Point: coords round-trip, 2D assignment is rejected.
        pnt = Point(2, 3, 8)
        self.assertEqual((2., 3., 8.), pnt.coords)
        self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
        pnt.coords = (1., 2., 3.)
        self.assertEqual((1., 2., 3.), pnt.coords)
        # Testing a 3D LineString: same round-trip/rejection via __setitem__.
        ls = LineString((2., 3., 8.), (50., 250., -117.))
        self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
        self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
        ls[0] = (1., 2., 3.)
        self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
    def test_emptyCollections(self):
        "Testing empty geometries and collections."
        gc1 = GeometryCollection([])
        gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
        pnt = fromstr('POINT EMPTY')
        ls = fromstr('LINESTRING EMPTY')
        poly = fromstr('POLYGON EMPTY')
        mls = fromstr('MULTILINESTRING EMPTY')
        mpoly1 = fromstr('MULTIPOLYGON EMPTY')
        mpoly2 = MultiPolygon(())
        for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
            self.assertEqual(True, g.empty)
            # Testing len() and num_geom -- conventions differ by type.
            if isinstance(g, Polygon):
                self.assertEqual(1, len(g))  # Has one empty linear ring
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g[0]))
            elif isinstance(g, (Point, LineString)):
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g))
            else:
                # Empty collections have no members at all.
                self.assertEqual(0, g.num_geom)
                self.assertEqual(0, len(g))
            # Testing __getitem__ (doesn't work on Point or Polygon)
            if isinstance(g, Point):
                self.assertRaises(GEOSIndexError, g.get_x)
            elif isinstance(g, Polygon):
                # The empty polygon still exposes an empty shell ring.
                lr = g.shell
                self.assertEqual('LINEARRING EMPTY', lr.wkt)
                self.assertEqual(0, len(lr))
                self.assertEqual(True, lr.empty)
                self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
            else:
                self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_gdal(self):
        "Testing `ogr` and `srs` properties."
        g1 = fromstr('POINT(5 23)')
        self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
        # No SRID was given, so no spatial reference system.
        self.assertIsNone(g1.srs)
        # The Z dimension survives conversion to OGR.
        g1_3d = fromstr('POINT(5 23 8)')
        self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
        self.assertEqual(g1_3d.ogr.z, 8)
        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
        self.assertIsInstance(g2.srs, gdal.SpatialReference)
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform(self):
        "Testing `transform` method."
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)
        # Testing use of the `clone` keyword: the original must stay
        # untouched and the clone must differ from it.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)
        # All four transforms must land within 3 decimal places of the
        # PostGIS-derived reference point.
        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform_3d(self):
        # The Z coordinate must survive a 2D coordinate transform untouched.
        p3d = GEOSGeometry('POINT (5 23 100)', 4326)
        p3d.transform(2774)
        self.assertEqual(p3d.z, 100)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_noop(self):
        """ Testing `transform` method (SRID match) """
        # transform() should no-op if source & dest SRIDs match,
        # regardless of whether GDAL is available.
        if gdal.HAS_GDAL:
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)
            # clone=True must still return a distinct (but equal) object.
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")
        # Repeat the same checks with GDAL forcibly disabled; the flag is
        # restored in the finally block so other tests are unaffected.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")
        finally:
            gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_nogdal(self):
        """ Testing `transform` method (GDAL not available) """
        # Temporarily pretend GDAL is absent; restore the flag in the
        # finally block so other tests are unaffected.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False
            # Without GDAL, a real transform (differing SRIDs) must raise,
            # both in-place and with clone=True.
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774)
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774, clone=True)
        finally:
            gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle

        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
        # Each geometry must round-trip through both picklers, preserving
        # equality and the SRID.
        for geom in tgeoms:
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                self.assertEqual(geom.srid, tmpg.srid)
    def test_prepared(self):
        "Testing PreparedGeometry support."
        # Creating a simple multipolygon and getting a prepared version.
        mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
        prep = mpoly.prepared
        # A set of test points.
        pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
        covers = [True, True, False]  # No `covers` op for regular GEOS geoms.
        for pnt, c in zip(pnts, covers):
            # Results should be the same (but faster)
            self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
            self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
            self.assertEqual(c, prep.covers(pnt))
        # The extra prepared predicates are only available on newer GEOS.
        if geos_version_info()['version'] > '3.3.0':
            self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
            self.assertTrue(prep.disjoint(Point(-5, -5)))
            poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
            self.assertTrue(prep.overlaps(poly))
            poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
            self.assertTrue(prep.touches(poly))
            poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
            self.assertTrue(prep.within(poly))
        # Original geometry deletion should not crash the prepared one (#21662)
        del mpoly
        self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    @skipUnless(HAS_GEOS, "Geos is required.")
    def test_linearref(self):
        "Testing linear referencing"
        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
        # project() returns the distance along the line to the point nearest
        # the argument; interpolate() is its inverse.
        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
        # Same operations on a multilinestring.
        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)
        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
| |
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import glob
import os
import re
import yaml
from fontaine.font import FontFactory
from fontaine.cmap import Library
from bakery_cli.ttfont import PiFont
from bakery_cli.utils import UpstreamDirectory
from bakery_lint.base import BakeryTestCase as TestCase, tags, \
TestCaseOperator
COPYRIGHT_REGEX = re.compile(r'Copyright.*?20\d{2}.*', re.U | re.I)
class FontTestPrepolation(TestCase):
    """ Lint checks that the fonts of a family are consistent with one
    another: they share glyph names, and each glyph has the same number
    of contours and points across all fonts. """
    name = __name__
    targets = ['upstream-repo']
    tool = 'lint'

    def test_family_glyph_names_match(self):
        """ Each font in family has matching glyph names? """
        directory = UpstreamDirectory(self.operator.path)
        # Glyph list of the previously inspected font; empty until the
        # first font has been read.
        glyphs = []
        for f in directory.get_fonts():
            font = PiFont(os.path.join(self.operator.path, f))
            glyphs_ = font.get_glyphs()
            if glyphs and glyphs != glyphs_:
                # TODO report which font
                self.fail('Family has different glyphs across fonts')
            # BUG FIX: `glyphs` was never assigned before, so the comparison
            # above could never fire; remember this font's glyph list for
            # the next iteration.
            glyphs = glyphs_

    def test_font_prepolation_glyph_contours(self):
        """ Check that glyphs has same number of contours across family """
        directory = UpstreamDirectory(self.operator.path)
        # Maps glyph code -> contour count seen in earlier fonts.
        glyphs = {}
        for f in directory.get_fonts():
            font = PiFont(os.path.join(self.operator.path, f))
            glyphs_ = font.get_glyphs()
            for glyphcode, glyphname in glyphs_:
                contours = font.get_contours_count(glyphname)
                if glyphcode in glyphs and glyphs[glyphcode] != contours:
                    msg = ('Number of contours of glyph "%s" does not match.'
                           ' Expected %s contours, but actual is %s contours')
                    self.fail(msg % (glyphname, glyphs[glyphcode], contours))
                glyphs[glyphcode] = contours

    def test_font_prepolation_glyph_points(self):
        """ Check that glyphs has same number of points across family """
        directory = UpstreamDirectory(self.operator.path)
        # Maps glyph code -> point count seen in earlier fonts.
        glyphs = {}
        for f in directory.get_fonts():
            font = PiFont(os.path.join(self.operator.path, f))
            glyphs_ = font.get_glyphs()
            for g, glyphname in glyphs_:
                points = font.get_points_count(glyphname)
                if g in glyphs and glyphs[g] != points:
                    msg = ('Number of points of glyph "%s" does not match.'
                           ' Expected %s points, but actual is %s points')
                    self.fail(msg % (glyphname, glyphs[g], points))
                glyphs[g] = points
class TestTTFAutoHintHasDeva(TestCase):
    """ When the `devanagari` subset is selected in the bakery config,
    the configured ttfautohint options must include `-f deva`. """
    targets = ['upstream-repo']
    tool = 'lint'
    name = __name__

    @classmethod
    def skipUnless(cls):
        """ Skip (return True) unless a bakery config exists and selects
        the `devanagari` subset; otherwise stash the config on the class. """
        projroot = os.path.join(cls.operator.path, '..')
        bakeryconfig = None
        # `bakery.yml` takes precedence over `bakery.yaml` when both exist
        # (same lookup order as before).
        for candidate in ('bakery.yaml', 'bakery.yml'):
            bakeryfile = os.path.join(projroot, candidate)
            if os.path.exists(bakeryfile):
                # FIX: close the file handle (it was previously leaked) and
                # use safe_load -- a config file needs no arbitrary-object
                # YAML deserialization.
                with open(bakeryfile) as fp:
                    bakeryconfig = yaml.safe_load(fp)
        if bakeryconfig is None:
            return True
        if 'devanagari' not in bakeryconfig.get('subset', []):
            return True
        cls.bakeryconfig = bakeryconfig

    def test_ttfautohint_has_deva(self):
        """ Check that ttfautohint option has -f deva with devanagari subset """
        if '-f deva' not in self.bakeryconfig.get('ttfautohint', ''):
            self.fail((u'Subset `devanagari` is selected but ttfautohint'
                       u' does not have `-f deva` option'))
class CheckTextFilesExist(TestCase):
    """ Checks that the standard text files ship with the font project. """
    name = __name__
    targets = ['upstream-repo']
    tool = 'lint'

    def assertExists(self, filename):
        """ Fail unless at least one of the given filenames exists in the
        project directory. Accepts a single name or a list of names. """
        candidates = filename if isinstance(filename, list) else [filename]
        found = any(
            os.path.exists(os.path.join(self.operator.path, name))
            for name in candidates)
        if not found:
            self.fail('%s does not exist in project' % filename)

    @tags('required')
    def test_copyrighttxt_exists(self):
        """ Font folder should contains COPYRIGHT.txt """
        self.assertExists('COPYRIGHT.txt')

    @tags('required')
    def test_description_exists(self):
        """ Font folder should contains DESCRIPTION.en_us.html """
        self.assertExists('DESCRIPTION.en_us.html')

    @tags('required')
    def test_licensetxt_exists(self):
        """ Font folder should contains LICENSE.txt """
        self.assertExists(['LICENSE.txt', 'OFL.txt'])

    def test_fontlogtxt_exists(self):
        """ Font folder should contains FONTLOG.txt """
        self.assertExists('FONTLOG.txt')
class TestUpstreamRepo(TestCase):
    """ Tests for common upstream repository files.
    .. note::
    This test case is not related to font processing. It makes only common
    checks like one - test that upstream repository contains bakery.yaml) """
    targets = ['upstream-repo']
    tool = 'lint'
    name = __name__

    @tags('note')
    def test_bakery_yaml_exists(self):
        """ Repository contains bakery.yaml configuration file? """
        f = os.path.exists(os.path.join(self.operator.path, 'bakery.yaml'))
        f = f or os.path.exists(os.path.join(self.operator.path, 'bakery.yml'))
        self.assertTrue(f,
                        msg=('File `bakery.yaml` does not exist in root '
                             'of upstream repository'))

    @tags('note')
    def test_metadata_json_exists(self):
        """ Repository contains METADATA.json file? """
        self.assertTrue(os.path.exists(os.path.join(self.operator.path, 'METADATA.json')),
                        msg=('File `METADATA.json` does not exist in root '
                             'of upstream repository'))

    def test_copyright_notices_same_across_family(self):
        """ Each font copyright notice matches? """
        ufo_dirs = []
        for root, dirs, files in os.walk(self.operator.path):
            for d in dirs:
                fullpath = os.path.join(root, d)
                if os.path.splitext(fullpath)[1].lower() == '.ufo':
                    ufo_dirs.append(fullpath)
        # Compare every discovered notice against the first one found and
        # fail on the first mismatch.  (The original `break` after
        # self.fail() was unreachable -- fail() raises -- and was dropped.)
        reference = None
        for ufo_folder in ufo_dirs:
            current_notice = self.lookup_copyright_notice(ufo_folder)
            if current_notice is None:
                continue
            if reference is not None and current_notice != reference:
                self.fail('"%s" != "%s"' % (current_notice, reference))
            reference = current_notice

    def grep_copyright_notice(self, contents):
        """Return the first copyright notice found in *contents*, or None."""
        match = COPYRIGHT_REGEX.search(contents)
        if match:
            return match.group(0).strip(',\r\n')
        return

    def lookup_copyright_notice(self, ufo_folder):
        """Find a copyright notice for *ufo_folder*.

        Tries the UFO's fontinfo.plist first, then walks up towards the
        repository root scanning *.txt and *.ttx files for the pattern.
        Returns the stripped notice string, or None when nothing matched.
        """
        current_path = ufo_folder
        try:
            with open(os.path.join(ufo_folder, 'fontinfo.plist')) as plist:
                contents = plist.read()
            notice = self.grep_copyright_notice(contents)
            if notice:
                return notice
        except (IOError, OSError):
            pass
        while os.path.realpath(self.operator.path) != current_path:
            # look for all text files inside folder
            # read contents from them and compare with copyright notice
            # pattern
            files = glob.glob(os.path.join(current_path, '*.txt'))
            files += glob.glob(os.path.join(current_path, '*.ttx'))
            for filename in files:
                # glob already returns paths joined with current_path;
                # re-joining (as the original did) duplicated the directory
                # prefix whenever current_path was relative.
                with open(filename) as fp:
                    match = COPYRIGHT_REGEX.search(fp.read())
                    if not match:
                        continue
                    return match.group(0).strip(',\r\n')
            current_path = os.path.join(current_path, '..')  # go up
            current_path = os.path.realpath(current_path)
        return
def get_test_subset_function(value):
    """Create a test method that passes only for 100% subset coverage."""
    def subset_coverage_test(self):
        self.assertEqual(value, 100)
    subset_coverage_test.tags = ['note']
    return subset_coverage_test
class FontaineTest(TestCase):
    """Generated checks: every subset selected in bakery.yaml must be
    100% covered according to PyFontaine."""
    targets = ['upstream-repo']
    tool = 'PyFontaine'
    name = __name__

    @classmethod
    def __generateTests__(cls):
        # Strip every non-identifier character from the charset name so it
        # can be embedded in a generated method name.
        pattern = re.compile(r'[\W_]+')
        library = Library(collections=['subsets'])
        directory = UpstreamDirectory(cls.operator.path)
        yamlpath = os.path.join(cls.operator.path, 'bakery.yaml')
        try:
            bakerydata = yaml.load(open(yamlpath))
        except IOError:
            # Fall back to the packaged default configuration.
            from bakery_cli.bakery import BAKERY_CONFIGURATION_DEFAULTS
            bakerydata = yaml.load(open(BAKERY_CONFIGURATION_DEFAULTS))
        for fontpath in directory.UFO + directory.TTX:
            font = FontFactory.openfont(os.path.join(cls.operator.path, fontpath))
            for charmap in font.get_orthographies(_library=library):
                common_name = charmap.charset.common_name.replace('Subset ', '')
                shortname = pattern.sub('', common_name)
                if shortname not in bakerydata['subset']:
                    continue
                # These attributes used to be installed via Python-2-only
                # `exec` statements built from font-supplied strings -- a
                # code-injection hazard.  setattr() is equivalent and safe.
                # NOTE(review): assumes charmap.coverage is numeric, as the
                # original exec interpolation effectively required -- confirm.
                method = get_test_subset_function(charmap.coverage)
                method.__doc__ = 'Is %s covered 100%%?' % common_name
                setattr(cls, 'test_charset_%s' % shortname, method)
def get_suite(path, apply_autofix=False):
    """Assemble the unittest suite for the upstream repository at *path*.

    Every test case class is handed a TestCaseOperator and the autofix
    flag, may opt out entirely via its skipUnless() hook, and may add
    dynamically generated tests via __generateTests__() before loading.
    """
    import unittest
    suite = unittest.TestSuite()
    case_classes = (
        CheckTextFilesExist,
        FontTestPrepolation,
        TestTTFAutoHintHasDeva,
        TestUpstreamRepo,
        FontaineTest,
    )
    loader = unittest.defaultTestLoader
    for case in case_classes:
        case.operator = TestCaseOperator(path)
        case.apply_fix = apply_autofix
        skip_hook = getattr(case, 'skipUnless', False)
        if skip_hook and skip_hook():
            continue
        generate = getattr(case, '__generateTests__', None)
        if generate is not None:
            generate()
        suite.addTests(loader.loadTestsFromTestCase(case))
    return suite
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.type import date_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
# proto-plus module descriptor: registers every message/enum defined below
# under the v1beta1 budgets package so string references resolve at runtime.
__protobuf__ = proto.module(
    package="google.cloud.billing.budgets.v1beta1",
    manifest={
        "CalendarPeriod",
        "Budget",
        "BudgetAmount",
        "LastPeriodAmount",
        "ThresholdRule",
        "AllUpdatesRule",
        "Filter",
        "CustomPeriod",
    },
)
class CalendarPeriod(proto.Enum):
    r"""A ``CalendarPeriod`` represents the abstract concept of a time
    period that has a canonical start. Grammatically, "the start of the
    current ``CalendarPeriod``". All calendar times begin at 12 AM US
    and Canadian Pacific Time (UTC-8).
    """
    # Values mirror the wire-format integers of the service proto enum.
    CALENDAR_PERIOD_UNSPECIFIED = 0
    MONTH = 1
    QUARTER = 2
    YEAR = 3
class Budget(proto.Message):
    r"""A budget is a plan that describes what you expect to spend on
    Cloud projects, plus the rules to execute as spend is tracked
    against that plan, (for example, send an alert when 90% of the
    target spend is met). The budget time period is configurable,
    with options such as month (default), quarter, year, or custom
    time period.
    Attributes:
        name (str):
            Output only. Resource name of the budget. The resource name
            implies the scope of a budget. Values are of the form
            ``billingAccounts/{billingAccountId}/budgets/{budgetId}``.
        display_name (str):
            User data for display name in UI.
            Validation: <= 60 chars.
        budget_filter (google.cloud.billing.budgets_v1beta1.types.Filter):
            Optional. Filters that define which resources
            are used to compute the actual spend against the
            budget amount, such as projects, services, and
            the budget's time period, as well as other
            filters.
        amount (google.cloud.billing.budgets_v1beta1.types.BudgetAmount):
            Required. Budgeted amount.
        threshold_rules (Sequence[google.cloud.billing.budgets_v1beta1.types.ThresholdRule]):
            Optional. Rules that trigger alerts (notifications of
            thresholds being crossed) when spend exceeds the specified
            percentages of the budget.
            Optional for ``pubsubTopic`` notifications.
            Required if using email notifications.
        all_updates_rule (google.cloud.billing.budgets_v1beta1.types.AllUpdatesRule):
            Optional. Rules to apply to notifications
            sent based on budget spend and thresholds.
        etag (str):
            Optional. Etag to validate that the object is
            unchanged for a read-modify-write operation.
            An empty etag will cause an update to overwrite
            other changes.
    """
    # Wire-format field numbers are fixed by the service proto; do not renumber.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    budget_filter = proto.Field(proto.MESSAGE, number=3, message="Filter",)
    amount = proto.Field(proto.MESSAGE, number=4, message="BudgetAmount",)
    threshold_rules = proto.RepeatedField(
        proto.MESSAGE, number=5, message="ThresholdRule",
    )
    all_updates_rule = proto.Field(proto.MESSAGE, number=6, message="AllUpdatesRule",)
    etag = proto.Field(proto.STRING, number=7,)
class BudgetAmount(proto.Message):
    r"""The budgeted amount for each usage period.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        specified_amount (google.type.money_pb2.Money):
            A specified amount to use as the budget. ``currency_code``
            is optional. If specified when creating a budget, it must
            match the currency of the billing account. If specified when
            updating a budget, it must match the currency_code of the
            existing budget. The ``currency_code`` is provided on
            output.
            This field is a member of `oneof`_ ``budget_amount``.
        last_period_amount (google.cloud.billing.budgets_v1beta1.types.LastPeriodAmount):
            Use the last period's actual spend as the budget for the
            present period. LastPeriodAmount can only be set when the
            budget's time period is a
            [Filter.calendar_period][google.cloud.billing.budgets.v1beta1.Filter.calendar_period].
            It cannot be set in combination with
            [Filter.custom_period][google.cloud.billing.budgets.v1beta1.Filter.custom_period].
            This field is a member of `oneof`_ ``budget_amount``.
    """
    # Members of the ``budget_amount`` oneof: setting one clears the other.
    specified_amount = proto.Field(
        proto.MESSAGE, number=1, oneof="budget_amount", message=money_pb2.Money,
    )
    last_period_amount = proto.Field(
        proto.MESSAGE, number=2, oneof="budget_amount", message="LastPeriodAmount",
    )
class LastPeriodAmount(proto.Message):
    r"""Describes a budget amount targeted to the last
    [Filter.calendar_period][google.cloud.billing.budgets.v1beta1.Filter.calendar_period]
    spend. At this time, the amount is automatically 100% of the last
    calendar period's spend; that is, there are no other options yet.
    Future configuration options will be described here (for example,
    configuring a percentage of last period's spend). LastPeriodAmount
    cannot be set for a budget configured with a
    [Filter.custom_period][google.cloud.billing.budgets.v1beta1.Filter.custom_period].
    """
    # Intentionally empty: this proto message carries no fields yet.
class ThresholdRule(proto.Message):
    r"""ThresholdRule contains the definition of a threshold. Threshold
    rules define the triggering events used to generate a budget
    notification email. When a threshold is crossed (spend exceeds the
    specified percentages of the budget), budget alert emails are sent
    to the email recipients you specify in the
    `NotificationsRule <#notificationsrule>`__.
    Threshold rules also affect the fields included in the `JSON data
    object <https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#notification_format>`__
    sent to a Pub/Sub topic.
    Threshold rules are *required* if using email notifications.
    Threshold rules are *optional* if only setting a ```pubsubTopic``
    NotificationsRule <#NotificationsRule>`__, unless you want your JSON
    data object to include data about the thresholds you set.
    For more information, see `set budget threshold rules and
    actions <https://cloud.google.com/billing/docs/how-to/budgets#budget-actions>`__.
    Attributes:
        threshold_percent (float):
            Required. Send an alert when this threshold
            is exceeded. This is a 1.0-based percentage, so
            0.5 = 50%. Validation: non-negative number.
        spend_basis (google.cloud.billing.budgets_v1beta1.types.ThresholdRule.Basis):
            Optional. The type of basis used to determine if spend has
            passed the threshold. Behavior defaults to CURRENT_SPEND if
            not set.
    """
    class Basis(proto.Enum):
        r"""The type of basis used to determine if spend has passed the
        threshold.
        """
        BASIS_UNSPECIFIED = 0
        CURRENT_SPEND = 1
        FORECASTED_SPEND = 2
    # threshold_percent is a fraction of the budget (0.5 == 50%).
    threshold_percent = proto.Field(proto.DOUBLE, number=1,)
    spend_basis = proto.Field(proto.ENUM, number=2, enum=Basis,)
class AllUpdatesRule(proto.Message):
    r"""AllUpdatesRule defines notifications that are sent based on
    budget spend and thresholds.
    Attributes:
        pubsub_topic (str):
            Optional. The name of the Pub/Sub topic where budget related
            messages will be published, in the form
            ``projects/{project_id}/topics/{topic_id}``. Updates are
            sent at regular intervals to the topic. The topic needs to
            be created before the budget is created; see
            https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications
            for more details. Caller is expected to have
            ``pubsub.topics.setIamPolicy`` permission on the topic when
            it's set for a budget, otherwise, the API call will fail
            with PERMISSION_DENIED. See
            https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#permissions_required_for_this_task
            for more details on Pub/Sub roles and permissions.
        schema_version (str):
            Optional. Required when
            [AllUpdatesRule.pubsub_topic][google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic]
            is set. The schema version of the notification sent to
            [AllUpdatesRule.pubsub_topic][google.cloud.billing.budgets.v1beta1.AllUpdatesRule.pubsub_topic].
            Only "1.0" is accepted. It represents the JSON schema as
            defined in
            https://cloud.google.com/billing/docs/how-to/budgets-programmatic-notifications#notification_format.
        monitoring_notification_channels (Sequence[str]):
            Optional. Targets to send notifications to when a threshold
            is exceeded. This is in addition to default recipients who
            have billing account IAM roles. The value is the full REST
            resource name of a monitoring notification channel with the
            form
            ``projects/{project_id}/notificationChannels/{channel_id}``.
            A maximum of 5 channels are allowed. See
            https://cloud.google.com/billing/docs/how-to/budgets-notification-recipients
            for more details.
        disable_default_iam_recipients (bool):
            Optional. When set to true, disables default
            notifications sent when a threshold is exceeded.
            Default notifications are sent to those with
            Billing Account Administrator and Billing
            Account User IAM roles for the target account.
    """
    # Field numbers match the service proto; do not renumber.
    pubsub_topic = proto.Field(proto.STRING, number=1,)
    schema_version = proto.Field(proto.STRING, number=2,)
    monitoring_notification_channels = proto.RepeatedField(proto.STRING, number=3,)
    disable_default_iam_recipients = proto.Field(proto.BOOL, number=4,)
class Filter(proto.Message):
    r"""A filter for a budget, limiting the scope of the cost to
    calculate.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        projects (Sequence[str]):
            Optional. A set of projects of the form
            ``projects/{project}``, specifying that usage from only this
            set of projects should be included in the budget. If
            omitted, the report will include all usage for the billing
            account, regardless of which project the usage occurred on.
            Only zero or one project can be specified currently.
        credit_types (Sequence[str]):
            Optional. If
            [Filter.credit_types_treatment][google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment]
            is INCLUDE_SPECIFIED_CREDITS, this is a list of credit types
            to be subtracted from gross cost to determine the spend for
            threshold calculations. See `a list of acceptable credit
            type
            values <https://cloud.google.com/billing/docs/how-to/export-data-bigquery-tables#credits-type>`__.
            If
            [Filter.credit_types_treatment][google.cloud.billing.budgets.v1beta1.Filter.credit_types_treatment]
            is **not** INCLUDE_SPECIFIED_CREDITS, this field must be
            empty.
        credit_types_treatment (google.cloud.billing.budgets_v1beta1.types.Filter.CreditTypesTreatment):
            Optional. If not set, default behavior is
            ``INCLUDE_ALL_CREDITS``.
        services (Sequence[str]):
            Optional. A set of services of the form
            ``services/{service_id}``, specifying that usage from only
            this set of services should be included in the budget. If
            omitted, the report will include usage for all the services.
            The service names are available through the Catalog API:
            https://cloud.google.com/billing/v1/how-tos/catalog-api.
        subaccounts (Sequence[str]):
            Optional. A set of subaccounts of the form
            ``billingAccounts/{account_id}``, specifying that usage from
            only this set of subaccounts should be included in the
            budget. If a subaccount is set to the name of the parent
            account, usage from the parent account will be included. If
            omitted, the report will include usage from the parent
            account and all subaccounts, if they exist.
        labels (Sequence[google.cloud.billing.budgets_v1beta1.types.Filter.LabelsEntry]):
            Optional. A single label and value pair specifying that
            usage from only this set of labeled resources should be
            included in the budget. If omitted, the report will include
            all labeled and unlabeled usage.
            An object containing a single ``"key": value`` pair.
            Example: ``{ "name": "wrench" }``.
            *Currently, multiple entries or multiple values per entry
            are not allowed.*
        calendar_period (google.cloud.billing.budgets_v1beta1.types.CalendarPeriod):
            Optional. Specifies to track usage for
            recurring calendar period. For example, assume
            that CalendarPeriod.QUARTER is set. The budget
            will track usage from April 1 to June 30, when
            the current calendar month is April, May, June.
            After that, it will track usage from July 1 to
            September 30 when the current calendar month is
            July, August, September, so on.
            This field is a member of `oneof`_ ``usage_period``.
        custom_period (google.cloud.billing.budgets_v1beta1.types.CustomPeriod):
            Optional. Specifies to track usage from any
            start date (required) to any end date
            (optional). This time period is static, it does
            not recur.
            This field is a member of `oneof`_ ``usage_period``.
    """
    class CreditTypesTreatment(proto.Enum):
        r"""Specifies how credits are applied when determining the spend for
        threshold calculations. Budgets track the total cost minus any
        applicable selected credits. `See the documentation for a list of
        credit
        types <https://cloud.google.com/billing/docs/how-to/export-data-bigquery-tables#credits-type>`__.
        """
        CREDIT_TYPES_TREATMENT_UNSPECIFIED = 0
        INCLUDE_ALL_CREDITS = 1
        EXCLUDE_ALL_CREDITS = 2
        INCLUDE_SPECIFIED_CREDITS = 3
    # Field numbers are non-contiguous by design (they match the service
    # proto, where fields were added over time); do not renumber.
    projects = proto.RepeatedField(proto.STRING, number=1,)
    credit_types = proto.RepeatedField(proto.STRING, number=7,)
    credit_types_treatment = proto.Field(
        proto.ENUM, number=4, enum=CreditTypesTreatment,
    )
    services = proto.RepeatedField(proto.STRING, number=3,)
    subaccounts = proto.RepeatedField(proto.STRING, number=5,)
    labels = proto.MapField(
        proto.STRING, proto.MESSAGE, number=6, message=struct_pb2.ListValue,
    )
    # calendar_period and custom_period are members of the ``usage_period``
    # oneof: setting one clears the other.
    calendar_period = proto.Field(
        proto.ENUM, number=8, oneof="usage_period", enum="CalendarPeriod",
    )
    custom_period = proto.Field(
        proto.MESSAGE, number=9, oneof="usage_period", message="CustomPeriod",
    )
class CustomPeriod(proto.Message):
    r"""All date times begin at 12 AM US and Canadian Pacific Time
    (UTC-8).
    Attributes:
        start_date (google.type.date_pb2.Date):
            Required. The start date must be after
            January 1, 2017.
        end_date (google.type.date_pb2.Date):
            Optional. The end date of the time period. Budgets with
            elapsed end date won't be processed. If unset, specifies to
            track all usage incurred since the start_date.
    """
    # end_date is optional; when unset the tracked period is open-ended.
    start_date = proto.Field(proto.MESSAGE, number=1, message=date_pb2.Date,)
    end_date = proto.Field(proto.MESSAGE, number=2, message=date_pb2.Date,)
# Public API: exactly the names registered in the proto manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
import abc
import contextlib
import threading
import unittest
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from grpc_test.framework.common import test_constants
from grpc_test.framework.common import test_control
from grpc_test.framework.common import test_coverage
from grpc_test.framework.interfaces.face import _digest
from grpc_test.framework.interfaces.face import _stock_service
from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class TestCase(test_coverage.Coverage, unittest.TestCase):
  """A test of the Face layer of RPC Framework.
  Concrete subclasses must have an "implementation" attribute of type
  test_interfaces.Implementation and an "invoker_constructor" attribute of type
  _invocation.InvokerConstructor.
  """
  # NOTE: Python-2-era code throughout (dict.iteritems, __metaclass__).
  __metaclass__ = abc.ABCMeta
  NAME = 'FutureInvocationAsynchronousEventServiceTest'
  # Builds the stock-service digest and an invoker for future-style RPCs.
  def setUp(self):
    """See unittest.TestCase.setUp for full specification.
    Overriding implementations must call this implementation.
    """
    self._control = test_control.PauseFailControl()
    self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
    self._digest = _digest.digest(
        _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
        self._digest.methods, self._digest.event_method_implementations, None)
    self._invoker = self.invoker_constructor.construct_invoker(
        generic_stub, dynamic_stubs, self._digest.methods)
  # Destroys the implementation under test and drains the digest pool.
  def tearDown(self):
    """See unittest.TestCase.tearDown for full specification.
    Overriding implementations must call this implementation.
    """
    self.implementation.destantiate(self._memo)
    self._digest_pool.shutdown(wait=True)
  # --- Success-path tests, one per RPC cardinality. ---
  def testSuccessfulUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        response_future = self._invoker.future(group, method)(
            request, test_constants.LONG_TIMEOUT)
        response = response_future.result()
        test_messages.verify(request, response, self)
  def testSuccessfulUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        response_iterator = self._invoker.future(group, method)(
            request, test_constants.LONG_TIMEOUT)
        responses = list(response_iterator)
        test_messages.verify(request, responses, self)
  def testSuccessfulStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        request_iterator = _PauseableIterator(iter(requests))
        # Use of a paused iterator of requests allows us to test that control is
        # returned to calling code before the iterator yields any requests.
        with request_iterator.pause():
          response_future = self._invoker.future(group, method)(
              request_iterator, test_constants.LONG_TIMEOUT)
        response = response_future.result()
        test_messages.verify(requests, response, self)
  def testSuccessfulStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        request_iterator = _PauseableIterator(iter(requests))
        # Use of a paused iterator of requests allows us to test that control is
        # returned to calling code before the iterator yields any requests.
        with request_iterator.pause():
          response_iterator = self._invoker.future(group, method)(
              request_iterator, test_constants.LONG_TIMEOUT)
        responses = list(response_iterator)
        test_messages.verify(requests, responses, self)
  # --- Invocation-ordering tests. ---
  def testSequentialInvocations(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()
        first_response_future = self._invoker.future(group, method)(
            first_request, test_constants.LONG_TIMEOUT)
        first_response = first_response_future.result()
        test_messages.verify(first_request, first_response, self)
        second_response_future = self._invoker.future(group, method)(
            second_request, test_constants.LONG_TIMEOUT)
        second_response = second_response_future.result()
        test_messages.verify(second_request, second_response, self)
  def testParallelInvocations(self):
    # Both futures are started before either result is awaited.
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()
        first_response_future = self._invoker.future(group, method)(
            first_request, test_constants.LONG_TIMEOUT)
        second_response_future = self._invoker.future(group, method)(
            second_request, test_constants.LONG_TIMEOUT)
        first_response = first_response_future.result()
        second_response = second_response_future.result()
        test_messages.verify(first_request, first_response, self)
        test_messages.verify(second_request, second_response, self)
  @unittest.skip('TODO(nathaniel): implement.')
  def testWaitingForSomeButNotAllParallelInvocations(self):
    raise NotImplementedError()
  # --- Cancellation tests: cancel while the servicer is paused. ---
  def testCancelledUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              request, test_constants.LONG_TIMEOUT)
          cancel_method_return_value = response_future.cancel()
        self.assertFalse(cancel_method_return_value)
        self.assertTrue(response_future.cancelled())
  def testCancelledUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              request, test_constants.LONG_TIMEOUT)
          response_iterator.cancel()
        with self.assertRaises(face.CancellationError):
          next(response_iterator)
  def testCancelledStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              iter(requests), test_constants.LONG_TIMEOUT)
          cancel_method_return_value = response_future.cancel()
        self.assertFalse(cancel_method_return_value)
        self.assertTrue(response_future.cancelled())
  def testCancelledStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              iter(requests), test_constants.LONG_TIMEOUT)
          response_iterator.cancel()
        with self.assertRaises(face.CancellationError):
          next(response_iterator)
  # --- Expiration tests: SHORT_TIMEOUT elapses while the servicer is paused. ---
  def testExpiredUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        with self._control.pause():
          response_future = self._invoker.future(
              group, method)(request, test_constants.SHORT_TIMEOUT)
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()
  def testExpiredUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              request, test_constants.SHORT_TIMEOUT)
          with self.assertRaises(face.ExpirationError):
            list(response_iterator)
  def testExpiredStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        with self._control.pause():
          response_future = self._invoker.future(group, method)(
              iter(requests), test_constants.SHORT_TIMEOUT)
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()
  def testExpiredStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        with self._control.pause():
          response_iterator = self._invoker.future(group, method)(
              iter(requests), test_constants.SHORT_TIMEOUT)
          with self.assertRaises(face.ExpirationError):
            list(response_iterator)
  # --- Servicer-failure tests: the failure surfaces as expiration. ---
  def testFailedUnaryRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        with self._control.fail():
          response_future = self._invoker.future(group, method)(
              request, test_constants.SHORT_TIMEOUT)
          # Because the servicer fails outside of the thread from which the
          # servicer-side runtime called into it its failure is
          # indistinguishable from simply not having called its
          # response_callback before the expiration of the RPC.
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()
  def testFailedUnaryRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
        with self._control.fail(), self.assertRaises(face.ExpirationError):
          response_iterator = self._invoker.future(group, method)(
              request, test_constants.SHORT_TIMEOUT)
          list(response_iterator)
  def testFailedStreamRequestUnaryResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        with self._control.fail():
          response_future = self._invoker.future(group, method)(
              iter(requests), test_constants.SHORT_TIMEOUT)
          # Because the servicer fails outside of the thread from which the
          # servicer-side runtime called into it its failure is
          # indistinguishable from simply not having called its
          # response_callback before the expiration of the RPC.
          self.assertIsInstance(
              response_future.exception(), face.ExpirationError)
          with self.assertRaises(face.ExpirationError):
            response_future.result()
  def testFailedStreamRequestStreamResponse(self):
    for (group, method), test_messages_sequence in (
        self._digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        # Because the servicer fails outside of the thread from which the
        # servicer-side runtime called into it its failure is indistinguishable
        # from simply not having called its response_consumer before the
        # expiration of the RPC.
        with self._control.fail(), self.assertRaises(face.ExpirationError):
          response_iterator = self._invoker.future(group, method)(
              iter(requests), test_constants.SHORT_TIMEOUT)
          list(response_iterator)
| |
#! /usr/bin/env python
# $Header$
'''XML Schema support
'''
from ZSI import _copyright, _seqtypes, _find_type, _get_element_nsuri_name, EvaluateException
from ZSI.wstools.Namespaces import SCHEMA, SOAP
from ZSI.wstools.Utility import SplitQName
def _get_type_definition(namespaceURI, name, **kw):
    '''Look up a registered global type definition by (namespaceURI, name).'''
    return SchemaInstanceType.getTypeDefinition(namespaceURI, name, **kw)
def _get_global_element_declaration(namespaceURI, name, **kw):
    '''Look up a registered global element declaration by (namespaceURI, name).'''
    return SchemaInstanceType.getElementDeclaration(namespaceURI, name, **kw)
def _get_substitute_element(head, elt, ps):
    '''Return the GED typecode for elt if it is a member of head's
    substitutionGroup, else None.
    head -- ElementDeclaration typecode,
    elt -- the DOM element being parsed
    ps -- ParsedSoap Instance
    '''
    if isinstance(head, ElementDeclaration):
        return ElementDeclaration.getSubstitutionElement(head, elt, ps)
    return None
def _has_type_definition(namespaceURI, name):
    '''Return True if a global type definition is registered for this key.'''
    return SchemaInstanceType.getTypeDefinition(namespaceURI, name) is not None
def _is_substitute_element(head, sub):
    '''Return True if head and sub are both GEDs and sub declares
    head as its substitutionGroup.
    head -- Typecode instance
    sub -- Typecode instance
    '''
    if not isinstance(head, ElementDeclaration) or not isinstance(sub, ElementDeclaration):
        return False
    try:
        group = sub.substitutionGroup
    except (AttributeError, TypeError):
        # sub declares no substitutionGroup, so it cannot substitute head.
        return False
    ged = GED(*group)
    # TODO: better way of representing element references. Wrap them with
    # facets, and dereference when needed and delegate to..
    # BUG FIX: removed a stray debug `print` of the name comparison that
    # wrote to stdout on every call.
    # NOTE(review): `head is ged` short-circuits to False here -- confirm
    # identity with the registered GED is really meant to be rejected.
    if head is ged or not (head.nspname == ged.nspname and head.pname == ged.pname):
        return False
    return True
#
# functions for retrieving schema items from
# the global schema instance.
#
# Short aliases used throughout generated typecode modules.
GED = _get_global_element_declaration
GTD = _get_type_definition
def WrapImmutable(pyobj, what):
    '''Wrap immutable instance so a typecode can be
    set, making it self-describing ie. serializable.
    pyobj -- instance of a builtin (immutable) type
    what -- typecode describing the data
    '''
    return _GetPyobjWrapper.WrapImmutable(pyobj, what)
def RegisterBuiltin(arg):
    '''Add a builtin to be registered, and register it
    with the Any typecode.
    arg -- builtin type to wrap (eg. int, str)
    '''
    _GetPyobjWrapper.RegisterBuiltin(arg)
    _GetPyobjWrapper.RegisterAnyElement()
def RegisterAnyElement():
    '''register all Wrapper classes with the Any typecode.
    This allows instances returned by Any to be self-describing.
    ie. serializable.  AnyElement falls back on Any to parse
    anything it doesn't understand.
    '''
    return _GetPyobjWrapper.RegisterAnyElement()
class SchemaInstanceType(type):
    '''Register all types/elements; when an already defined class is hit,
    don't create a new one -- return the existing reference.  Thus import
    order determines which class is loaded.
    class variables:
        types -- dict of typecode class definitions
            representing global type definitions.
        elements -- dict of typecode classes representing
            global element declarations.
        element_typecode_cache -- dict of typecode instances
            representing global element declarations.
    '''
    types = {}
    elements = {}
    element_typecode_cache = {}
    #substitution_registry = {}
    def __new__(cls, classname, bases, classdict):
        '''If classdict has literal and schema, register the class as an
        element declaration; if it has type, register it as a type
        definition.  Re-registration returns the first class seen.
        '''
        # The three marker base classes themselves are created normally.
        if classname in ['ElementDeclaration', 'TypeDefinition', 'LocalElementDeclaration',]:
            return type.__new__(cls, classname, bases, classdict)
        if ElementDeclaration in bases:
            # `'x' not in d` replaces py2-only has_key (valid on py2 and py3);
            # raise-as-call replaces the py2-only `raise X, msg` statement.
            if 'schema' not in classdict or 'literal' not in classdict:
                raise AttributeError('ElementDeclaration must define schema and literal attributes')
            key = (classdict['schema'], classdict['literal'])
            if key in SchemaInstanceType.elements:
                return SchemaInstanceType.elements[key]
            # create global element declaration
            ged = SchemaInstanceType.elements[key] = type.__new__(cls, classname, bases, classdict)
            # TODO: Maybe I want access to all registrants??
            #  (a substitution_registry keyed by substitutionGroup was
            #   sketched here; see the commented class variable above.)
            return ged
        if TypeDefinition in bases:
            # BUG FIX: the original tested `classdict.has_key('type') is None`,
            # which is always False (has_key returns a bool), so a missing
            # 'type' attribute slipped past this guard and raised KeyError on
            # the lookup below instead of the intended AttributeError.
            if 'type' not in classdict:
                raise AttributeError('TypeDefinition must define type attribute')
            key = classdict['type']
            if key not in SchemaInstanceType.types:
                SchemaInstanceType.types[key] = type.__new__(cls, classname, bases, classdict)
            return SchemaInstanceType.types[key]
        if LocalElementDeclaration in bases:
            # Locals are created normally and never cached globally.
            return type.__new__(cls, classname, bases, classdict)
        raise TypeError('SchemaInstanceType must be an ElementDeclaration or TypeDefinition ')
    def getTypeDefinition(cls, namespaceURI, name, lazy=False):
        '''Grab a type definition; returns a typecode class definition
        because the facets (name, minOccurs, maxOccurs) must be provided.
        Parameters:
            namespaceURI -- target namespace of the type
            name -- NCName of the type
            lazy -- wrap the class in a _Mirage for deferred instantiation
        '''
        klass = cls.types.get((namespaceURI, name), None)
        if lazy and klass is not None:
            return _Mirage(klass)
        return klass
    getTypeDefinition = classmethod(getTypeDefinition)
    def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
        '''Grab an element declaration; returns a typecode instance
        representation or a typecode class definition.  An element
        reference has its own facets, and is local, so it will not be
        cached.
        Parameters:
            namespaceURI -- target namespace of the element
            name -- NCName of the element
            isref -- if element reference, return class definition.
            lazy -- (refs only) wrap the class in a _Mirage
        '''
        key = (namespaceURI, name)
        if isref:
            klass = cls.elements.get(key, None)
            if klass is not None and lazy is True:
                return _Mirage(klass)
            return klass
        typecode = cls.element_typecode_cache.get(key, None)
        if typecode is None:
            tcls = cls.elements.get(key, None)
            if tcls is not None:
                # Instantiate once and cache; mark untyped by default.
                typecode = cls.element_typecode_cache[key] = tcls()
                typecode.typed = False
        return typecode
    getElementDeclaration = classmethod(getElementDeclaration)
class ElementDeclaration:
    '''Typecodes subclass to represent a Global Element Declaration by
    setting class variables schema and literal.
        schema = namespaceURI
        literal = NCName
        substitutionGroup -- GED reference of form, (namespaceURI,NCName)
    '''
    # Python 2 metaclass hook: subclasses register globally on creation.
    __metaclass__ = SchemaInstanceType
    def checkSubstitute(self, typecode):
        '''If this is True, allow typecode to be substituted
        for "self" typecode.
        '''
        if not isinstance(typecode, ElementDeclaration):
            return False
        try:
            nsuri,ncname = typecode.substitutionGroup
        except (AttributeError, TypeError):
            # typecode declares no substitutionGroup: not a substitute.
            return False
        if (nsuri,ncname) != (self.schema,self.literal):
            # allow slop with the empty namespace
            if not nsuri and not self.schema and ncname == self.literal:
                return True
            return False
        # NOTE(review): this looks up this head's own registered GED and
        # requires identity with `typecode` -- confirm the intent, since a
        # substitute is usually a *different* GED from the head.
        sub = GED(self.schema, self.literal)
        if sub is None or sub is not typecode:
            return False
        return True
    def getSubstitutionElement(self, elt, ps):
        '''if elt matches a member of the head substitutionGroup, return
        the GED typecode representation of the member.
        head -- ElementDeclaration typecode,
        elt -- the DOM element being parsed
        ps -- ParsedSoap instance
        '''
        nsuri,ncname = _get_element_nsuri_name(elt)
        typecode = GED(nsuri,ncname)
        if typecode is None:
            return
        try:
            nsuri,ncname = typecode.substitutionGroup
        except (AttributeError, TypeError):
            return
        # Member's substitutionGroup must name this head; two empty
        # namespaces are treated as equal.
        if (ncname == self.pname) and (nsuri == self.nspname or
            (not nsuri and not self.nspname)):
            return typecode
        return
class LocalElementDeclaration:
    '''Typecodes subclass to represent a Local Element Declaration.
    '''
    # Python 2 metaclass hook; locals are created normally (never cached).
    __metaclass__ = SchemaInstanceType
class TypeDefinition:
    '''Typecodes subclass to represent a Global Type Definition by
    setting class variable type.
        type = (namespaceURI, NCName)
    '''
    # Python 2 metaclass hook: subclasses register globally on creation.
    __metaclass__ = SchemaInstanceType
    def getSubstituteType(self, elt, ps):
        '''if xsi:type does not match the instance type attr,
        check to see if it is a derived type substitution.
        DONT Return the element's type.
        Parameters:
            elt -- the DOM element being parsed
            ps -- the ParsedSoap object.
        Raises:
            EvaluateException -- declared or substitute type not registered.
            TypeError -- substitute type is not derived from declared type.
        '''
        pyclass = SchemaInstanceType.getTypeDefinition(*self.type)
        if pyclass is None:
            raise EvaluateException(
                'No Type registed for xsi:type=(%s, %s)' %
                (self.type[0], self.type[1]), ps.Backtrace(elt))
        typeName = _find_type(elt)
        prefix,typeName = SplitQName(typeName)
        uri = ps.GetElementNSdict(elt).get(prefix)
        subclass = SchemaInstanceType.getTypeDefinition(uri, typeName)
        if subclass is None:
            raise EvaluateException(
                'No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)' %
                (uri, typeName, self.type[0], self.type[1]), ps.Backtrace(elt))
        # BUG FIX: the original condition repeated `not issubclass(...)` and
        # instantiated `subclass(None)` as part of the test, so a non-derived
        # substitute whose trial instance was falsy slipped through without
        # raising.  The derivation check alone is what the error describes.
        if not issubclass(subclass, pyclass):
            raise TypeError(
                'Substitute Type (%s, %s) is not derived from %s' %
                (self.type[0], self.type[1], pyclass), ps.Backtrace(elt))
        return subclass((self.nspname, self.pname))
class _Mirage:
    '''Used with SchemaInstanceType for lazy evaluation, eval during serialize or
    parse as needed.  Mirage is callable, TypeCodes are not.  When called it returns the
    typecode.  Tightly coupled with generated code.
    NOTE: **Must Use ClassType** for intended MRO of __call__ since setting it in
    an instance attribute rather than a class attribute (will not work for object).
    '''
    def __init__(self, klass):
        # klass -- the real typecode class this mirage stands in for.
        self.klass = klass
        self.__reveal = False
        self.__cache = None
        # GEDs carry their own schema/literal facets, so they use the
        # element-flavoured hider instead of the type-flavoured default.
        if issubclass(klass, ElementDeclaration):
            self.__call__ = self._hide_element
    def __str__(self):
        msg = "<Mirage id=%s, Local Element %s>"
        if issubclass(self.klass, ElementDeclaration):
            msg = "<Mirage id=%s, GED %s>"
        return msg %(id(self), self.klass)
    def _hide_type(self, pname, aname, minOccurs=0, maxOccurs=1, nillable=False,
        **kw):
        # First call records the facets and swaps __call__ so the next call
        # reveals (instantiates) the real typecode.
        self.__call__ = self._reveal_type
        self.__reveal = True
        # store all attributes, make some visable for pyclass_type
        self.__kw = kw
        self.minOccurs,self.maxOccurs,self.nillable = minOccurs,maxOccurs,nillable
        self.nspname,self.pname,self.aname = None,pname,aname
        # pname may be a (namespaceURI, NCName) pair; split it if so.
        if type(self.pname) in (tuple,list):
            self.nspname,self.pname = pname
        return self
    def _hide_element(self, minOccurs=0, maxOccurs=1, nillable=False, **kw):
        # Element flavour: name facets come from the GED class itself.
        self.__call__ = self._reveal_element
        self.__reveal = True
        # store all attributes, make some visable for pyclass_type
        self.__kw = kw
        self.nspname = self.klass.schema
        self.pname = self.klass.literal
        #TODO: Fix hack
        #self.aname = '_%s' %self.pname
        self.minOccurs,self.maxOccurs,self.nillable = minOccurs,maxOccurs,nillable
        return self
    def _reveal_type(self):
        # Instantiate the real typecode once, with the stored facets.
        if self.__cache is None:
            pname = self.pname
            if self.nspname != None:
                pname = (self.nspname,self.pname)
            self.__cache = self.klass(pname=pname,
                aname=self.aname, minOccurs=self.minOccurs,
                maxOccurs=self.maxOccurs, nillable=self.nillable,
                **self.__kw)
        return self.__cache
    def _reveal_element(self):
        if self.__cache is None:
            self.__cache = self.klass(minOccurs=self.minOccurs,
                maxOccurs=self.maxOccurs, nillable=self.nillable,
                **self.__kw)
        return self.__cache
    # Default entry point; replaced per-instance as the mirage is used.
    __call__ = _hide_type
class _GetPyobjWrapper:
'''Get a python object that wraps data and typecode. Used by
<any> parse routine, so that typecode information discovered
during parsing is retained in the pyobj representation
and thus can be serialized.
'''
types_dict = dict()
def RegisterBuiltin(cls, arg):
'''register a builtin, create a new wrapper.
'''
if arg in cls.types_dict:
raise RuntimeError, '%s already registered' %arg
class _Wrapper(arg):
'Wrapper for builtin %s\n%s' %(arg, cls.__doc__)
_Wrapper.__name__ = '_%sWrapper' %arg.__name__
cls.types_dict[arg] = _Wrapper
RegisterBuiltin = classmethod(RegisterBuiltin)
def RegisterAnyElement(cls):
'''If find registered TypeCode instance, add Wrapper class
to TypeCode class serialmap and Re-RegisterType. Provides
Any serialzation of any instances of the Wrapper.
'''
for k,v in cls.types_dict.items():
what = Any.serialmap.get(k)
if what is None: continue
if v in what.__class__.seriallist: continue
what.__class__.seriallist.append(v)
RegisterType(what.__class__, clobber=1, **what.__dict__)
RegisterAnyElement = classmethod(RegisterAnyElement)
def WrapImmutable(cls, pyobj, what):
'''return a wrapper for pyobj, with typecode attribute set.
Parameters:
pyobj -- instance of builtin type (immutable)
what -- typecode describing the data
'''
d = cls.types_dict
if type(pyobj) is bool:
pyclass = d[int]
elif d.has_key(type(pyobj)) is True:
pyclass = d[type(pyobj)]
else:
raise TypeError,\
'Expecting a built-in type in %s (got %s).' %(
d.keys(),type(pyobj))
newobj = pyclass(pyobj)
newobj.typecode = what
return newobj
WrapImmutable = classmethod(WrapImmutable)
# NOTE(review): imported at module bottom, presumably to avoid a circular
# import with TC (which needs the classes above) -- confirm before moving.
from TC import Any, RegisterType
if __name__ == '__main__': print _copyright
| |
import datetime
from posixpath import join as pjoin
import pandas as pd
import pytest
import pytz
import ibis
import ibis.common.exceptions as com
import ibis.config as config
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis.tests.util import assert_equal
# Skip this whole module when the Impala backend or thrift is unavailable.
pytest.importorskip("impala")
thrift = pytest.importorskip("thrift")
@pytest.fixture
def db(con, test_data_db):
    """Database object wrapping the shared test-data database."""
    return con.database(test_data_db)
@pytest.mark.xfail(
    raises=AssertionError,
    reason='Temporary not setting default backends. #2676',
)
def test_execute_exprs_default_backend(con_no_hdfs):
    """A bare literal executes through the configured default backend."""
    expr = ibis.literal(2)
    expected = 2
    assert ibis.options.default_backend is not None
    result = expr.execute()
    assert result == expected
def test_cursor_garbage_collection(con):
    """Fetch from cursors that are never explicitly released; relies on
    garbage collection returning them to the pool without errors."""
    for _ in range(5):
        con.raw_sql('select 1', True).fetchall()
        con.raw_sql('select 1', True).fetchone()
def test_raise_ibis_error_no_hdfs(con_no_hdfs):
    """Accessing .hdfs on a connection created without HDFS raises IbisError."""
    # GH299
    with pytest.raises(com.IbisError):
        con_no_hdfs.hdfs
def test_get_table_ref(db):
    """Database tables are reachable via attribute and item access."""
    assert isinstance(db.functional_alltypes, ir.TableExpr)
    assert isinstance(db['functional_alltypes'], ir.TableExpr)
def test_run_sql(con, test_data_db):
    """con.sql wraps a raw SELECT as a TableExpr with the right schema."""
    table = con.sql(f"SELECT li.* FROM {test_data_db}.tpch_lineitem li")
    li = con.table('tpch_lineitem')
    assert isinstance(table, ir.TableExpr)
    assert_equal(table.schema(), li.schema())
    expr = table.limit(10)
    result = expr.execute()
    assert len(result) == 10
def test_sql_with_limit(con):
    """A LIMIT inside the SQL text does not break schema inference."""
    table = con.sql("SELECT * FROM functional_alltypes LIMIT 10")
    ex_schema = con.get_schema('functional_alltypes')
    assert_equal(table.schema(), ex_schema)
def test_raw_sql(con):
    """raw_sql(results=True) yields a cursor that must be released."""
    query = 'SELECT * from functional_alltypes limit 10'
    cur = con.raw_sql(query, results=True)
    rows = cur.fetchall()
    cur.release()
    assert len(rows) == 10
def test_explain(con):
    """explain() renders a query plan as a string."""
    t = con.table('functional_alltypes')
    expr = t.group_by('string_col').size()
    result = con.explain(expr)
    assert isinstance(result, str)
def test_get_schema(con, test_data_db):
    """get_schema matches the schema of the corresponding table expression."""
    t = con.table('tpch_lineitem')
    schema = con.get_schema('tpch_lineitem', database=test_data_db)
    assert_equal(t.schema(), schema)
def test_result_as_dataframe(con, alltypes):
    """Executing a table expression yields a DataFrame with matching columns."""
    expr = alltypes.limit(10)
    ex_names = expr.schema().names
    result = con.execute(expr)
    assert isinstance(result, pd.DataFrame)
    assert list(result.columns) == ex_names
    assert len(result) == 10
def test_adapt_scalar_array_results(con, alltypes):
    """Scalar reductions execute to floats; grouped columns to pandas Series."""
    table = alltypes
    expr = table.double_col.sum()
    result = con.execute(expr)
    assert isinstance(result, float)
    with config.option_context('interactive', True):
        result2 = expr.execute()
        assert isinstance(result2, float)
    expr = (
        table.group_by('string_col')
        .aggregate([table.count().name('count')])
        .string_col
    )
    result = con.execute(expr)
    assert isinstance(result, pd.Series)
def test_interactive_repr_call_failure(con):
    """repr() of a windowed aggregate in interactive mode executes cleanly."""
    t = con.table('tpch_lineitem').limit(100000)
    t = t[t, t.l_receiptdate.cast('timestamp').name('date')]
    keys = [t.date.year().name('year'), 'l_linestatus']
    filt = t.l_linestatus.isin(['F'])
    expr = (
        t[filt]
        .group_by(keys)
        .aggregate(t.l_extendedprice.mean().name('avg_px'))
    )
    w2 = ibis.trailing_window(
        9, group_by=expr.l_linestatus, order_by=expr.year
    )
    metric = expr['avg_px'].mean().over(w2)
    enriched = expr[expr, metric]
    with config.option_context('interactive', True):
        repr(enriched)
def test_array_default_limit(con, alltypes):
    """The limit= argument to execute() caps column results."""
    t = alltypes
    result = con.execute(t.float_col, limit=100)
    assert len(result) == 100
def test_limit_overrides_expr(con, alltypes):
    """An explicit execute() limit overrides the expression's own limit."""
    # #418
    t = alltypes
    result = con.execute(t.limit(10), limit=5)
    assert len(result) == 5
def test_limit_equals_none_no_limit(alltypes):
    """limit=None disables the configured sql.default_limit."""
    t = alltypes
    with config.option_context('sql.default_limit', 10):
        result = t.execute(limit=None)
        assert len(result) > 10
def test_verbose_log_queries(con, test_data_db):
    """With verbose mode on, issued queries are passed to verbose_log."""
    queries = []
    with config.option_context('verbose', True):
        with config.option_context('verbose_log', queries.append):
            con.table('tpch_orders', database=test_data_db)
    # we can't make assertions about the length of queries, since the Python GC
    # could've collected a temporary pandas table any time between construction
    # of `queries` and the assertion
    expected = f'DESCRIBE {test_data_db}.`tpch_orders`'
    assert expected in queries
def test_sql_query_limits(con, test_data_db):
    """Interaction of sql.default_limit with explicit limits and scalars."""
    table = con.table('tpch_nation', database=test_data_db)
    with config.option_context('sql.default_limit', 100000):
        # table has 25 rows
        assert len(table.execute()) == 25
        # comply with limit arg for TableExpr
        assert len(table.execute(limit=10)) == 10
        # state hasn't changed
        assert len(table.execute()) == 25
        # non-TableExpr ignores default_limit
        assert table.count().execute() == 25
        # non-TableExpr doesn't observe limit arg
        assert table.count().execute(limit=10) == 25
    with config.option_context('sql.default_limit', 20):
        # TableExpr observes default limit setting
        assert len(table.execute()) == 20
        # explicit limit= overrides default
        assert len(table.execute(limit=15)) == 15
        assert len(table.execute(limit=23)) == 23
        # non-TableExpr ignores default_limit
        assert table.count().execute() == 25
        # non-TableExpr doesn't observe limit arg
        assert table.count().execute(limit=10) == 25
    # eliminating default_limit doesn't break anything
    with config.option_context('sql.default_limit', None):
        assert len(table.execute()) == 25
        assert len(table.execute(limit=15)) == 15
        assert len(table.execute(limit=10000)) == 25
        assert table.count().execute() == 25
        assert table.count().execute(limit=10) == 25
def test_database_repr(db, test_data_db):
    """repr of a database object mentions the database name."""
    assert test_data_db in repr(db)
def test_database_default_current_database(con):
    """database() with no argument returns the current database."""
    db = con.database()
    assert db.name == con.current_database
def test_close_drops_temp_tables(con, test_data_dir):
    """Closing the connection drops temporary tables it created."""
    hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
    table = con.parquet_file(hdfs_path)
    name = table.op().name
    assert len(con.list_tables(like=name))
    con.close()
    assert not len(con.list_tables(like=name))
def test_set_compression_codec(con):
    """set_compression_codec round-trips through session options."""
    old_opts = con.get_options()
    assert old_opts['COMPRESSION_CODEC'].upper() in ('NONE', '')
    con.set_compression_codec('snappy')
    opts = con.get_options()
    assert opts['COMPRESSION_CODEC'].upper() == 'SNAPPY'
    con.set_compression_codec(None)
    opts = con.get_options()
    assert opts['COMPRESSION_CODEC'].upper() in ('NONE', '')
def test_disable_codegen(con):
    """disable_codegen sets DISABLE_CODEGEN for every pooled cursor."""
    con.disable_codegen(False)
    opts = con.get_options()
    assert opts['DISABLE_CODEGEN'] == '0'
    con.disable_codegen()
    opts = con.get_options()
    assert opts['DISABLE_CODEGEN'] == '1'
    impala_con = con.con
    cur1 = impala_con.execute('SET')
    cur2 = impala_con.execute('SET')
    opts1 = dict(row[:2] for row in cur1.fetchall())
    cur1.release()
    opts2 = dict(row[:2] for row in cur2.fetchall())
    cur2.release()
    assert opts1['DISABLE_CODEGEN'] == '1'
    assert opts2['DISABLE_CODEGEN'] == '1'
def test_attr_name_conflict(
    con, tmp_db, temp_parquet_table, temp_parquet_table2
):
    """Joins work even when join keys collide with TableExpr attribute names."""
    left = temp_parquet_table
    right = temp_parquet_table2
    assert left.join(right, ['id']) is not None
    assert left.join(right, ['id', 'name']) is not None
    assert left.join(right, ['id', 'files']) is not None
@pytest.fixture
def con2(env):
    """Fresh connection whose pool state is independent of the shared `con`."""
    con = ibis.impala.connect(
        host=env.impala_host,
        port=env.impala_port,
        auth_mechanism=env.auth_mechanism,
    )
    if not env.use_codegen:
        con.disable_codegen()
        assert con.get_options()['DISABLE_CODEGEN'] == '1'
    return con
def test_rerelease_cursor(con2):
    """Released cursors are handed back out by subsequent raw_sql calls."""
    # we use a separate `con2` fixture here because any connection pool
    # manipulation we want to happen independently of `con`
    with con2.raw_sql('select 1', True) as cur1:
        pass
    # NOTE(review): the explicit release() after the `with` block looks
    # redundant if the context manager already releases the cursor --
    # confirm it is intentional (possibly exercising double-release safety).
    cur1.release()
    with con2.raw_sql('select 1', True) as cur2:
        pass
    cur2.release()
    with con2.raw_sql('select 1', True) as cur3:
        pass
    assert cur1 == cur2
    assert cur2 == cur3
def test_day_of_week(con):
    """day_of_week index() and full_name() for a known date (2017-01-01)."""
    date_var = ibis.literal(datetime.date(2017, 1, 1), type=dt.date)
    expr_index = date_var.day_of_week.index()
    result = con.execute(expr_index)
    assert result == 6
    expr_name = date_var.day_of_week.full_name()
    result = con.execute(expr_name)
    assert result == 'Sunday'
def test_datetime_to_int_cast(con):
    """Casting a timestamp literal to int64 yields epoch microseconds."""
    timestamp = pytz.utc.localize(
        datetime.datetime(2021, 9, 12, 14, 45, 33, 0)
    )
    d = ibis.literal(timestamp)
    result = con.execute(d.cast('int64'))
    assert result == pd.Timestamp(timestamp).value // 1000
def test_set_option_with_dot(con):
    """Option values containing dots survive a set_options round-trip."""
    con.set_options({'request_pool': 'baz.quux'})
    result = dict(row[:2] for row in con.raw_sql('set', True).fetchall())
    assert result['REQUEST_POOL'] == 'baz.quux'
def test_list_databases(con):
    """list_databases returns a non-empty collection."""
    assert con.list_databases()
def test_list_tables(con, test_data_db):
    """list_tables supports database scoping and glob-style filtering."""
    assert con.list_tables(database=test_data_db)
    assert con.list_tables(like='*nat*', database=test_data_db)
def test_set_database(con_no_db, test_data_db):
    """Tables resolve only after set_database on a db-less connection."""
    # create new connection with no default db set
    # TODO: set test_data_db to None
    with pytest.raises(Exception):
        con_no_db.table('functional_alltypes')
    con_no_db.set_database(test_data_db)
    assert con_no_db.table('functional_alltypes') is not None
def test_tables_robust_to_set_database(con, test_data_db, temp_database):
    """Existing table expressions keep working after set_database."""
    table = con.table('functional_alltypes', database=test_data_db)
    con.set_database(temp_database)
    assert con.current_database == temp_database
    # it still works!
    n = 10
    df = table.limit(n).execute()
    assert len(df) == n
def test_connection_pool_size(hdfs, env, test_data_db):
    """A fresh client's pool holds at most the bootstrap cursor."""
    client = ibis.impala.connect(
        port=env.impala_port,
        hdfs_client=hdfs,
        host=env.impala_host,
        database=test_data_db,
    )
    # the client cursor may or may not be GC'd, so the connection
    # pool will contain either zero or one cursor
    assert len(client.con.connection_pool) in (0, 1)
def test_connection_pool_size_after_close(hdfs, env, test_data_db):
    """close() empties the connection pool."""
    client = ibis.impala.connect(
        port=env.impala_port,
        hdfs_client=hdfs,
        host=env.impala_host,
        database=test_data_db,
    )
    client.close()
    assert not client.con.connection_pool
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 PAL Robotics SL.
# Released under the BSD License.
#
# Authors:
# * Siegfried-A. Gevatter
import curses
import math
import rospy
from geometry_msgs.msg import Twist
class Velocity(object):
    """Maps a command value in [0, 1] plus a discrete step to a velocity
    (usually m/s or rad/s), interpolating linearly between min and max."""
    def __init__(self, min_velocity, max_velocity, num_steps):
        assert min_velocity > 0 and max_velocity > 0 and num_steps > 0
        self._min = min_velocity
        self._max = max_velocity
        self._num_steps = num_steps
        if self._num_steps > 1:
            # Per-step increment so that step 1 -> min and the last step -> max.
            self._step_incr = (max_velocity - min_velocity) / (self._num_steps - 1)
        else:
            # If num_steps is one, we always use the minimum velocity.
            self._step_incr = 0
    def __call__(self, value, step):
        """Return the velocity for a value in [0, 1] at the given step;
        step 0 always yields zero velocity."""
        if step == 0:
            return 0
        assert 0 < step <= self._num_steps
        scale = self._min + self._step_incr * (step - 1)
        return value * scale
class TextWindow():
    """Thin curses wrapper: non-blocking key reads plus line-oriented
    status output split over a fixed number of rows."""
    _screen = None
    _window = None
    _num_lines = None
    def __init__(self, stdscr, lines=10):
        self._screen = stdscr
        # Non-blocking getch so the teleop loop never stalls on input.
        self._screen.nodelay(True)
        curses.curs_set(0)
        self._num_lines = lines
    def read_key(self):
        """Return the pending keycode, or None when no key is buffered."""
        keycode = self._screen.getch()
        return keycode if keycode != -1 else None
    def clear(self):
        self._screen.clear()
    def write_line(self, lineno, message):
        """Write message (possibly multi-line) starting at logical line lineno."""
        if lineno < 0 or lineno >= self._num_lines:
            # PORTABILITY FIX: was the Python-2-only `raise ValueError, msg`
            # statement; the call form is valid on both Python 2 and 3.
            raise ValueError('lineno out of bounds')
        height, width = self._screen.getmaxyx()
        # PORTABILITY FIX: `//` makes the integer division explicit -- the old
        # `/` relied on Python 2 semantics and would hand curses a float row
        # under Python 3.
        y = (height // self._num_lines) * lineno
        x = 10
        for text in message.split('\n'):
            text = text.ljust(width)
            self._screen.addstr(y, x, text)
            y += 1
    def refresh(self):
        self._screen.refresh()
    def beep(self):
        curses.flash()
class KeyTeleop():
    # Turbo-style keyboard teleop: arrow keys accumulate discrete speed
    # steps (bounded by ~turbo/steps), space stops, 'q' shuts down.
    _interface = None
    _linear = None
    _angular = None
    def __init__(self, interface):
        """interface -- a TextWindow used for key input and status output."""
        self._interface = interface
        self._pub_cmd = rospy.Publisher('key_vel', Twist)
        self._hz = rospy.get_param('~hz', 10)
        # Number of discrete speed steps per axis.
        self._num_steps = rospy.get_param('~turbo/steps', 4)
        forward_min = rospy.get_param('~turbo/linear_forward_min', 0.5)
        forward_max = rospy.get_param('~turbo/linear_forward_max', 1.0)
        self._forward = Velocity(forward_min, forward_max, self._num_steps)
        backward_min = rospy.get_param('~turbo/linear_backward_min', 0.25)
        backward_max = rospy.get_param('~turbo/linear_backward_max', 0.5)
        self._backward = Velocity(backward_min, backward_max, self._num_steps)
        angular_min = rospy.get_param('~turbo/angular_min', 0.7)
        angular_max = rospy.get_param('~turbo/angular_max', 1.2)
        self._rotation = Velocity(angular_min, angular_max, self._num_steps)
    def run(self):
        """Main loop: read keys and publish the current Twist at ~hz."""
        self._linear = 0
        self._angular = 0
        rate = rospy.Rate(self._hz)
        while True:
            keycode = self._interface.read_key()
            if keycode:
                if self._key_pressed(keycode):
                    self._publish()
            else:
                # No key this cycle: keep republishing the last command.
                self._publish()
            rate.sleep()
    def _get_twist(self, linear, angular):
        """Convert discrete (linear, angular) steps into a Twist message."""
        twist = Twist()
        if linear >= 0:
            twist.linear.x = self._forward(1.0, linear)
        else:
            twist.linear.x = self._backward(-1.0, -linear)
        twist.angular.z = self._rotation(math.copysign(1, angular), abs(angular))
        return twist
    def _key_pressed(self, keycode):
        """Handle one keycode; return True when the command/display changed."""
        # Maps keycode -> (linear step delta, angular step delta).
        movement_bindings = {
            curses.KEY_UP: ( 1, 0),
            curses.KEY_DOWN: (-1, 0),
            curses.KEY_LEFT: ( 0, 1),
            curses.KEY_RIGHT: ( 0, -1),
        }
        speed_bindings = {
            ord(' '): (0, 0),
        }
        if keycode in movement_bindings:
            acc = movement_bindings[keycode]
            ok = False
            if acc[0]:
                linear = self._linear + acc[0]
                if abs(linear) <= self._num_steps:
                    self._linear = linear
                    ok = True
            if acc[1]:
                angular = self._angular + acc[1]
                if abs(angular) <= self._num_steps:
                    self._angular = angular
                    ok = True
            if not ok:
                # Step limit reached on the requested axis: beep as feedback.
                self._interface.beep()
        elif keycode in speed_bindings:
            acc = speed_bindings[keycode]
            # Note: bounds aren't enforced here!
            if acc[0] is not None:
                self._linear = acc[0]
            if acc[1] is not None:
                self._angular = acc[1]
        elif keycode == ord('q'):
            rospy.signal_shutdown('Bye')
        else:
            return False
        return True
    def _publish(self):
        """Refresh the status display and publish the current Twist."""
        self._interface.clear()
        self._interface.write_line(2, 'Linear: %d, Angular: %d' % (self._linear, self._angular))
        self._interface.write_line(5, 'Use arrow keys to move, space to stop, q to exit.')
        self._interface.refresh()
        twist = self._get_twist(self._linear, self._angular)
        self._pub_cmd.publish(twist)
class SimpleKeyTeleop():
    # Rate-based teleop: an arrow key pressed within the last 0.4 s
    # commands a fixed velocity on its axis; 'q' quits.
    def __init__(self, interface):
        self._interface = interface
        self._pub_cmd = rospy.Publisher('key_vel', Twist)
        self._hz = rospy.get_param('~hz', 10)
        self._forward_rate = rospy.get_param('~forward_rate', 0.8)
        self._backward_rate = rospy.get_param('~backward_rate', 0.5)
        self._rotation_rate = rospy.get_param('~rotation_rate', 1.0)
        # keycode -> timestamp of the most recent press.
        self._last_pressed = {}
        self._angular = 0
        self._linear = 0
    # Maps keycode -> (linear direction, angular direction).
    movement_bindings = {
        curses.KEY_UP: ( 1, 0),
        curses.KEY_DOWN: (-1, 0),
        curses.KEY_LEFT: ( 0, 1),
        curses.KEY_RIGHT: ( 0, -1),
    }
    def run(self):
        """Main loop: drain pending keys, recompute velocity, publish."""
        rate = rospy.Rate(self._hz)
        self._running = True
        while self._running:
            while True:
                keycode = self._interface.read_key()
                if keycode is None:
                    break
                self._key_pressed(keycode)
            self._set_velocity()
            self._publish()
            rate.sleep()
    def _get_twist(self, linear, angular):
        """Pack linear/angular velocities into a Twist message."""
        twist = Twist()
        twist.linear.x = linear
        twist.angular.z = angular
        return twist
    def _set_velocity(self):
        """Derive velocities from keys pressed within the last 0.4 seconds."""
        now = rospy.get_time()
        keys = []
        for a in self._last_pressed:
            if now - self._last_pressed[a] < 0.4:
                keys.append(a)
        linear = 0.0
        angular = 0.0
        for k in keys:
            l, a = self.movement_bindings[k]
            linear += l
            angular += a
        # Forward and backward motion use different rate scalings.
        if linear > 0:
            linear = linear * self._forward_rate
        else:
            linear = linear * self._backward_rate
        angular = angular * self._rotation_rate
        self._angular = angular
        self._linear = linear
    def _key_pressed(self, keycode):
        """Record movement-key presses; 'q' stops the loop and ROS."""
        if keycode == ord('q'):
            self._running = False
            rospy.signal_shutdown('Bye')
        elif keycode in self.movement_bindings:
            self._last_pressed[keycode] = rospy.get_time()
    def _publish(self):
        """Refresh the status display and publish the current Twist."""
        self._interface.clear()
        self._interface.write_line(2, 'Linear: %f, Angular: %f' % (self._linear, self._angular))
        self._interface.write_line(5, 'Use arrow keys to move, q to exit.')
        self._interface.refresh()
        twist = self._get_twist(self._linear, self._angular)
        self._pub_cmd.publish(twist)
def main(stdscr):
    """curses.wrapper entry point: start the ROS node and run the teleop."""
    rospy.init_node('key_teleop')
    app = SimpleKeyTeleop(TextWindow(stdscr))
    app.run()
if __name__ == '__main__':
    try:
        curses.wrapper(main)
    except rospy.ROSInterruptException:
        # Expected on ROS shutdown (e.g. Ctrl-C); exit quietly.
        pass
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
"""MobileNet v3 models for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# TODO(scottzhu): Change this to the GCS path.
# Base URL from which pretrained ImageNet weight files are downloaded.
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
                    'keras-applications/mobilenet_v3/')
# Mapping: weight-file key -> (md5 hash of top weights, md5 hash of
# no-top weights); consumed by data_utils.get_file for integrity checks.
WEIGHTS_HASHES = {
    'large_224_0.75_float': ('765b44a33ad4005b3ac83185abf1d0eb',
                             'e7b4d1071996dd51a2c2ca2424570e20'),
    'large_224_1.0_float': ('59e551e166be033d707958cf9e29a6a7',
                            '037116398e07f018c0005ffcb0406831'),
    'large_minimalistic_224_1.0_float': ('675e7b876c45c57e9e63e6d90a36599c',
                                         'a2c33aed672524d1d0b4431808177695'),
    'small_224_0.75_float': ('cb65d4e5be93758266aa0a7f2c6708b7',
                             '4d2fe46f1c1f38057392514b0df1d673'),
    'small_224_1.0_float': ('8768d4c2e7dee89b9d02b2d03d65d862',
                            'be7100780f875c06bcab93d76641aa26'),
    'small_minimalistic_224_1.0_float': ('99cd97fb2fcdad2bf028eb838de69e37',
                                         '20d4e357df3f7a6361f3a288857b1051'),
}
# Version-aware layer namespace (dispatches to the active Keras layer impls).
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [Searching for MobileNetV3](
https://arxiv.org/pdf/1905.02244.pdf) (ICCV 2019)
The following table describes the performance of MobileNets:
------------------------------------------------------------------------
MACs stands for Multiply Adds
|Classification Checkpoint|MACs(M)|Parameters(M)|Top1 Accuracy|Pixel1|CPU(ms)|
| [mobilenet_v3_large_1.0_224] | 217 | 5.4 | 75.6 | 51.2 |
| [mobilenet_v3_large_0.75_224] | 155 | 4.0 | 73.3 | 39.8 |
| [mobilenet_v3_large_minimalistic_1.0_224] | 209 | 3.9 | 72.3 | 44.1 |
| [mobilenet_v3_small_1.0_224] | 66 | 2.9 | 68.1 | 15.8 |
| [mobilenet_v3_small_0.75_224] | 44 | 2.4 | 65.4 | 12.8 |
| [mobilenet_v3_small_minimalistic_1.0_224] | 65 | 2.0 | 61.9 | 12.2 |
The weights for all 6 models are obtained and translated from the Tensorflow
checkpoints from TensorFlow checkpoints found [here]
(https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet/README.md).
Optionally loads weights pre-trained on ImageNet.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.mobilenet_v3.preprocess_input` for an example.
Arguments:
input_shape: Optional shape tuple, to be specified if you would
like to use a model with an input image resolution that is not
(224, 224, 3).
It should have exactly 3 inputs channels (224, 224, 3).
You can also omit this option if you would like
to infer input_shape from an input_tensor.
If you choose to include both input_tensor and input_shape then
input_shape will be used if they match, if the shapes
do not match then we will throw an error.
E.g. `(160, 160, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
minimalistic: In addition to large and small models this module also
contains so-called minimalistic models, these models have the same
per-layer dimensions characteristic as MobilenetV3 however, they don't
utilize any of the advanced blocks (squeeze-and-excite units, hard-swish,
and 5x5 convolutions). While these models are less efficient on CPU, they
are much more performant on GPU/DSP.
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: String, one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: String, optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Integer, optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
dropout_rate: fraction of the input units to drop on the last layer.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape or invalid alpha, rows when
weights='imagenet'
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
def MobileNetV3(stack_fn,
                last_point_ch,
                input_shape=None,
                alpha=1.0,
                model_type='large',
                minimalistic=False,
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                classes=1000,
                pooling=None,
                dropout_rate=0.2,
                classifier_activation='softmax'):
  """Instantiates a MobileNetV3 model.

  Shared builder used by `MobileNetV3Small` and `MobileNetV3Large`.

  Args:
    stack_fn: callable `(x, kernel, activation, se_ratio) -> x` that appends
      the variant-specific stack of inverted residual blocks to the stem.
    last_point_ch: channel count of the final pointwise conv, before
      width-multiplier scaling.
    input_shape: optional shape tuple; inferred from `input_tensor` or
      defaulted to (None, None, 3) when omitted.
    alpha: width multiplier applied to per-layer filter counts.
    model_type: 'large' or 'small'; used in the model name and the
      pretrained-weights lookup key.
    minimalistic: if True, the variant without squeeze-excite/hard-swish.
    include_top: whether to append the classification head.
    weights: one of `None`, 'imagenet', or a path to a weights file.
    input_tensor: optional Keras tensor to use as the model input.
    classes: number of output classes (only with `include_top=True`).
    pooling: optional 'avg'/'max' global pooling when `include_top=False`.
    dropout_rate: dropout fraction applied before the logits conv.
    classifier_activation: activation of the top layer, or None for logits.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: for an invalid `weights` argument, inconsistent
      `input_shape`/`input_tensor`, inputs smaller than 32x32, or an
      alpha/minimalistic combination without pretrained weights.
  """
  if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
                     'as true, `classes` should be 1000')

  # Determine proper input shape and default size.
  # If both input_shape and input_tensor are used, they should match
  if input_shape is not None and input_tensor is not None:
    try:
      is_input_t_tensor = backend.is_keras_tensor(input_tensor)
    except ValueError:
      try:
        is_input_t_tensor = backend.is_keras_tensor(
            layer_utils.get_source_inputs(input_tensor))
      except ValueError:
        raise ValueError('input_tensor: ', input_tensor,
                         'is not type input_tensor')
    if is_input_t_tensor:
      # Bug fix: `image_data_format` is a function and must be called.
      # Comparing the function object itself to the string was always
      # False, which silently skipped this consistency check for
      # channels_first data and ran the channels_last branch instead.
      if backend.image_data_format() == 'channels_first':
        if backend.int_shape(input_tensor)[1] != input_shape[1]:
          raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
                           input_tensor,
                           'do not meet the same shape requirements')
      else:
        if backend.int_shape(input_tensor)[2] != input_shape[1]:
          raise ValueError('input_shape: ', input_shape, 'and input_tensor: ',
                           input_tensor,
                           'do not meet the same shape requirements')
    else:
      raise ValueError('input_tensor specified: ', input_tensor,
                       'is not a keras tensor')

  # If input_shape is None, infer shape from input_tensor
  if input_shape is None and input_tensor is not None:
    try:
      backend.is_keras_tensor(input_tensor)
    except ValueError:
      raise ValueError('input_tensor: ', input_tensor, 'is type: ',
                       type(input_tensor), 'which is not a valid type')
    if backend.is_keras_tensor(input_tensor):
      if backend.image_data_format() == 'channels_first':
        rows = backend.int_shape(input_tensor)[2]
        cols = backend.int_shape(input_tensor)[3]
        input_shape = (3, cols, rows)
      else:
        rows = backend.int_shape(input_tensor)[1]
        cols = backend.int_shape(input_tensor)[2]
        input_shape = (cols, rows, 3)
  # If input_shape is None and input_tensor is None using standard shape
  if input_shape is None and input_tensor is None:
    input_shape = (None, None, 3)

  if backend.image_data_format() == 'channels_last':
    row_axis, col_axis = (0, 1)
  else:
    row_axis, col_axis = (1, 2)
  rows = input_shape[row_axis]
  cols = input_shape[col_axis]
  if rows and cols and (rows < 32 or cols < 32):
    raise ValueError('Input size must be at least 32x32; got `input_shape=' +
                     str(input_shape) + '`')
  if weights == 'imagenet':
    if (not minimalistic and alpha not in [0.75, 1.0]
        or minimalistic and alpha != 1.0):
      raise ValueError('If imagenet weights are being loaded, '
                       'alpha can be one of `0.75`, `1.0` for non minimalistic'
                       ' or `1.0` for minimalistic only.')

    if rows != cols or rows != 224:
      logging.warning('`input_shape` is undefined or non-square, '
                      'or `rows` is not 224.'
                      ' Weights for input shape (224, 224) will be'
                      ' loaded as the default.')

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

  # Minimalistic models skip the MobileNetV3-specific tricks (SE blocks,
  # hard-swish, 5x5 depthwise kernels) for better GPU/DSP performance.
  if minimalistic:
    kernel = 3
    activation = relu
    se_ratio = None
  else:
    kernel = 5
    activation = hard_swish
    se_ratio = 0.25

  # Stem: rescale to [0, 1], then a stride-2 3x3 conv.
  x = img_input
  x = layers.Rescaling(1. / 255.)(x)
  x = layers.Conv2D(
      16,
      kernel_size=3,
      strides=(2, 2),
      padding='same',
      use_bias=False,
      name='Conv')(x)
  x = layers.BatchNormalization(
      axis=channel_axis, epsilon=1e-3,
      momentum=0.999, name='Conv/BatchNorm')(x)
  x = activation(x)

  # Variant-specific stack of inverted residual blocks.
  x = stack_fn(x, kernel, activation, se_ratio)

  last_conv_ch = _depth(backend.int_shape(x)[channel_axis] * 6)

  # if the width multiplier is greater than 1 we
  # increase the number of output channels
  if alpha > 1.0:
    last_point_ch = _depth(last_point_ch * alpha)
  x = layers.Conv2D(
      last_conv_ch,
      kernel_size=1,
      padding='same',
      use_bias=False,
      name='Conv_1')(x)
  x = layers.BatchNormalization(
      axis=channel_axis, epsilon=1e-3,
      momentum=0.999, name='Conv_1/BatchNorm')(x)
  x = activation(x)
  x = layers.Conv2D(
      last_point_ch,
      kernel_size=1,
      padding='same',
      use_bias=True,
      name='Conv_2')(x)
  x = activation(x)

  if include_top:
    # Classification head: global pool -> dropout -> 1x1 conv as the
    # dense logits layer (matches the checkpoint layout) -> activation.
    x = layers.GlobalAveragePooling2D()(x)
    if channel_axis == 1:
      x = layers.Reshape((last_point_ch, 1, 1))(x)
    else:
      x = layers.Reshape((1, 1, last_point_ch))(x)
    if dropout_rate > 0:
      x = layers.Dropout(dropout_rate)(x)
    x = layers.Conv2D(classes, kernel_size=1, padding='same', name='Logits')(x)
    x = layers.Flatten()(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Activation(activation=classifier_activation,
                          name='Predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D(name='max_pool')(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = models.Model(inputs, x, name='MobilenetV3' + model_type)

  # Load weights.
  if weights == 'imagenet':
    model_name = '{}{}_224_{}_float'.format(
        model_type, '_minimalistic' if minimalistic else '', str(alpha))
    if include_top:
      file_name = 'weights_mobilenet_v3_' + model_name + '.h5'
      file_hash = WEIGHTS_HASHES[model_name][0]
    else:
      file_name = 'weights_mobilenet_v3_' + model_name + '_no_top.h5'
      file_hash = WEIGHTS_HASHES[model_name][1]
    weights_path = data_utils.get_file(
        file_name,
        BASE_WEIGHT_PATH + file_name,
        cache_subdir='models',
        file_hash=file_hash)
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
@keras_export('keras.applications.MobileNetV3Small')
def MobileNetV3Small(input_shape=None,
                     alpha=1.0,
                     minimalistic=False,
                     include_top=True,
                     weights='imagenet',
                     input_tensor=None,
                     classes=1000,
                     pooling=None,
                     dropout_rate=0.2,
                     classifier_activation='softmax'):
  # __doc__ is replaced by BASE_DOCSTRING below (see module bottom).
  def stack_fn(x, kernel, activation, se_ratio):
    # Scale per-block filter counts by the width multiplier `alpha`.
    def depth(d):
      return _depth(d * alpha)
    # _inverted_res_block args: expansion ratio, filters, kernel size,
    # stride, se_ratio, activation, block_id. Order and block_ids are
    # fixed by the pretrained checkpoint layer names — do not reorder.
    x = _inverted_res_block(x, 1, depth(16), 3, 2, se_ratio, relu, 0)
    x = _inverted_res_block(x, 72. / 16, depth(24), 3, 2, None, relu, 1)
    x = _inverted_res_block(x, 88. / 24, depth(24), 3, 1, None, relu, 2)
    x = _inverted_res_block(x, 4, depth(40), kernel, 2, se_ratio, activation, 3)
    x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 4)
    x = _inverted_res_block(x, 6, depth(40), kernel, 1, se_ratio, activation, 5)
    x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 6)
    x = _inverted_res_block(x, 3, depth(48), kernel, 1, se_ratio, activation, 7)
    x = _inverted_res_block(x, 6, depth(96), kernel, 2, se_ratio, activation, 8)
    x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation, 9)
    x = _inverted_res_block(x, 6, depth(96), kernel, 1, se_ratio, activation,
                            10)
    return x

  # Delegate to the shared builder with the "small" head width (1024).
  return MobileNetV3(stack_fn, 1024, input_shape, alpha, 'small', minimalistic,
                     include_top, weights, input_tensor, classes, pooling,
                     dropout_rate, classifier_activation)
@keras_export('keras.applications.MobileNetV3Large')
def MobileNetV3Large(input_shape=None,
                     alpha=1.0,
                     minimalistic=False,
                     include_top=True,
                     weights='imagenet',
                     input_tensor=None,
                     classes=1000,
                     pooling=None,
                     dropout_rate=0.2,
                     classifier_activation='softmax'):
  # __doc__ is replaced by BASE_DOCSTRING below (see module bottom).
  def stack_fn(x, kernel, activation, se_ratio):
    # Scale per-block filter counts by the width multiplier `alpha`.
    def depth(d):
      return _depth(d * alpha)
    # _inverted_res_block args: expansion ratio, filters, kernel size,
    # stride, se_ratio, activation, block_id. Order and block_ids are
    # fixed by the pretrained checkpoint layer names — do not reorder.
    x = _inverted_res_block(x, 1, depth(16), 3, 1, None, relu, 0)
    x = _inverted_res_block(x, 4, depth(24), 3, 2, None, relu, 1)
    x = _inverted_res_block(x, 3, depth(24), 3, 1, None, relu, 2)
    x = _inverted_res_block(x, 3, depth(40), kernel, 2, se_ratio, relu, 3)
    x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 4)
    x = _inverted_res_block(x, 3, depth(40), kernel, 1, se_ratio, relu, 5)
    x = _inverted_res_block(x, 6, depth(80), 3, 2, None, activation, 6)
    x = _inverted_res_block(x, 2.5, depth(80), 3, 1, None, activation, 7)
    x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 8)
    x = _inverted_res_block(x, 2.3, depth(80), 3, 1, None, activation, 9)
    x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 10)
    x = _inverted_res_block(x, 6, depth(112), 3, 1, se_ratio, activation, 11)
    x = _inverted_res_block(x, 6, depth(160), kernel, 2, se_ratio, activation,
                            12)
    x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
                            13)
    x = _inverted_res_block(x, 6, depth(160), kernel, 1, se_ratio, activation,
                            14)
    return x

  # Delegate to the shared builder with the "large" head width (1280).
  return MobileNetV3(stack_fn, 1280, input_shape, alpha, 'large', minimalistic,
                     include_top, weights, input_tensor, classes, pooling,
                     dropout_rate, classifier_activation)
# Attach the shared, templated docstring to both public factory functions.
MobileNetV3Small.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Small')
MobileNetV3Large.__doc__ = BASE_DOCSTRING.format(name='MobileNetV3Large')
def relu(x):
  """Apply a standard (unbounded) ReLU activation to tensor `x`."""
  relu_layer = layers.ReLU()
  return relu_layer(x)
def hard_sigmoid(x):
  """Piecewise-linear sigmoid approximation: relu6(x + 3) / 6."""
  shifted = x + 3.
  return layers.ReLU(6.)(shifted) * (1. / 6.)
def hard_swish(x):
  """h-swish activation: x * hard_sigmoid(x) (MobileNetV3 paper)."""
  gate = hard_sigmoid(x)
  return layers.Multiply()([gate, x])
# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/
# slim/nets/mobilenet/mobilenet.py
def _depth(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def _se_block(inputs, filters, se_ratio, prefix):
  """Squeeze-and-excite: globally pool, bottleneck, gate `inputs` channel-wise.

  Layer names are derived from `prefix` and must stay stable so pretrained
  checkpoints load correctly.
  """
  se = layers.GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(
      inputs)
  # Restore singleton spatial dims so 1x1 convs can act on the pooled vector.
  if backend.image_data_format() == 'channels_first':
    se = layers.Reshape((filters, 1, 1))(se)
  else:
    se = layers.Reshape((1, 1, filters))(se)
  # Squeeze: reduce channels by se_ratio, then ReLU.
  se = layers.Conv2D(
      _depth(filters * se_ratio),
      kernel_size=1,
      padding='same',
      name=prefix + 'squeeze_excite/Conv')(se)
  se = layers.ReLU(name=prefix + 'squeeze_excite/Relu')(se)
  # Excite: project back to `filters` channels and squash to a (0, 1) gate.
  se = layers.Conv2D(
      filters,
      kernel_size=1,
      padding='same',
      name=prefix + 'squeeze_excite/Conv_1')(se)
  se = hard_sigmoid(se)
  # Re-weight the original feature map channel-wise.
  return layers.Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, se])
def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio,
                        activation, block_id):
  """Inverted residual block: (expand ->) depthwise -> [SE] -> project.

  Layer names encode `block_id` and must stay stable for pretrained
  checkpoints to load. A residual shortcut is added only when the spatial
  size (stride == 1) and channel count are unchanged.
  """
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
  shortcut = x
  prefix = 'expanded_conv/'
  infilters = backend.int_shape(x)[channel_axis]
  # block_id == 0 (the first block) skips the 1x1 expansion conv; its
  # layer prefix also omits the numeric suffix.
  if block_id:
    # Expand
    prefix = 'expanded_conv_{}/'.format(block_id)
    x = layers.Conv2D(
        _depth(infilters * expansion),
        kernel_size=1,
        padding='same',
        use_bias=False,
        name=prefix + 'expand')(
            x)
    x = layers.BatchNormalization(
        axis=channel_axis,
        epsilon=1e-3,
        momentum=0.999,
        name=prefix + 'expand/BatchNorm')(
            x)
    x = activation(x)
  # Stride-2 blocks pre-pad explicitly and use 'valid' padding below so the
  # output size matches the TF-Slim reference implementation.
  if stride == 2:
    x = layers.ZeroPadding2D(
        padding=imagenet_utils.correct_pad(x, kernel_size),
        name=prefix + 'depthwise/pad')(
            x)
  x = layers.DepthwiseConv2D(
      kernel_size,
      strides=stride,
      padding='same' if stride == 1 else 'valid',
      use_bias=False,
      name=prefix + 'depthwise')(
          x)
  x = layers.BatchNormalization(
      axis=channel_axis,
      epsilon=1e-3,
      momentum=0.999,
      name=prefix + 'depthwise/BatchNorm')(
          x)
  x = activation(x)
  # Optional squeeze-and-excite on the expanded representation.
  if se_ratio:
    x = _se_block(x, _depth(infilters * expansion), se_ratio, prefix)
  # Project back down to `filters` channels; linear (no activation).
  x = layers.Conv2D(
      filters,
      kernel_size=1,
      padding='same',
      use_bias=False,
      name=prefix + 'project')(
          x)
  x = layers.BatchNormalization(
      axis=channel_axis,
      epsilon=1e-3,
      momentum=0.999,
      name=prefix + 'project/BatchNorm')(
          x)
  if stride == 1 and infilters == filters:
    x = layers.Add(name=prefix + 'Add')([shortcut, x])
  return x
@keras_export('keras.applications.mobilenet_v3.preprocess_input')
def preprocess_input(x, data_format=None):  # pylint: disable=unused-argument
  # Intentional no-op: MobileNetV3 rescales inputs itself via a
  # Rescaling(1./255.) layer inside the model, so no external
  # preprocessing is required. __doc__ is assigned below.
  return x
@keras_export('keras.applications.mobilenet_v3.decode_predictions')
def decode_predictions(preds, top=5):
  # Delegate to the shared ImageNet decoder; __doc__ is assigned below.
  return imagenet_utils.decode_predictions(preds, top=top)
# Fill in the standard applications docstrings from shared templates.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| |
from datetime import datetime, timedelta
from unittest import mock
import pytz
from croniter import croniter
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from waldur_openstack.openstack import models as openstack_models
from waldur_openstack.openstack_tenant import models, tasks
from ...tests import factories
TenantQuotas = openstack_models.Tenant.Quotas
class DeleteExpiredBackupsTaskTest(TestCase):
    """DeleteExpiredBackups must schedule deletion of every backup whose
    kept_until timestamp is already in the past."""

    def setUp(self):
        def expired_backup(minutes_ago):
            return factories.BackupFactory(
                state=models.Backup.States.OK,
                kept_until=timezone.now() - timedelta(minutes=minutes_ago),
            )

        self.expired_backup1 = expired_backup(1)
        self.expired_backup2 = expired_backup(10)

    @mock.patch(
        'waldur_openstack.openstack_tenant.executors.BackupDeleteExecutor.execute'
    )
    def test_command_starts_backend_deletion(self, mocked_execute):
        tasks.DeleteExpiredBackups().run()
        expected_calls = [
            mock.call(self.expired_backup1),
            mock.call(self.expired_backup2),
        ]
        mocked_execute.assert_has_calls(expected_calls, any_order=True)
class DeleteExpiredSnapshotsTaskTest(TestCase):
    """DeleteExpiredSnapshots must schedule deletion of every snapshot whose
    kept_until timestamp is already in the past."""

    def setUp(self):
        def expired_snapshot(minutes_ago):
            return factories.SnapshotFactory(
                state=models.Snapshot.States.OK,
                kept_until=timezone.now() - timedelta(minutes=minutes_ago),
            )

        self.expired_snapshot1 = expired_snapshot(1)
        self.expired_snapshot2 = expired_snapshot(10)

    @mock.patch(
        'waldur_openstack.openstack_tenant.executors.SnapshotDeleteExecutor.execute'
    )
    def test_command_starts_snapshot_deletion(self, mocked_execute):
        tasks.DeleteExpiredSnapshots().run()
        expected_calls = [
            mock.call(self.expired_snapshot1),
            mock.call(self.expired_snapshot2),
        ]
        mocked_execute.assert_has_calls(expected_calls, any_order=True)
class BackupScheduleTaskTest(TestCase):
    """Tests for the periodic ScheduleBackups task.

    setUp creates one disabled schedule, one overdue schedule (next trigger
    10 minutes in the past) and one future schedule (next trigger 2 minutes
    ahead), the latter two bound to the same OK instance.
    """

    def setUp(self):
        self.disabled_schedule = factories.BackupScheduleFactory(is_active=False)
        self.instance = factories.InstanceFactory(state=models.Instance.States.OK,)
        self.overdue_schedule = factories.BackupScheduleFactory(
            instance=self.instance, timezone='Europe/Tallinn'
        )
        self.overdue_schedule.next_trigger_at = timezone.now() - timedelta(minutes=10)
        self.overdue_schedule.save()
        self.future_schedule = factories.BackupScheduleFactory(
            instance=self.instance, timezone='Europe/Tallinn'
        )
        self.future_schedule.next_trigger_at = timezone.now() + timedelta(minutes=2)
        self.future_schedule.save()

    def test_disabled_schedule_is_skipped(self):
        # Inactive schedules must never produce backups.
        tasks.ScheduleBackups().run()
        self.assertEqual(self.disabled_schedule.backups.count(), 0)

    def test_backup_is_created_for_overdue_schedule(self):
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)

    @mock.patch('waldur_openstack.openstack_tenant.handlers.log.event_logger')
    def test_if_quota_is_exceeded_backup_is_not_created_and_schedule_is_paused(
        self, event_logger
    ):
        schedule = self.overdue_schedule
        scope = self.instance.service_settings
        # Usage is equal to limit
        scope.set_quota_limit(TenantQuotas.snapshots, 2)
        scope.set_quota_usage(TenantQuotas.snapshots, 2)
        # Trigger task
        tasks.ScheduleBackups().run()
        schedule.refresh_from_db()
        # Backup is not created
        self.assertEqual(schedule.backups.count(), 0)
        # Schedule is deactivated
        self.assertFalse(schedule.is_active)
        # Error message is persisted in schedule
        self.assertTrue(schedule.error_message.startswith('Failed to schedule'))
        # Event is triggered for hooks
        event_type = event_logger.openstack_backup_schedule.warning.call_args[1][
            'event_type'
        ]
        self.assertEqual(event_type, 'resource_backup_schedule_deactivated')

    def test_next_trigger_at_is_updated_for_overdue_schedule(self):
        # Arrange
        old_dt = self.overdue_schedule.next_trigger_at
        # Act
        tasks.ScheduleBackups().run()
        # Assert: the task pushed next_trigger_at into the future.
        self.overdue_schedule.refresh_from_db()
        new_dt = self.overdue_schedule.next_trigger_at
        self.assertGreater(new_dt, old_dt)

    def test_next_trigger_at_is_updated_if_timezone_is_changed(self):
        # Arrange
        old_dt = self.future_schedule.next_trigger_at
        # Act: saving with a new timezone should recompute the trigger time.
        self.future_schedule.timezone = 'Asia/Tokyo'
        self.future_schedule.save()
        # Assert
        self.future_schedule.refresh_from_db()
        new_dt = self.future_schedule.next_trigger_at
        self.assertNotEqual(new_dt, old_dt)

    def test_duplicate_backups_are_not_created_for_two_consequent_immediate_calls(self):
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)
        # timedelta is 0
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)

    def test_two_backups_are_created_if_enough_time_has_passed(self):
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)
        self._trigger_next_backup(timezone.now())
        self.assertEqual(self.overdue_schedule.backups.count(), 2)

    def test_future_schedule_is_skipped(self):
        tasks.ScheduleBackups().run()
        self.assertEqual(self.future_schedule.backups.count(), 0)

    def test_command_does_not_create_more_backups_than_maximal_number_of_resources(
        self,
    ):
        # Once the cap is reached, further triggers keep the count at the cap.
        maximum_number = 3
        self.overdue_schedule.maximal_number_of_resources = maximum_number
        self.overdue_schedule.save()
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)
        base_time = self._trigger_next_backup(timezone.now())
        self.assertEqual(self.overdue_schedule.backups.count(), 2)
        base_time = self._trigger_next_backup(base_time)
        self.assertEqual(self.overdue_schedule.backups.count(), 3)
        self._trigger_next_backup(base_time)
        self.assertEqual(self.overdue_schedule.backups.count(), maximum_number)

    def test_command_creates_backups_up_to_maximal_number_if_limit_is_updated(self):
        # Raising the cap mid-stream allows backups to resume up to the new cap.
        self.overdue_schedule.maximal_number_of_resources = 2
        self.overdue_schedule.save()
        tasks.ScheduleBackups().run()
        self.assertEqual(self.overdue_schedule.backups.count(), 1)
        base_time = self._trigger_next_backup(timezone.now())
        self.assertEqual(self.overdue_schedule.backups.count(), 2)
        base_time = self._trigger_next_backup(base_time)
        self.assertEqual(self.overdue_schedule.backups.count(), 2)
        self.overdue_schedule.maximal_number_of_resources = 3
        self.overdue_schedule.save()
        self._trigger_next_backup(base_time)
        self.assertEqual(self.overdue_schedule.backups.count(), 3)

    def test_if_backup_amount_exceeds_allowed_limit_deletion_is_scheduled(self):
        # NOTE(review): naive datetime.now() is mixed with timezone-aware
        # kept_until values elsewhere in this suite — confirm USE_TZ behavior.
        now = datetime.now()
        todays_backup = factories.BackupFactory(instance=self.instance, kept_until=now)
        older_backup = factories.BackupFactory(
            instance=self.instance, kept_until=now - timedelta(minutes=30)
        )
        oldest_backup = factories.BackupFactory(
            instance=self.instance, kept_until=now - timedelta(minutes=50)
        )
        self.overdue_schedule.backups.add(*[todays_backup, older_backup, oldest_backup])
        self.overdue_schedule.maximal_number_of_resources = 1
        self.overdue_schedule.save()
        tasks.ScheduleBackups().run()
        older_backup.refresh_from_db()
        oldest_backup.refresh_from_db()
        # The two oldest backups beyond the cap are marked for deletion.
        self.assertEqual(models.Backup.States.DELETION_SCHEDULED, older_backup.state)
        self.assertEqual(models.Backup.States.DELETION_SCHEDULED, oldest_backup.state)
        tasks.ScheduleBackups().run()
        self.assertTrue(models.Backup.objects.filter(id=todays_backup.id).exists())
        self.assertEqual(self.overdue_schedule.backups.count(), 3)

    def test_if_backup_amount_equals_allowed_limit_deletion_is_scheduled_for_oldest_backup(
        self,
    ):
        now = datetime.now()
        backup1 = factories.BackupFactory(
            instance=self.instance, kept_until=None, created=now - timedelta(days=3)
        )
        backup2 = factories.BackupFactory(
            instance=self.instance, kept_until=None, created=now - timedelta(days=2)
        )
        backup3 = factories.BackupFactory(
            instance=self.instance, kept_until=None, created=now - timedelta(days=1)
        )
        self.overdue_schedule.backups.add(*[backup1, backup2, backup3])
        self.overdue_schedule.maximal_number_of_resources = 3
        self.overdue_schedule.save()
        tasks.ScheduleBackups().run()
        backup1.refresh_from_db()
        backup2.refresh_from_db()
        backup3.refresh_from_db()
        # Only the single oldest backup is evicted to make room.
        self.assertEqual(models.Backup.States.DELETION_SCHEDULED, backup1.state)
        self.assertNotEqual(models.Backup.States.DELETION_SCHEDULED, backup2.state)
        self.assertNotEqual(models.Backup.States.DELETION_SCHEDULED, backup3.state)

    @mock.patch(
        'waldur_openstack.openstack_tenant.executors.BackupDeleteExecutor.execute'
    )
    def test_if_exceeding_backups_are_already_deleting_extra_deletion_is_not_scheduled(
        self, mocked_executor
    ):
        # A backup already in DELETION_SCHEDULED counts toward the excess,
        # so no additional deletion should be started.
        backup1 = factories.BackupFactory(
            instance=self.instance, state=models.Backup.States.DELETION_SCHEDULED
        )
        backup2 = factories.BackupFactory(instance=self.instance)
        backup3 = factories.BackupFactory(instance=self.instance)
        self.overdue_schedule.backups.add(*[backup1, backup2, backup3])
        self.overdue_schedule.maximal_number_of_resources = 3
        self.overdue_schedule.save()
        tasks.ScheduleBackups().run()
        self.assertEqual(0, mocked_executor.call_count)

    def _trigger_next_backup(self, base_dt):
        """Advance to the schedule's next cron firing after `base_dt`, run the
        task under frozen time just past that moment, and return the frozen
        timestamp so calls can be chained."""
        tz = pytz.timezone(self.overdue_schedule.timezone)
        dt = tz.normalize(base_dt)
        next_trigger_at = croniter(self.overdue_schedule.schedule, dt).get_next(
            datetime
        )
        mocked_now = next_trigger_at + timedelta(seconds=5)
        with freeze_time(mocked_now):
            tasks.ScheduleBackups().run()
        return mocked_now
class SnapshotScheduleTaskTest(TestCase):
    """Behaviour of the periodic ScheduleSnapshots task."""

    def test_command_does_not_create_snapshots_created_for_not_active_schedules(self):
        inactive_schedule = factories.SnapshotScheduleFactory(is_active=False)
        tasks.ScheduleSnapshots().run()
        self.assertEqual(inactive_schedule.snapshots.count(), 0)

    def test_command_create_one_snapshot_for_schedule_with_next_trigger_in_past(self):
        overdue_schedule = factories.SnapshotScheduleFactory()
        overdue_schedule.next_trigger_at = timezone.now() - timedelta(minutes=10)
        overdue_schedule.save()
        tasks.ScheduleSnapshots().run()
        self.assertEqual(overdue_schedule.snapshots.count(), 1)

    def test_command_does_not_create_snapshots_created_for_schedule_with_next_trigger_in_future(
        self,
    ):
        upcoming_schedule = factories.SnapshotScheduleFactory()
        upcoming_schedule.next_trigger_at = timezone.now() + timedelta(minutes=2)
        upcoming_schedule.save()
        tasks.ScheduleSnapshots().run()
        self.assertEqual(upcoming_schedule.snapshots.count(), 0)

    @mock.patch('waldur_openstack.openstack_tenant.handlers.log.event_logger')
    def test_if_quota_is_exceeded_snapshot_is_not_created_and_schedule_is_paused(
        self, event_logger
    ):
        schedule = factories.SnapshotScheduleFactory()
        schedule.next_trigger_at = timezone.now() - timedelta(minutes=10)
        schedule.save()
        scope = schedule.source_volume.service_settings
        # Make quota usage hit the limit so scheduling must fail.
        scope.set_quota_limit(TenantQuotas.snapshots, 2)
        scope.set_quota_usage(TenantQuotas.snapshots, 2)

        tasks.ScheduleSnapshots().run()
        schedule.refresh_from_db()

        # No snapshot was created; the schedule was paused with an error.
        self.assertEqual(schedule.snapshots.count(), 0)
        self.assertFalse(schedule.is_active)
        self.assertTrue(schedule.error_message.startswith('Failed to schedule'))
        # A deactivation event was emitted for hooks.
        warning_kwargs = event_logger.openstack_snapshot_schedule.warning.call_args[1]
        self.assertEqual(
            warning_kwargs['event_type'], 'resource_snapshot_schedule_deactivated'
        )
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # noqa; PEP8 asserts
from webtest_plus import TestApp
import mock
import datetime
import httplib as http
from flask import Flask
from werkzeug.wrappers import BaseResponse
from framework import auth
from framework.auth import cas
from framework.exceptions import HTTPError
from tests.base import OsfTestCase, assert_is_redirect
from tests.factories import (
UserFactory, UnregUserFactory, AuthFactory,
ProjectFactory, NodeFactory, AuthUserFactory, PrivateLinkFactory
)
from framework.auth import User, Auth
from framework.auth.decorators import must_be_logged_in
from website.util import web_url_for
from website.project.decorators import (
must_have_permission, must_be_contributor,
must_have_addon, must_be_addon_authorizer,
)
class TestAuthUtils(OsfTestCase):
    """Exercise user lookup and registration helpers in framework.auth."""

    def test_unreg_user_can_register(self):
        unreg = UnregUserFactory()
        auth.register_unconfirmed(
            username=unreg.username,
            password='gattaca',
            fullname='Rosie',
        )
        # Registration must have issued a confirmation token.
        assert_true(unreg.get_confirmation_token(unreg.username))

    def test_get_user_by_id(self):
        someone = UserFactory()
        assert_equal(User.load(someone._id), someone)

    def test_get_user_by_email(self):
        someone = UserFactory()
        assert_equal(auth.get_user(email=someone.username), someone)

    def test_get_user_with_wrong_password_returns_false(self):
        someone = UserFactory.build()
        someone.set_password('killerqueen')
        assert_false(auth.get_user(email=someone.username, password='wrong'))
class TestAuthObject(OsfTestCase):
    """Behaviour of the Auth container object."""

    def test_repr(self):
        # Local renamed so the `auth` module import is not shadowed.
        auth_instance = AuthFactory()
        assert_in(str(auth_instance.user), repr(auth_instance))

    def test_factory(self):
        auth_obj = AuthFactory()
        assert_true(isinstance(auth_obj.user, auth.User))

    def test_from_kwargs(self):
        someone = UserFactory()
        request_args = {'view_only': 'mykey'}
        auth_obj = Auth.from_kwargs(request_args, {'user': someone})
        assert_equal(auth_obj.user, someone)
        assert_equal(auth_obj.private_key, request_args['view_only'])

    def test_logged_in(self):
        assert_true(Auth(user=UserFactory()).logged_in)
        assert_false(Auth(user=None).logged_in)
class TestPrivateLink(OsfTestCase):
    """View-only private links must grant read access to a private project."""

    def setUp(self):
        super(TestPrivateLink, self).setUp()
        # A throwaway Flask app with a single route guarded by
        # must_be_contributor, so the decorator is exercised end-to-end.
        self.flaskapp = Flask('testing_private_links')

        @self.flaskapp.route('/project/<pid>/')
        @must_be_contributor
        def project_get(**kwargs):
            return 'success', 200

        self.app = TestApp(self.flaskapp)

        self.user = AuthUserFactory()
        self.project = ProjectFactory(is_public=False)
        # Attach a private (view-only) link to the project.
        self.link = PrivateLinkFactory()
        self.link.nodes.append(self.project)
        self.link.save()

    @mock.patch('website.project.decorators.Auth.from_kwargs')
    def test_has_private_link_key(self, mock_from_kwargs):
        # Anonymous user with a valid view_only key reaches the view.
        mock_from_kwargs.return_value = Auth(user=None)
        res = self.app.get('/project/{0}'.format(self.project._primary_key),
                           {'view_only': self.link.key})
        res = res.follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.body, 'success')

    @mock.patch('website.project.decorators.Auth.from_kwargs')
    def test_does_not_have_key(self, mock_from_kwargs):
        # Anonymous user without a valid key is redirected away.
        mock_from_kwargs.return_value = Auth(user=None)
        res = self.app.get('/project/{0}'.format(self.project._primary_key),
                           {'key': None})
        assert_is_redirect(res)
# Flask app for testing view decorators
decoratorapp = Flask('decorators')


@must_be_contributor
def view_that_needs_contributor(**kwargs):
    # Echo back whichever node the decorator resolved, so tests can
    # assert which object was injected.
    return kwargs.get('node') or kwargs.get('parent')
class AuthAppTestCase(OsfTestCase):
    """Base case that runs each test inside a pushed Flask request context,
    so decorators that touch request state can execute."""

    def setUp(self):
        self.ctx = decoratorapp.test_request_context()
        self.ctx.push()

    def tearDown(self):
        self.ctx.pop()
class TestMustBeContributorDecorator(AuthAppTestCase):
    """Access rules enforced by the must_be_contributor view decorator."""

    def setUp(self):
        super(TestMustBeContributorDecorator, self).setUp()
        self.contrib = AuthUserFactory()
        self.project = ProjectFactory()
        self.project.add_contributor(self.contrib, auth=Auth(self.project.creator))
        self.project.save()

    def test_must_be_contributor_when_user_is_contributor(self):
        # Contributor gets the node back from the view.
        result = view_that_needs_contributor(
            pid=self.project._primary_key,
            user=self.contrib)
        assert_equal(result, self.project)

    def test_must_be_contributor_when_user_is_not_contributor_raises_error(self):
        non_contributor = AuthUserFactory()
        with assert_raises(HTTPError):
            view_that_needs_contributor(
                pid=self.project._primary_key,
                user=non_contributor
            )

    def test_must_be_contributor_no_user(self):
        # Anonymous access is redirected to the CAS login page.
        res = view_that_needs_contributor(
            pid=self.project._primary_key,
            user=None,
        )
        assert_is_redirect(res)
        # redirects to login url
        redirect_url = res.headers['Location']
        login_url = cas.get_login_url(service_url='http://localhost/')
        assert_equal(redirect_url, login_url)

    def test_must_be_contributor_parent_admin(self):
        # Admin of the parent project may access a child node they do not
        # directly contribute to.
        user = UserFactory()
        node = NodeFactory(parent=self.project, creator=user)
        res = view_that_needs_contributor(
            pid=self.project._id,
            nid=node._id,
            user=self.project.creator,
        )
        assert_equal(res, node)

    def test_must_be_contributor_parent_write(self):
        # Write (non-admin) permission on the parent is NOT enough: expect 403.
        user = UserFactory()
        node = NodeFactory(parent=self.project, creator=user)
        self.project.set_permissions(self.project.creator, ['read', 'write'])
        self.project.save()
        with assert_raises(HTTPError) as exc_info:
            view_that_needs_contributor(
                pid=self.project._id,
                nid=node._id,
                user=self.project.creator,
            )
        assert_equal(exc_info.exception.code, 403)
@must_be_logged_in
def protected(**kwargs):
    """Trivial view guarded by ``must_be_logged_in``; used by tests below."""
    return 'open sesame'
@must_have_permission('dance')
def thriller(**kwargs):
    """Trivial view requiring the 'dance' permission; used by tests below."""
    return 'chiller'
class TestPermissionDecorators(AuthAppTestCase):
    """Tests for ``must_be_logged_in`` and ``must_have_permission``."""

    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_logged_in_decorator_with_user(self, mock_from_kwargs):
        # A logged-in user reaches the view body without error.
        user = UserFactory()
        mock_from_kwargs.return_value = Auth(user=user)
        protected()

    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_logged_in_decorator_with_no_user(self, mock_from_kwargs):
        # Anonymous callers receive a redirect to the CAS login page.
        mock_from_kwargs.return_value = Auth()
        resp = protected()
        assert_true(isinstance(resp, BaseResponse))
        login_url = cas.get_login_url(service_url='http://localhost/')
        assert_in(login_url, resp.headers.get('location'))

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_true(self, mock_from_kwargs, mock_to_nodes):
        # A caller holding the required permission passes the guard.
        project = ProjectFactory()
        project.add_permission(project.creator, 'dance')
        mock_from_kwargs.return_value = Auth(user=project.creator)
        mock_to_nodes.return_value = (None, project)
        thriller(node=project)

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_false(self, mock_from_kwargs, mock_to_nodes):
        # Logged in but lacking the permission -> 403 Forbidden.
        project = ProjectFactory()
        mock_from_kwargs.return_value = Auth(user=project.creator)
        mock_to_nodes.return_value = (None, project)
        with assert_raises(HTTPError) as ctx:
            thriller(node=project)
        assert_equal(ctx.exception.code, http.FORBIDDEN)

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_not_logged_in(self, mock_from_kwargs, mock_to_nodes):
        # Not logged in at all -> 401 Unauthorized.
        project = ProjectFactory()
        mock_from_kwargs.return_value = Auth()
        mock_to_nodes.return_value = (None, project)
        with assert_raises(HTTPError) as ctx:
            thriller(node=project)
        assert_equal(ctx.exception.code, http.UNAUTHORIZED)
def needs_addon_view(**kwargs):
    """Trivial view wrapped by the addon decorators under test."""
    payload = 'openaddon'
    return payload
class TestMustHaveAddonDecorator(AuthAppTestCase):
    """Tests for ``must_have_addon`` in both 'node' and 'user' modes."""

    def setUp(self):
        super(TestMustHaveAddonDecorator, self).setUp()
        self.project = ProjectFactory()

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    def test_must_have_addon_node_true(self, mock_kwargs_to_nodes):
        # Node has the addon enabled -> wrapped view runs.
        mock_kwargs_to_nodes.return_value = (None, self.project)
        self.project.add_addon('github', auth=None)
        decorated = must_have_addon('github', 'node')(needs_addon_view)
        res = decorated()
        assert_equal(res, 'openaddon')

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    def test_must_have_addon_node_false(self, mock_kwargs_to_nodes):
        # Addon removed from the node -> HTTPError.
        mock_kwargs_to_nodes.return_value = (None, self.project)
        self.project.delete_addon('github', auth=None)
        decorated = must_have_addon('github', 'node')(needs_addon_view)
        with assert_raises(HTTPError):
            decorated()

    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_addon_user_true(self, mock_current_user):
        # Current user has the addon enabled -> wrapped view runs.
        mock_current_user.return_value = Auth(self.project.creator)
        self.project.creator.add_addon('github')
        decorated = must_have_addon('github', 'user')(needs_addon_view)
        res = decorated()
        assert_equal(res, 'openaddon')

    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_addon_user_false(self, mock_current_user):
        # Addon removed from the user -> HTTPError.
        mock_current_user.return_value = Auth(self.project.creator)
        self.project.creator.delete_addon('github')
        decorated = must_have_addon('github', 'user')(needs_addon_view)
        with assert_raises(HTTPError):
            decorated()
class TestMustBeAddonAuthorizerDecorator(AuthAppTestCase):
    """Tests for ``must_be_addon_authorizer``: only the user whose account
    authorized the node addon may pass."""

    def setUp(self):
        super(TestMustBeAddonAuthorizerDecorator, self).setUp()
        self.project = ProjectFactory()
        self.decorated = must_be_addon_authorizer('github')(needs_addon_view)

    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_authorizer_true(self, mock_get_current_user, mock_kwargs_to_nodes):
        # Mock
        mock_get_current_user.return_value = Auth(self.project.creator)
        mock_kwargs_to_nodes.return_value = (None, self.project)
        # Setup: node addon authorized by the current user (the creator).
        self.project.add_addon('github', auth=None)
        node_settings = self.project.get_addon('github')
        self.project.creator.add_addon('github')
        user_settings = self.project.creator.get_addon('github')
        node_settings.user_settings = user_settings
        # Test
        res = self.decorated()
        assert_equal(res, 'openaddon')

    def test_must_be_authorizer_false(self):
        # Setup: addon authorized by a DIFFERENT user -> HTTPError.
        self.project.add_addon('github', auth=None)
        node_settings = self.project.get_addon('github')
        user2 = UserFactory()
        user2.add_addon('github')
        user_settings = user2.get_addon('github')
        node_settings.user_settings = user_settings
        # Test
        with assert_raises(HTTPError):
            self.decorated()

    def test_must_be_authorizer_no_user_settings(self):
        # Node addon present but never authorized -> HTTPError.
        self.project.add_addon('github', auth=None)
        with assert_raises(HTTPError):
            self.decorated()

    def test_must_be_authorizer_no_node_settings(self):
        # No node addon at all -> HTTPError.
        with assert_raises(HTTPError):
            self.decorated()
class TestBasicAuth(OsfTestCase):
    """HTTP Basic auth must be rejected with 403."""

    def test_basic_auth_returns_403(self):
        dashboard_url = web_url_for('dashboard')
        response = self.app.get(dashboard_url, auth=('test', 'test'), expect_errors=True)
        assert_equal(response.status_code, 403)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| |
#!/bin/python3
import sys
import datetime
import numpy as np
import pandas as pd
from pyentrp import entropy as ent
from tools import QuickPlot, detect_peaks
class ModelPrep:
    """Prepares symptom-score tables for modelling.

    Discretises continuous symptom scores into 'major'/'minor' labels and
    trims entries recorded before the study start date.
    """

    @property
    def discretisedStationarySymptomScoreTable(self):
        # Stationary (detrended) scores with a 'label' column appended.
        return self.__discretisedStationarySymptomScoreTable

    @discretisedStationarySymptomScoreTable.setter
    def discretisedStationarySymptomScoreTable(self, dSST):
        self.__discretisedStationarySymptomScoreTable = dSST

    @property
    def discretisedRawSymptomScoreTable(self):
        # NOTE(review): the methods below write ``self.discretisedRawScoreTable``
        # (no 'Symptom'), so this property pair appears unused -- confirm the
        # intended attribute name.
        return self.__discretisedRawSymptomScoreTable

    @discretisedRawSymptomScoreTable.setter
    def discretisedRawSymptomScoreTable(self, dSST):
        self.__discretisedRawSymptomScoreTable = dSST

    def __init__(self, log):
        # log: logger object providing ``emit``.
        self.log = log

    def discretiseSymtomScore(self, stationarySymptom, rawSymptom):
        """Attach a 'label' column derived from the stationary 'total' score.

        Scores >= mean + 1 SD are labelled 'minor', the rest 'major'.
        NOTE(review): this mapping looks inverted (high score -> 'minor');
        confirm the intended direction.
        """
        if 'label' not in rawSymptom.columns:
            m = round(np.mean(stationarySymptom['total']), 1)
            sd = round(np.std(stationarySymptom['total']), 2)
            minorIdx = stationarySymptom['total'] >= (m + sd)
            labels = ['major'] * len(minorIdx)
            for i in np.where(minorIdx == True)[0]:
                labels[i] = 'minor'
            labelsTable = pd.DataFrame(labels)
            labelsTable.columns = ['label']
            labelsTable.index = stationarySymptom.index
            self.discretisedStationarySymptomScoreTable = pd.concat([stationarySymptom, labelsTable], axis=1)
            self.discretisedRawScoreTable = pd.concat([rawSymptom, labelsTable], axis=1)
        else:
            # Tables already labelled: store them unchanged.
            self.discretisedStationarySymptomScoreTable = stationarySymptom
            self.discretisedRawScoreTable = rawSymptom

    def removeEntriesPriorToStudyStart(self, info):
        """Drop rows whose 'datetime' precedes info['startDate'] (dd/mm/YYYY).

        Non-string datetimes are mapped to epoch (1970-01-01) so they are
        always treated as pre-study and removed.
        """
        dStart = datetime.datetime.strptime(info['startDate'], '%d/%m/%Y')
        dates = self.discretisedRawScoreTable['datetime']
        dDates = []
        for d in dates:
            if type(d) is type(' '):
                dDates.append(datetime.datetime.strptime(d, '%Y-%m-%d %H:%M'))
            else:
                dDates.append(datetime.datetime(1970, 1, 1, 0, 0, 0))
        validDatesIdxs = []
        for i in range(0, len(dDates)):
            if dDates[i] > dStart:
                validDatesIdxs.append(i)
        self.discretisedRawScoreTable = self.discretisedRawScoreTable.loc[validDatesIdxs, :]
        self.discretisedStationarySymptomScoreTable = self.discretisedStationarySymptomScoreTable.loc[validDatesIdxs, :]
class NonParaModel:
    """Feature construction for the non-parametric model.

    Aligns daily symptom scores (y) with minute-level passive sensor data
    (x) and derives, per labelled day: rest-activity features (L5, M10, RA,
    IV, IS), disorganisation features (multiscale entropy, DTW distance to
    the previous day) and sleep features (Fitbit summary + questionnaire).
    """

    def __init__(self, yFeature, log, dayDivisionHour=0):
        # yFeature: active-data column used as the target variable.
        # dayDivisionHour: hour at which one "day" ends and the next begins.
        self.log = log
        self.divTime = datetime.time(hour=dayDivisionHour)
        self.yFeature = yFeature
        # Columns pulled from the sleep summary table.
        self.sleepFeaturesSum = ['startTime',
                                 'minutesToFallAsleep',
                                 'minutesAfterWakeup',
                                 'timeInBed',
                                 'minutesAsleep',
                                 'restlessCount',
                                 'restlessDuration',
                                 'awakeCount',
                                 'awakeDuration',
                                 'efficiency'
                                 ]
        # Columns pulled from the sleep questionnaire.
        self.sleepFeaturesQ = ['sleepQuality']
        self.restActivtyFeatures = ['L5', 'M10', 'RA', 'IV', 'IS']

    def submitData(self, participant, xFeatures):
        """Cache a participant's tables and normalise the x features to [0, 1]."""
        self.activeDataSy = participant.activeDataSymptom
        self.activeDataSl = participant.activeDataSleep
        self.sleepSummary = participant.sleepSummary
        self.yData = self.activeDataSy[['datetime', self.yFeature]]
        self.xFeatures = xFeatures
        self.xData = participant.passiveData[(['timestamp'] + self.xFeatures)]
        # Column-wise max normalisation.
        self.xDataNorm = self.xData[self.xFeatures] / self.xData[self.xFeatures].max()
        self.enrolmentDate = datetime.datetime.strptime(participant.info['startDate'], '%d/%m/%Y')

    def constructModel(self):
        """Build the full feature table (rest-activity + disorganisation + sleep)."""
        self.log.emit('[STATUS] Creating index table.', indents=1)
        self.createIndexTable()
        self.log.emit('[STATUS] Extracting rest-activity features.', indents=1)
        dfRestActivity = self.extractRestActivityFeatures(leadFeature='intra_steps')
        self.log.emit('[STATUS] Extracting disorganisation features.', indents=1)
        dfDisorganisation = self.extractDisorganisationFeatures()
        self.log.emit('[STATUS] Extracting sleep features.', indents=1)
        dfSleep = self.extractSleepFeatures()
        self.features = pd.concat([dfRestActivity, dfDisorganisation, dfSleep], axis=1)

    def createIndexTable(self):
        """Map each labelled day to its row-index window in the x data."""
        self.indexDict = []
        self.extractDateIdxsFromYData()
        self.extractDateIdxsFromXDataBasedOnY()
        self.removeIncompleteIndexs()

    def extractDateIdxsFromYData(self):
        # One entry per y row recorded on/after the enrolment date.
        for i in range(len(self.yData)):
            entry = {'index': i}
            entry['y'] = float(self.yData[i:i+1][self.yFeature])
            startDate, endDate = self.determineDatesFromYData(i)
            entry['dateStart'] = startDate
            entry['dateEnd'] = endDate
            if self.enrolmentDate.date() <= startDate.date():
                self.indexDict.append(entry)

    def determineDatesFromYData(self, index):
        """Return the 24 h (start, end) window ending at the division hour."""
        dt_str = list(self.activeDataSy[index:(index+1)]['datetime'])[0]
        dt = datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M')
        dtEnd = dt.replace(hour=self.divTime.hour, minute=0)
        tDay = datetime.timedelta(days=1)
        tMin = datetime.timedelta(minutes=1)
        # Window is [end - 24h + 1min, end].
        dtStart= dtEnd - tDay + tMin
        return (dtStart, dtEnd)

    def extractDateIdxsFromXDataBasedOnY(self):
        """Single pass over x rows assigning indexStart/indexEnd per y window."""
        idxStart = 0
        idxEnd = 0
        currentTableIndex = 0
        for i in range(len(self.xData)):
            dateStart = self.indexDict[currentTableIndex]['dateStart']
            dateEnd = self.indexDict[currentTableIndex]['dateEnd']
            dateXDataStr = list(self.xData[i:(i + 1)]['timestamp'])[0]
            dateXData = datetime.datetime.strptime(dateXDataStr, '%Y-%m-%d %H:%M')
            # Advance idxStart while still before the window start.
            if dateXData <= dateStart and dateXData < dateEnd:
                idxStart = i
            if dateXData <= dateEnd:
                idxEnd = i
            # Window closed (or data exhausted): record it and move on.
            if dateXData >= dateEnd or i == (len(self.xData) - 1):
                self.indexDict[currentTableIndex]['indexStart'] = idxStart
                self.indexDict[currentTableIndex]['indexEnd'] = idxEnd
                currentTableIndex += 1
                if currentTableIndex >= len(self.indexDict):
                    break

    def removeIncompleteIndexs(self):
        """Drop entries that never received an 'indexStart' (no x data)."""
        newIndexDict = []
        for index in self.indexDict:
            try:
                test = index['indexStart']
                newIndexDict.append(index)
            except KeyError:
                self.log.emit('[WARN] Removing index {}, due to missing \'indexStart\'.'.format(index['index']), indents=1)
        self.indexDict = newIndexDict

    def extractSleepFeatures(self):
        """One row per index: sleep summary + questionnaire features."""
        featureSleepTmp = []
        indexDates = []
        cols = self.sleepFeaturesSum + self.sleepFeaturesQ
        for index in self.indexDict:
            featureSleepSum = self.extractSleepSummarySample(index['dateStart'])
            featureSleepQue = self.extractSleepQuestionnaire(index['dateStart'])
            features = featureSleepSum + featureSleepQue
            featureSleepTmp.append(features)
            indexDates.append(index['dateStart'])
        featuresSleep = pd.DataFrame(featureSleepTmp, columns=cols)
        featuresSleep.index = indexDates
        return featuresSleep

    def extractSleepSummarySample(self, date):
        """Summary row matching ``date`` (by calendar day), else 'NaN' fillers."""
        index = self.sleepSummary.index
        for i in range(0, len(index)):
            sample = self.sleepSummary.loc[index[i]]
            dateOfInterest = datetime.datetime.strptime(sample['dateOfSleep'], '%d/%m/%Y')
            if date.date() == dateOfInterest.date():
                return list(sample[self.sleepFeaturesSum])
        return ['NaN'] * len(self.sleepFeaturesSum)

    def extractSleepQuestionnaire(self, date):
        """Questionnaire row matching ``date`` (by calendar day), else 'NaN' fillers."""
        index = self.activeDataSl.index
        for i in range(0, len(index)):
            sample = self.activeDataSl.loc[index[i]]
            dateOfInterest = datetime.datetime.strptime(sample['dateTime'], '%Y-%m-%d %H:%M')
            if date.date() == dateOfInterest.date():
                return list(sample[self.sleepFeaturesQ])
        return ['NaN'] * len(self.sleepFeaturesQ)

    def extractRestActivityFeatures(self, leadFeature):
        """L5/M10/RA/IV/IS per day; L5/M10 windows chosen on ``leadFeature``."""
        featureRATmp = []
        indexDates = []
        cols = self.formatColumns(self.xFeatures, prefixes=['L5', 'M10', 'RA', 'IV', 'IS'])
        for index in self.indexDict:
            l5Idx, m10Idx = self.determineL5M10Indexes(index, leadFeature)
            L5 = self.xData.loc[l5Idx, self.xFeatures].mean()
            M10 = self.xData.loc[m10Idx, self.xFeatures].mean()
            RA = list(self.computeRA(L5, M10))
            IV = list(self.computeIntraDayVariability(index))
            IS = list(self.computeInterDayStability(index))
            RATmp = list(L5) + list(M10) + RA + IV + IS
            featureRATmp.append(RATmp)
            indexDates.append(index['dateStart'])
        featuresRestActivity = pd.DataFrame(featureRATmp, columns=cols)
        featuresRestActivity.index = indexDates
        return featuresRestActivity

    def determineL5M10Indexes(self, index, leadFeature):
        """Row labels of the 5 h least-active and 10 h most-active minutes.

        NOTE(review): the M10 slice ends at ``len - 1`` and so excludes the
        single most-active minute (599 rows instead of 600) -- suspected
        off-by-one; TODO confirm.
        """
        rawSamplePeriod = self.xData.loc[index['indexStart']:index['indexEnd']]
        leadFeatureRaw = rawSamplePeriod[leadFeature]
        leadFeatureRawSorted = leadFeatureRaw.sort_values()
        l5Idx = list(leadFeatureRawSorted.index[0:(60 * 5)])
        m10Idx = list(leadFeatureRawSorted.index[
                      (len(leadFeatureRawSorted.index) - (60 * 10)):(len(leadFeatureRawSorted.index) - 1)])
        return (l5Idx, m10Idx)

    def computeRA(self, L5, M10):
        """Relative amplitude: (M10 - L5) / (M10 + L5), element-wise."""
        M10PlusL5 = M10.add(L5, fill_value=0)
        M10MinusL5 = M10.subtract(L5, fill_value=0)
        RA = M10MinusL5.div(M10PlusL5, fill_value=0)
        return RA

    def computeIntraDayVariability(self, index):
        """IV: normalised first-difference variance of the day's window
        relative to the overall variance of the normalised series."""
        idxStart = index['indexStart']
        idxEnd = index['indexEnd']
        pData = self.xDataNorm.loc[idxStart:idxEnd, self.xFeatures]
        pDataDiffSquared = pData.diff() * pData.diff()
        nominatorP = len(self.xDataNorm) * pDataDiffSquared.sum()
        nData = self.xDataNorm[self.xFeatures]
        nDataMinusMean = nData - nData.mean()
        nDataSquared = nDataMinusMean * nDataMinusMean
        denominatorN = (len(self.xDataNorm)-1) * nDataSquared.sum()
        IV = nominatorP / denominatorN
        return IV

    def computeInterDayStability(self, index):
        """IS: variance of the day's window about its mean relative to the
        overall variance of the normalised series."""
        idxStart = index['indexStart']
        idxEnd = index['indexEnd']
        pData = self.xDataNorm.loc[idxStart:idxEnd, self.xFeatures]
        pDataMinusMean = pData - pData.mean()
        pDataSquared = pDataMinusMean * pDataMinusMean
        nominatorP = len(self.xData) * pDataSquared.sum()
        nData = self.xDataNorm[self.xFeatures]
        nDataMinusMean = nData - nData.mean()
        nDataSquared = nDataMinusMean * nDataMinusMean
        denominatorN = len(pData) * nDataSquared.sum()
        IS = nominatorP / denominatorN
        return IS

    def extractDisorganisationFeatures(self):
        """Multiscale entropy and DTW distance to the previous day, per day."""
        disorgFeaturesTmp = []
        indexDates = []
        cols = self.formatColumns(self.xFeatures, prefixes=['MSE', 'DWT'])
        for index in self.indexDict:
            MSE = self.computeMSE(index)
            DTWDist = self.computeDTW(index)
            disorgFeaturesTmp.append((MSE + DTWDist))
            indexDates.append(index['dateStart'])
        disorganisationFeatures = pd.DataFrame(disorgFeaturesTmp, columns=cols)
        disorganisationFeatures.index = indexDates
        return disorganisationFeatures

    def computeMSE(self, index, m_length=20):
        """Mean multiscale entropy per feature over the day's window."""
        indexStart = index['indexStart']
        indexEnd = index['indexEnd']
        dfData = self.xData.loc[indexStart:indexEnd]
        MSE_means = []
        for feature in self.xFeatures:
            ts = list(dfData[feature])
            tsStd = np.std(ts)
            # Tolerance 0.2 * SD is the conventional sample-entropy setting.
            MSEs = ent.multiscale_entropy(ts, sample_length=m_length, tolerance=0.2*tsStd)
            MSE_means.append(np.mean(MSEs))
        return MSE_means

    def computeDTW(self, index):
        """DTW distance between the day's window and the preceding 1440 rows.

        NOTE(review): the guard catches IndexError, but pandas ``.loc`` label
        slicing does not raise IndexError for out-of-range labels -- confirm
        the first-day case is actually handled.
        """
        indexStart = index['indexStart']
        indexEnd = index['indexEnd']
        indexPrev = indexStart - 1440
        try:
            dfData = self.xData.loc[indexStart:indexEnd]
            dfDataPrev = self.xData.loc[indexPrev:indexStart]
            dtwDist = []
            for feature in self.xFeatures:
                ts = np.array(list(dfData[feature]))
                tsPrev = np.array(list(dfDataPrev[feature]))
                dist, path = self.DTW(ts, tsPrev)
                dtwDist.append(dist)
        except IndexError:
            dtwDist = ['NaN'] * len(self.xFeatures)
        return dtwDist

    def DTW(self, A, B, window=sys.maxsize, d=lambda x, y: abs(x - y)):
        """Classic O(M*N) dynamic-time-warping distance with optional band.

        Returns (total cost, optimal warp path as (i, j) pairs).
        """
        # create the cost matrix
        M, N = len(A), len(B)
        cost = sys.maxsize * np.ones((M, N))
        # initialize the first row and column
        cost[0, 0] = d(A[0], B[0])
        for i in range(1, M):
            cost[i, 0] = cost[i - 1, 0] + d(A[i], B[0])
        for j in range(1, N):
            cost[0, j] = cost[0, j - 1] + d(A[0], B[j])
        # fill in the rest of the matrix
        for i in range(1, M):
            for j in range(max(1, i - window), min(N, i + window)):
                choices = cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j]
                cost[i, j] = min(choices) + d(A[i], B[j])
        # find the optimal path
        n, m = N - 1, M - 1
        path = []
        while (m, n) != (0, 0):
            path.append((m, n))
            m, n = min((m - 1, n), (m, n - 1), (m - 1, n - 1), key=lambda x: cost[x[0], x[1]])
        path.append((0, 0))
        return cost[-1, -1], path

    def formatColumns(self, features, prefixes):
        """Build '<prefix> <Feature>' column names for every prefix/feature pair."""
        cols = []
        for prefix in prefixes:
            tmpF = []
            for i in range(0, len(features)):
                f = features[i].replace('_', ' ')
                # Acronyms keep their casing; everything else is capitalised.
                if f not in ['LAM', 'FAM', 'VAM']:
                    f = f.capitalize()
                tmpF.append('{} {}'.format(prefix, f))
            cols = cols + tmpF
        return cols
class GpModel:
    """Aligns daily class labels (y) with minute-level sensor data (x).

    Builds an index table mapping each labelled day to the start/end row
    indices of its 24 h window in the passive data, keeps only complete
    (1440-minute) windows, and groups the surviving samples by label.
    """

    def __init__(self, xFeatures, yFeature, log, dayDivisionHour=0):
        """xFeatures: passive-data predictor columns; yFeature: label column;
        dayDivisionHour: hour at which one "day" ends and the next begins."""
        self.log = log
        self.log.emit('Setting up GPModel ...')
        self.divTime = datetime.time(hour=dayDivisionHour)
        self.xFeatures = xFeatures
        self.yFeature = yFeature

    def submitData(self, active, passive):
        """Cache the active (labels) and passive (sensor) tables."""
        self.activeData = active
        self.passiveData = passive
        self.yData = self.activeData[['datetime', self.yFeature]]
        xSelection = ['timestamp'] + self.xFeatures
        self.xData = self.passiveData[xSelection]

    def createIndexTable(self):
        """Build the label-to-window index, trim it, and group by class."""
        self.indexDict = []
        self.log.emit('Extracting indexes...', indents=1)
        self.extractDateIdxsFromYData()
        self.extractDateIdxsFromXDataBasedOnY()
        self.log.emit('Trimming to complete samples...', indents=1)
        self.trimIndexDict()
        self.log.emit('Creating class index...', indents=1)
        self.classIndexes = self.genClassIndex()

    def extractDateIdxsFromYData(self):
        """One index entry per labelled y row; drops 'nan' / non-string labels.

        NOTE(review): ``entry['y'] not in 'nan'`` is a substring test, so any
        label that is a substring of 'nan' ('n', 'a', 'na', ...) is dropped
        too -- confirm ``!= 'nan'`` was not intended.
        """
        for i in range(len(self.yData)):
            entry = {'index': i}
            yDataSelect = self.yData[self.yFeature]
            entry['y'] = yDataSelect.loc[yDataSelect.index[i]]
            startDate, endDate = self.determineDatesFromYData(i)
            entry['dateStart'] = startDate
            entry['dateEnd'] = endDate
            try:
                if entry['y'] not in 'nan':
                    self.indexDict.append(entry)
            except TypeError:
                # Non-string label (e.g. float NaN): drop with a warning.
                self.log.emit('[WARN] Removed sample, due to label TypeError:\n{}'.format(entry), indents=1)

    def determineDatesFromYData(self, index):
        """Return the 24 h (start, end) window ending at the division hour."""
        dt_str = list(self.activeData[index:(index+1)]['datetime'])[0]
        dt = datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M')
        dtEnd = dt.replace(hour=self.divTime.hour, minute=0)
        tDay = datetime.timedelta(days=1)
        tMin = datetime.timedelta(minutes=1)
        # Window is [end - 24h + 1min, end].
        dtStart = dtEnd - tDay + tMin
        return (dtStart, dtEnd)

    def extractDateIdxsFromXDataBasedOnY(self):
        """Single pass over x rows assigning indexStart/indexEnd per y window."""
        idxStart = 0
        idxEnd = 0
        currentTableIndex = 0
        for i in range(len(self.xData)):
            if currentTableIndex >= len(self.indexDict):
                print(currentTableIndex)
                break
            dateStart = self.indexDict[currentTableIndex]['dateStart']
            dateEnd = self.indexDict[currentTableIndex]['dateEnd']
            dateXDataStr = list(self.xData[i:(i + 1)]['timestamp'])[0]
            dateXData = datetime.datetime.strptime(dateXDataStr, '%Y-%m-%d %H:%M')
            # Skip windows that ended before this x row (gaps in y data).
            if dateXData > dateStart and dateXData > dateEnd:
                while dateXData > dateEnd:
                    currentTableIndex += 1
                    if currentTableIndex >= len(self.indexDict):
                        print(currentTableIndex)
                        break
                    dateStart = self.indexDict[currentTableIndex]['dateStart']
                    dateEnd = self.indexDict[currentTableIndex]['dateEnd']
            if dateXData <= dateStart and dateXData < dateEnd:
                idxStart = i
            if dateXData <= dateEnd:
                idxEnd = i
            # Window closed (or data exhausted): record it and move on.
            if dateXData == dateEnd or i == (len(self.xData) - 1):
                self.indexDict[currentTableIndex]['indexStart'] = idxStart
                self.indexDict[currentTableIndex]['indexEnd'] = idxEnd
                currentTableIndex += 1
                if currentTableIndex >= len(self.indexDict):
                    break

    def trimIndexDict(self):
        """Keep only samples covering exactly 1440 minutes (one full day)."""
        tmpDict = []
        for sample in self.indexDict:
            try:
                if sample['indexEnd'] - sample['indexStart'] == 1439:
                    tmpDict.append(sample)
                else:
                    self.log.emit('[WARN] Removed sample, due to incomplete n < 1439:\n{}'.format(sample), indents=2)
            except KeyError:
                self.log.emit('[WARN] Removed sample, due to KeyError:\n{}'.format(sample), indents=2)
        self.indexDict = tmpDict

    def genClassIndex(self):
        """Map each unique label to the indexDict positions carrying it.

        NOTE(review): labels are read from the FIRST len(indexDict) rows of
        yData, not from the surviving indexDict entries -- after trimming
        these may be misaligned; confirm.
        """
        labels = self.yData[self.yFeature]
        labels = [labels[labels.index[i]] for i in range(0, len(self.indexDict))]
        uniqueLabels = np.unique(labels)
        classIndexes = {}
        for label in uniqueLabels:
            classIndexes[label] = self.retrieveLabelIndexes(label)
        return classIndexes

    def retrieveLabelIndexes(self, label):
        """Positions in indexDict whose 'y' equals ``label``."""
        labelIndex = []
        for i in range(0, len(self.indexDict)):
            if self.indexDict[i]['y'] == label:
                labelIndex.append(i)
        return labelIndex

    def getSamplesOfClassT(self, label):
        """Return the samples for ``label``, or all samples for 'all'.

        Fix: compare strings with ``==`` rather than identity (``is``),
        which only worked by virtue of CPython string interning.
        """
        if label == 'all':
            return self.indexDict
        sampleIdxs = self.classIndexes[label]
        samples = [self.indexDict[idx] for idx in sampleIdxs]
        return samples
class Periodicity:
    """Periodicity analysis of a minute-level sensor series.

    Offers serial correlation (scf), auto-correlation (acf) with peak
    detection, cross-correlation delay estimation, and a Pearson
    correlation matrix across day-length windows (pcf).
    """

    @property
    def periodicity(self):
        """Dict of computed periodicity measures (currently ACF only)."""
        periodicity = {
            #'scf': self.scf,
            'acf': self.acf,
            #'pcf': self.pcf
        }
        return periodicity

    def __init__(self, log, identifier='SleepSight', sensorName='Sensor name', path='/'):
        self.log = log
        self.observations = []
        self.sensorName = sensorName
        self.path = path
        self.id = identifier
        # Set to False when observations cannot be parsed as floats.
        self.observationValid = True

    def addObservtions(self, observations):
        """Store observations as a masked float array; '-' entries are masked.

        (Method name keeps its original, misspelt public spelling so
        existing callers are unaffected.)
        """
        try:
            obs = np.array(observations, dtype='U64')
            obs_missing = np.where(obs == '-')
            obs[obs_missing] = 999999  # placeholder value, masked just below
            obs_masked = np.ma.masked_array(obs, dtype='float64')
            obs_masked[obs_missing] = np.ma.masked
            self.observations = obs_masked
        except ValueError:
            self.observationValid = False

    # serial-correlation function
    def serial_corr(self, step=1, nSteps=10):
        """Correlation of the series with itself at lags 0, step, 2*step, ..."""
        if self.observationValid:
            self.scf = []
            n = len(self.observations)
            for i in range(int(nSteps/step)):
                lag = step*i
                y1 = self.observations[lag:]
                y2 = self.observations[:n - lag]
                self.scf.append(np.corrcoef(y1, y2, ddof=0)[0,1])

    # auto-correlation function
    def auto_corr(self, nMinutes=20160, detectPeaks=True):
        """Normalised autocorrelation over the first ``nMinutes`` lags."""
        if self.observationValid:
            acf_full = np.correlate(self.observations, self.observations, mode='full')
            # 2nd half
            N = len(acf_full)
            acf_half = acf_full[N // 2: (N // 2 + nMinutes)]
            # standardise by the number of overlapping terms at each lag
            lengths = range((N // 2 + nMinutes), N // 2, -1)
            acf_stand = acf_half / lengths
            # normalise so lag 0 equals 1
            acf_norm = acf_stand / acf_stand[0]
            if detectPeaks:
                self.detectPeaks(acf_norm)
            self.acf = acf_norm

    def cross_cor(self, targetObservation, lag):
        """Estimate the delay (in samples) maximising cross-correlation.

        Slides a window of the target over +/- ``lag`` offsets; returns the
        offset of the maximum, or NaN when no maximum is found.
        """
        if self.observationValid:
            x = list(self.observations)
            y = list(targetObservation)
            windowLength = len(x) - 2*lag
            if windowLength >= 5:
                xWinIdx = list(range(lag, (lag+windowLength)))
                featureCcf = []
                for i in range(0, 2*lag):
                    yWinIdx = list(range(i, (i + windowLength)))
                    xOfInterest = [x[idx] for idx in xWinIdx]
                    yOfInterest = [y[idx] for idx in yWinIdx]
                    cross = np.correlate(xOfInterest, yOfInterest)
                    featureCcf.append(cross[0])
                maxIdx = np.where(featureCcf == np.max(featureCcf))
                if len(maxIdx[0]) > 0:
                    delay = maxIdx[0][0] - lag
                    return delay
                return np.nan
            else:
                self.log.emit('[WARN] No cross validation possible. Choose a smaller lag to evaluate', indents=1)

    # pearson's correlation matrix
    def pearson_corr(self, lag=1440):
        """Pearson correlation matrix across consecutive ``lag``-long windows."""
        if self.observationValid:
            n = int(len(self.observations)/lag) - 1
            observation_windows = []
            for i in range(n):
                observation_windows.append(self.observations[(i*lag):((i*lag)+lag)])
            self.pcf = np.corrcoef(observation_windows)

    def detectPeaks(self, y):
        """Find peaks (min distance 720 samples) and summarise their spacing."""
        if self.observationValid:
            self.peaks = detect_peaks(y, mpd=720, kpsh=True)
            peaksMean, peaksStd = self.generatePeakStats(self.peaks)
            self.peakStats = {'mean': peaksMean, 'std': peaksStd}

    def generatePeakStats(self, peaks):
        """Return (mean, std) of the spacing between successive peaks."""
        pDiff = pd.Series(peaks).diff()
        mean = np.mean(pDiff)
        std = np.std(pDiff)
        return (mean, std)

    def plot(self, type='all', show=True, save=False):
        """Plot the selected correlation: 'scf', 'acf', 'pcf' or 'all'.

        Fix: strings are compared with ``==`` rather than identity
        (``is``), which only worked by virtue of CPython string interning.
        """
        if self.observationValid:
            if type == 'scf' or type == 'all':
                self.plotScf(show=show, save=save)
            if type == 'acf' or type == 'all':
                self.plotAcf(show=show, save=save)
            if type == 'pcf' or type == 'all':
                self.plotPcf(show=show, save=save)
            if type not in ['all', 'scf', 'acf', 'pcf']:
                self.log.emit('[PERIODICITY] WARN: Did not plot. Choose from "all", "scf", "acf" or "pcf".', indents=1)

    def plotScf(self, show=True, save=False):
        """Line plot of the serial-correlation function."""
        scfBetaOne = self.scf[1]
        text = 'Beta = 1; SCF = {}'.format(scfBetaOne)
        title = 'Serial-correlation: {}'.format(self.sensorName)
        qp = QuickPlot(path=self.path, identifier=self.id)
        qp.singlePlotOfTypeLine(self.scf, title=title, text=text, lineLabels=['SCF'], show=show, saveFigure=save)

    def plotAcf(self, withPeak=True, show=True, save=True):
        """Line plot of the ACF with day-boundary ticks; optionally marks peaks."""
        nDays = (len(self.acf) // 1440) + 1
        ticks = np.arange(0, 1440*nDays, 1440)
        tickLabels = np.arange(0,nDays)
        title = 'Auto-correlation: {}'.format(self.sensorName)
        qp = QuickPlot(path=self.path, identifier=self.id)
        if withPeak:
            qp.singlePlotOfTypeLine(self.acf, title=title, lineLabels=['ACF'], ticks=ticks, tickLabels=tickLabels,
                                    show=show, saveFigure=save, highlightPoints=self.peaks)
        else:
            qp.singlePlotOfTypeLine(self.acf, title=title, lineLabels=['ACF'], ticks=ticks, tickLabels=tickLabels,
                                    show=show, saveFigure=save)

    def plotPcf(self, show=True, save=True):
        """Heatmap of the Pearson window-correlation matrix."""
        title = 'Pearson\'s correlation matrix: {}'.format(self.sensorName)
        qp = QuickPlot(path=self.path, identifier=self.id)
        qp.singlePlotOfTypeHeatmap(self.pcf, title=title, show=show, saveFigure=save)
| |
#!/usr/bin/env python2
###############################################################################
# ------------------------- Description ---------------------------------------
###############################################################################
# This script is used to identify high wind days, precipitation days, and
# high temperature days. This script can be passed a rather confusing set of
# arguments.
import sys
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
# TODO: Make it possible to pass absolute thresholds.
# threshDict maps variable name ('T', 'Wind', 'PRECT') to a human-readable
# threshold description; it is embedded in the output mask file names.
threshDict = {}
if len(sys.argv) != 1:
    threshType = sys.argv[1]
    scenario = sys.argv[2]
    print 'Using arguments passed via command line.'
    # NOTE(review): argv values stay strings here; the 'useValue' branch
    # later compares them directly against numeric arrays (T >= TThresh) --
    # confirm whether a float() conversion is needed.
    if threshType == 'usePercentile':
        TThresh = sys.argv[3] # percentile
        WindThresh = sys.argv[4] # percentile
        PRECTThresh = sys.argv[5] # percentile
        threshDict['T'] = str(TThresh) + 'percentile'
        threshDict['Wind'] = str(WindThresh) + 'percentile'
        threshDict['PRECT'] = str(PRECTThresh) + 'percentile'
    if threshType == 'useValue':
        TThresh = sys.argv[3] # K
        WindThresh = sys.argv[4] # m/s
        PRECTThresh = sys.argv[5] # inches/day
        threshDict['T'] = TThresh
        threshDict['Wind'] = WindThresh
        threshDict['PRECT'] = PRECTThresh
else:
    # No CLI arguments: fall back to hard-coded defaults.
    print 'Using defualt arguments. None passed via command line.'
    threshType = 'usePercentile'
    scenario = '2000Base'
    TThresh = 95 #298 # K, 90th percentile of all 20-60 North days daily temeperate
    WindThresh = 95 #9 #m/s = 20 mph 1hr, NWS fire weather warning
    PRECTThresh = 1 # 0.01 # percentile # TODO: Think carefully about this percentile value
    if threshType == 'useValue': # TODO: selection
        threshDict['T'] = TThresh
        threshDict['Wind'] = WindThresh
        threshDict['PRECT'] = PRECTThresh
    if threshType == 'usePercentile':
        threshDict['T'] = str(TThresh) + 'percentile'
        threshDict['Wind'] = str(WindThresh) + 'percentile'
        threshDict['PRECT'] = str(PRECTThresh) + 'percentile'
import os
import numpy as np
import sys
from mpl_toolkits.basemap import Basemap, cm
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy.ma as ma
from datetime import date
import matplotlib.ticker as tkr
import datetime
import cesm_nc_manager as cnm
import time as timer
dataDirBase = os.path.join("/pierce-scratch", "mariavm")
startTime = timer.time()
# Make temperature, wind, and precip file connections.
# Fix: the first argument must be the data directory (``dataDirBase``),
# matching the calls further below; the previous code passed the undefined
# name ``findHighValueDays`` here, which raised a NameError at runtime.
U = cnm.getSelf(dataDirBase, scenario, "U")
V = cnm.getSelf(dataDirBase, scenario, "V")
T = cnm.getSelf(dataDirBase, scenario, "T")
PRECT = cnm.getSelf(dataDirBase, scenario, "PRECT")
# Use T to get dimension information
TFile = cnm.makeAQNCFile(dataDirBase, 'T', scenario, 'daily')
Tnc = Dataset(TFile, 'r')
t = Tnc.variables['time'][:]
lat = Tnc.variables['lat'][:]
lon = Tnc.variables['lon'][:]
lev = Tnc.variables['lev'][:]
Tnc.close()
# Get the surface index from lev. This is index used to make masks.
# NOTE: U, V, and T all on lev NOT ilev
srf_index = np.where(lev == lev.max())[0][0]
####################################################
# Get all variables massive arrays into useful forms
####################################################
# convert precip to inches per day
# NOTE(review): the helper name says meters/day while this comment says
# inches/day -- confirm the intended unit.
PRECT = cnm.metersPerSecToMetersPerDay(PRECT)
print '-----------------------------------------------------------------'
print 'working on getting full size surface arrays'
print '-----------------------------------------------------------------'
# Subset each (time, lev, lat, lon) field to its surface level only.
T = T[:, srf_index, :, :]
print 'done with T'
U = U[:, srf_index, :, :]
print 'Done with U'
V = V[:, srf_index, :, :]
print 'Done with V'
print 'Done getting wind, temperature, and precip variables into environment'
# Create surface wind mag
windMag = np.sqrt(U**2 + V**2)
# Get a more friendly time dimension that is useful for precentile
# maximum values
Time = cnm.getSelf(dataDirBase, scenario, "date")
dateTime = cnm.dateNumToDate(Time)
nTime = len(Time)
nLon = len(lon)
nLat = len(lat)
# We need to identify the threshold value and mask for each variable.
# Using a single value is the simple method. Using a percentile value
# for a given grid cell is much more complicated and still in development
if threshType == 'usePercentile':
    print 'using percentile thresholds for masking'
    # Per-grid-cell percentile thresholds computed by cesm_nc_manager.
    TMask, TLimVals = cnm.findHighValueDays(T,\
                                            dateTime,\
                                            TThresh)
    TMask = np.array(TMask, dtype=int)
    WindMask, WindLimVals = cnm.findHighValueDays(windMag,\
                                                  dateTime,\
                                                  WindThresh)
    WindMask = np.array(WindMask, dtype=int)
    PRECTMask, PrecLimVals = cnm.findHighValueDays(PRECT,\
                                                   dateTime,\
                                                   PRECTThresh)
    PRECTMask = np.array(PRECTMask, dtype=int)
elif threshType == 'useValue':
    print 'using hard value thresholds for masking'
    # Predefine the mask arrays
    # NOTE(review): the three names below alias ONE zero array; this is
    # harmless only because each is rebound before use -- the
    # predefinition is effectively dead code.
    TMask = np.zeros((len(t), len(lat), len(lon)), dtype=int)
    WindMask = TMask
    PRECTMask = TMask
    print 'Creating surface temperature Mask'
    # Mask high temperature days for this date
    TMask = np.array(T >= TThresh, dtype=int)
    print 'Creating srf windMag Mask'
    WindMask = np.array(windMag >= WindThresh, dtype=int)
    print 'Create the low precip day Mask'
    # Precipitation uses <= : this flags LOW precipitation (dry) days.
    PRECTMask = np.array(PRECT <= PRECTThresh, dtype=int)
######################################################################
# Write all three masks with detailed description of chosen thresholds
######################################################################
masksDict = {}
masksDict['T'] = TMask
masksDict['Wind'] = WindMask
masksDict['PRECT'] = PRECTMask
# One NetCDF file per variable, named after its condition and threshold.
for var in masksDict.keys():
    # High temperature / high wind are of interest, but LOW precipitation.
    if var!='PRECT':
        condition = 'high'+var
    else:
        condition = 'low'+var
    saveString = condition +'Mask_'+str(threshDict[var])
    # connect this descriptive name to the directory of the scenario and var
    saveName = cnm.makeAQNCFile(dataDirBase, saveString, scenario, 'daily')
    ncFile = Dataset(saveName, 'w', format='NETCDF4')
    ncFile.description = 'Mask indicating ' + condition + ' condition.'
    ncFile.location = 'Global'
    ncFile.createDimension('time', nTime )
    ncFile.createDimension('lat', nLat )
    ncFile.createDimension('lon', nLon )
    # Create variables on the dimension they live on
    maskVar = ncFile.createVariable(saveString, 'i', ('time','lat','lon'))
    maskVar.units = '1=True, 0=False. ' + 'Limit used: ' + str(threshDict[var])
    time_var = ncFile.createVariable('time', 'i4', ('time',))
    time_var.units = 'days from origin'
    latitude = ncFile.createVariable('lat', 'f4', ('lat',))
    latitude.units = 'degrees north'
    longitude = ncFile.createVariable('lon', 'f4', ('lon',))
    longitude.units = 'degrees east'
    # Write the actual data to these dimensions
    maskVar[:] = masksDict[var]
    latitude[:] = lat
    longitude[:] = lon
    time_var[:] = t
    ncFile.close()
| |
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Optional
from oslo_log import versionutils
from oslo_policy import policy
# General observations
# --------------------
# - This file uses the three "default roles" provided by Keystone during
# the ``keystone-manage bootstrap`` operation. These are 'admin', 'member',
# and 'reader'.
#
# - The default Keystone installation implements an inheritance relation
# between the roles:
# 'admin' is-a 'member' is-a 'reader'
# More importantly, however, Keystone will actually populate the roles
# appropriately. Thus, someone with the 'admin' role on project X will also
# have the 'member' and 'reader' roles on project X. What this means for
# us is that if we have a policy we want satisfied by someone with any of
# the 'admin', 'member', or 'reader' roles, we do NOT need to do this:
# "get-foo-policy": "role:admin or role:member or role:reader"
# Instead we can simply say:
# "get-foo-policy": "role:reader"
# because we know that anyone who has been assigned the 'admin' role in
# Keystone also has the 'member' and 'reader' roles, and anyone assigned
# the 'member' role *also* has the 'reader' role.
#
# - How do I know what string to use?
# Cinder maintains a policy matrix correlating REST API calls, policy
# names, and what "personas" can perform them. The "personas" are
# abstract entities whose powers are supposed to be consistent across
# OpenStack services. The "personas" are implemented by each service
# using the default Keystone roles and scopes ... but you have to be
# careful, because for example, a "system-reader" persona is NOT simply
# a read-only administrator (it's actually less). See the policy matrix
# for details.
#
# - This is probably obvious, but I'll say it anyway. There is nothing
# magic about the 'reader' role that guarantees that someone with *only*
# that role can only do read-only kind of stuff in a service. We (as the
# Cinder service team) give it meaning by the way we define our policy
# rules. So if as a joke, we were to write rules that allowed someone
# with only the 'reader' role to delete volumes in any project, there is
# nothing Keystone could do about it. So be careful.
# Private policy checkstrings
# ---------------------------
# "Private" strings should not be used outside of this file. Add a new
# public string in the appropriate place if you need one.
# Generic policy check string for the persona we are calling 'system-admin'.
# Note: we aren't recognizing system scope in Xena, so we aren't actually
# using this check string yet.
_SYSTEM_ADMIN = 'role:admin and system_scope:all'
# Xena-era stand-in for _SYSTEM_ADMIN: role check only, no scope check,
# because system scope isn't being enforced yet.
_LEGACY_SYSTEM_ADMIN = 'role:admin'
# Cinder doesn't plan to use this one. It doesn't map to any of our
# supported personas. It's only here in case you were wondering ...
# _SYSTEM_MEMBER = 'role:member and system_scope:all'
# Generic policy check string for the persona we are calling 'system-reader'.
_SYSTEM_READER = 'role:reader and system_scope:all'
# Note: In Xena, there isn't really a system-reader persona so make sure
# the system-admin can do this
_LEGACY_SYSTEM_READER = _LEGACY_SYSTEM_ADMIN
# Generic policy check string for the persona we are calling 'project-admin'.
# Note: We are not implementing this persona in Xena. (Compare it to the
# _LEGACY_SYSTEM_ADMIN string above and you'll see why.)
_PROJECT_ADMIN = 'role:admin and project_id:%(project_id)s'
# Generic policy check string for the persona we are calling 'project-member'.
# Note: The 'and project_id:%(project_id)s' part makes this a project-scoped
# checkstring.
_PROJECT_MEMBER = 'role:member and project_id:%(project_id)s'
# Generic policy check string for the persona we are calling 'project-reader'.
_PROJECT_READER = 'role:reader and project_id:%(project_id)s'
# rule names -- these refer to the RuleDefaults in yoga_rule_defaults below
_YOGA_SYSTEM_READER_OR_PROJECT_READER = 'rule:system_reader_or_project_reader'
_YOGA_SYSTEM_ADMIN_OR_PROJECT_MEMBER = 'rule:system_admin_or_project_member'
_YOGA_SYSTEM_ADMIN_OR_PROJECT_ADMIN = 'rule:system_admin_or_project_admin'
_YOGA_SYSTEM_ADMIN_ONLY = 'rule:system_admin_only'
# rules
# Yoga-era rules: these recognize both system scope and project scope,
# giving the full five Cinder personas.
yoga_rule_defaults = [
    policy.RuleDefault('system_reader_or_project_reader',
                       f'({_SYSTEM_READER}) or ({_PROJECT_READER})',
                       description=("Grants permission for the following "
                                    "Cinder personas: system-admin, system-"
                                    "reader, project-admin, project-member, "
                                    "and project-reader")),
    policy.RuleDefault('system_admin_or_project_member',
                       f'({_SYSTEM_ADMIN}) or ({_PROJECT_MEMBER})',
                       description=("Grants permission for the following "
                                    "Cinder personas: system-admin, project-"
                                    "admin, and project-member")),
    policy.RuleDefault('system_admin_or_project_admin',
                       f'({_SYSTEM_ADMIN}) or ({_PROJECT_ADMIN})',
                       description=("Grants permission for the following "
                                    "Cinder personas: system-admin and "
                                    "project-admin")),
    policy.RuleDefault('system_admin_only',
                       f'({_SYSTEM_ADMIN})',
                       description=("Grants permission only to the system-"
                                    "admin persona.")),
]
# Public policy checkstrings for deprecations
# -------------------------------------------
# The XENA_* need to be public because we'll use them in CinderDeprecatedRules
# in the individual policy files when these are updated in Yoga. They
# should *not* appear in any DocumentedRuleDefaults.
# we *call* it system reader for consistency with Yoga, but in Xena
# there isn't a system reader persona
XENA_SYSTEM_READER_OR_PROJECT_READER = (
    "rule:xena_system_admin_or_project_reader")
XENA_SYSTEM_ADMIN_OR_PROJECT_MEMBER = (
    "rule:xena_system_admin_or_project_member")
# This will not be used. Rules appropriate for this checkstring will remain
# as RULE_ADMIN_API in Xena and won't be deprecated until Yoga development.
# XENA_SYSTEM_ADMIN_ONLY = "rule:xena_system_admin_only"
# Refers to the legacy 'admin_api' rule defined in xena_rule_defaults below;
# it remains the checkstring for "Admin API" policies during Xena.
RULE_ADMIN_API = "rule:admin_api"
# TODO: xena rules to be removed in AA
# Xena-era rules: purely role-based, project scope only (no system scope).
xena_rule_defaults = [
    # these legacy rules are still used in Xena and will be used as the
    # checkstrings for CinderDeprecatedRules in Yoga and Z
    policy.RuleDefault('context_is_admin', 'role:admin',
                       description="Decides what is required for the "
                                   "'is_admin:True' check to succeed."),
    policy.RuleDefault('admin_api',
                       'is_admin:True or (role:admin and '
                       'is_admin_project:True)',
                       # FIXME: In Yoga, point out that is_admin_project
                       # is deprecated and operators should use system
                       # scope instead
                       description="Default rule for most Admin APIs."),
    # "pure" Xena rules
    policy.RuleDefault(
        'xena_system_admin_or_project_reader',
        f'({_LEGACY_SYSTEM_ADMIN}) or ({_PROJECT_READER})',
        description=("NOTE: this purely role-based rule recognizes only "
                     "project scope")),
    policy.RuleDefault(
        'xena_system_admin_or_project_member',
        f'({_LEGACY_SYSTEM_ADMIN}) or ({_PROJECT_MEMBER})',
        description=("NOTE: this purely role-based rule recognizes only "
                     "project scope")),
]
# Public policy checkstrings expressed as personas
# ------------------------------------------------
# TODO: update the following in Yoga
# These aliases are what the individual policy files consume; flipping them
# to the _YOGA variants in Yoga updates every policy file at once.
SYSTEM_READER_OR_PROJECT_READER = XENA_SYSTEM_READER_OR_PROJECT_READER
# SYSTEM_READER_OR_PROJECT_READER = _YOGA_SYSTEM_READER_OR_PROJECT_READER
SYSTEM_ADMIN_OR_PROJECT_MEMBER = XENA_SYSTEM_ADMIN_OR_PROJECT_MEMBER
# SYSTEM_ADMIN_OR_PROJECT_MEMBER = _YOGA_SYSTEM_ADMIN_OR_PROJECT_MEMBER
# We won't be using this one in Xena. System-admin-only rules will NOT be
# modified during Xena development.
# SYSTEM_ADMIN_ONLY = XENA_SYSTEM_ADMIN_ONLY
# SYSTEM_ADMIN_ONLY = _YOGA_SYSTEM_ADMIN_ONLY
# Deprecation strategy
# --------------------
# We will be using the following strategy to transform Cinder policies
# from legacy Wallaby checkstrings to Keystone default-role-and-scope aware
# policies over the next few cycles:
#
# 1. In Xena, the Wallaby checkstrings are moved to CinderDeprecatedRules and
# new checkstrings (using the three default roles but project scope only)
# are defined in DocumentedRuleDefaults. At this point, only the
# three Cinder personas of system-admin, project-member, and project-reader
# will be implemented, but to prepare for Yoga, we'll use the variables
# defined in the "Public policy checkstrings expressed as personas" above.
#
# EXCEPTION: any policies that are currently (i.e., during Xena development)
# using "rule:admin_api" (which shows up in the policy files as
# 'base.RULE_ADMIN_API') will NOT be deprecated in Xena. (They will be
# deprecated in Yoga.)
#
# 2. In Yoga, the Xena checkstrings are moved to the CinderDeprecatedRules.
# For example, if a DocumentedRuleDefault with
# check_str=SYSTEM_READER_OR_PROJECT_READER
# contains a deprecated_rule, find the definition of that
# CinderDeprecatedRule in the file and change *its* checkstring to
# check_str=XENA_SYSTEM_READER_OR_PROJECT_READER
#
# The checkstrings in the DocumentedRuleDefaults will be updated
# when we change the "Public policy checkstrings expressed as personas"
# above to their _YOGA versions in this file--we will not have to manually
# update the checkstrings in the individual files.
#
# EXCEPTION: We'll need to add CinderDeprecatedRules for any policies that
# don't currently (i.e., during Yoga development) have them. (These will
# be the "Admin API" calls that we didn't modify in Xena.) Their current
# checkstrings will be moved to the deprecated rules, and their new
# checkstrings will be SYSTEM_ADMIN_ONLY.
#
# OTHER UPDATES: All DocumentedRuleDefaults will need to have the
# 'scope_types' field added to them, for example,
# scope_types=['system', 'project'],
# or
# scope_types['system'],
# depending on the intended scope of the rule.
#
# The Yoga checkstrings (using the three default roles + system scope) will
# give us the full five Cinder personas. After operators have made
# appropriate adjustments to user and group role assignments in Keystone,
# they will be able to use the new checkstrings by setting the
# 'enforce_new_defaults' and 'enforce_scope' options to appropriate
# values in the [oslo_policy] section of their cinder configuration file.
#
# 3. In Z, we let the Yoga policy configuration bake to allow operators
# to time to make the Keystone adjustments mentioned above before they
# enable the Yoga rules.
#
# 4. In AA, we remove the CinderDeprecatedRules and adjust the
# DocumentedRuleDefaults accordingly.
# Reason strings attached to deprecated rules; operators see these in
# oslo.policy deprecation warnings.
_XENA_DEPRECATED_REASON = (
    'Default policies now support the three Keystone default roles, namely '
    "'admin', 'member', and 'reader' to implement three Cinder "
    '"personas". See "Policy Personas and Permissions" in the "Cinder '
    'Service Configuration" documentation (Xena release) for details.')
_YOGA_DEPRECATED_REASON = (
    'Default policies now support Keystone default roles and system scope to '
    'implement five Cinder "personas". See "Policy Personas and Permissions" '
    'in the "Cinder Service Configuration" documentation (Yoga release) for '
    'details.')
# TODO: change these in Yoga
DEPRECATED_REASON = _XENA_DEPRECATED_REASON
DEPRECATED_SINCE = versionutils.deprecated.XENA
class CinderDeprecatedRule(policy.DeprecatedRule):
    """A DeprecatedRule subclass with pre-defined fields.

    Defaults ``deprecated_reason`` and ``deprecated_since`` to the
    module-level DEPRECATED_* values so the individual policy files don't
    have to repeat them; either may still be overridden per rule via the
    keyword-only arguments.
    """
    def __init__(self,
                 name: str,
                 check_str: str,
                 *,
                 deprecated_reason: Optional[str] = DEPRECATED_REASON,
                 deprecated_since: Optional[str] = DEPRECATED_SINCE,
                 ):
        super().__init__(
            name, check_str, deprecated_reason=deprecated_reason,
            deprecated_since=deprecated_since
        )
# This is used by the deprecated rules in the individual policy files
# in Xena.
# TODO: remove in Yoga
RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
# FIXME: remove these when cinder.policies.default_types is updated
SYSTEM_OR_DOMAIN_OR_PROJECT_ADMIN = 'rule:system_or_domain_or_project_admin'
SYSTEM_ADMIN = _SYSTEM_ADMIN
# Banner prepended to descriptions of rules slated for removal; PADDING
# pushes the banner to a fixed 70-character width so the real description
# starts on a clean boundary in generated docs.
YOGA_REMOVAL = 'DEPRECATED: This rule will be removed in the Yoga release.'
PADDING = ' ' * (70 - len(YOGA_REMOVAL))
# legacy rules to be removed in Yoga
# Pre-Xena rules kept only for backward compatibility; see YOGA_REMOVAL.
legacy_rule_defaults = [
    policy.RuleDefault('admin_or_owner',
                       'is_admin:True or (role:admin and '
                       'is_admin_project:True) or project_id:%(project_id)s',
                       description=(f'{YOGA_REMOVAL}{PADDING}'
                                    'Default rule for most non-Admin APIs.')),
    # currently used only by cinder.policies.default_types
    policy.RuleDefault('system_or_domain_or_project_admin',
                       '(role:admin and system_scope:all) or '
                       '(role:admin and domain_id:%(domain_id)s) or '
                       '(role:admin and project_id:%(project_id)s)',
                       description=(f'{YOGA_REMOVAL}{PADDING}'
                                    "Default rule for admins of cloud, domain "
                                    "or a project.")),
]
def list_rules():
    """Return the default policy rules for the current (Xena) release."""
    # TODO: update in Yoga and AA --
    #   xena: legacy_rule_defaults + xena_rule_defaults
    #   yoga: xena_rule_defaults + yoga_rule_defaults
    #   AA:   yoga_rule_defaults only
    return [*legacy_rule_defaults, *xena_rule_defaults]
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name,too-many-locals,no-self-use
""" Support import export formats."""
import numpy as np
from .... import symbol
from .... import ndarray as nd
from ....base import string_types
from ._import_helper import _convert_map as convert_map
class GraphProto(object):  # pylint: disable=too-few-public-methods
    """A helper class for handling mxnet symbol copying from pb2.GraphProto.

    Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
    """
    def __init__(self):
        # tensor/node-output name -> mxnet symbol, filled while walking the graph
        self._nodes = {}
        # initializer name -> NDArray (pretrained parameter values)
        self._params = {}
        self._num_input = 0
        self._num_param = 0
        # parameter split produced by from_onnx(): auxiliary states vs arguments
        self.aux_dict = {}
        self.arg_dict = {}
        self.model_metadata = {}
        self.opset_version = 0

    def _convert_operator(self, node_name, op_name, attrs, inputs):
        """Convert from onnx operator to mxnet operator.

        The converter must specify conversions explicitly for incompatible name, and
        apply handlers to operator attributes.

        Parameters
        ----------
        node_name : str
            name of the node to be translated.
        op_name : str
            Operator name, such as Convolution, FullyConnected
        attrs : dict
            Dict of operator attributes
        inputs : list
            list of inputs to the operator

        Returns
        -------
        mxnet_sym
            Converted mxnet symbol, or whatever the conversion handler
            returned when it did not produce an operator name.
        """
        if op_name in convert_map:
            # Handlers may rewrite the op name, attributes, and inputs.
            op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
        else:
            raise NotImplementedError("Operator {} not implemented.".format(op_name))
        if isinstance(op_name, string_types):
            new_op = getattr(symbol, op_name, None)
            if not new_op:
                raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
            if node_name is None:
                mxnet_sym = new_op(*inputs, **new_attrs)
            else:
                mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
            return mxnet_sym
        # The handler returned a ready-made symbol rather than an op name.
        return op_name

    def from_onnx(self, graph, opset_version):
        """Construct symbol from onnx graph.

        Parameters
        ----------
        graph : onnx protobuf object
            The loaded onnx graph
        opset_version : int
            ONNX opset version the graph was exported with.

        Returns
        -------
        sym : symbol.Symbol
            The returned mxnet symbol
        params : dict
            A dict of name: nd.array pairs, used as pretrained weights
        """
        self.opset_version = opset_version
        # get input, output shapes
        self.model_metadata = self.get_graph_metadata(graph)
        # parse network inputs, aka parameters
        for init_tensor in graph.initializer:
            if not init_tensor.name.strip():
                raise ValueError("Tensor's name is required.")
            self._params[init_tensor.name] = self._parse_array(init_tensor)
        # converting GraphProto message
        for i in graph.input:
            if i.name in self._params:
                # i is a param instead of input
                self._nodes[i.name] = symbol.Variable(name=i.name,
                                                      shape=self._params[i.name].shape)
            else:
                self._nodes[i.name] = symbol.Variable(name=i.name)
        # constructing nodes, nodes are stored as directed acyclic graph
        # converting NodeProto message
        for node in graph.node:
            op_name = node.op_type
            # an empty node name means "unnamed"; pass None so mxnet autonames
            node_name = node.name.strip()
            node_name = node_name if node_name else None
            onnx_attr = self._parse_attr(node.attribute)
            inputs = [self._nodes[i] for i in node.input]
            mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
            for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
                self._nodes[k] = mxnet_sym[i]
            # splitting params into args and aux params
            for args in mxnet_sym.list_arguments():
                if args in self._params:
                    self.arg_dict.update({args: nd.array(self._params[args])})
            for aux in mxnet_sym.list_auxiliary_states():
                if aux in self._params:
                    self.aux_dict.update({aux: nd.array(self._params[aux])})
        # now return the outputs
        out = [self._nodes[i.name] for i in graph.output]
        if len(out) > 1:
            out = symbol.Group(out)
        else:
            out = out[0]
        return out, self.arg_dict, self.aux_dict

    def get_graph_metadata(self, graph):
        """
        Get the model metadata (input/output tensor names and shapes)
        from a given onnx graph.
        """
        _params = set()
        for tensor_vals in graph.initializer:
            _params.add(tensor_vals.name)
        input_data = []
        for graph_input in graph.input:
            # initializers also appear in graph.input; skip them here
            if graph_input.name not in _params:
                shape = [val.dim_value for val in graph_input.type.tensor_type.shape.dim]
                input_data.append((graph_input.name, tuple(shape)))
        output_data = []
        for graph_out in graph.output:
            shape = [val.dim_value for val in graph_out.type.tensor_type.shape.dim]
            output_data.append((graph_out.name, tuple(shape)))
        metadata = {'input_tensor_data' : input_data,
                    'output_tensor_data' : output_data
                   }
        return metadata

    def graph_to_gluon(self, graph, ctx, opset_version):
        """Construct SymbolBlock from onnx graph.

        Parameters
        ----------
        graph : onnx protobuf object
            The loaded onnx graph
        ctx : Context or list of Context
            Loads the model into one or many context(s).
        opset_version : int
            ONNX opset version the graph was exported with.

        Returns
        -------
        sym_block : gluon.nn.SymbolBlock
            The returned gluon SymbolBlock
        """
        sym, arg_params, aux_params = self.from_onnx(graph, opset_version)
        metadata = self.get_graph_metadata(graph)
        data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']]
        data_inputs = [symbol.var(data_name) for data_name in data_names]
        from ....gluon import SymbolBlock
        net = SymbolBlock(outputs=sym, inputs=data_inputs)
        net_params = net.collect_params()
        for param in arg_params:
            if param in net_params:
                net_params[param].shape = arg_params[param].shape
                net_params[param]._load_init(arg_params[param], ctx=ctx)
        for param in aux_params:
            if param in net_params:
                net_params[param].shape = aux_params[param].shape
                net_params[param]._load_init(aux_params[param], ctx=ctx)
        return net

    def _parse_array(self, tensor_proto):
        """Grab data in TensorProto and convert to numpy array."""
        try:
            from onnx.numpy_helper import to_array
        except ImportError:
            raise ImportError("Onnx and protobuf need to be installed. "
                              + "Instructions to install - https://github.com/onnx/onnx")
        if len(tuple(tensor_proto.dims)) > 0:
            np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
        else:
            # If onnx's params are scalar values without dims mentioned.
            np_array = np.array([to_array(tensor_proto)])
        return nd.array(np_array)

    def _parse_attr(self, attr_proto):
        """Convert a list of AttributeProto to a dict, with names as keys."""
        attrs = {}
        for a in attr_proto:
            for f in ['f', 'i', 's']:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
                    # Needed for supporting python version > 3.5
                    if isinstance(attrs[a.name], bytes):
                        attrs[a.name] = attrs[a.name].decode(encoding='utf-8')
            for f in ['floats', 'ints', 'strings']:
                if list(getattr(a, f)):
                    assert a.name not in attrs, "Only one type of attr is allowed"
                    attrs[a.name] = tuple(getattr(a, f))
            for f in ['t', 'g']:
                if a.HasField(f):
                    attrs[a.name] = getattr(a, f)
            for f in ['tensors', 'graphs']:
                if list(getattr(a, f)):
                    # fixed typo in message: "Filed" -> "Field"
                    raise NotImplementedError("Field {} is not supported in mxnet.".format(f))
            if a.name not in attrs:
                raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
        return attrs
| |
import argparse
import json
import os
from time import sleep
from urllib import unquote_plus
from urlparse import urlparse, urlunparse
import sendgrid
from sendgrid.helpers.mail.mail import Content
from sendgrid.helpers.mail.mail import Email
from sendgrid.helpers.mail.mail import Mail
from sendgrid.helpers.mail.mail import TrackingSettings, ClickTracking
from sqlalchemy import text
from app import db
from app import logger
from page import PageNew
from pub import Pub
class HybridScrapeTestCase(db.Model):
    """Expected hybrid-scrape results for one publication, keyed by pub id."""
    id = db.Column(db.Text, primary_key=True)
    scrape_evidence = db.Column(db.Text)
    scrape_license = db.Column(db.Text)
    scrape_metadata_url = db.Column(db.Text)
    scrape_pdf_url = db.Column(db.Text)
def _hybrid_to_dict(pub_or_test_case):
return {
'scrape_evidence': pub_or_test_case.scrape_evidence,
'scrape_license': pub_or_test_case.scrape_license,
'scrape_metadata_url': pub_or_test_case.scrape_metadata_url,
'scrape_pdf_url': pub_or_test_case.scrape_pdf_url,
}
class GreenScrapeTestCase(db.Model):
    """Expected green (repository) scrape results for one page, keyed by page id."""
    id = db.Column(db.Text, primary_key=True)
    scrape_version = db.Column(db.Text)
    scrape_license = db.Column(db.Text)
    scrape_metadata_url = db.Column(db.Text)
    scrape_pdf_url = db.Column(db.Text)
def _green_to_dict(page_or_test_case):
return {
'scrape_version': page_or_test_case.scrape_version,
'scrape_license': page_or_test_case.scrape_license,
'scrape_metadata_url': page_or_test_case.scrape_metadata_url,
'scrape_pdf_url': page_or_test_case.scrape_pdf_url,
}
def _normalize_url(url):
if not url:
return url
parts = urlparse(url)
parts = parts._replace(path=unquote_plus(parts.path))
return urlunparse(parts)
def _run_hybrid_tests():
    """Re-scrape every hybrid test case and diff the results against expectations.

    Returns a human-readable report string listing failures then passes.
    """
    test_cases = HybridScrapeTestCase.query.all()
    test_ids = [tc.id for tc in test_cases]
    # refresh test cases now: push them to the front of the refresh queue
    refresh_query = text(u'''
        update pub_refresh_queue
        set priority=1000000, finished = null
        where id = any(:ids)
        and started is null
    '''.format()).bindparams(ids=test_ids)
    # prevent update from recalculating priority now
    update_query = text(u'''
        update pub_queue
        set finished=now()
        where id = any(:ids)
        and started is null
    '''.format()).bindparams(ids=test_ids)
    db.session.execute(refresh_query)
    db.session.execute(update_query)
    db.session.commit()
    # wait for refresh to finish, polling every 30 seconds
    status_query = text(u'''
        select
            count(*) as total,
            sum(case when finished is not null then 1 else 0 end) as done
        from pub_refresh_queue
        where id = any(:ids)
    '''.format()).bindparams(ids=test_ids)
    while True:
        total, done = db.engine.execute(status_query).first()
        if total == done:
            break
        logger.info(u'waiting for hybrid scrape: {}/{}'.format(done, total))
        sleep(30)
    # compare the refreshed scrape results against the stored expectations;
    # URLs are normalized so escaping differences don't cause false failures
    pubs = Pub.query.filter(Pub.id.in_(test_ids)).all()
    pubs_by_id = dict((p.id, p) for p in pubs)
    successes = {}
    failures = {}
    for test_case in test_cases:
        this_pub = pubs_by_id[test_case.id]
        if (
            test_case.scrape_evidence == this_pub.scrape_evidence and
            test_case.scrape_license == this_pub.scrape_license and
            _normalize_url(test_case.scrape_pdf_url) == _normalize_url(this_pub.scrape_pdf_url) and
            _normalize_url(test_case.scrape_metadata_url) == _normalize_url(this_pub.scrape_metadata_url)
        ):
            successes[test_case.id] = _hybrid_to_dict(test_case)
        else:
            failures[test_case.id] = {
                'expected': _hybrid_to_dict(test_case),
                'got': _hybrid_to_dict(this_pub)
            }
    report = u'failed:\n\n{}\n\npassed:\n\n{}\n'.format(json.dumps(failures, indent=4), json.dumps(successes, indent=4))
    return report
def _run_green_tests():
    """Re-scrape every green (repository) test case and diff against expectations.

    Returns a human-readable report string listing failures then passes.
    """
    test_cases = GreenScrapeTestCase.query.all()
    test_ids = [tc.id for tc in test_cases]
    # refresh test cases now, synchronously (unlike the hybrid tests,
    # which go through the refresh queue)
    pages = PageNew.query.filter(PageNew.id.in_(test_ids)).all()
    for i, p in enumerate(pages):
        logger.info('refreshing page {} {}/{}'.format(p.id, i, len(pages)))
        p.scrape()
        db.session.commit()
    pages_by_id = dict((p.id, p) for p in pages)
    successes = {}
    failures = {}
    for test_case in test_cases:
        this_page = pages_by_id.get(test_case.id, None)
        if this_page is None:
            # the page for this test case no longer exists in the database
            failures[test_case.id] = {
                'expected': _green_to_dict(test_case),
                'got': None
            }
        elif (
            test_case.scrape_version == this_page.scrape_version and
            test_case.scrape_license == this_page.scrape_license and
            _normalize_url(test_case.scrape_pdf_url) == _normalize_url(this_page.scrape_pdf_url) and
            _normalize_url(test_case.scrape_metadata_url) == _normalize_url(this_page.scrape_metadata_url)
        ):
            successes[test_case.id] = _green_to_dict(test_case)
        else:
            failures[test_case.id] = {
                'expected': _green_to_dict(test_case),
                'got': _green_to_dict(this_page)
            }
    report = u'failed:\n\n{}\n\npassed:\n\n{}\n'.format(json.dumps(failures, indent=4), json.dumps(successes, indent=4))
    return report
def _send_report(subject, report, to_address):
    """Email the plain-text test report via SendGrid, click tracking disabled."""
    message = Mail(
        Email("dev@ourresearch.org", "Unpaywall Team"),
        subject,
        Email(to_address),
        Content("text/plain", report),
    )
    # disable click tracking so report URLs arrive unmangled
    tracking = TrackingSettings()
    tracking.click_tracking = ClickTracking(False, False)
    message.tracking_settings = tracking
    client = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
    client.client.mail.send.post(request_body=message.get())
    logger.info(u'sent "{}" report to {}'.format(subject, to_address))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the hybrid scrape regression tests.")
parser.add_argument('--email', nargs="?", type=str, help="where to send the report (optional)")
parser.add_argument('--hybrid', default=False, action='store_true', help="run the hybrid tests")
parser.add_argument('--green', default=False, action='store_true', help="run the green tests")
parsed_args = parser.parse_args()
if parsed_args.hybrid:
hybrid_report = _run_hybrid_tests()
print hybrid_report
if parsed_args.email:
_send_report(u'hybrid scrape regression test results', hybrid_report, parsed_args.email)
if parsed_args.green:
green_report = _run_green_tests()
print green_report
if parsed_args.email:
_send_report(u'green scrape regression test results', green_report, parsed_args.email)
| |
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
from django.utils.duration import duration_microseconds
from django.utils.encoding import force_str
class DatabaseOperations(BaseDatabaseOperations):
    # SQL compiler implementations for the MySQL backend.
    compiler_module = "django.db.backends.mysql.compiler"

    # MySQL stores positive fields as UNSIGNED ints.
    integer_field_ranges = {
        **BaseDatabaseOperations.integer_field_ranges,
        'PositiveSmallIntegerField': (0, 65535),
        'PositiveIntegerField': (0, 4294967295),
        'PositiveBigIntegerField': (0, 18446744073709551615),
    }
    # Target MySQL CAST() type for each Django field class.
    cast_data_types = {
        'AutoField': 'signed integer',
        'BigAutoField': 'signed integer',
        'SmallAutoField': 'signed integer',
        'CharField': 'char(%(max_length)s)',
        'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)',
        'TextField': 'char',
        'IntegerField': 'signed integer',
        'BigIntegerField': 'signed integer',
        'SmallIntegerField': 'signed integer',
        'PositiveBigIntegerField': 'unsigned integer',
        'PositiveIntegerField': 'unsigned integer',
        'PositiveSmallIntegerField': 'unsigned integer',
    }
    # CAST() type for CharField when no max_length is given.
    cast_char_field_without_max_length = 'char'
    explain_prefix = 'EXPLAIN'
def date_extract_sql(self, lookup_type, field_name):
# https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'iso_week_day':
# WEEKDAY() returns an integer, 0-6, Monday=0.
return "WEEKDAY(%s) + 1" % field_name
elif lookup_type == 'week':
# Override the value of default_week_format for consistency with
# other database backends.
# Mode 3: Monday, 1-53, with 4 or more days this year.
return "WEEK(%s, 3)" % field_name
elif lookup_type == 'iso_year':
# Get the year part from the YEARWEEK function, which returns a
# number as year * 100 + week.
return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name
else:
# EXTRACT returns 1-53 based on ISO-8601 for the week number.
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = {
'year': '%%Y-01-01',
'month': '%%Y-%%m-01',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
elif lookup_type == 'quarter':
return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
field_name, field_name
)
elif lookup_type == 'week':
return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % (
field_name, field_name
)
else:
return "DATE(%s)" % (field_name)
def _prepare_tzname_delta(self, tzname):
if '+' in tzname:
return tzname[tzname.find('+'):]
elif '-' in tzname:
return tzname[tzname.find('-'):]
return tzname
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ and self.connection.timezone_name != tzname:
field_name = "CONVERT_TZ(%s, '%s', '%s')" % (
field_name,
self.connection.timezone_name,
self._prepare_tzname_delta(tzname),
)
return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "DATE(%s)" % field_name
def datetime_cast_time_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "TIME(%s)" % field_name
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
if lookup_type == 'quarter':
return (
"CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
"INTERVAL QUARTER({field_name}) QUARTER - " +
"INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
).format(field_name=field_name)
if lookup_type == 'week':
return (
"CAST(DATE_FORMAT(DATE_SUB({field_name}, "
"INTERVAL WEEKDAY({field_name}) DAY), "
"'%%Y-%%m-%%d 00:00:00') AS DATETIME)"
).format(field_name=field_name)
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join(format[:i] + format_def[i:])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def time_trunc_sql(self, lookup_type, field_name):
fields = {
'hour': '%%H:00:00',
'minute': '%%H:%%i:00',
'second': '%%H:%%i:%%s',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
else:
return "TIME(%s)" % (field_name)
def date_interval_sql(self, timedelta):
return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta)
    def fetch_returned_insert_rows(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table, return the tuple of returned data.
        """
        return cursor.fetchall()
    def format_for_duration_arithmetic(self, sql):
        """Wrap a microseconds expression as a MySQL INTERVAL for arithmetic."""
        return 'INTERVAL %s MICROSECOND' % sql
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        # (expression, (sql, params, is_ref)) tuple consumed by the compiler.
        return [(None, ("NULL", [], False))]
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
# MySQLdb returns string, PyMySQL bytes.
return force_str(getattr(cursor, '_executed', None), errors='replace')
    def no_limit_value(self):
        """Value used for LIMIT when only an offset is wanted."""
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
    def random_function_sql(self):
        """SQL expression producing a random value."""
        return 'RAND()'
def return_insert_columns(self, fields):
# MySQL and MariaDB < 10.5.0 don't support an INSERT...RETURNING
# statement.
if not fields:
return '', ()
columns = [
'%s.%s' % (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
) for field in fields
]
return 'RETURNING %s' % ', '.join(columns), ()
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
if not tables:
return []
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
if reset_sequences:
# It's faster to TRUNCATE tables that require a sequence reset
# since ALTER TABLE AUTO_INCREMENT is slower than TRUNCATE.
sql.extend(
'%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table_name)),
) for table_name in tables
)
else:
# Otherwise issue a simple DELETE since it's faster than TRUNCATE
# and preserves sequences.
sql.extend(
'%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table_name)),
) for table_name in tables
)
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
return sql
def sequence_reset_by_name_sql(self, style, sequences):
return [
'%s %s %s %s = 1;' % (
style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_FIELD(self.quote_name(sequence_info['table'])),
style.SQL_FIELD('AUTO_INCREMENT'),
) for sequence_info in sequences
]
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
    def adapt_timefield_value(self, value):
        """Convert a time to a string MySQL can store; reject aware times."""
        if value is None:
            return None
        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        return str(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def combine_expression(self, connector, sub_expressions):
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
# Convert the result to a signed integer since MySQL's binary operators
# return an unsigned integer.
elif connector in ('&', '|', '<<', '#'):
connector = '^' if connector == '#' else connector
return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
elif connector == '>>':
lhs, rhs = sub_expressions
return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
return super().combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def binary_placeholder_sql(self, value):
return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
if self.connection.mysql_is_mariadb:
# MariaDB includes the microsecond component in TIME_TO_SEC as
# a decimal. MySQL returns an integer without microseconds.
return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % {
'lhs': lhs_sql, 'rhs': rhs_sql
}, (*lhs_params, *rhs_params)
return (
"((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -"
" (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))"
) % {'lhs': lhs_sql, 'rhs': rhs_sql}, tuple(lhs_params) * 2 + tuple(rhs_params) * 2
params = (*rhs_params, *lhs_params)
return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), params
    def explain_query_prefix(self, format=None, **options):
        """Return the EXPLAIN prefix for this backend.

        Normalizes ``format`` (TEXT -> TRADITIONAL; default TREE when
        supported) and pops ``analyze`` from *options* before delegating so
        the base class never sees it.
        """
        # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends.
        if format and format.upper() == 'TEXT':
            format = 'TRADITIONAL'
        elif not format and 'TREE' in self.connection.features.supported_explain_formats:
            # Use TREE by default (if supported) as it's more informative.
            format = 'TREE'
        analyze = options.pop('analyze', False)
        prefix = super().explain_query_prefix(format, **options)
        if analyze and self.connection.features.supports_explain_analyze:
            # MariaDB uses ANALYZE instead of EXPLAIN ANALYZE.
            prefix = 'ANALYZE' if self.connection.mysql_is_mariadb else prefix + ' ANALYZE'
        if format and not (analyze and not self.connection.mysql_is_mariadb):
            # Only MariaDB supports the analyze option with formats.
            prefix += ' FORMAT=%s' % format
        return prefix
def regex_lookup(self, lookup_type):
# REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE
# doesn't exist in MySQL 5.x or in MariaDB.
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb:
if lookup_type == 'regex':
return '%s REGEXP BINARY %s'
return '%s REGEXP %s'
match_option = 'c' if lookup_type == 'regex' else 'i'
return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
def insert_statement(self, ignore_conflicts=False):
return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
if internal_type == 'JSONField':
if self.connection.mysql_is_mariadb or lookup_type in (
'iexact', 'contains', 'icontains', 'startswith', 'istartswith',
'endswith', 'iendswith', 'regex', 'iregex',
):
lookup = 'JSON_UNQUOTE(%s)'
return lookup
| |
import asyncio
import contextlib
import copy
import os
import random
import textwrap
import pytest
import sys
import uuid
from _pytest.monkeypatch import MonkeyPatch
from _pytest.python import Function
from spacy import Language
from rasa.engine.caching import LocalTrainingCache
from rasa.engine.graph import ExecutionContext, GraphSchema
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.engine.storage.storage import ModelStorage
from sanic.request import Request
from typing import Iterator, Callable
from _pytest.tmpdir import TempPathFactory, TempdirFactory
from pathlib import Path
from sanic import Sanic
from typing import Text, List, Optional, Dict, Any
from unittest.mock import Mock
from rasa.shared.nlu.constants import METADATA_MODEL_ID
import rasa.shared.utils.io
from rasa import server
from rasa.core.agent import Agent, load_agent
from rasa.core.brokers.broker import EventBroker
from rasa.core.channels import channel, RestInput
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.utils.spacy_utils import SpacyNLP, SpacyModel
from rasa.shared.constants import LATEST_TRAINING_DATA_FORMAT_VERSION
from rasa.shared.core.domain import SessionConfig, Domain
from rasa.shared.core.events import Event, UserUttered
from rasa.core.exporter import Exporter
import rasa.core.run
from rasa.core.tracker_store import InMemoryTrackerStore, TrackerStore
from rasa.model_training import train, train_nlu
from rasa.shared.exceptions import RasaException
import rasa.utils.common
# we reuse a bit of pytest's own testing machinery, this should eventually come
# from a separately installable pytest-cli plugin.
# we reuse a bit of pytest's own testing machinery, this should eventually come
# from a separately installable pytest-cli plugin.
pytest_plugins = ["pytester"]
# these tests are run separately
collect_ignore_glob = ["docs/*.py"]
# Defines how tests are parallelized in the CI.
# Maps a CI category marker name to the absolute test paths whose tests
# receive that marker (tests matching no entry fall into
# "category_other_unit_tests"); see `_get_marker_for_ci_matrix` below.
PATH_PYTEST_MARKER_MAPPINGS = {
    "category_cli": [Path("tests", "cli").absolute()],
    "category_core_featurizers": [Path("tests", "core", "featurizers").absolute()],
    "category_policies": [
        Path("tests", "core", "test_policies.py").absolute(),
        Path("tests", "core", "policies").absolute(),
    ],
    "category_nlu_featurizers": [
        Path("tests", "nlu", "featurizers").absolute(),
        Path("tests", "nlu", "utils").absolute(),
    ],
    "category_nlu_predictors": [
        Path("tests", "nlu", "classifiers").absolute(),
        Path("tests", "nlu", "extractors").absolute(),
        Path("tests", "nlu", "selectors").absolute(),
    ],
    "category_full_model_training": [
        Path("tests", "test_model_training.py").absolute(),
        Path("tests", "nlu", "test_train.py").absolute(),
        Path("tests", "core", "test_training.py").absolute(),
        Path("tests", "core", "test_examples.py").absolute(),
    ],
    "category_performance": [Path("tests", "test_memory_leak.py").absolute()],
}
@pytest.fixture(scope="session")
def nlu_as_json_path() -> Text:
    """Path to demo NLU training data in the Rasa JSON format."""
    return "data/examples/rasa/demo-rasa.json"
@pytest.fixture(scope="session")
def nlu_data_path() -> Text:
    """Path to the moodbot NLU training data (YAML)."""
    return "data/test_moodbot/data/nlu.yml"
@pytest.fixture(scope="session")
def config_path() -> Text:
    """Path to the default importer model configuration."""
    return "rasa/shared/importers/default_config.yml"
@pytest.fixture(scope="session")
def default_config(config_path: Text) -> Dict[Text, Any]:
    """The default model configuration, parsed from YAML."""
    return rasa.shared.utils.io.read_yaml_file(config_path)
@pytest.fixture(scope="session")
def domain_with_categorical_slot_path() -> Text:
    """Path to a test domain declaring a categorical slot."""
    return "data/test_domains/domain_with_categorical_slot.yml"
@pytest.fixture(scope="session")
def domain_with_mapping_path() -> Text:
    """Path to a test domain with intent-to-action mappings."""
    return "data/test_domains/default_with_mapping.yml"
@pytest.fixture(scope="session")
def stories_path() -> Text:
    """Path to YAML stories for the default domain."""
    return "data/test_yaml_stories/stories_defaultdomain.yml"
@pytest.fixture(scope="session")
def e2e_stories_path() -> Text:
    """Path to end-to-end YAML stories."""
    return "data/test_yaml_stories/stories_e2e.yml"
@pytest.fixture(scope="session")
def simple_stories_path() -> Text:
    """Path to a minimal YAML stories file."""
    return "data/test_yaml_stories/stories_simple.yml"
@pytest.fixture(scope="session")
def stack_config_path() -> Text:
    """Path to a full (NLU + Core) stack model configuration."""
    return "data/test_config/stack_config.yml"
@pytest.fixture(scope="session")
def incorrect_nlu_data_path() -> Text:
    """Path to NLU data in a deliberately invalid format."""
    return "data/test/incorrect_nlu_format.yml"
@pytest.fixture(scope="session")
def end_to_end_story_path() -> Text:
    """Path to an end-to-end evaluation story."""
    return "data/test_evaluations/test_end_to_end_story.yml"
@pytest.fixture(scope="session")
def e2e_story_file_unknown_entity_path() -> Text:
    """Path to an e2e story referencing an entity unknown to the domain."""
    return "data/test_evaluations/test_story_unknown_entity.yml"
@pytest.fixture(scope="session")
def domain_path() -> Text:
    """Path to the default test domain with slots."""
    return "data/test_domains/default_with_slots.yml"
@pytest.fixture(scope="session")
def story_file_trips_circuit_breaker_path() -> Text:
    """Path to stories that trigger the action-loop circuit breaker."""
    return "data/test_evaluations/test_stories_trip_circuit_breaker.yml"
@pytest.fixture(scope="session")
def e2e_story_file_trips_circuit_breaker_path() -> Text:
    """Path to e2e stories that trigger the action-loop circuit breaker."""
    return "data/test_evaluations/test_end_to_end_trips_circuit_breaker.yml"
@pytest.fixture(scope="session")
def endpoints_path() -> Text:
    """Path to an example endpoints configuration."""
    return "data/test_endpoints/example_endpoints.yml"
# https://github.com/pytest-dev/pytest-asyncio/issues/68
# this event_loop is used by pytest-asyncio, and redefining it
# is currently the only way of changing the scope of this fixture
@pytest.fixture(scope="session")
def event_loop(request: Request) -> Iterator[asyncio.AbstractEventLoop]:
    """Session-scoped event loop for pytest-asyncio; closed on teardown."""
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    loop.close()
@pytest.fixture(scope="session")
async def trained_default_agent_model(
    tmp_path_factory: TempPathFactory,
    stories_path: Text,
    domain_path: Text,
    nlu_data_path: Text,
    trained_async: Callable,
) -> Text:
    """Train a small default agent once per session and return the model path.

    Uses a deliberately lightweight pipeline/policy configuration (keyword
    classifier, regex extractor, memoization + rule policies) so training
    stays fast.
    """
    project_path = tmp_path_factory.mktemp(uuid.uuid4().hex)
    config = textwrap.dedent(
        f"""
    version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
    pipeline:
    - name: KeywordIntentClassifier
    - name: RegexEntityExtractor
    policies:
    - name: AugmentedMemoizationPolicy
      max_history: 3
    - name: RulePolicy
    """
    )
    config_path = project_path / "config.yml"
    rasa.shared.utils.io.write_text_file(config, config_path)
    model_path = await trained_async(
        domain_path, str(config_path), [stories_path, nlu_data_path],
    )
    return model_path
@pytest.fixture()
def empty_agent() -> Agent:
    """An `Agent` with a domain loaded but no trained model."""
    agent = Agent(domain=Domain.load("data/test_domains/default_with_slots.yml"))
    return agent
def reset_conversation_state(agent: Agent) -> Agent:
    """Reset tracker store and session config so tests don't leak state."""
    # Clean tracker store after each test so tests don't affect each other
    agent.tracker_store = InMemoryTrackerStore(agent.domain)
    agent.domain.session_config = SessionConfig.default()
    agent.load_model(agent.processor.model_path)
    return agent
@pytest.fixture
def default_agent(trained_default_agent_model: Text) -> Agent:
    """A fresh `Agent` loaded from the session's default trained model."""
    return Agent.load(trained_default_agent_model)
@pytest.fixture(scope="session")
async def trained_moodbot_path(trained_async: Callable) -> Text:
    """Moodbot trained on all data (NLU + stories); returns the model path."""
    return await trained_async(
        domain="data/test_moodbot/domain.yml",
        config="data/test_moodbot/config.yml",
        training_files="data/test_moodbot/data/",
    )
@pytest.fixture(scope="session")
async def trained_moodbot_core_path(trained_async: Callable) -> Text:
    """Moodbot trained on stories only (Core-only model)."""
    return await trained_async(
        domain="data/test_moodbot/domain.yml",
        config="data/test_moodbot/config.yml",
        training_files="data/test_moodbot/data/stories.yml",
    )
@pytest.fixture(scope="session")
async def trained_moodbot_nlu_path(trained_async: Callable) -> Text:
    """Moodbot trained on NLU data only."""
    return await trained_async(
        domain="data/test_moodbot/domain.yml",
        config="data/test_moodbot/config.yml",
        training_files="data/test_moodbot/data/nlu.yml",
    )
@pytest.fixture(scope="session")
async def trained_unexpected_intent_policy_path(trained_async: Callable) -> Text:
    """Moodbot trained with the UnexpecTED intent policy configuration."""
    return await trained_async(
        domain="data/test_moodbot/domain.yml",
        config="data/test_moodbot/unexpected_intent_policy_config.yml",
        training_files="data/test_moodbot/data/",
    )
@pytest.fixture(scope="session")
def trained_nlu_moodbot_path(trained_nlu: Callable) -> Text:
    """Moodbot NLU-only model trained via the synchronous `train_nlu`."""
    return trained_nlu(
        domain="data/test_moodbot/domain.yml",
        config="data/test_moodbot/config.yml",
        nlu_data="data/test_moodbot/data/nlu.yml",
    )
@pytest.fixture(scope="session")
async def trained_spacybot_path(trained_async: Callable) -> Text:
    """Model path for the spaCy-based test bot."""
    return await trained_async(
        domain="data/test_spacybot/domain.yml",
        config="data/test_spacybot/config.yml",
        training_files="data/test_spacybot/data/",
    )
@pytest.fixture(scope="session")
async def stack_agent(trained_rasa_model: Text) -> Agent:
    """Agent loaded from the full-stack (NLU + Core) trained model."""
    return await load_agent(model_path=trained_rasa_model)
@pytest.fixture(scope="session")
async def core_agent(trained_core_model: Text) -> Agent:
    """Agent loaded from the Core-only trained model."""
    return await load_agent(model_path=trained_core_model)
@pytest.fixture(scope="session")
async def nlu_agent(trained_nlu_model: Text) -> Agent:
    """Agent loaded from the NLU-only trained model."""
    return await load_agent(model_path=trained_nlu_model)
@pytest.fixture(scope="module")
async def unexpected_intent_policy_agent(
    trained_unexpected_intent_policy_path: Text,
) -> Agent:
    """Agent loaded from the UnexpecTED intent policy model."""
    return await load_agent(model_path=trained_unexpected_intent_policy_path)
@pytest.fixture(scope="module")
async def mood_agent(trained_moodbot_path: Text) -> Agent:
    """Agent loaded from the fully-trained moodbot model."""
    return await load_agent(model_path=trained_moodbot_path)
@pytest.fixture(scope="session")
def _domain(domain_path: Text) -> Domain:
    """Session-cached default domain; tests get copies via `domain`."""
    return Domain.load(domain_path)
@pytest.fixture()
def domain(_domain: Domain) -> Domain:
    """Per-test deep copy of the default domain, safe to mutate."""
    return copy.deepcopy(_domain)
@pytest.fixture(scope="session")
def trained_async(tmp_path_factory: TempPathFactory) -> Callable:
    """Factory returning an async trainer that isolates cache and output dirs."""
    async def _train(
        *args: Any,
        output_path: Optional[Text] = None,
        cache_dir: Optional[Path] = None,
        **kwargs: Any,
    ) -> Optional[Text]:
        # Fresh temp dirs per call unless the caller pins them explicitly.
        if not cache_dir:
            cache_dir = tmp_path_factory.mktemp("cache")
        if output_path is None:
            output_path = str(tmp_path_factory.mktemp("models"))
        with enable_cache(cache_dir):
            result = train(*args, output=output_path, **kwargs)
        return result.model
    return _train
@pytest.fixture(scope="session")
def trained_nlu(tmp_path_factory: TempPathFactory) -> Callable:
    """Factory returning a synchronous NLU trainer with a temp output dir."""
    def _train_nlu(
        *args: Any, output_path: Optional[Text] = None, **kwargs: Any
    ) -> Optional[Text]:
        if output_path is None:
            output_path = str(tmp_path_factory.mktemp("models"))
        return train_nlu(*args, output=output_path, **kwargs)
    return _train_nlu
@pytest.fixture(scope="session")
async def trained_rasa_model(
    trained_async: Callable,
    domain_path: Text,
    nlu_data_path: Text,
    stories_path: Text,
    stack_config_path: Text,
) -> Text:
    """Path to a full-stack model trained on NLU data and stories."""
    trained_stack_model_path = await trained_async(
        domain=domain_path,
        config=stack_config_path,
        training_files=[nlu_data_path, stories_path],
    )
    return trained_stack_model_path
@pytest.fixture(scope="session")
async def trained_core_model(
    trained_async: Callable,
    domain_path: Text,
    stack_config_path: Text,
    stories_path: Text,
) -> Text:
    """Path to a Core-only model trained on stories."""
    trained_core_model_path = await trained_async(
        domain=domain_path, config=stack_config_path, training_files=[stories_path],
    )
    return trained_core_model_path
@pytest.fixture(scope="session")
async def trained_nlu_model(
    trained_async: Callable,
    domain_path: Text,
    nlu_data_path: Text,
    stack_config_path: Text,
) -> Text:
    """Path to an NLU-only model trained on NLU data."""
    trained_nlu_model_path = await trained_async(
        domain=domain_path, config=stack_config_path, training_files=[nlu_data_path],
    )
    return trained_nlu_model_path
@pytest.fixture(scope="session")
def _trained_e2e_model_cache(tmp_path_factory: TempPathFactory) -> Path:
    """Session-wide cache dir populated while training the e2e model."""
    return tmp_path_factory.mktemp("cache")
@pytest.fixture()
def trained_e2e_model_cache(
    _trained_e2e_model_cache: Path,
    tmp_path_factory: TempPathFactory,
    monkeypatch: MonkeyPatch,
) -> Path:
    """Per-test copy of the e2e training cache so tests can mutate it."""
    copied_cache = tmp_path_factory.mktemp("copy")
    rasa.utils.common.copy_directory(_trained_e2e_model_cache, copied_cache)
    with enable_cache(copied_cache):
        yield copied_cache
@pytest.fixture(scope="session")
async def trained_e2e_model(
    trained_async: Callable,
    moodbot_domain_path: Text,
    e2e_bot_config_file: Path,
    nlu_data_path: Text,
    e2e_stories_path: Text,
    _trained_e2e_model_cache: Path,
) -> Text:
    """Model trained on e2e stories, filling `_trained_e2e_model_cache`."""
    return await trained_async(
        domain=moodbot_domain_path,
        config=str(e2e_bot_config_file),
        training_files=[nlu_data_path, e2e_stories_path],
        cache_dir=_trained_e2e_model_cache,
    )
@pytest.fixture(scope="session")
def moodbot_domain_path() -> Path:
    """Path to the moodbot domain file."""
    return Path("data", "test_moodbot", "domain.yml")
@pytest.fixture(scope="session")
def moodbot_domain(moodbot_domain_path: Path) -> Domain:
    """The loaded moodbot domain."""
    return Domain.load(moodbot_domain_path)
@pytest.fixture(scope="session")
def moodbot_nlu_data_path() -> Path:
    """Absolute path to the moodbot NLU data."""
    return Path(os.getcwd()) / "data" / "test_moodbot" / "data" / "nlu.yml"
@pytest.fixture
def rasa_server(stack_agent: Agent) -> Sanic:
    """Sanic app serving the full-stack agent with a REST channel."""
    app = server.create_app(agent=stack_agent)
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_non_trained_server(empty_agent: Agent) -> Sanic:
    """Sanic app serving an untrained agent with a REST channel."""
    app = server.create_app(agent=empty_agent)
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_core_server(core_agent: Agent) -> Sanic:
    """Sanic app serving the Core-only agent with a REST channel."""
    app = server.create_app(agent=core_agent)
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_nlu_server(nlu_agent: Agent) -> Sanic:
    """Sanic app serving the NLU-only agent with a REST channel."""
    app = server.create_app(agent=nlu_agent)
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_server_secured(default_agent: Agent) -> Sanic:
    """Sanic app with token + JWT auth enabled for the default agent."""
    app = server.create_app(agent=default_agent, auth_token="rasa", jwt_secret="core")
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_non_trained_server_secured(empty_agent: Agent) -> Sanic:
    """Sanic app with token + JWT auth enabled for an untrained agent."""
    app = server.create_app(agent=empty_agent, auth_token="rasa", jwt_secret="core")
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture
def rasa_server_without_api() -> Sanic:
    """Sanic app with only the input channels, no HTTP API routes."""
    app = rasa.core.run._create_app_without_api()
    channel.register([RestInput()], app, "/webhooks/")
    return app
@pytest.fixture(scope="session")
def project() -> Text:
    """Directory containing a freshly scaffolded initial Rasa project."""
    import tempfile
    from rasa.cli.scaffold import create_initial_project
    directory = tempfile.mkdtemp()
    create_initial_project(directory)
    return directory
@pytest.fixture(scope="session")
def spacy_nlp_component() -> SpacyNLP:
    """SpacyNLP graph component configured with the en_core_web_md model."""
    return SpacyNLP.create({"model": "en_core_web_md"}, Mock(), Mock(), Mock())
@pytest.fixture(scope="session")
def spacy_model(spacy_nlp_component: SpacyNLP) -> SpacyModel:
    """The SpacyModel provided by the session's SpacyNLP component."""
    return spacy_nlp_component.provide()
@pytest.fixture(scope="session")
def spacy_nlp(spacy_model: SpacyModel) -> Language:
    """The underlying spaCy `Language` pipeline."""
    return spacy_model.model
@pytest.fixture(scope="session")
async def response_selector_test_stories() -> Path:
    """Path to test stories for the response selector bot."""
    return Path("data/test_response_selector_bot/tests/test_stories.yml")
@pytest.fixture(scope="session")
async def trained_response_selector_bot(trained_async: Callable) -> Path:
    """Train the response selector bot; fail loudly if training yields nothing."""
    zipped_model = await trained_async(
        domain="data/test_response_selector_bot/domain.yml",
        config="data/test_response_selector_bot/config.yml",
        training_files=[
            "data/test_response_selector_bot/data/rules.yml",
            "data/test_response_selector_bot/data/nlu.yml",
        ],
    )
    if not zipped_model:
        raise RasaException("Model training for responseselectorbot failed.")
    return Path(zipped_model)
@pytest.fixture(scope="session")
def e2e_bot_domain_file() -> Path:
    """Path to the e2e bot domain."""
    return Path("data/test_e2ebot/domain.yml")
@pytest.fixture(scope="session")
def e2e_bot_config_file() -> Path:
    """Path to the e2e bot model configuration."""
    return Path("data/test_e2ebot/config.yml")
@pytest.fixture(scope="session")
def e2e_bot_training_files() -> List[Path]:
    """Training data files (stories + NLU) for the e2e bot."""
    return [
        Path("data/test_e2ebot/data/stories.yml"),
        Path("data/test_e2ebot/data/nlu.yml"),
    ]
@pytest.fixture(scope="session")
def e2e_bot_test_stories_with_unknown_bot_utterances() -> Path:
    """Test stories containing bot utterances unknown to the domain."""
    return Path("data/test_e2ebot/tests/test_stories_with_unknown_bot_utterances.yml")
@pytest.fixture(scope="session")
async def e2e_bot(
    trained_async: Callable,
    e2e_bot_domain_file: Path,
    e2e_bot_config_file: Path,
    e2e_bot_training_files: List[Path],
) -> Path:
    """Train the e2e bot; fail loudly if training yields nothing."""
    zipped_model = await trained_async(
        domain=e2e_bot_domain_file,
        config=e2e_bot_config_file,
        training_files=e2e_bot_training_files,
    )
    if not zipped_model:
        raise RasaException("Model training for e2ebot failed.")
    return Path(zipped_model)
@pytest.fixture(scope="module")
async def response_selector_agent(trained_response_selector_bot: Path,) -> Agent:
    """Agent loaded from the response selector bot model."""
    return await load_agent(str(trained_response_selector_bot))
@pytest.fixture(scope="module")
async def e2e_bot_agent(e2e_bot: Path) -> Agent:
    """Agent loaded from the e2e bot model."""
    return await load_agent(str(e2e_bot))
def write_endpoint_config_to_yaml(
    path: Path, data: Dict[Text, Any], endpoints_filename: Text = "endpoints.yml"
) -> Path:
    """Serialize *data* as YAML under *path* and return the file's path."""
    endpoints_path = path / endpoints_filename
    # write endpoints config to file
    rasa.shared.utils.io.write_yaml(data, endpoints_path)
    return endpoints_path
def random_user_uttered_event(timestamp: Optional[float] = None) -> UserUttered:
    """Create a `UserUttered` event with random text and timestamp."""
    return UserUttered(
        uuid.uuid4().hex,
        timestamp=timestamp if timestamp is not None else random.random(),
    )
def pytest_runtest_setup(item: Function) -> None:
    """Skip tests marked `skip_on_windows` when running on Windows."""
    if (
        "skip_on_windows" in [mark.name for mark in item.iter_markers()]
        and sys.platform == "win32"
    ):
        pytest.skip("cannot run on Windows")
class MockExporter(Exporter):
    """Mocked `Exporter` class."""
    def __init__(
        self,
        tracker_store: Optional[TrackerStore] = None,
        event_broker: Optional[EventBroker] = None,
        endpoints_path: Text = "",
    ) -> None:
        """Create the mock exporter.

        The previous signature used ``Mock()`` as default argument values;
        defaults are evaluated once at function definition time, so a single
        mock instance was shared by every `MockExporter` — call state
        recorded in one test leaked into the next. Fresh mocks are now
        created per instance, which is backward compatible for all callers.
        """
        super().__init__(
            tracker_store if tracker_store is not None else Mock(),
            event_broker if event_broker is not None else Mock(),
            endpoints_path,
        )
class AsyncMock(Mock):
    """A `Mock` whose calls must be awaited.

    Awaiting an instance records the call exactly like a regular `Mock`
    call and resolves to the mock's configured return value.
    """
    async def __call__(self, *args: Any, **kwargs: Any) -> Any:
        result = super().__call__(*args, **kwargs)
        return result
def _get_marker_for_ci_matrix(item: Function) -> Text:
    """Returns pytest marker which is used to parallelize the tests in GitHub actions.
    Args:
        item: The test case.
    Returns:
        A marker for this test based on which directory / python module the test is in.
    """
    test_path = Path(item.fspath).absolute()

    def _applies(paths_for_marker: List[Path]) -> bool:
        # A mapping applies when the test file is the mapped path itself or
        # lives somewhere below it.
        return any(
            path == test_path or path in test_path.parents
            for path in paths_for_marker
        )

    matching_markers = [
        marker
        for marker, marker_paths in PATH_PYTEST_MARKER_MAPPINGS.items()
        if _applies(marker_paths)
    ]
    if not matching_markers:
        return "category_other_unit_tests"
    if len(matching_markers) > 1:
        raise ValueError(
            f"Each test should only be in one category. Test '{item.name}' is assigned "
            f"to these categories: {matching_markers}. Please fix the "
            "mapping in `PATH_PYTEST_MARKER_MAPPINGS`."
        )
    return matching_markers[0]
def pytest_collection_modifyitems(items: List[Function]) -> None:
    """Adds pytest markers dynamically when the tests are run.
    This is automatically called by pytest during its execution.
    Args:
        items: Tests to be run.
    """
    for test_item in items:
        test_item.add_marker(_get_marker_for_ci_matrix(test_item))
def create_test_file_with_size(directory: Path, size_in_mb: float) -> Path:
    """Create a file of roughly *size_in_mb* megabytes and return its path.

    The file is produced by seeking to the requested offset and writing a
    single null byte, so the resulting size is
    ``int(size_in_mb * 1024 * 1024) + 1`` bytes without holding the content
    in memory.
    """
    target = directory / uuid.uuid4().hex
    with target.open(mode="wb") as handle:
        handle.seek(int(1024 * 1024 * size_in_mb))
        handle.write(b"\0")
    return target
@pytest.fixture()
def default_model_storage(tmp_path: Path, monkeypatch: MonkeyPatch) -> ModelStorage:
    """A `LocalModelStorage` rooted in the test's temp directory."""
    return LocalModelStorage.create(tmp_path)
@pytest.fixture()
def default_execution_context() -> ExecutionContext:
    """An `ExecutionContext` with an empty schema and a random model id."""
    return ExecutionContext(GraphSchema({}), uuid.uuid4().hex)
@pytest.fixture(scope="session", autouse=True)
def temp_cache_for_fixtures(tmp_path_factory: TempPathFactory) -> None:
    # This fixture makes sure that wide fixtures which don't have `function` scope
    # (session, package, module) don't use the global
    # cache. If you want to use the cache in a session scoped fixture, then please
    # consider using the `enable_cache` context manager.
    LocalTrainingCache._get_cache_location = lambda: tmp_path_factory.mktemp(
        f"cache-{uuid.uuid4()}"
    )
    # We can omit reverting the monkeypatch as this fixture is torn down after all the
    # tests ran
@pytest.fixture(autouse=True)
def use_temp_dir_for_cache(
    monkeypatch: MonkeyPatch, tmp_path_factory: TempdirFactory
) -> None:
    # This fixture makes sure that a single test function uses one constant
    # cache directory for its whole duration.
    cache_dir = tmp_path_factory.mktemp(uuid.uuid4().hex)
    monkeypatch.setattr(LocalTrainingCache, "_get_cache_location", lambda: cache_dir)
@contextlib.contextmanager
def enable_cache(cache_dir: Path):
    """Temporarily point `LocalTrainingCache` at *cache_dir*.

    The previous implementation restored `_get_cache_location` only on a
    clean exit; if the ``with`` body raised, the patched cache location
    leaked into every subsequent test. The restore now happens in a
    ``finally`` block, so it runs on success and on error alike.
    """
    old_get_cache_location = LocalTrainingCache._get_cache_location
    LocalTrainingCache._get_cache_location = Mock(return_value=cache_dir)
    try:
        yield
    finally:
        LocalTrainingCache._get_cache_location = old_get_cache_location
@pytest.fixture()
def whitespace_tokenizer() -> WhitespaceTokenizer:
    """A `WhitespaceTokenizer` built from its default configuration."""
    return WhitespaceTokenizer(WhitespaceTokenizer.get_default_config())
def with_model_ids(events: List[Event], model_id: Text) -> List[Event]:
    """Return copies of *events* each tagged with *model_id* metadata."""
    return [with_model_id(event, model_id) for event in events]
def with_model_id(event: Event, model_id: Text) -> Event:
    """Return a deep copy of *event* with *model_id* set in its metadata."""
    new_event = copy.deepcopy(event)
    new_event.metadata[METADATA_MODEL_ID] = model_id
    return new_event
| |
# packet.py
#
# Copyright 2002-2005,2007 Wichert Akkerman <wichert@wiggy.net>
#
# A RADIUS packet as defined in RFC 2138
import struct
import random
try:
import hashlib
md5_constructor = hashlib.md5
except ImportError:
# BBB for python 2.4
import md5
md5_constructor = md5.new
import six
from pyrad import tools
# Packet codes (RADIUS Code field values; see RFC 2865/2866 and the
# Dynamic Authorization extensions in RFC 5176 for Disconnect/CoA).
AccessRequest = 1
AccessAccept = 2
AccessReject = 3
AccountingRequest = 4
AccountingResponse = 5
AccessChallenge = 11
StatusServer = 12
StatusClient = 13
DisconnectRequest = 40
DisconnectACK = 41
DisconnectNAK = 42
CoARequest = 43
CoAACK = 44
CoANAK = 45
# Current ID
# Module-level counter seeded randomly; used when generating packet IDs.
CurrentID = random.randrange(1, 255)
class PacketError(Exception):
    """Raised when a RADIUS packet is malformed or cannot be decoded."""
class Packet(dict):
"""Packet acts like a standard python map to provide simple access
to the RADIUS attributes. Since RADIUS allows for repeated
attributes the value will always be a sequence. pyrad makes sure
to preserve the ordering when encoding and decoding packets.
There are two ways to use the map intereface: if attribute
names are used pyrad take care of en-/decoding data. If
the attribute type number (or a vendor ID/attribute type
tuple for vendor attributes) is used you work with the
raw data.
Normally you will not use this class directly, but one of the
:obj:`AuthPacket` or :obj:`AcctPacket` classes.
"""
    def __init__(self, code=0, id=None, secret=six.b(''), authenticator=None,
                 **attributes):
        """Constructor
        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identification number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        dict.__init__(self)
        self.code = code
        # Generate a random ID when the caller did not supply one.
        if id is not None:
            self.id = id
        else:
            self.id = CreateID()
        if not isinstance(secret, six.binary_type):
            raise TypeError('secret must be a binary string')
        self.secret = secret
        if authenticator is not None and \
                not isinstance(authenticator, six.binary_type):
            raise TypeError('authenticator must be a binary string')
        self.authenticator = authenticator
        # 'dict' must be bound before 'packet' decoding, which needs it.
        if 'dict' in attributes:
            self.dict = attributes['dict']
        if 'packet' in attributes:
            self.DecodePacket(attributes['packet'])
        # Remaining keyword arguments become packet attributes; underscores
        # map to hyphens used in RADIUS dictionary names.
        for (key, value) in attributes.items():
            if key in ['dict', 'fd', 'packet']:
                continue
            key = key.replace('_', '-')
            self.AddAttribute(key, value)
def CreateReply(self, **attributes):
"""Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance.
"""
return Packet(id=self.id, secret=self.secret,
authenticator=self.authenticator, dict=self.dict,
**attributes)
def _DecodeValue(self, attr, value):
if attr.values.HasBackward(value):
return attr.values.GetBackward(value)
else:
return tools.DecodeAttr(attr.type, value)
def _EncodeValue(self, attr, value):
if attr.values.HasForward(value):
return attr.values.GetForward(value)
else:
return tools.EncodeAttr(attr.type, value)
def _EncodeKeyValues(self, key, values):
if not isinstance(key, str):
return (key, values)
attr = self.dict.attributes[key]
if attr.vendor:
key = (self.dict.vendors.GetForward(attr.vendor), attr.code)
else:
key = attr.code
return (key, [self._EncodeValue(attr, v) for v in values])
def _EncodeKey(self, key):
if not isinstance(key, str):
return key
attr = self.dict.attributes[key]
if attr.vendor:
return (self.dict.vendors.GetForward(attr.vendor), attr.code)
else:
return attr.code
def _DecodeKey(self, key):
"""Turn a key into a string if possible"""
if self.dict.attrindex.HasBackward(key):
return self.dict.attrindex.GetBackward(key)
return key
def AddAttribute(self, key, value):
"""Add an attribute to the packet.
:param key: attribute name or identification
:type key: string, attribute code or (vendor code, attribute code)
tuple
:param value: value
:type value: depends on type of attribute
"""
if isinstance(value, list):
values = value
else:
values = [value]
(key, values) = self._EncodeKeyValues(key, values)
self.setdefault(key, []).extend(values)
def __getitem__(self, key):
if not isinstance(key, six.string_types):
return dict.__getitem__(self, key)
values = dict.__getitem__(self, self._EncodeKey(key))
attr = self.dict.attributes[key]
res = []
for v in values:
res.append(self._DecodeValue(attr, v))
return res
def __contains__(self, key):
try:
return dict.__contains__(self, self._EncodeKey(key))
except KeyError:
return False
has_key = __contains__
def __delitem__(self, key):
dict.__delitem__(self, self._EncodeKey(key))
def __setitem__(self, key, item):
if isinstance(item, list):
items = item
else:
items = [item]
if isinstance(key, six.string_types):
(key, item) = self._EncodeKeyValues(key, items)
dict.__setitem__(self, key, item)
else:
assert isinstance(item, list)
dict.__setitem__(self, key, item)
def keys(self):
return [self._DecodeKey(key) for key in dict.keys(self)]
@staticmethod
def CreateAuthenticator():
"""Create a packet autenticator. All RADIUS packets contain a sixteen
byte authenticator which is used to authenticate replies from the
RADIUS server and in the password hiding algorithm. This function
returns a suitable random string that can be used as an authenticator.
:return: valid packet authenticator
:rtype: binary string
"""
data = []
for i in range(16):
data.append(random.randrange(0, 256))
if six.PY3:
return bytes(data)
else:
return ''.join(chr(b) for b in data)
def CreateID(self):
"""Create a packet ID. All RADIUS requests have a ID which is used to
identify a request. This is used to detect retries and replay attacks.
This function returns a suitable random number that can be used as ID.
:return: ID number
:rtype: integer
"""
return random.randrange(0, 256)
def ReplyPacket(self):
"""Create a ready-to-transmit authentication reply packet.
Returns a RADIUS packet which can be directly transmitted
to a RADIUS server. This differs with Packet() in how
the authenticator is calculated.
:return: raw packet
:rtype: string
"""
assert(self.authenticator)
assert(self.secret)
attr = self._PktEncodeAttributes()
header = struct.pack('!BBH', self.code, self.id, (20 + len(attr)))
authenticator = md5_constructor(header[0:4] + self.authenticator
+ attr + self.secret).digest()
return header + authenticator + attr
def VerifyReply(self, reply, rawreply=None):
if reply.id != self.id:
return False
if rawreply is None:
rawreply = reply.ReplyPacket()
hash = md5_constructor(rawreply[0:4] + self.authenticator +
rawreply[20:] + self.secret).digest()
if hash != rawreply[4:20]:
return False
return True
def _PktEncodeAttribute(self, key, value):
if isinstance(key, tuple):
value = struct.pack('!L', key[0]) + \
self._PktEncodeAttribute(key[1], value)
key = 26
return struct.pack('!BB', key, (len(value) + 2)) + value
def _PktEncodeAttributes(self):
result = six.b('')
for (code, datalst) in self.items():
for data in datalst:
result += self._PktEncodeAttribute(code, data)
return result
def _PktDecodeVendorAttribute(self, data):
# Check if this packet is long enough to be in the
# RFC2865 recommended form
if len(data) < 6:
return (26, data)
(vendor, type, length) = struct.unpack('!LBB', data[:6])[0:3]
# Another sanity check
if len(data) != length + 4:
return (26, data)
return ((vendor, type), data[6:])
    def DecodePacket(self, packet):
        """Initialize the object from raw packet data. Decode a packet as
        received from the network and decode it.

        :param packet: raw packet
        :type packet: string
        :raises PacketError: if the header or an attribute is malformed
        """
        # Fixed 20-byte header: code (1), id (1), length (2) and the
        # 16-byte authenticator.
        try:
            (self.code, self.id, length, self.authenticator) = \
                struct.unpack('!BBH16s', packet[0:20])
        except struct.error:
            raise PacketError('Packet header is corrupt')
        # The length field must describe the full packet exactly.
        if len(packet) != length:
            raise PacketError('Packet has invalid length')
        if length > 8192:
            raise PacketError('Packet length is too long (%d)' % length)
        # Drop any attributes decoded from a previous packet.
        self.clear()
        packet = packet[20:]
        while packet:
            # Each attribute starts with a one-byte type and a one-byte
            # total length (header octets included).
            try:
                (key, attrlen) = struct.unpack('!BB', packet[0:2])
            except struct.error:
                raise PacketError('Attribute header is corrupt')
            # attrlen includes its own two header octets, so < 2 is bogus.
            if attrlen < 2:
                raise PacketError(
                        'Attribute length is too small (%d)' % attrlen)
            value = packet[2:attrlen]
            # Vendor-Specific (26) payloads carry their own (vendor, type)
            # key when well-formed.
            if key == 26:
                (key, value) = self._PktDecodeVendorAttribute(value)
            self.setdefault(key, []).append(value)
            packet = packet[attrlen:]
class AuthPacket(Packet):
    """RADIUS authentication packets. This class is a specialization
    of the generic :obj:`Packet` class for authentication packets.
    """

    def __init__(self, code=AccessRequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param code: packet type code
        :type code: integer (8bits)
        :param id: packet identification number
        :type id: integer (8 bits)
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)

    def CreateReply(self, **attributes):
        """Create a new packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return AuthPacket(AccessAccept, self.id,
                          self.secret, self.authenticator, dict=self.dict,
                          **attributes)

    def RequestPacket(self):
        """Create a ready-to-transmit authentication request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        attr = self._PktEncodeAttributes()

        # Lazily create the authenticator and ID so a fresh packet can be
        # transmitted without explicit setup by the caller.
        if self.authenticator is None:
            self.authenticator = self.CreateAuthenticator()
        if self.id is None:
            self.id = self.CreateID()

        header = struct.pack('!BBH16s', self.code, self.id,
                             (20 + len(attr)), self.authenticator)
        return header + attr

    def PwDecrypt(self, password):
        """Unobfuscate a RADIUS password. RADIUS hides passwords in packets by
        using an algorithm based on the MD5 hash of the packet authenticator
        and RADIUS secret. This function reverses the obfuscation process.

        :param password: obfuscated form of password
        :type password: binary string
        :return: plaintext password
        :rtype: unicode string
        """
        buf = password
        pw = six.b('')
        last = self.authenticator
        while buf:
            # Each 16-byte block is XORed with MD5(secret + previous
            # ciphertext block); the first block uses the authenticator.
            hash = md5_constructor(self.secret + last).digest()
            if six.PY3:
                for i in range(16):
                    pw += bytes((hash[i] ^ buf[i],))
            else:
                for i in range(16):
                    pw += chr(ord(hash[i]) ^ ord(buf[i]))
            (last, buf) = (buf[:16], buf[16:])
        # Strip the NUL padding that PwCrypt added to fill the last block.
        while pw.endswith(six.b('\x00')):
            pw = pw[:-1]
        return pw.decode('utf-8')

    def PwCrypt(self, password):
        """Obfuscate password.
        RADIUS hides passwords in packets by using an algorithm
        based on the MD5 hash of the packet authenticator and RADIUS
        secret. If no authenticator has been set before calling PwCrypt
        one is created automatically. Changing the authenticator after
        setting a password that has been encrypted using this function
        will not work.

        :param password: plaintext password
        :type password: unicode string
        :return: obfuscated version of the password
        :rtype: binary string
        """
        if self.authenticator is None:
            self.authenticator = self.CreateAuthenticator()

        if isinstance(password, six.text_type):
            password = password.encode('utf-8')

        buf = password
        # Pad the password with NULs to a whole number of 16-byte blocks.
        if len(password) % 16 != 0:
            buf += six.b('\x00') * (16 - (len(password) % 16))

        result = six.b('')
        last = self.authenticator
        while buf:
            # NB: a pre-loop digest computation was removed here: it was a
            # dead store, recomputed with identical inputs on the first
            # loop iteration (last == self.authenticator).
            hash = md5_constructor(self.secret + last).digest()
            if six.PY3:
                for i in range(16):
                    result += bytes((hash[i] ^ buf[i],))
            else:
                for i in range(16):
                    result += chr(ord(hash[i]) ^ ord(buf[i]))
            last = result[-16:]
            buf = buf[16:]
        return result
class AcctPacket(Packet):
    """RADIUS accounting packets. This class is a specialization
    of the generic :obj:`Packet` class for accounting packets.
    """
    def __init__(self, code=AccountingRequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identification number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)
        # Keep the raw bytes around so the request authenticator can be
        # verified later (see VerifyAcctRequest).
        if 'packet' in attributes:
            self.raw_packet = attributes['packet']

    def CreateReply(self, **attributes):
        """Create a new Accounting-Response packet as a reply to this one.
        This method makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return AcctPacket(AccountingResponse, self.id,
            self.secret, self.authenticator, dict=self.dict,
            **attributes)

    def VerifyAcctRequest(self):
        """Verify the request authenticator of a received packet.

        :return: True if verification succeeded, False otherwise
        :rtype: boolean
        """
        assert(self.raw_packet)
        # MD5 over code/id/length, 16 zero octets in place of the
        # authenticator field, the attributes, and the shared secret.
        hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') +
                               self.raw_packet[20:] + self.secret).digest()
        return hash == self.authenticator

    def RequestPacket(self):
        """Create a ready-to-transmit accounting request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        attr = self._PktEncodeAttributes()
        if self.id is None:
            self.id = self.CreateID()
        header = struct.pack('!BBH', self.code, self.id, (20 + len(attr)))
        # Request authenticator: MD5 over the header, 16 zero octets, the
        # attributes and the shared secret.
        self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00')
                                             + attr + self.secret).digest()
        return header + self.authenticator + attr
class CoAPacket(Packet):
    """RADIUS CoA packets. This class is a specialization
    of the generic :obj:`Packet` class for CoA packets.
    """
    def __init__(self, code=CoARequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identification number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)
        # Keep the raw bytes around so the request authenticator can be
        # verified later (see VerifyCoARequest).
        if 'packet' in attributes:
            self.raw_packet = attributes['packet']

    def CreateReply(self, **attributes):
        """Create a new CoA-ACK packet as a reply to this one. This method
        makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return CoAPacket(CoAACK, self.id,
            self.secret, self.authenticator, dict=self.dict,
            **attributes)

    def VerifyCoARequest(self):
        """Verify the request authenticator of a received packet.

        :return: True if verification succeeded, False otherwise
        :rtype: boolean
        """
        assert(self.raw_packet)
        # MD5 over code/id/length, 16 zero octets in place of the
        # authenticator field, the attributes, and the shared secret.
        hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') +
                               self.raw_packet[20:] + self.secret).digest()
        return hash == self.authenticator

    def RequestPacket(self):
        """Create a ready-to-transmit CoA request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        attr = self._PktEncodeAttributes()
        if self.id is None:
            self.id = self.CreateID()
        header = struct.pack('!BBH', self.code, self.id, (20 + len(attr)))
        # Request authenticator: MD5 over the header, 16 zero octets, the
        # attributes and the shared secret.
        self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00')
                                             + attr + self.secret).digest()
        return header + self.authenticator + attr
class DiscPacket(Packet):
    """RADIUS Disconnect packets. This class is a specialization
    of the generic :obj:`Packet` class for Disconnect-Request packets.
    """
    def __init__(self, code=DisconnectRequest, id=None, secret=six.b(''),
                 authenticator=None, **attributes):
        """Constructor

        :param dict: RADIUS dictionary
        :type dict: pyrad.dictionary.Dictionary class
        :param secret: secret needed to communicate with a RADIUS server
        :type secret: string
        :param id: packet identification number
        :type id: integer (8 bits)
        :param code: packet type code
        :type code: integer (8bits)
        :param packet: raw packet to decode
        :type packet: string
        """
        Packet.__init__(self, code, id, secret, authenticator, **attributes)
        # Keep the raw bytes around so the request authenticator can be
        # verified later (see VerifyDiscRequest).
        if 'packet' in attributes:
            self.raw_packet = attributes['packet']

    def CreateReply(self, **attributes):
        """Create a new Disconnect-ACK packet as a reply to this one. This
        method makes sure the authenticator and secret are copied over
        to the new instance.
        """
        return DiscPacket(DisconnectACK , self.id,
            self.secret, self.authenticator, dict=self.dict,
            **attributes)

    def VerifyDiscRequest(self):
        """Verify the request authenticator of a received packet.

        :return: True if verification succeeded, False otherwise
        :rtype: boolean
        """
        assert(self.raw_packet)
        # MD5 over code/id/length, 16 zero octets in place of the
        # authenticator field, the attributes, and the shared secret.
        hash = md5_constructor(self.raw_packet[0:4] + 16 * six.b('\x00') +
                               self.raw_packet[20:] + self.secret).digest()
        return hash == self.authenticator

    def RequestPacket(self):
        """Create a ready-to-transmit disconnect request packet.
        Return a RADIUS packet which can be directly transmitted
        to a RADIUS server.

        :return: raw packet
        :rtype: string
        """
        attr = self._PktEncodeAttributes()
        if self.id is None:
            self.id = self.CreateID()
        header = struct.pack('!BBH', self.code, self.id, (20 + len(attr)))
        # Request authenticator: MD5 over the header, 16 zero octets, the
        # attributes and the shared secret.
        self.authenticator = md5_constructor(header[0:4] + 16 * six.b('\x00')
                                             + attr + self.secret).digest()
        return header + self.authenticator + attr
def CreateID():
    """Generate the next packet ID from the module-wide counter.

    :return: packet ID
    :rtype: 8 bit integer
    """
    global CurrentID
    next_id = (CurrentID + 1) % 256
    CurrentID = next_id
    return next_id
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import requests
import socket
from path import Path
from urllib.parse import urljoin
from charmhelpers.core import hookenv, host, unitdata
from charms import layer
from charms.layer.apache_bigtop_base import Bigtop
from charms.reactive import is_state
from jujubigdata import utils
class Zeppelin(object):
    """
    This class manages Zeppelin.
    """
    def __init__(self):
        self.dist_config = utils.DistConfig(
            data=layer.options('apache-bigtop-base'))

    def _add_override(self, name, value):
        '''Record a puppet override; it is rendered into site.yaml the
        next time trigger_bigtop() runs.'''
        unitdata.kv().update({
            name: value,
        }, prefix='zeppelin.bigtop.overrides.')

    def install(self):
        '''
        Perform initial one-time setup and trigger puppet.
        '''
        # Dirs are handled by the bigtop deb, so no need to call out to
        # dist_config to do that work. However, we want to adjust the
        # groups for the `ubuntu` user for better interaction with Juju.
        self.dist_config.add_users()

        # Set ports based on layer.yaml options
        self._add_override('zeppelin::server::server_port',
                           self.dist_config.port('zeppelin'))
        self._add_override('zeppelin::server::web_socket_port',
                           self.dist_config.port('zeppelin_websocket'))

        # Default spark to local mode on initial install. This will be
        # reconfigured if/when hadoop or spark relations are made.
        local_master = 'local[*]'
        self._add_override('zeppelin::server::spark_master_url', local_master)

        # The spark-client role expects hdfs by default. Since we want to
        # keep Hadoop optional, ensure we remove hadoopy bits from our
        # local spark config. This has no effect if/when a remote spark joins,
        # and since there is no spark history server running, the event dirs
        # are not important -- they just need not be 'hdfs:///blah'.
        events_log_dir = 'file:///tmp'
        self._add_override('spark::common::master_url', local_master)
        self._add_override('spark::common::event_log_dir', events_log_dir)
        self._add_override('spark::common::history_log_dir', events_log_dir)

        self.trigger_bigtop()

    def trigger_bigtop(self):
        '''
        Trigger the Bigtop puppet recipe that handles the Zeppelin service.
        '''
        bigtop = Bigtop()
        overrides = unitdata.kv().getrange('zeppelin.bigtop.overrides.',
                                           strip=True)

        # The zep deb depends on spark-core which unfortunately brings in
        # most of hadoop. Include appropriate roles here to ensure these
        # packages are configured in the same way as our other Bigtop
        # software deployed with puppet.
        bigtop.render_site_yaml(
            roles=[
                'spark-client',
                'spark-yarn-slave',
                'zeppelin-server',
            ],
            overrides=overrides,
        )

        # NB: during an upgrade, we configure the site.yaml, but do not
        # trigger puppet. The user must do that with the 'reinstall' action.
        if unitdata.kv().get('zeppelin.version.repo', False):
            hookenv.log("An upgrade is available and the site.yaml has been "
                        "configured. Run the 'reinstall' action to continue.",
                        level=hookenv.INFO)
        else:
            ####################################################################
            # BUG: BIGTOP-2742
            # Default zeppelin init script looks for the literal '$(hostname)'
            # string. Symlink it so it exists before the apt install from puppet
            # tries to start the service.
            import subprocess
            # NB: use a distinct local name so we don't shadow the
            # charmhelpers.core 'host' module imported at the top of this
            # file (used by start()/stop()).
            hostname = subprocess.check_output(
                ['hostname']).decode('utf8').strip()
            zepp_pid = '/var/run/zeppelin/zeppelin-zeppelin-{}.pid'.format(
                hostname)
            utils.run_as('root', 'mkdir', '-p', '/var/run/zeppelin')
            utils.run_as('root', 'ln', '-sf',
                         zepp_pid,
                         '/var/run/zeppelin/zeppelin-zeppelin-$(hostname).pid')
            ####################################################################

            bigtop.trigger_puppet()
            self.wait_for_api(30)

            ####################################################################
            # BUG: BIGTOP-2742
            # Puppet apply will call systemctl daemon-reload, which removes the
            # symlink we just created. Now that the bits are on disk, update the
            # init script $(hostname) that caused this mess to begin with.
            zepp_init_script = '/etc/init.d/zeppelin'
            utils.re_edit_in_place(zepp_init_script, {
                r'^# pidfile.*': '# pidfile: {}'.format(zepp_pid),
            })
            utils.run_as('root', 'systemctl', 'daemon-reload')
            self.restart()
            self.wait_for_api(30)
            ####################################################################

    def reconfigure_zeppelin(self):
        '''
        Configure zeppelin based on current environment
        '''
        raise NotImplementedError()
        # NB (kwm): this method is not currently called because Bigtop spark
        # doesn't expose these settings. Leaving this here just in case
        # we update the bigtop charms to provide these bits in the future.
        etc_env = utils.read_etc_env()
        hadoop_extra_classpath = etc_env.get('HADOOP_EXTRA_CLASSPATH', '')
        spark_driver_mem = etc_env.get('SPARK_DRIVER_MEMORY', '1g')
        spark_exe_mode = os.environ.get('MASTER', 'yarn-client')
        spark_executor_mem = etc_env.get('SPARK_EXECUTOR_MEMORY', '1g')
        zeppelin_env = self.dist_config.path('zeppelin_conf') / 'zeppelin-env.sh'
        with open(zeppelin_env, "a") as f:
            f.write('export ZEPPELIN_CLASSPATH_OVERRIDES={}\n'.format(
                hadoop_extra_classpath))
            f.write('export ZEPPELIN_JAVA_OPTS="-Dspark.driver.memory={} -Dspark.executor.memory={}"\n'.format(
                spark_driver_mem,
                spark_executor_mem))
            f.write('export SPARK_SUBMIT_OPTIONS="--driver-memory {} --executor-memory {}"\n'.format(
                spark_driver_mem,
                spark_executor_mem))
            f.write('export MASTER={}\n'.format(spark_exe_mode))

    def configure_hadoop(self):
        '''Create HDFS space for zeppelin and point spark at YARN when no
        remote spark is related.'''
        # create hdfs storage space
        utils.run_as('hdfs', 'hdfs', 'dfs', '-mkdir', '-p', '/user/zeppelin')
        utils.run_as('hdfs', 'hdfs', 'dfs', '-chown', 'zeppelin',
                     '/user/zeppelin')

        # If spark is ready, let configure_spark() trigger bigtop. Otherwise,
        # put our spark in yarn-client mode since hadoop is here.
        if not is_state('spark.ready'):
            self._add_override('spark::common::master_url', 'yarn-client')
            self._add_override('zeppelin::server::spark_master_url',
                               'yarn-client')
            self.trigger_bigtop()

    def configure_spark(self, master_url):
        '''
        Configure the zeppelin spark interpreter
        '''
        # TODO: Add config for Spark driver and executor memory overrides
        self._add_override('spark::common::master_url', master_url)
        self._add_override('zeppelin::server::spark_master_url', master_url)
        self.trigger_bigtop()

    def configure_hive(self, hive_url):
        '''
        Configure the zeppelin hive interpreter
        '''
        self._add_override('zeppelin::server::hiveserver2_url', hive_url)
        self.trigger_bigtop()

    def restart(self):
        self.stop()
        self.start()

    def start(self):
        host.service_start('zeppelin')

    def check_connect(self, addr, port):
        '''Return True if a TCP connection to (addr, port) succeeds.'''
        try:
            with socket.create_connection((addr, port), timeout=10):
                return True
        except OSError:
            return False

    def wait_for_api(self, timeout):
        '''Poll the Zeppelin port until it accepts connections.

        :raises utils.TimeoutError: if *timeout* seconds elapse first.
        '''
        start = time.time()
        while time.time() - start < timeout:
            if self.check_connect('localhost',
                                  self.dist_config.port('zeppelin')):
                return True
            time.sleep(2)
        raise utils.TimeoutError('Timed-out waiting for connection to Zeppelin')

    def stop(self):
        host.service_stop('zeppelin')

    def open_ports(self):
        for port in self.dist_config.exposed_ports('zeppelin'):
            hookenv.open_port(port)

    def close_ports(self):
        for port in self.dist_config.exposed_ports('zeppelin'):
            hookenv.close_port(port)

    def register_notebook(self, local_id, contents):
        '''Import a notebook into Zeppelin, replacing any earlier version,
        and persist the local-id -> zeppelin-id mapping.

        Returns True on success, False otherwise.
        '''
        api = ZeppelinAPI()
        kv = unitdata.kv()
        notebook_ids = kv.get('zeppelin.notebooks.ids', {})
        if local_id in notebook_ids:
            hookenv.log('Replacing notebook {} registered as {}'.format(
                local_id, notebook_ids[local_id]))
            api.delete_notebook(notebook_ids[local_id])
        zeppelin_id = api.import_notebook(contents)
        if not zeppelin_id:
            hookenv.log('Unable to register notebook: {}'.format(local_id),
                        hookenv.ERROR)
            return False
        notebook_ids[local_id] = zeppelin_id
        # BUG FIX: this kv.set used to sit after the return statements and
        # was therefore unreachable, so registered ids were never saved.
        kv.set('zeppelin.notebooks.ids', notebook_ids)
        hookenv.log('Registered notebook {} as {}'.format(local_id,
                                                          zeppelin_id))
        return True

    def remove_notebook(self, local_id):
        '''Delete a previously registered notebook and forget its id.'''
        api = ZeppelinAPI()
        kv = unitdata.kv()
        notebook_ids = kv.get('zeppelin.notebooks.ids', {})
        if local_id in notebook_ids:
            api.delete_notebook(notebook_ids[local_id])
            del notebook_ids[local_id]
        else:
            hookenv.log('Notebook not registered: {}'.format(local_id),
                        hookenv.ERROR)
        kv.set('zeppelin.notebooks.ids', notebook_ids)

    def register_hadoop_notebooks(self):
        for notebook in ('hdfs-tutorial', 'flume-tutorial'):
            contents = (Path('resources') / notebook / 'note.json').text()
            self.register_notebook(notebook, contents)

    def remove_hadoop_notebooks(self):
        for notebook in ('hdfs-tutorial', 'flume-tutorial'):
            self.remove_notebook(notebook)
class ZeppelinAPI(object):
    """
    Helper for interacting with the Apache Zeppelin REST API.
    """
    def _url(self, *parts):
        """Build an API URL rooted at the local Zeppelin server."""
        port = utils.DistConfig(
            data=layer.options('apache-bigtop-base')).port('zeppelin')
        url = 'http://localhost:{}/api/'.format(port)
        for piece in parts:
            url = urljoin(url, piece)
        return url

    def import_notebook(self, contents):
        """POST a notebook definition; return its new id, or None when the
        server does not report a successful creation."""
        response = requests.post(self._url('notebook'), data=contents)
        if response.status_code == 201:
            return response.json()['body']
        return None

    def delete_notebook(self, notebook_id):
        """Delete a notebook by its Zeppelin id."""
        requests.delete(self._url('notebook/', notebook_id))

    def modify_interpreter(self, interpreter_name, properties):
        """Merge *properties* into the named interpreter's settings and
        push the updated configuration back to Zeppelin."""
        response = requests.get(self._url('interpreter/', 'setting'))
        try:
            body = response.json()['body']
        except json.JSONDecodeError:
            hookenv.log('Invalid response from API server: {} {}'.format(
                response, response.text), hookenv.ERROR)
            raise
        for interpreter_data in body:
            if interpreter_data['name'] == interpreter_name:
                break
        else:
            raise ValueError('Interpreter not found: {}'.format(
                interpreter_name))
        interpreter_data['properties'].update(properties)
        response = requests.put(self._url('interpreter/', 'setting/',
                                          interpreter_data['id']),
                                data=json.dumps(interpreter_data))
        if response.status_code != 200:
            raise ValueError('Unable to update interpreter: {}'.format(
                response.text))
| |
#
#
# _____ _ _____ _
# / ____| (_) | __ \ (_)
# | (___ ___ _ __ _ ___ | |__) | __ _ _ __ ___ ___
# \___ \ / _ \ '_ \| |/ _ \ | ___/ '__| | '_ ` _ \ / _ \
# ____) | __/ |_) | | (_) | | | | | | | | | | | | __/
# |_____/ \___| .__/|_|\___/ |_| |_| |_|_| |_| |_|\___|
# | |
# |_|
#
#
# _____ _ _ _____ _ _ _
# | __ \ (_) | | |_ _| | | | | (_)
# | | | | ___ _ __ ___ _ ___| |_ ___ | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | |/ _ \ '_ ` _ \| / __| __/ _ \ | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# | |__| | __/ | | | | | \__ \ || (_) | _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____/ \___|_| |_| |_|_|___/\__\___/ |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
#
#
# info : https://www.sepio.systems/
# support : support@sepio.systems
# IMPORTS
import json
import dateparser
import demistomock as demisto
import requests
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
from CommonServerUserPython import * # noqa: E402 lgtm [py/polluting-import]
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # ISO-8601/UTC timestamp format
MAX_RESULTS = 1000000  # effectively "unlimited" result count
MAX_RESULTS_EVENTS = 50  # cap on events fetched per request
SEPIO = 'Sepio Systems'  # vendor/product name used in outputs
# Convert Sepio Prime events severity to Demisto severity
# NOTE(review): the name contains a typo ("DEMOISTO"); kept as-is for
# compatibility with existing references.
SEPIO_PRIME_SEVERITY_TO_DEMOISTO_SEVERITY_CONVERT = {
    'Debug': 1,
    'Notice': 1,
    'Informational': 1,
    'Alert': 2,
    'Warning': 2,
    'Error': 3,
    'Emergency': 4,
    'Critical': 4
}
# Agents set mode values (UI option -> API value)
AGENTS_ARMED_MODE_CONVERT = {
    'Free': 'free',
    'Armed': 'ARM'
}
# Peripherals set mode values (UI option -> API opCode)
AGENT_PERIPHERALS_APPROVE_MODE_CONVERT = {
    'Approve': 'APPROVE',
    'Disapprove': 'DISAPPROVE'
}
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, *args, **kwargs):
self._prime_auth = kwargs.pop('auth')
super().__init__(*args, **kwargs)
def prime_get_agents(self, host_identifier, ip_address, uuid, has_unapproved_peripherals, has_vulnerable_peripherals,
has_known_attack_tools, limit):
"""Gets Agents from Sepio Prime using the '/agents' API endpoint
:type host_identifier: ``str``
:param host_identifier: filter only agents that their host identifier contains this
:type ip_address: ``str``
:param ip_address: filter only agents that their ip address contains this
:type uuid: ``str``
:param uuid: filter only agents that their uuid contains this
:type has_unapproved_peripherals: ``str`` or ``bool``
:param has_unapproved_peripherals: filter only agents their has_unapproved_peripherals is equal to this
:type has_vulnerable_peripherals: ``str`` or ``bool``
:param has_vulnerable_peripherals: filter only agents their has_vulnerable_peripherals is equal to this
:type has_known_attack_tools: ``str`` or ``bool``
:param has_known_attack_tools: filter only agents their has_known_attack_tools is equal to this
:type limit: ``int``
:param limit: maximum number of items to be returned
:return: List containing all the matching agents from Sepio Prime API
:rtype: ``List[Dict[str, Any]]``
"""
get_agents_params = {
'hostIdentifier': host_identifier,
'ipAddress': ip_address,
'uuid': uuid,
'hasUnapprovedPeripherals': has_unapproved_peripherals,
'hasVulnerablePeripherals': has_vulnerable_peripherals,
'hasKnownAttackTools': has_known_attack_tools
}
res = self.__prime_get_from_api_retries('/agents', get_agents_params, 'lastConfiguration_desc', limit)
return res
def prime_get_global_peripherals(self, host_identifier, ip_address, host_uuid, vendor_name, product_name, serial_number,
is_unapproved_peripheral, is_vulnerable_peripheral, is_known_attack_tool, limit):
"""Gets Peripherals from Sepio Prime using the '/peripherals' API endpoint
:type host_identifier: ``str``
:param host_identifier: filter only peripherals that their agent host identifier contains this
:type ip_address: ``str``
:param ip_address: filter only peripherals that their agent ip address contains this
:type host_uuid: ``str``
:param host_uuid: filter only peripherals that their agent uuid contains this
:type vendor_name: ``str``
:param vendor_name: filter only peripherals that their gui vid contains this
:type product_name: ``str``
:param product_name: filter only peripherals that their gui pid contains this
:type serial_number: ``str``
:param serial_number: filter only peripherals that their serial number contains this
:type is_unapproved_peripheral: ``str`` or ``bool``
:param is_unapproved_peripheral: filter only peripherals their is_unapproved_peripheral is equal to this
:type is_vulnerable_peripheral: ``str`` or ``bool``
:param is_vulnerable_peripheral: filter only peripherals their is_vulnerable_peripheral is equal to this
:type is_known_attack_tool: ``str`` or ``bool``
:param is_known_attack_tool: filter only peripherals is_known_attack_tool is equal to this
:type limit: ``int``
:param limit: maximum number of items to be returned
:return: List containing all the matching peripherals from Sepio Prime API
:rtype: ``List[Dict[str, Any]]``
"""
get_global_peripherals_params = {
'hostIdentifier': host_identifier,
'ipAddress': ip_address,
'hostUuid': host_uuid,
'productInfo': product_name,
'vendor': vendor_name,
'serialNumber': serial_number,
'isUnapprovedPeripheral': is_unapproved_peripheral,
'isVulnerablePeripheral': is_vulnerable_peripheral,
'isKnownAttackTool': is_known_attack_tool
}
res = self.__prime_get_from_api_retries('/peripherals', get_global_peripherals_params, 'hostUuid_asc', limit)
return res
def prime_get_switches(self):
"""Gets Switches from Sepio Prime using the '/switches/switches' API endpoint
:return: List containing all switches from Sepio Prime API
:rtype: ``List[Dict[str, Any]]``
"""
res = self.__prime_get_from_api_retries('/switches/switches', None, None, MAX_RESULTS, results_key=None)
return res
def prime_get_switch_ports(self, switch_ip_address, switch_name, port_id, port_name, link_partner_data_contains,
is_alarmed, limit):
"""Gets Switch Ports from Sepio Prime using the '/switches/ports' API endpoint
:type switch_ip_address: ``str``
:param switch_ip_address: filter only ports that their switch_ip address contains this
:type switch_name: ``str``
:param switch_name: filter only ports that their switch_name contains this
:type port_id: ``str``
:param port_id: filter only ports that their port_id contains this
:type port_name: ``str``
:param port_name: filter only peripherals that their port_name contains this
:type link_partner_data_contains: ``str``
:param link_partner_data_contains: filter only peripherals that link_partner_data_contains contains this
:type is_alarmed: ``str`` or ``bool``
:param is_alarmed: filter only ports that their is_alarmed is equal to this
:type limit: ``int``
:param limit: maximum number of items to be returned
:return: List containing all the matching ports from Sepio Prime API
:rtype: ``List[Dict[str, Any]]``
"""
get_switch_ports_params = {
'switchIp': switch_ip_address,
'switchName': switch_name,
'portID': port_id,
'assignedName': port_name,
'linkPartnerData': link_partner_data_contains,
'alarmed': is_alarmed
}
res = self.__prime_get_from_api_retries('/switches/ports', get_switch_ports_params, 'switchIp_asc', limit)
return res
def prime_get_events(self, from_datetime, min_severity, categories, max_results,
to_datetime=None, source=None, peripheral_type=None, from_eventid=None):
"""Gets Events from Sepio Prime using the '/events/getevents' API endpoint
:type from_datetime: ``str``
:param from_datetime: filter only events that their creation date is after this
:type min_severity: ``str``
:param min_severity: filter only events that their severity is equal to this or higher
:type categories: ``str`` or ``list``
:param categories: filter only events that their category is contained in this
:type max_results: ``int``
:param max_results: maximum number of results
:type to_datetime: ``str``
:param to_datetime: filter only events that their creation date is before this
:type source: ``str``
:param source: filter only events their source contains this
:type peripheral_type: ``str`` or ``list``
:param peripheral_type: maximum number of items to be returned
:return: List containing all the matching events from Sepio Prime API
:rtype: ``List[Dict[str, Any]]``
"""
search_category = categories[0] if categories and len(categories) == 1 else None
get_events_params = {
'category': search_category,
'minimumSeverity': min_severity,
'source': source,
'peripheralIcon': peripheral_type
}
if from_eventid is None:
get_events_params["FromDate"] = from_datetime
get_events_params["ToDate"] = to_datetime
else:
get_events_params["FromEventId"] = from_eventid
res = self.__prime_get_from_api_retries('/events/getevents', get_events_params, 'date_asc', max_results)
return res
def prime_set_agent_mode(self, uuid, host_identifier, ip_address, mode):
"""Set Agent Mode in Sepio Prime using the '/agents/configuration' API endpoint
At least one of uuid, host_identifier or ip_address should not be empty,
if only one agent that match all the search params (uuid, host_identifier, ip_address)
is found, its mode will be updated
:type uuid: ``str``
:param uuid: Agent unique identifier
:type host_identifier: ``str``
:param host_identifier: Agent host identifier
:type ip_address: ``str``
:param ip_address: Agent ip address identifier
:type mode: ``str``
:param mode: mode to be applied
:return: List containing all updated agents details
:rtype: ``List[Dict[str, Any]]``
"""
set_agent_mode_data = [
{
'uuid': uuid,
'hostIdentifier': host_identifier,
'ipAddress': ip_address,
'agentConfigViewResource':
{
'isSystemArmed': mode
}
}
]
res = self.__prime_post_to_api_retries('/agents/configuration', set_agent_mode_data)
return res
    def prime_set_agent_peripherals_mode(self, uuid, host_identifier, ip_address, vid, pid, mode):
        """Set Agent peripherals Mode in Sepio Prime using the '/peripherals/command' API endpoint
        At least one of uuid, host_identifier or ip_address should not be empty,
        if only one agent that match all the search params (uuid, host_identifier, ip_address)
        is found, all the peripherals that match the vid and pid will be updated to new mode
        :type uuid: ``str``
        :param uuid: Agent unique identifier
        :type host_identifier: ``str``
        :param host_identifier: Agent host identifier
        :type ip_address: ``str``
        :param ip_address: Agent ip address identifier
        :type vid: ``str``
        :param vid: peripheral vendor ID to match
        :type pid: ``str``
        :param pid: peripheral product ID to match
        :type mode: ``str``
        :param mode: mode to be applied
        :return: List containing all updated agents uuid
        :rtype: ``List[str]``
        """
        # 'opCode' carries the mode applied to every peripheral matched by 'peripheralsIds'.
        set_agent_peripherals_data = {
            'opCode': mode,
            'peripheralsIds': [
                {
                    'uuid': uuid,
                    'hostIdentifier': host_identifier,
                    'ipAddress': ip_address,
                    'vid': vid,
                    'pid': pid
                }
            ]
        }
        res = self.__prime_post_to_api_retries('/peripherals/command', set_agent_peripherals_data)
        return res
def prime_test_connection(self):
"""Test connection to Sepio Prime server using the url, username and password
that was inserted by the user
:return: success boolean result and error message if its not successfully
:rtype: ``Tuple[bool, str]``
"""
try:
res = self.__prime_request_token()
is_successfull = bool(res and res.get('token'))
message = res.get('text') if not is_successfull else None
return is_successfull, message
except Exception as e:
error_message = str(e)
demisto.error(error_message)
if isinstance(e, DemistoException):
args_len = len(e.args)
if args_len > 0:
error_message = e.args[0]
return False, error_message
    def __prime_request_token(self):
        """Request a fresh API token from Sepio Prime using the stored credentials.
        :return: dict with 'is_successfull' (bool), 'token' (str or None) and 'text' (raw body)
        :rtype: ``Dict[str, Any]``
        """
        data = {'username': self._prime_auth[0], 'password': self._prime_auth[1]}
        # 400 is accepted as ok so that bad credentials are reported via 'text' instead of raising
        res = self._http_request('POST', '/auth/signin', json_data=data, ok_codes=(200, 400), resp_type='response')
        return {
            'is_successfull': res.ok,
            'token': res.json()['token'] if res.ok else None,
            'text': res.text
        }
def __prime_get_from_api(self, url_suffix, search_params, sort_by, max_size, resp_type='response'):
params = {}
if max_size:
params.update({
'pageSize': str(max_size),
'pageNumber': '1'
})
if sort_by:
params['sortBy'] = sort_by
if search_params:
for key, value in search_params.items():
if value is not None:
params[key] = value
headers = self.__prime_api_auth_headers_format(self.__prime_get_token_from_cache())
res = self._http_request('GET', url_suffix, headers=headers, params=params, resp_type=resp_type)
return res
    def __prime_get_from_api_retries(self, url_suffix, search_params, sort_by, max_size, retries=2, results_key='data'):
        """GET from the Sepio Prime API, renewing the cached token and retrying on 401.
        :return: the parsed JSON body, or only its ``results_key`` entry when given.
            NOTE(review): if every attempt returns 401 the loop ends and the method
            implicitly returns None -- confirm callers tolerate that.
        """
        i = 1
        while i <= retries:
            i += 1
            try:
                res = self.__prime_get_from_api(url_suffix, search_params, sort_by, max_size)
                if res.status_code == 401:  # api token is not valid
                    # force-refresh the cached token, then retry the request
                    self.__prime_get_token_from_cache(renew=True)
                    continue
                self.__prime_handle_http_response(res, url_suffix)
                data = res.json()
                return data[results_key] if results_key else data
            except Exception as e:
                demisto.error(str(e))
                raise
def __prime_post_to_api(self, url_suffix, json_data, resp_type='response'):
headers = self.__prime_api_auth_headers_format(self.__prime_get_token_from_cache())
res = self._http_request('POST', url_suffix, headers=headers, json_data=json_data, resp_type=resp_type)
return res
    def __prime_post_to_api_retries(self, url_suffix, json_data, retries=2):
        """POST to the Sepio Prime API, renewing the cached token and retrying on 401.
        :return: dict with 'ok' (bool), raw 'text' and parsed 'object' (None on failure).
            NOTE(review): if every attempt returns 401 the loop ends and the method
            implicitly returns None -- confirm callers tolerate that.
        """
        i = 1
        while i <= retries:
            i += 1
            try:
                res = self.__prime_post_to_api(url_suffix, json_data)
                if res.status_code == 401:  # api token is not valid
                    # force-refresh the cached token, then retry the request
                    self.__prime_get_token_from_cache(renew=True)
                    continue
                self.__prime_handle_http_response(res, url_suffix)
                res_obj = res.json() if res.ok else None
                return {'ok': res.ok, 'text': res.text, 'object': res_obj}
            except Exception as e:
                demisto.error(str(e))
                raise
    def __prime_set_token_to_cache(self, token):
        """Persist the API token in the integration context for reuse across runs.
        NOTE(review): this replaces the ENTIRE integration context with a single-key
        dict -- confirm no other keys are expected to survive in the context.
        """
        demisto.setIntegrationContext({'api_token': token})
    def __prime_get_token_from_cache(self, renew=False):
        """Return the cached Sepio Prime API token, requesting a new one when needed.
        :type renew: ``bool``
        :param renew: when True, ignore the cached token and request a fresh one
        :return: a valid API token
        :rtype: ``str``
        :raises Exception: when a new token cannot be obtained from the server
        """
        integration_context = demisto.getIntegrationContext()
        access_token = integration_context.get('api_token')
        # renew token
        if not access_token or renew:
            token_new = None
            try:
                res = self.__prime_request_token()
                token_new = res.get('token')
            except Exception as e:
                demisto.error(str(e))
            # if an error to connect with prime and get token
            if not token_new:
                # clear the stale token so the next run starts from scratch
                self.__prime_set_cache_keys_to_none('api_token')
                raise Exception(f'Cannot get token from Sepio Prime server at ({self._base_url})')
            self.__prime_set_token_to_cache(token_new)
            return token_new
        return access_token
def __prime_set_cache_keys_to_none(self, *keys):
integration_context = demisto.getIntegrationContext()
for key in keys:
if key in integration_context:
integration_context[key] = None
if integration_context:
demisto.setIntegrationContext(integration_context)
    @staticmethod
    def __prime_api_auth_headers_format(token):
        """Build the Bearer Authorization header expected by the Sepio Prime API."""
        return {'Authorization': f'Bearer {token}'}
@staticmethod
def __prime_handle_http_response(http_res, url_suffix):
if http_res.status_code == 400:
raise Exception(http_res.text)
if http_res.status_code == 403: # forbbiden for users with this type of user
raise Exception('This command can be used only by Sepio Prime '
'users with higher user profile')
if not http_res.ok:
raise Exception(
f'Failed to request {url_suffix}, reason: ({http_res.status_code}) {http_res.reason}: {http_res.text}')
def convert_to_demisto_severity(severity: str) -> int:
    """Maps Sepio Prime Events severity to Cortex XSOAR severity
    Converts the SepioPrimeAPI alert severity level ('Debug', 'Notice',
    'Informational', 'Alert', 'Warning', 'Error', 'Emergency', 'Critical') to Cortex XSOAR incident severity (1 to 4)
    for mapping.
    :type severity: ``str``
    :param severity: severity as returned from the Sepio Prime event (str)
    :return: Cortex XSOAR Severity (1 to 4)
    :rtype: ``int``
    :raises KeyError: if ``severity`` is not a key of the conversion table
    """
    return SEPIO_PRIME_SEVERITY_TO_DEMOISTO_SEVERITY_CONVERT[severity]
def arg_to_int(arg, arg_name, required):
    """Converts an XSOAR argument to a Python int
    Validates an argument provided to XSOAR via ``demisto.args()`` and
    converts it into an ``int``. Raises ValueError on invalid input; a
    missing (None) argument raises only when ``required`` is True.
    :type arg: ``Any``
    :param arg: argument to convert
    :type arg_name: ``str``
    :param arg_name: argument name (used in error messages)
    :type required: ``bool``
    :param required: throws exception if ``True`` and argument provided is None
    :return: an ``int`` on success, ``None`` when arg is None and not required
    :rtype: ``Optional[int]``
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, int):
        return arg
    if isinstance(arg, str):
        # only non-negative digit strings are accepted
        if not arg.isdigit():
            raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
        return int(arg)
    raise ValueError(f'Invalid number: "{arg_name}"')
def arg_to_timestamp(arg, arg_name, required):
    """Converts an XSOAR argument to a timestamp (seconds from epoch)
    Accepts digit strings, numbers, or date strings (ISO8601 or relative,
    e.g. "2019-10-23T00:00:00" or "1 days", parsed with dateparser in UTC).
    Raises ValueError on invalid input; a missing (None) argument raises
    only when ``required`` is True.
    :type arg: ``Any``
    :param arg: argument to convert
    :type arg_name: ``str``
    :param arg_name: argument name (used in error messages)
    :type required: ``bool``
    :param required: throws exception if ``True`` and argument provided is None
    :return: an ``int`` timestamp (seconds from epoch), or ``None`` when
        arg is None and not required
    :rtype: ``Optional[int]``
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, (int, float)):
        # already a timestamp; truncate floats to int
        return int(arg)
    if isinstance(arg, str):
        if arg.isdigit():
            # a digit string is treated as a ready-made timestamp
            return int(arg)
        parsed = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if parsed is None:
            # dateparser could not understand the string
            raise ValueError(f'Invalid date: {arg_name}')
        return int(parsed.timestamp())
    raise ValueError(f'Invalid date: "{arg_name}"')
def validate_fetch_data_max_result(user_results, max_results, arg_name):
    """Validate the requested result-count limit for Sepio Prime API calls.
    :type user_results: ``int``
    :param user_results: requested maximum results value
    :type max_results: ``int``
    :param max_results: maximum allowed value for results count
    :type arg_name: ``str``
    :param arg_name: argument name for the error message
    :return: ``user_results`` unchanged when it is a positive int within range
    :raises ValueError: when the value is missing, not an int, or out of range
    :rtype: ``Optional[int]``
    """
    is_valid = isinstance(user_results, int) and 0 < user_results <= max_results
    if not is_valid:
        raise ValueError(f'{arg_name} must be an integer, in the range between 1 to {max_results}')
    return user_results
def string_contains(original, should_contains_str):
    """Substring check that tolerates a None/empty ``original``.
    :type original: ``str``
    :param original: the string that should contain
    :type should_contains_str: ``str``
    :param should_contains_str: the string that should be contained
    :return: whether ``original`` contains ``should_contains_str``; False when
        ``original`` is empty/None and a non-empty needle was given
    :rtype: ``Optional[bool]``
    """
    # a non-empty needle can never be found in an empty/None haystack
    if not original and should_contains_str:
        return False
    return should_contains_str in original
def string_startswith(original, starts_with_str):
    """Prefix check that tolerates a None/empty ``original``.
    :type original: ``str``
    :param original: the string whose prefix is tested
    :type starts_with_str: ``str``
    :param starts_with_str: the string that should be the beginning of original
    :return: whether ``original`` starts with ``starts_with_str``; False when
        ``original`` is empty/None and a non-empty prefix was given
    :rtype: ``Optional[bool]``
    """
    # a non-empty prefix can never match an empty/None string
    if not original and starts_with_str:
        return False
    return original.startswith(starts_with_str)
def list_of_object_to_list_subset(original, *args):
    """Project each dict in ``original`` onto only the keys named in ``args``.
    :type original: ``List[Dict[str, any]]``
    :param original: original list of objects
    :type args: ``List[str]``
    :param args: field names kept in each resulting object
    :return: new list of dicts containing only the requested fields
    :rtype: ``List[Dict[str, any]]``
    """
    wanted = set(args)  # O(1) membership tests
    return [{key: value for key, value in item.items() if key in wanted}
            for item in original]
def list_of_objects_to_readable_output(name, items, headers):
    """Creates readable output from list of items
    :type name: ``str``
    :param name: readable output table name
    :type items: ``List[Dict[str, any]]``
    :param items: original list of objects
    :type headers: ``List[str]``
    :param headers: field names used both as the table columns and as the
        subset of keys kept from each item
    :return:
        returns an ``str`` with markdown format
    :rtype: ``str``
    """
    return tableToMarkdown(name, list_of_object_to_list_subset(items, *headers), headers)
def empty_get_result_to_readable_result(readable_output_markdown):
    """Creates a war-room note entry for empty results (no context outputs).
    :type readable_output_markdown: ``str``
    :param readable_output_markdown: the readable output markdown
    :return:
        returns a ``Dict[str, any]`` war-room entry with an empty Contents list
    :rtype: ``Dict[str, any]``
    """
    return {
        'Type': entryTypes['note'],
        'ContentsFormat': formats['json'],
        'Contents': [],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': readable_output_markdown
    }
def test_module(client):
"""
Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful.
Args:
client: SepioPrimeAPI client
Returns:
'ok' if test passed, anything else will fail the test.
"""
is_successfull, message = client.prime_test_connection()
if is_successfull:
return 'ok'
return message
def sepio_query_agents_command(client, args):
    """
    Returns CommandResults with all the agents that are in the query args
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        All the agents that are in the query args
        readable_output (str): This will be presented in the war room - should be in markdown syntax - human readable
        outputs (dict): Dictionary/JSON - saved in the incident context in order to be used as inputs
            for other tasks in the playbook
    """
    host_identifier = args.get('host_identifier')
    ip_address = args.get('ip_address')
    uuid = args.get('uuid')
    has_unapproved_peripherals = args.get('has_unapproved_peripherals')
    has_vulnerable_peripherals = args.get('has_vulnerable_peripherals')
    has_known_attack_tools = args.get('has_known_attack_tools')
    # 'limit' defaults to 20 and must be a positive int up to MAX_RESULTS
    limit = validate_fetch_data_max_result(arg_to_int(args.get('limit', 20), 'limit', False), MAX_RESULTS, 'limit')
    agents = client.prime_get_agents(host_identifier, ip_address, uuid, has_unapproved_peripherals,
                                     has_vulnerable_peripherals, has_known_attack_tools, limit)
    # map the raw API field names to the context output schema
    outputs = [{
        'HostIdentifier': agent['hostIdentifier'],
        'IpAddress': agent['localIpAddress'],
        'UUID': agent['uuid'],
        'OsVersion': agent['osVersion'],
        'HardwareModel': agent['pcModel'],
        # split the '**'-separated NIC info string into a list
        'NicInfo': agent['nicsText'].split('**') if agent['nicsText'] is not None else None,
        'LastUpdate': agent['lastUpdated'],
        'Status': agent['displayStatusCombined'],
        'HasUnapprovedPeripherals': agent['hasUnapprovedPeripherals'],
        'HasVulnerablePeripherals': agent['hasVulnerablePeripherals'],
        'HasKnownAttackTools': agent['hasKnownAttackTools'],
        'LastConfiguration': agent['lastConfiguered'],
        'Version': agent['serviceVersion'],
        'License': agent['licenseStatus']
    } for agent in agents]
    outputs_headers = ['UUID', 'IpAddress', 'HostIdentifier',
                       'HasUnapprovedPeripherals', 'HasVulnerablePeripherals', 'HasKnownAttackTools']
    readable_output = list_of_objects_to_readable_output('Agents', outputs, outputs_headers)
    # fall back to a plain readable entry when nothing matched
    return CommandResults(
        outputs_prefix='Sepio.Agent',
        outputs_key_field='UUID',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=agents
    ) if outputs else empty_get_result_to_readable_result(readable_output)
def sepio_query_global_peripherals_command(client, args):
    """
    Returns CommandResults with all the agent peripherals that are in the query args
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        All the peripherals that are in the query args
        readable_output (str): This will be presented in the war room - should be in markdown syntax - human readable
        outputs (dict): Dictionary/JSON - saved in the incident context in order to be used as inputs
            for other tasks in the playbook
    """
    host_identifier = args.get('host_identifier')
    ip_address = args.get('ip_address')
    host_uuid = args.get('host_uuid')
    vendor_name = args.get('vendor_name')
    product_name = args.get('product_name')
    serial_number = args.get('serial_number')
    is_unapproved_peripheral = args.get('is_unapproved_peripheral')
    is_vulnerable_peripheral = args.get('is_vulnerable_peripheral')
    is_known_attack_tool = args.get('is_known_attack_tool')
    # 'limit' defaults to 20 and must be a positive int up to MAX_RESULTS
    limit = validate_fetch_data_max_result(arg_to_int(args.get('limit', 20), 'limit', False), MAX_RESULTS, 'limit')
    peripherals = client.prime_get_global_peripherals(host_identifier, ip_address, host_uuid,
                                                      vendor_name, product_name, serial_number,
                                                      is_unapproved_peripheral, is_vulnerable_peripheral, is_known_attack_tool,
                                                      limit)
    # map the raw API field names to the context output schema
    outputs = [{
        'HostIdentifier': peripheral['hostIdentifier'],
        'HostUUID': peripheral['uuid'],
        'DeviceID': peripheral['deviceID'],
        'DeviceIcon': peripheral['devIcon'],
        'DeviceType': peripheral['devIconDescription'],
        'VID': peripheral['vid'],
        'VendorName': peripheral['guiVid'],
        'PID': peripheral['pid'],
        'ProductName': peripheral['guiPid'],
        'SerialNumber': peripheral['guiSerial'],
        'Status': peripheral['status'],
        'IsUnapprovedPeripheral': not peripheral['approved'],
        'IsVulnerablePeripheral': peripheral['isVulnerablePeripheral'],
        'IsKnownAttackTool': peripheral['isKnownAttackTool']
    } for peripheral in peripherals]
    outputs_headers = ['HostUUID', 'DeviceID', 'Status', 'IsUnapprovedPeripheral', 'IsVulnerablePeripheral', 'IsKnownAttackTool']
    readable_output = list_of_objects_to_readable_output('Peripherals', outputs, outputs_headers)
    # the composite context key (HostUUID + DeviceID) is encoded in the prefix,
    # hence the empty outputs_key_field; falls back to a plain entry when empty
    return CommandResults(
        outputs_prefix='Sepio.Peripheral((val.HostUUID == obj.HostUUID) && (val.DeviceID == obj.DeviceID))',
        outputs_key_field='',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=peripherals
    ) if outputs else empty_get_result_to_readable_result(readable_output)
def sepio_query_switches_command(client, args):
    """
    Returns CommandResults with all the switches that are in the query args,
    this command is getting all the data from Sepio Prime server and filter and order it locally
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        All the switches that are in the query args
        readable_output (str): This will be presented in the war room - should be in markdown syntax - human readable
        outputs (dict): Dictionary/JSON - saved in the incident context in order to be used as inputs
            for other tasks in the playbook
    """
    ip_address = args.get('ip_address')
    switch_name = args.get('switch_name')
    model = args.get('model')
    ios_version = args.get('ios_version')
    is_alarmed = args.get('is_alarmed')
    # keep None when the arg was not given so the filter is skipped entirely
    is_alarmed_bool_or_none = argToBoolean(is_alarmed) if is_alarmed is not None else None
    limit = validate_fetch_data_max_result(arg_to_int(args.get('limit', 20), 'limit', False), MAX_RESULTS, 'limit')
    switches = client.prime_get_switches()
    outputs = []
    for switch in switches:
        switch_connection_data = switch['connectionData']
        switch_ip_address = switch_connection_data['ipAddress']
        switch_assigned_name = switch['assignedName']
        switch_model = switch['model']
        switch_ios = switch['ios']
        switch_status = switch['statusDescription']
        switch_is_alarmed = switch_status == 'Alarmed'
        # local filtering: skip switches that fail any provided criterion
        if ((ip_address and not string_contains(switch_ip_address, ip_address))
                or (switch_name and not string_contains(switch_assigned_name, switch_name))
                or (model and not string_startswith(switch_model, model))
                or (ios_version and not string_contains(switch_ios, ios_version))
                or (is_alarmed_bool_or_none is not None and is_alarmed_bool_or_none != switch_is_alarmed)):
            continue
        outputs.append({
            'SwitchID': switch['switchID'],
            'IpAddress': switch_ip_address,
            'Name': switch_assigned_name,
            'Model': switch_model,
            'IosVersion': switch_ios,
            'LastUpdate': switch_connection_data['lastUpdated'],
            'NumberOfPorts': switch['numOfPorts'],
            'Status': switch_status,
            'IsAlarmed': switch_is_alarmed
        })
        # stop as soon as the requested number of matches was collected
        if len(outputs) == limit:
            break
    outputs_headers = ['SwitchID', 'Status', 'IsAlarmed']
    readable_output = list_of_objects_to_readable_output('Switches', outputs, outputs_headers)
    # fall back to a plain readable entry when nothing matched
    return CommandResults(
        outputs_prefix='Sepio.Switch',
        outputs_key_field='SwitchID',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=switches
    ) if outputs else empty_get_result_to_readable_result(readable_output)
def sepio_query_switch_ports_command(client, args):
    """
    Returns CommandResults with all the ports that are in the query args
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        All the switch ports that are in the query args
        readable_output (str): This will be presented in the war room - should be in markdown syntax - human readable
        outputs (dict): Dictionary/JSON - saved in the incident context in order to be used as inputs
            for other tasks in the playbook
    """
    switch_ip_address = args.get('switch_ip_address')
    switch_name = args.get('switch_name')
    port_id = args.get('port_id')
    port_name = args.get('port_name')
    link_partner_data_contains = args.get('link_partner_data_contains')
    is_alarmed = args.get('is_alarmed')
    # 'limit' defaults to 20 and must be a positive int up to MAX_RESULTS
    limit = validate_fetch_data_max_result(arg_to_int(args.get('limit', 20), 'limit', False), MAX_RESULTS, 'limit')
    ports = client.prime_get_switch_ports(switch_ip_address, switch_name, port_id,
                                          port_name, link_partner_data_contains, is_alarmed,
                                          limit)
    # map the raw API field names to the context output schema
    outputs = [{
        'SwitchID': port['switchID'],
        'SwitchIpAddress': port['switchIp'],
        'SwitchName': port['switchName'],
        'PortID': port['portID'],
        'Name': port['assignedName'],
        'LastUpdate': port['switchLastPolled'],
        'NumberOfMacAddresses': port['portMacsDataCount'],
        # flatten the link-partner records into a list of MAC addresses
        'LinkPartners': [mac_data['mac'] for mac_data in port['linkPartnerInfo']['portMacsData']],
        'Status': port['portStatusString'],
        'IsAlarmed': port['alarmed'],
        'AlarmInfo': port['identifiedString']
    } for port in ports]
    outputs_headers = ['SwitchID', 'PortID', 'Status', 'IsAlarmed', 'AlarmInfo']
    readable_output = list_of_objects_to_readable_output('Ports', outputs, outputs_headers)
    # the composite context key (SwitchID + PortID) is encoded in the prefix,
    # hence the empty outputs_key_field; falls back to a plain entry when empty
    return CommandResults(
        outputs_prefix='Sepio.Port((val.SwitchID == obj.SwitchID) && (val.PortID == obj.PortID))',
        outputs_key_field='',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=ports
    ) if outputs else empty_get_result_to_readable_result(readable_output)
def sepio_query_system_events_command(client, args):
    """
    Returns CommandResults with all the events that are in the query args
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        All the system events that are in the query args
        readable_output (str): This will be presented in the war room - should be in markdown syntax - human readable
        outputs (dict): Dictionary/JSON - saved in the incident context in order to be used as inputs
            for other tasks in the playbook
    """
    start_datetime = args.get('start_datetime')
    end_datetime = args.get('end_datetime')
    min_severity = args.get('min_severity')
    category = argToList(args.get('category'))
    source = args.get('source')
    peripheral_type = args.get('peripheral_type')
    # 'limit' defaults to 20 and must be a positive int up to MAX_RESULTS
    limit = validate_fetch_data_max_result(arg_to_int(args.get('limit', 20), 'limit', False), MAX_RESULTS, 'limit')
    events = client.prime_get_events(start_datetime, min_severity, category, limit, end_datetime, source, peripheral_type)
    # map the raw API field names to the context output schema
    outputs = [{
        'EventID': event['eventID'],
        'CreationDatetime': event['creationTime'],
        'Severity': event['severityString'],
        'Description': event['description'],
        'Category': event['category'],
        'Source': event['eventEntityID'],
        'PeripheralType': event['peripheralIcon'],
        'Details': event['details']
    } for event in events]
    outputs_headers = ['EventID', 'CreationDatetime', 'Category', 'Source', 'Description']
    readable_output = list_of_objects_to_readable_output('Events', outputs, outputs_headers)
    # fall back to a plain readable entry when nothing matched
    return CommandResults(
        outputs_prefix='Sepio.Event',
        outputs_key_field='EventID',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=events
    ) if outputs else empty_get_result_to_readable_result(readable_output)
def sepio_set_agent_mode_command(client, args):
    """
    Updates agent mode
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        (str) update response
    """
    mode = args.get('mode')
    # translate the user-facing mode name into the API value
    prime_agent_mode = AGENTS_ARMED_MODE_CONVERT.get(mode)
    if not prime_agent_mode:
        raise ValueError('mode must be one value from ' + ' or '.join(AGENTS_ARMED_MODE_CONVERT.keys()))
    res = client.prime_set_agent_mode(args.get('uuid'), args.get('host_identifier'),
                                      args.get('ip_address'), prime_agent_mode)
    return f'Agent [\'{res["object"][0]["uuid"]}\'] mode has been changed successfully to \'{mode}\''
def sepio_set_agent_peripherals_mode_command(client, args):
    """
    Updates agent peripherals mode
    Args:
        client (Client): SepioPrimeAPI client.
        args (dict): all command arguments.
    Returns:
        (str) update response
    """
    vid = args.get('vid')
    pid = args.get('pid')
    mode = args.get('mode')
    # translate the user-facing mode name into the API value
    prime_peripherals_mode = AGENT_PERIPHERALS_APPROVE_MODE_CONVERT.get(mode)
    if not prime_peripherals_mode:
        raise ValueError('mode must be one value from ' + ' or '.join(AGENT_PERIPHERALS_APPROVE_MODE_CONVERT.keys()))
    res = client.prime_set_agent_peripherals_mode(args.get('uuid'), args.get('host_identifier'),
                                                  args.get('ip_address'), vid, pid, prime_peripherals_mode)
    return f'Peripherals of [\'{res["object"][0]}\'] with vid \'{vid}\' and pid \'{pid}\' mode changed successfully to \'{mode}\''
def fetch_incidents(client, last_run, first_fetch_time, min_serverity, categories, max_results):
    """
    This function will execute each interval (default is 1 minute).
    Args:
        client (Client): SepioPrimeAPI client
        last_run (dict): The greatest incident created_time we fetched from last fetch
        first_fetch_time (str): If last_run is None then fetch all incidents since first_fetch_time
        min_serverity (str): Alert minimum severity from which to retrieve. Values are: Warning, Error, Critical
        categories (list): Alert category to retrieve. Values are:USB, Network
        max_results (int): Maximum number of alerts to fetch at a time
    Returns:
        next_run: This will be last_run in the next fetch-incidents
        incidents: Incidents that will be created in Demisto
    """
    # Get the last fetch time, if exists
    last_fetch = last_run.get('last_fetch')
    last_fetch_eventid = last_run.get('last_fetch_eventid')
    # Handle first time fetch
    last_fetch_dt = None
    if last_fetch is None:
        last_fetch_dt = dateparser.parse(first_fetch_time)
    else:
        last_fetch_dt = dateparser.parse(last_fetch)
    last_fetch_timestamp = date_to_timestamp(last_fetch_dt)
    # the number of new incidents for each time is limited
    max_results = validate_fetch_data_max_result(max_results, MAX_RESULTS_EVENTS, 'limit')
    incidents = []
    items = client.prime_get_events(timestamp_to_datestring(last_fetch_timestamp), min_serverity,
                                    categories, max_results, None, None, None, last_fetch_eventid)
    for item in items:
        item['eventSource'] = SEPIO  # constant for mapping
        incident_created_time = dateparser.parse(item['creationTime'])
        incident_created_timestamp = date_to_timestamp(incident_created_time)
        incident = {
            'name': f'[{item["eventSource"]}] ' + item["description"] + ' ' + item["details"],
            'occurred': timestamp_to_datestring(incident_created_timestamp, DATE_FORMAT),
            'rawJSON': json.dumps(item),
            'severity': convert_to_demisto_severity(item.get('severityString', 'Debug'))
        }
        incidents.append(incident)
        # Update last run and add incident if the incident is newer than last fetch
        if incident_created_timestamp > last_fetch_timestamp:
            last_fetch_timestamp = incident_created_timestamp
    if len(items):
        # NOTE(review): events are fetched sorted by 'date_asc', so items[0] is the
        # OLDEST event of the batch; using items[0]["eventID"] + 1 as the next start
        # id looks like it should be items[-1] -- confirm against the API ordering.
        last_fetch_eventid = items[0]["eventID"] + 1
    next_run = {'last_fetch': timestamp_to_datestring(
        last_fetch_timestamp, DATE_FORMAT), 'last_fetch_eventid': last_fetch_eventid}
    return next_run, incidents
def main():
    """
    PARSE AND VALIDATE INTEGRATION PARAMS
    Reads the integration parameters, builds the Client, and dispatches
    the invoked command; all failures are reported via return_error.
    """
    params = demisto.params()
    credentials = params.get('credentials')
    username = credentials.get('identifier')
    password = credentials.get('password')
    # get the service API url
    base_url = urljoin(params['url'], '/prime/webui')
    verify_certificate = not params.get('insecure', False)
    # How much time before the first fetch to retrieve incidents
    first_fetch_time = params.get('fetch_time', '1 days').strip()
    proxy = params.get('proxy', False)
    # Maximum number of alerts to receive each run of fetch_incidents
    fetch_incidents_max_alerts = arg_to_int(
        arg=params.get('max_alerts'),
        arg_name='max_alerts',
        required=False
    )
    # Categories for events to be received in fetch_incidents, the values are USB, Network
    fetch_incidents_categories = argToList(params.get('category'))
    fetch_incidents_min_severity = params.get('min_severity')
    LOG(f'Command being called is {demisto.command()}')
    try:
        # 400/401/403 are listed as ok_codes so the client can translate them itself
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            auth=(username, password),
            proxy=proxy,
            ok_codes=(200, 201, 204, 400, 401, 403))
        if demisto.command() == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)
        elif demisto.command() == 'sepio-query-agents':
            return_results(sepio_query_agents_command(client, demisto.args()))
        elif demisto.command() == 'sepio-query-peripherals':
            return_results(sepio_query_global_peripherals_command(client, demisto.args()))
        elif demisto.command() == 'sepio-query-switches':
            return_results(sepio_query_switches_command(client, demisto.args()))
        elif demisto.command() == 'sepio-query-switch-ports':
            return_results(sepio_query_switch_ports_command(client, demisto.args()))
        elif demisto.command() == 'sepio-query-system-events':
            return_results(sepio_query_system_events_command(client, demisto.args()))
        elif demisto.command() == 'sepio-set-agent-mode':
            return_results(sepio_set_agent_mode_command(client, demisto.args()))
        elif demisto.command() == 'sepio-set-peripherals-mode':
            return_results(sepio_set_agent_peripherals_mode_command(client, demisto.args()))
        elif demisto.command() == 'fetch-incidents':
            # Set and define the fetch incidents command to run after activated via integration settings.
            next_run, incidents = fetch_incidents(
                client=client,
                last_run=demisto.getLastRun(),
                first_fetch_time=first_fetch_time,
                min_serverity=fetch_incidents_min_severity,
                categories=fetch_incidents_categories,
                max_results=fetch_incidents_max_alerts)
            demisto.setLastRun(next_run)
            demisto.incidents(incidents)
    # Log exceptions
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
# Entry point; '__builtin__'/'builtins' cover execution inside the XSOAR engine.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from urllib.parse import urlparse
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union, Tuple
from datetime import datetime
import six
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy
from azure.core.async_paging import AsyncItemPaged
from .._shared.user_credential_async import CommunicationTokenCredential
from .._generated.aio import AzureCommunicationChatService
from .._generated.models import (
AddChatParticipantsRequest,
SendReadReceiptRequest,
SendChatMessageRequest,
SendTypingNotificationRequest,
UpdateChatMessageRequest,
UpdateChatThreadRequest,
SendChatMessageResult,
ChatMessageType,
ChatError
)
from .._models import (
ChatParticipant,
ChatMessage,
ChatMessageReadReceipt,
ChatThreadProperties
)
from .._shared.models import CommunicationIdentifier
from .._communication_identifier_serializer import serialize_identifier
from .._utils import CommunicationErrorResponseConverter
from .._version import SDK_MONIKER
class ChatThreadClient(object):
    """A client to interact with the AzureCommunicationService Chat gateway.

    Instances of this class is normally retrieved by ChatClient.get_chat_thread_client()

    This client provides operations to add participant(s) to chat thread, remove participant from
    chat thread, send message, delete message, update message, send typing notifications,
    send and list read receipt

    :ivar thread_id: Chat thread id.
    :vartype thread_id: str

    :param str endpoint:
        The endpoint of the Azure Communication resource.
    :param CommunicationTokenCredential credential:
        The credentials with which to authenticate. The value contains a User
        Access Token
    :param str thread_id:
        The unique thread id.

    .. admonition:: Example:

        .. literalinclude:: ../samples/chat_thread_client_sample_async.py
            :start-after: [START create_chat_thread_client]
            :end-before: [END create_chat_thread_client]
            :language: python
            :dedent: 8
            :caption: Creating the ChatThreadClient.
    """

    def __init__(
        self,
        endpoint: str,
        credential: CommunicationTokenCredential,
        thread_id: str,
        **kwargs: Any
    ) -> None:
        # BUG FIX: urlparse is used for endpoint validation below but was
        # never imported at module level in this file; import the stdlib
        # symbol locally so constructing a client does not raise NameError.
        from urllib.parse import urlparse

        if not thread_id:
            raise ValueError("thread_id can not be None or empty")
        if not credential:
            raise ValueError("credential can not be None")
        try:
            # Default to HTTPS when no scheme was supplied.
            if not endpoint.lower().startswith('http'):
                endpoint = "https://" + endpoint
        except AttributeError:
            raise ValueError("Host URL must be a string")
        parsed_url = urlparse(endpoint.rstrip('/'))
        if not parsed_url.netloc:
            raise ValueError("Invalid URL: {}".format(endpoint))
        self._thread_id = thread_id
        self._endpoint = endpoint
        self._credential = credential
        self._client = AzureCommunicationChatService(
            endpoint,
            authentication_policy=AsyncBearerTokenCredentialPolicy(self._credential),
            sdk_moniker=SDK_MONIKER,
            **kwargs)

    @property
    def thread_id(self):
        # type: () -> str
        """
        Gets the thread id from the client.

        :rtype: str
        """
        return self._thread_id

    @distributed_trace_async
    async def get_properties(
        self,
        **kwargs
    ): # type: (...) -> ChatThreadProperties
        """Gets the properties of the chat thread.

        :return: ChatThreadProperties
        :rtype: ~azure.communication.chat.ChatThreadProperties
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START get_thread]
                :end-before: [END get_thread]
                :language: python
                :dedent: 12
                :caption: Retrieving chat thread properties by chat thread id.
        """
        chat_thread = await self._client.chat_thread.get_chat_thread_properties(self._thread_id, **kwargs)
        return ChatThreadProperties._from_generated(chat_thread)  # pylint:disable=protected-access

    @distributed_trace_async
    async def update_topic(
        self,
        topic: Optional[str] = None,
        **kwargs
    ) -> None:
        """Updates a thread's properties.

        :param topic: Thread topic. If topic is not specified, the update will succeed but
         chat thread properties will not be changed.
        :type topic: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START update_topic]
                :end-before: [END update_topic]
                :language: python
                :dedent: 12
                :caption: Updating chat thread.
        """
        update_topic_request = UpdateChatThreadRequest(topic=topic)
        return await self._client.chat_thread.update_chat_thread_properties(
            chat_thread_id=self._thread_id,
            update_chat_thread_request=update_topic_request,
            **kwargs)

    @distributed_trace_async
    async def send_read_receipt(
        self,
        message_id: str,
        **kwargs
    ) -> None:
        """Posts a read receipt event to a chat thread, on behalf of a user.

        :param message_id: Required. Id of the latest message read by current user.
        :type message_id: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START send_read_receipt]
                :end-before: [END send_read_receipt]
                :language: python
                :dedent: 12
                :caption: Sending read receipt of a chat message.
        """
        if not message_id:
            raise ValueError("message_id cannot be None.")
        post_read_receipt_request = SendReadReceiptRequest(chat_message_id=message_id)
        return await self._client.chat_thread.send_chat_read_receipt(
            self._thread_id,
            send_read_receipt_request=post_read_receipt_request,
            **kwargs)

    @distributed_trace
    def list_read_receipts(
        self,
        **kwargs: Any
    ): # type: (...) -> AsyncItemPaged[ChatMessageReadReceipt]
        """Gets read receipts for a thread.

        :keyword int results_per_page: The maximum number of chat message read receipts to be returned per page.
        :keyword int skip: Skips chat message read receipts up to a specified position in response.
        :return: An iterator like instance of ChatMessageReadReceipt
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.ChatMessageReadReceipt]
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START list_read_receipts]
                :end-before: [END list_read_receipts]
                :language: python
                :dedent: 12
                :caption: Listing read receipts.
        """
        results_per_page = kwargs.pop("results_per_page", None)
        skip = kwargs.pop("skip", None)
        return self._client.chat_thread.list_chat_read_receipts(
            self._thread_id,
            max_page_size=results_per_page,
            skip=skip,
            cls=lambda objs: [ChatMessageReadReceipt._from_generated(x) for x in objs],  # pylint:disable=protected-access
            **kwargs)

    @distributed_trace_async
    async def send_typing_notification(
        self,
        *,
        sender_display_name: Optional[str] = None,
        **kwargs
    ) -> None:
        """Posts a typing event to a thread, on behalf of a user.

        :keyword str sender_display_name: The display name of the typing notification sender. This property
         is used to populate sender name for push notifications.
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START send_typing_notification]
                :end-before: [END send_typing_notification]
                :language: python
                :dedent: 12
                :caption: Send typing notification.
        """
        send_typing_notification_request = SendTypingNotificationRequest(sender_display_name=sender_display_name)
        return await self._client.chat_thread.send_typing_notification(
            chat_thread_id=self._thread_id,
            send_typing_notification_request=send_typing_notification_request,
            **kwargs)

    @distributed_trace_async
    async def send_message(
        self,
        content: str,
        *,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> SendChatMessageResult:
        """Sends a message to a thread.

        :param content: Required. Chat message content.
        :type content: str
        :keyword chat_message_type:
            The chat message type. Possible values include: "text", "html". Default: ChatMessageType.TEXT
        :paramtype chat_message_type: Union[str, ~azure.communication.chat.ChatMessageType]
        :keyword str sender_display_name: The display name of the message sender. This property is used to
            populate sender name for push notifications.
        :keyword dict[str, str] metadata: Message metadata.
        :return: SendChatMessageResult
        :rtype: ~azure.communication.chat.SendChatMessageResult
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START send_message]
                :end-before: [END send_message]
                :language: python
                :dedent: 12
                :caption: Sending a message.
        """
        if not content:
            raise ValueError("content cannot be None.")
        # Accept either a ChatMessageType member or its name; only "text"
        # and "html" messages may be sent by clients.
        chat_message_type = kwargs.pop("chat_message_type", None)
        if chat_message_type is None:
            chat_message_type = ChatMessageType.TEXT
        elif not isinstance(chat_message_type, ChatMessageType):
            try:
                chat_message_type = ChatMessageType.__getattr__(chat_message_type)  # pylint:disable=protected-access
            except Exception:
                raise ValueError(
                    "chat_message_type: {message_type} is not acceptable".format(message_type=chat_message_type))
        if chat_message_type not in [ChatMessageType.TEXT, ChatMessageType.HTML]:
            raise ValueError(
                "chat_message_type: {message_type} can be only 'text' or 'html'".format(message_type=chat_message_type))
        sender_display_name = kwargs.pop("sender_display_name", None)
        create_message_request = SendChatMessageRequest(
            content=content,
            type=chat_message_type,
            sender_display_name=sender_display_name,
            metadata=metadata
        )
        send_chat_message_result = await self._client.chat_thread.send_chat_message(
            chat_thread_id=self._thread_id,
            send_chat_message_request=create_message_request,
            **kwargs)
        return send_chat_message_result

    @distributed_trace_async
    async def get_message(
        self,
        message_id: str,
        **kwargs
    ) -> ChatMessage:
        """Gets a message by id.

        :param message_id: Required. The message id.
        :type message_id: str
        :return: ChatMessage
        :rtype: ~azure.communication.chat.ChatMessage
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START get_message]
                :end-before: [END get_message]
                :language: python
                :dedent: 12
                :caption: Retrieving a message by message id.
        """
        if not message_id:
            raise ValueError("message_id cannot be None.")
        chat_message = await self._client.chat_thread.get_chat_message(self._thread_id, message_id, **kwargs)
        return ChatMessage._from_generated(chat_message)  # pylint:disable=protected-access

    @distributed_trace
    def list_messages(
        self,
        **kwargs: Any
    ): # type: (...) -> AsyncItemPaged[ChatMessage]
        """Gets a list of messages from a thread.

        :keyword int results_per_page: The maximum number of messages to be returned per page.
        :keyword ~datetime.datetime start_time: The start time where the range query.
        :return: An iterator like instance of ChatMessage
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.ChatMessage]
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START list_messages]
                :end-before: [END list_messages]
                :language: python
                :dedent: 12
                :caption: Listing messages of a chat thread.
        """
        results_per_page = kwargs.pop("results_per_page", None)
        start_time = kwargs.pop("start_time", None)
        return self._client.chat_thread.list_chat_messages(
            self._thread_id,
            max_page_size=results_per_page,
            start_time=start_time,
            cls=lambda objs: [ChatMessage._from_generated(x) for x in objs],  # pylint:disable=protected-access
            **kwargs)

    @distributed_trace_async
    async def update_message(
        self,
        message_id: str,
        content: Optional[str] = None,
        *,
        metadata: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> None:
        """Updates a message.

        :param message_id: Required. The message id.
        :type message_id: str
        :keyword content: Chat message content
        :keyword dict[str, str] metadata: Message metadata.
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START update_message]
                :end-before: [END update_message]
                :language: python
                :dedent: 12
                :caption: Updating an already sent message.
        """
        if not message_id:
            raise ValueError("message_id cannot be None.")
        update_message_request = UpdateChatMessageRequest(content=content, metadata=metadata)
        return await self._client.chat_thread.update_chat_message(
            chat_thread_id=self._thread_id,
            chat_message_id=message_id,
            update_chat_message_request=update_message_request,
            **kwargs)

    @distributed_trace_async
    async def delete_message(
        self,
        message_id: str,
        **kwargs
    ) -> None:
        """Deletes a message.

        :param message_id: Required. The message id.
        :type message_id: str
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START delete_message]
                :end-before: [END delete_message]
                :language: python
                :dedent: 12
                :caption: Deleting a message.
        """
        if not message_id:
            raise ValueError("message_id cannot be None.")
        return await self._client.chat_thread.delete_chat_message(
            chat_thread_id=self._thread_id,
            chat_message_id=message_id,
            **kwargs)

    @distributed_trace
    def list_participants(
        self,
        **kwargs: Any
    ): # type: (...) -> AsyncItemPaged[ChatParticipant]
        """Gets the participants of a thread.

        :keyword int results_per_page: The maximum number of participants to be returned per page.
        :keyword int skip: Skips participants up to a specified position in response.
        :return: An iterator like instance of ChatParticipant
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.communication.chat.ChatParticipant]
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START list_participants]
                :end-before: [END list_participants]
                :language: python
                :dedent: 12
                :caption: Listing participants of chat thread.
        """
        results_per_page = kwargs.pop("results_per_page", None)
        skip = kwargs.pop("skip", None)
        return self._client.chat_thread.list_chat_participants(
            self._thread_id,
            max_page_size=results_per_page,
            skip=skip,
            cls=lambda objs: [ChatParticipant._from_generated(x) for x in objs],  # pylint:disable=protected-access
            **kwargs)

    @distributed_trace_async
    async def add_participants(
        self,
        thread_participants: List[ChatParticipant],
        **kwargs
    ) -> List[Tuple[ChatParticipant, ChatError]]:
        """Adds thread participants to a thread. If participants already exist, no change occurs.

        If all participants are added successfully, then an empty list is returned;
        otherwise, a list of tuple(chat_thread_participant, chat_error) is returned,
        of failed participants and its respective error

        :param thread_participants: Thread participants to be added to the thread.
        :type thread_participants: List[~azure.communication.chat.ChatParticipant]
        :return: List[Tuple[ChatParticipant, ChatError]]
        :rtype: List[Tuple[~azure.communication.chat.ChatParticipant, ~azure.communication.chat.ChatError]]
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START add_participants]
                :end-before: [END add_participants]
                :language: python
                :dedent: 12
                :caption: Adding participants to chat thread.
        """
        response = []
        if thread_participants:
            participants = [m._to_generated() for m in thread_participants]  # pylint:disable=protected-access
            add_thread_participants_request = AddChatParticipantsRequest(participants=participants)
            add_chat_participants_result = await self._client.chat_thread.add_chat_participants(
                chat_thread_id=self._thread_id,
                add_chat_participants_request=add_thread_participants_request,
                **kwargs)
            # Pair each failed participant with its error so callers can retry.
            if hasattr(add_chat_participants_result, 'invalid_participants') and \
                    add_chat_participants_result.invalid_participants is not None:
                response = CommunicationErrorResponseConverter._convert(  # pylint:disable=protected-access
                    participants=thread_participants,
                    chat_errors=add_chat_participants_result.invalid_participants
                )
        return response

    @distributed_trace_async
    async def remove_participant(
        self,
        identifier: CommunicationIdentifier,
        **kwargs
    ) -> None:
        """Remove a participant from a thread.

        :param identifier: Required. Identifier of the thread participant to remove from the thread.
        :type identifier: ~azure.communication.chat.CommunicationIdentifier
        :return: None
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError, ValueError

        .. admonition:: Example:

            .. literalinclude:: ../samples/chat_thread_client_sample_async.py
                :start-after: [START remove_participant]
                :end-before: [END remove_participant]
                :language: python
                :dedent: 12
                :caption: Removing participant from chat thread.
        """
        if not identifier:
            raise ValueError("identifier cannot be None.")
        return await self._client.chat_thread.remove_chat_participant(
            chat_thread_id=self._thread_id,
            participant_communication_identifier=serialize_identifier(identifier),
            **kwargs)

    async def close(self) -> None:
        """Close the underlying pipeline client."""
        await self._client.close()

    async def __aenter__(self) -> "ChatThreadClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *args):
        # type: (*Any) -> None
        await self._client.__aexit__(*args)
| |
import glob
import json
import datetime
import geojson
import os
from django.conf import settings
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render
from django.utils import timezone
from django.views.generic import TemplateView, View, ListView
from django.core.exceptions import ObjectDoesNotExist
import main.import_gpx_to_stations
import main.models
from main import import_gpx_to_stations
from main.forms import InputShipDateTime, InputCoordinates, InputShipTimes
from main.models import Event, EventAction, Country, FilesStorage, FilesStorageGeneral, Port, Station,\
Message, SamplingMethod, ProposedStation, Leg, Depth, Sample, Person, ContactDetails
from ctd.models import CtdSampleVolume
from main import utils
from ship_data.models import GpggaGpsFix, GpvtgVelocity
import main.find_locations as find_locations
import subprocess
import main.utils_coordinates as utils_coordinates
from django.views.static import serve
from django.db.models import Sum
import geojson
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
def calculate_km_travelled():
    """Return the total distance travelled along the ship's GeoJSON track.

    Reads the geometry stored at settings.TRACK_MAP_FILEPATH and sums the
    distance between each pair of consecutive track points.
    """
    # "with" guarantees the file handle is closed (the original code
    # opened the file and never closed it).
    with open(settings.TRACK_MAP_FILEPATH) as fp:
        g = geojson.load(fp)
    previous = None
    distance = 0
    for item in g.get('coordinates'):
        if previous is not None:
            # GeoJSON stores (longitude, latitude); the distance helper
            # expects (latitude, longitude) tuples.
            distance += utils_coordinates.calculate_distance((previous[1], previous[0]), (item[1], item[0]))
        previous = item
    return distance
def people_in_leg(number):
    """Return how many people are registered on the leg with this number."""
    leg = Leg.objects.get(number=number)
    return Person.objects.filter(leg=leg).count()
class StatsView(TemplateView):
    """Expedition statistics page: sample/event counts, CTD water volume,
    distance travelled and people per leg."""
    template_name = "stats.html"

    def get_context_data(self, **kwargs):
        ctx = super(StatsView, self).get_context_data(**kwargs)
        ctx['number_of_samples'] = Sample.objects.count()
        ctx['number_of_events'] = Event.objects.filter(outcome="success").count()
        ctx['litres_of_ctd_water'] = int(
            CtdSampleVolume.objects.aggregate(Sum('volume'))['volume__sum'])
        ctx['km_travelled'] = int(calculate_km_travelled())
        for leg_number in (1, 2, 3):
            ctx['people_leg{}'.format(leg_number)] = people_in_leg(leg_number)
        # People present on all three legs: intersect the per-leg filters.
        everyone = Person.objects
        for leg_number in (1, 2, 3):
            everyone = everyone.filter(leg=Leg.objects.get(number=leg_number))
        ctx['people_all_legs'] = everyone.count()
        ctx['terrestial_sites'] = 13
        ctx['most_southerly_point'] = "-74.009 -127.475"
        return ctx
class MainMenuView(TemplateView):
    """Dashboard shown on the main menu: the latest message plus live ship
    data (position, speed, depth and the current date/time)."""
    template_name = "main_menu.html"

    def get_context_data(self, **kwargs):
        context = super(MainMenuView, self).get_context_data(**kwargs)
        # Fetch only the newest message. The original ran two queries: an
        # ascending one (materializing every row just to call len()) and a
        # second, descending one for the actual data.
        last_message = Message.objects.order_by('-date_time').first()
        if last_message is None:
            message = "No message has been introduced yet, come back later"
            date_time = "N/A"
            person = "Data management team"
            subject = "No message"
        else:
            message = last_message.message
            date_time = last_message.date_time
            person = last_message.person
            subject = last_message.subject
        now = utils.now_with_timezone()
        location = utils.latest_ship_position()
        context['message'] = message
        context['date_time'] = date_time
        context['person'] = person
        context['subject'] = subject
        context['date'] = now.strftime("%a %d %B %Y")
        context['time'] = now.strftime("%H:%M:%S")
        context['julian_day'] = now.strftime("%j")
        if location.latitude is not None:
            context['position_latitude'] = "{0:.4f}".format(location.latitude)
            context['position_longitude'] = "{0:.4f}".format(location.longitude)
            context['position_date_time'] = location.date_time
        else:
            context['position_latitude'] = "Unknown"
            context['position_longitude'] = "Unknown"
            context['position_date_time'] = "Unknown"
        speed = latest_ship_speed()
        if speed is not None:
            context['speed_kts'] = speed
        else:
            context['speed_kts'] = "Unknown"
        depths = Depth.objects.filter(depth__gt=0).order_by('-date_time')
        if depths.exists():
            depth = depths[0].depth
            time1 = utils.set_utc(datetime.datetime.now())
            time2 = utils.set_utc(depths[0].date_time)
            # BUG FIX: timedelta.seconds discards whole days, so a reading
            # older than 24h looked misleadingly fresh; total_seconds() is
            # the real age.
            depth_updated_seconds_ago = int((time1 - time2).total_seconds())
        else:
            depth = "Unknown"
            depth_updated_seconds_ago = "Unknown"
        context['depth'] = depth
        context['depth_updated_seconds_ago'] = depth_updated_seconds_ago
        return context
class AccessingDataView(TemplateView):
    """Static page explaining how to access the collected data."""
    template_name = "accessing_data.html"

    def get_context_data(self, **kwargs):
        # No extra context needed; defer entirely to the parent class.
        return super(AccessingDataView, self).get_context_data(**kwargs)
class MainMapView(TemplateView):
    """Static page embedding the pre-rendered main map."""
    template_name = "main_map.html"

    def get_context_data(self, **kwargs):
        # Nothing to add beyond the default context.
        return super(MainMapView, self).get_context_data(**kwargs)
class InteractiveMapView(TemplateView):
    """Page hosting the interactive (JavaScript) map."""
    template_name = "interactive_map.html"

    def get_context_data(self, **kwargs):
        # Nothing to add beyond the default context.
        return super(InteractiveMapView, self).get_context_data(**kwargs)
class TrackJson(View):
    """JSON endpoint returning the ship's stored track geometry."""
    def get(self, request_):
        # Close the file deterministically; the original opened it and
        # never closed the handle.
        with open(settings.TRACK_MAP_FILEPATH, "r") as track:
            return JsonResponse(json.load(track))
class PositionsJson(View):
    """GeoJSON FeatureCollection for the interactive map: event actions,
    ports, planned stations, visited stations and the ship's position."""
    def get(self, request_):
        # Possibles colors: black, blue, green, grey, orange, red, violet, yellow
        begin_type = main.models.EventAction.tbegin()
        instant_type = main.models.EventAction.tinstant()
        markers = []
        # Blue markers: event actions (begin or instantaneous).
        for event_action in EventAction.objects.filter(Q(type=begin_type) | Q(type=instant_type)):
            if event_action.longitude is None or event_action.latitude is None:
                continue
            link = '<a href="/admin/main/eventaction/{}/change/">{}</a>'.format(event_action.id, event_action.event.number)
            when = event_action.time.strftime("%Y-%m-%d %H:%M")
            markers.append(geojson.Feature(
                geometry=geojson.Point((event_action.longitude, event_action.latitude)),
                properties={'id': "Event: {}".format(link),
                            'text': "{}<br>{}<br>({:.2f}, {:.2f})".format(event_action.event.sampling_method.name, when, event_action.latitude, event_action.longitude),
                            'marker_color': 'blue'}))
        # Yellow markers: ports.
        for port in Port.objects.all():
            if port.longitude is None or port.latitude is None:
                continue
            markers.append(geojson.Feature(
                geometry=geojson.Point((port.longitude, port.latitude)),
                properties={'id': 'Port.{}'.format(port.id),
                            'text': port.name,
                            'marker_color': 'yellow'}))
        # Red markers: planned (proposed) stations.
        for planned in ProposedStation.objects.all():
            if planned.longitude is None or planned.latitude is None:
                continue
            markers.append(geojson.Feature(
                geometry=geojson.Point((planned.longitude, planned.latitude)),
                properties={'id': 'Planned station',
                            'text': "{}<br>{}<br>({:.2f}, {:.2f})".format(planned.name, planned.comment, planned.latitude, planned.longitude),
                            'marker_color': 'red'}))
        # Green markers: stations actually visited.
        for station in Station.objects.all():
            if station.longitude is None or station.latitude is None:
                continue
            station_link = '<a href="/admin/main/station/{}/change/">{}</a>'.format(station.name, station.name)
            if station.arrival_time is not None:
                arrival = station.arrival_time.strftime("%Y-%m-%d %H:%M")
            else:
                arrival = "Unknown arrival datetime"
            markers.append(geojson.Feature(
                geometry=geojson.Point((station.longitude, station.latitude)),
                properties={'id': '{}'.format("Station: {}".format(station_link)),
                            'text': "Type: {}<br>{}<br>({:.2f}, {:.2f})".format(station.type, arrival, station.latitude, station.longitude),
                            'marker_color': 'green'}))
        # Orange marker: the ship itself.
        location = utils.latest_ship_position()
        markers.append(geojson.Feature(
            geometry=geojson.Point((location.longitude, location.latitude)),
            properties={'id': 'ship',
                        'text': 'You are here',
                        'marker_color': 'orange'}))
        return JsonResponse(geojson.FeatureCollection(markers))
class LatestShipPosition(View):
    """Minimal JSON endpoint: latitude, longitude and timestamp of the most
    recent ship position."""
    def get(self, request_):
        location = utils.latest_ship_position()
        return JsonResponse({
            'latitude': location.latitude,
            'longitude': location.longitude,
            'date_time': location.date_time,
        })
# class PositionsJson(View):
# def get(self, request):
# # print("-----------", request.GET['newer_than'])
# features = []
# for position in Position.objects.order_by('number'):
# point = geojson.Point((position.longitude, position.latitude))
#
# text = position.text
# if text is None:
# text = ""
#
# features.append(
# geojson.Feature(geometry=point, properties={'id': position.id,
# 'number': position.number,
# 'text': text,
# 'type': position.position_type.name
# }))
#
# return JsonResponse(geojson.FeatureCollection(features))
#
# def post(self, request):
# decoded_data = request.body.decode('utf-8')
# json_data = json.loads(decoded_data)
#
# # new POI to be inserted
# poi = Position()
# poi.latitude = json_data['latitude']
# poi.longitude = json_data['longitude']
# poi.position_type = PositionType.objects.get(name='Event')
# poi.save()
#
# print("POST",poi)
#
# return JsonResponse({'id': poi.id, 'text': poi.text})
#
# def put(self, request):
# decoded_data = request.body.decode('utf-8')
# json_data = json.loads(decoded_data)
#
# poi = Position.objects.get(id=json_data['id'])
#
# if 'latitude' in json_data:
# poi.latitude = json_data['latitude']
#
# if 'longitude' in json_data:
# poi.longitude = json_data['longitude']
#
# if 'text' in json_data:
# poi.text = json_data['text']
#
# poi.save()
# print("PUT ",poi)
# response = JsonResponse({'id': poi.id, 'text': poi.text})
#
# return response
class CountryListView(ListView):
    """Lists all countries; also exposes the current time to the template."""
    model = Country

    def get_context_data(self, **kwargs):
        ctx = super(CountryListView, self).get_context_data(**kwargs)
        ctx['now'] = timezone.now()
        return ctx
class EventListView(ListView):
    """Lists all events."""
    model = Event

    def get_context_data(self, **kwargs):
        context = super(EventListView, self).get_context_data(**kwargs)
        context['event_list'] = Event.objects.all()
        # Removed a leftover debug print of Event.objects.all()[0]: it
        # spammed the log and raised IndexError whenever no events existed.
        return context
class FileStorageView(TemplateView):
    """Shows per-directory and overall disk usage of the file storage."""
    template_name = "file_storage.html"
    units = "GB"  # display unit used for every size on the page

    def format_space_number(self, number):
        """Convert *number* (kilobytes) into self.units as a 2-decimal string.

        :raises ValueError: if self.units is not a supported unit.
        """
        if self.units == "GB":
            conversion_from_kb = 1 / (1024 * 1024)  # How many context['units'] in one KB
            number *= conversion_from_kb
            return "{0:.2f}".format(number)
        # BUG FIX: a bare "raise" outside an except block raises a confusing
        # RuntimeError("No active exception to re-raise"); raise an explicit,
        # descriptive exception instead.
        raise ValueError("Unsupported units: {}".format(self.units))

    def get_context_data(self, **kwargs):
        context = super(FileStorageView, self).get_context_data(**kwargs)
        context['file_storages'] = FilesStorage.objects.all()
        context['units'] = "GB"
        # Per-directory usage, serialized for the chart on the page.
        detailed_storage = []
        for storage in context['file_storages']:
            detailed_storage.append({'relative_path': str(storage.relative_path), context['units']: self.format_space_number(storage.kilobytes)})
        context['detailed_storage_json'] = json.dumps(detailed_storage)
        # Overall free/used/size figures from the most recent snapshot.
        last_general_storage = FilesStorageGeneral.objects.latest('time')
        context['general_storage_free'] = self.format_space_number(last_general_storage.free)
        context['general_storage_used'] = self.format_space_number(last_general_storage.used)
        context['general_storage_size'] = self.format_space_number(last_general_storage.free + last_general_storage.used)
        context['general_storage_json'] = json.dumps({'used': self.format_space_number(last_general_storage.used), 'free': self.format_space_number(last_general_storage.free)})
        return context
class DocumentsView(TemplateView):
    """Lists the documents under settings.DOCUMENTS_DIRECTORY, grouped by
    the first-level sub-directory ("topic") they live in."""
    template_name = "documents.html"

    def get_context_data(self, **kwargs):
        context = super(DocumentsView, self).get_context_data(**kwargs)
        documents = []
        directories = []
        # Each first-level sub-directory is a topic.
        for file in glob.glob(os.path.join(settings.DOCUMENTS_DIRECTORY, "*")):
            if os.path.isdir(file):
                directories.append(os.path.basename(file))
        for directory in directories:
            # BUG FIX: DOCUMENTS_DIRECTORY was joined into the pattern twice;
            # that only worked by accident for absolute paths (os.path.join
            # restarts at a second absolute component) and broke for
            # relative ones.
            for file in glob.glob(os.path.join(settings.DOCUMENTS_DIRECTORY, directory, "*")):
                if os.path.isfile(file):
                    file_name = os.path.basename(file)
                    if file_name == "Thumbs.db":
                        # Skip Windows thumbnail caches.
                        continue
                    document = {}
                    document['title'] = file_name.split(".")[0]
                    document['link'] = os.path.join('/documents_storage/{}/{}'.format(directory, file_name))
                    document['topic'] = directory
                    documents.append(document)
        context['documents'] = documents
        context['topics'] = sorted(directories)
        return context
class ImportPortsFromGpx(View):
    """Upload form (GET) and handler (POST) importing a GPX file's
    waypoints as stations."""
    def get(self, request, *args, **kwargs):
        return render(request, "import_ports_from_gpx_form.html")

    def post(self, request, *args, **kwargs):
        uploaded = request.FILES['gpxfile']
        contents = uploaded.read().decode('utf-8')
        created, modified, skipped, reports = \
            import_gpx_to_stations.import_gpx_to_stations(contents)
        return render(request, "import_ports_from_gpx_exec.html", {
            'created': created,
            'modified': modified,
            'skipped': skipped,
            'reports': reports,
            'file_name': uploaded.name,
        })
class CoordinatesConversion(TemplateView):
    """Form (GET) and handler (POST) converting coordinates between formats."""
    def get(self, request, *args, **kwargs):
        return render(request, "coordinates_conversion.html",
                      {"form": InputCoordinates()})

    def post(self, request, *args, **kwargs):
        coordinates = request.POST['coordinates']
        # Re-display the submitted value and add the conversion results.
        info = {'form': InputCoordinates(initial={'coordinates': coordinates})}
        utils_coordinates.process(coordinates, info)
        return render(request, "coordinates_conversion_exec.html", info)
class PositionFromDateTime(TemplateView):
    """Looks up the ship's position for user-supplied date/times."""
    def get(self, request, *args, **kwargs):
        form = InputShipDateTime(initial={'ship_date_time': timezone.now})
        return render(request, "position_from_date_time.html", {'form': form})

    def post(self, request, *args, **kwargs):
        single = request.POST['ship_date_time']
        multiple = request.POST['ship_date_times']
        info = find_locations.find_locations(single, multiple)
        # Re-display the submitted values alongside the results.
        info['form'] = InputShipDateTime(initial={'ship_date_time': single,
                                                  'ship_date_times': multiple})
        return render(request, "position_from_date_time_exec.html", info)
class ShipTimeToUtc(TemplateView):
    """Converts ship-local date/times to UTC."""
    def get(self, request, *args, **kwargs):
        default = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        form = InputShipTimes(initial={'ship_date_times': default})
        return render(request, "ship_time_to_utc.html", {'form': form})

    def post(self, request, *args, **kwargs):
        ship_date_times = request.POST['ship_date_times']
        info = {
            'times': ship_date_times_to_utc(ship_date_times),
            'form': InputShipTimes(initial={'ship_date_times': ship_date_times}),
        }
        return render(request, "ship_time_to_utc_exec.html", info)
class MailState(TemplateView):
    """Shows the output of the system's "mailq" command."""
    def get(self, request, *args, **kwargs):
        # subprocess.run waits for the child and closes the pipe, avoiding
        # the zombie process / leaked file descriptor that Popen +
        # stdout.read() without wait() left behind.
        result = subprocess.run(["mailq"], stdout=subprocess.PIPE)
        return render(request, "mail_state.html", {'mails': result.stdout})
class LatestImage(View):
    """Serves the most recently captured image from disk."""
    def get(self, request):
        filepath = settings.IMAGE_RELOAD_FILEPATH
        directory, name = os.path.split(filepath)
        return serve(request, name, directory)
class ImageReloaderView(TemplateView):
    """Page that periodically reloads the latest image (see LatestImage)."""
    def get(self, request, *args, **kwargs):
        return render(request, "image_reloader.html")
def latest_ship_speed():
    """Return the newest ground speed (knots) from the main GPS, or None.

    None is returned both when the main GPS device is not configured and
    when no velocity records exist for it.
    """
    try:
        gps = SamplingMethod.objects.get(name=settings.MAIN_GPS)
    except ObjectDoesNotExist:
        return None
    latest = GpvtgVelocity.objects.filter(device=gps).order_by('-date_time').first()
    if latest is None:
        return None
    return latest.ground_speed_kts
def ship_date_times_to_utc(ship_date_times):
    """Convert newline-separated ship-local "YYYY-MM-DD HH:MM:SS" strings to UTC.

    Returns a list of dicts with 'ship_date_time', 'utc_date_time' and
    'utc_julian_day'; when a line cannot be converted the two UTC fields
    carry an explanatory message instead of values.
    """
    output = []
    for ship_date_time in ship_date_times.split("\n"):
        ship_date_time = ship_date_time.strip()
        try:
            date_time = datetime.datetime.strptime(ship_date_time, "%Y-%m-%d %H:%M:%S")
            # Reject dates we cannot convert meaningfully.
            message = ""
            if date_time.date() == settings.DATE_TWO_DAYS.date():
                message = "We had two days with the same date, unknown UTC"
            elif date_time > datetime.datetime.now() + datetime.timedelta(days=1):
                message = "Don't ask about the future..."
            elif utils.set_utc(date_time) < Leg.objects.all().order_by("start_time")[0].start_time:
                # This is an approximation - due to the timezones
                message = "Don't ask about before the beginning of the voyage"
            if message != "":
                output.append({'ship_date_time': ship_date_time,
                               'utc_date_time': message,
                               'utc_julian_day': message
                               })
                continue
            # Find the most recent time change on or before this date/time.
            ship_ahead_of_utc = main.models.TimeChange.objects.filter(Q(date_changed_utc__lte=date_time)).order_by('-date_changed_utc')
            if len(ship_ahead_of_utc) > 0:
                ship_ahead_of_utc_hours = int(ship_ahead_of_utc[0].difference_to_utc_after_change)
                ahead_of_utc = datetime.timedelta(hours=ship_ahead_of_utc_hours)
                date_time_utc = date_time - ahead_of_utc
                utc_julian_day = date_time_utc.strftime("%j")
            else:
                # BUG FIX: the original set date_time_utc = "Unknown" and then
                # unconditionally called .strftime() on it, raising
                # AttributeError whenever no time-change record matched.
                date_time_utc = "Unknown"
                utc_julian_day = "Unknown"
        except ValueError:
            date_time_utc = '<p style="color:red"><b>Date in invalid format</b></p>'
            utc_julian_day = '<p style="color:red"><b>Date in invalid format</b></p>'
        output.append({'ship_date_time': ship_date_time,
                       'utc_date_time': date_time_utc,
                       'utc_julian_day': utc_julian_day
                       })
    return output
class ContactDetailsListView(ListView):
    """Plain list view over ContactDetails records."""
    model = ContactDetails
    template_name = "list_of_contacts.html"

    def get_context_data(self, **kwargs):
        # No extra context is added; delegate straight to ListView.
        return super(ContactDetailsListView, self).get_context_data(**kwargs)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import fractions
import unittest
import numpy
from openfermion.ops import QubitOperator
from openfermion.transforms import get_fermion_operator
from openfermion.utils import count_qubits, is_hermitian
from openfermion.utils._testing_utils import (
EqualsTester,
haar_random_vector,
random_antisymmetric_matrix,
random_diagonal_coulomb_hamiltonian,
random_hermitian_matrix,
random_interaction_operator,
random_quadratic_hamiltonian,
random_qubit_operator,
random_unitary_matrix)
def test_random_qubit_operator():
    """Random qubit operator respects the requested size limits."""
    # All three limits share the same bound in this test.
    bound = 20
    op = random_qubit_operator(n_qubits=bound,
                               max_num_terms=bound,
                               max_many_body_order=bound)
    assert isinstance(op, QubitOperator)
    assert op.many_body_order() <= bound
    assert len(op.terms) <= bound
    assert count_qubits(op) <= bound
class EqualsTesterTest(unittest.TestCase):
    """Tests for EqualsTester, which checks __eq__/__ne__/__hash__ contracts.

    Each add_equality_group call asserts that its members are all equal to
    each other and unequal to every previously registered group.
    """

    def test_add_equality_group_correct(self):
        """Well-behaved value types pass without raising."""
        eq = EqualsTester(self)
        eq.add_equality_group(fractions.Fraction(1, 1))
        eq.add_equality_group(fractions.Fraction(1, 2),
                              fractions.Fraction(2, 4))
        eq.add_equality_group(
            fractions.Fraction(2, 3),
            fractions.Fraction(12, 18), fractions.Fraction(14, 21))
        eq.add_equality_group(2, 2.0, fractions.Fraction(2, 1))
        eq.add_equality_group([1, 2, 3], [1, 2, 3])
        eq.add_equality_group({'b': 3, 'a': 2}, {'a': 2, 'b': 3})
        eq.add_equality_group('unrelated')

    def test_assert_add_equality_pair(self):
        """make_equality_pair builds two instances and requires them equal."""
        eq = EqualsTester(self)
        with self.assertRaises(AssertionError):
            eq.make_equality_pair(object)
        eq.make_equality_pair(lambda: 1)
        eq.make_equality_pair(lambda: 2)
        eq.add_equality_group(3)
        with self.assertRaises(AssertionError):
            eq.add_equality_group(1)
        with self.assertRaises(AssertionError):
            eq.make_equality_pair(lambda: 1)
        with self.assertRaises(AssertionError):
            eq.make_equality_pair(lambda: 3)

    def test_add_equality_group_not_equivalent(self):
        """Unequal members in one group must be reported."""
        eq = EqualsTester(self)
        with self.assertRaises(AssertionError):
            eq.add_equality_group(1, 2)

    def test_add_equality_group_not_disjoint(self):
        """Registering the same value in two separate groups must fail."""
        eq = EqualsTester(self)
        eq.add_equality_group(1)
        with self.assertRaises(AssertionError):
            eq.add_equality_group(1)

    def test_add_equality_group_bad_hash(self):
        """Equal objects with differing hashes violate the hash contract."""
        class KeyHash(object):
            def __init__(self, k, h):
                self._k = k
                self._h = h

            def __eq__(self, other):
                return isinstance(other, KeyHash) and self._k == other._k

            def __ne__(self, other):
                return not self == other

            def __hash__(self):
                return self._h
        eq = EqualsTester(self)
        eq.add_equality_group(KeyHash('a', 5), KeyHash('a', 5))
        eq.add_equality_group(KeyHash('b', 5))
        with self.assertRaises(AssertionError):
            # Equal keys but hashes 2 != 3: must be flagged.
            eq.add_equality_group(KeyHash('c', 2), KeyHash('c', 3))

    def test_add_equality_group_exception_hash(self):
        """Exceptions raised by __hash__ propagate out of the tester."""
        class FailHash(object):
            def __hash__(self):
                raise ValueError('injected failure')
        eq = EqualsTester(self)
        with self.assertRaises(ValueError):
            eq.add_equality_group(FailHash())

    def test_can_fail_when_forgot_type_check(self):
        """__eq__ without an isinstance guard blows up on foreign types."""
        eq = EqualsTester(self)

        class NoTypeCheckEqualImplementation(object):
            def __init__(self):
                self.x = 1

            def __eq__(self, other):
                return self.x == other.x

            def __ne__(self, other):
                return not self == other

            def __hash__(self):
                return hash(self.x)
        with self.assertRaises(AttributeError):
            eq.add_equality_group(NoTypeCheckEqualImplementation())

    def test_fails_hash_is_default_and_inconsistent(self):
        """Default identity hash with a value-based __eq__ must be flagged."""
        eq = EqualsTester(self)

        class DefaultHashImplementation(object):
            __hash__ = object.__hash__

            def __init__(self):
                self.x = 1

            def __eq__(self, other):
                if not isinstance(other, type(self)):
                    return NotImplemented
                return self.x == other.x

            def __ne__(self, other):
                return not self == other
        with self.assertRaises(AssertionError):
            eq.make_equality_pair(DefaultHashImplementation)

    def test_fails_when_ne_is_inconsistent(self):
        """__ne__ that always returns NotImplemented must be flagged."""
        eq = EqualsTester(self)

        class InconsistentNeImplementation(object):
            def __init__(self):
                self.x = 1

            def __eq__(self, other):
                if not isinstance(other, type(self)):
                    return NotImplemented
                return self.x == other.x

            def __ne__(self, other):
                return NotImplemented

            def __hash__(self):
                return hash(self.x)
        with self.assertRaises(AssertionError):
            eq.make_equality_pair(InconsistentNeImplementation)

    def test_fails_when_not_reflexive(self):
        """x == x returning False must be flagged."""
        eq = EqualsTester(self)

        class NotReflexiveImplementation(object):
            def __init__(self):
                self.x = 1

            def __eq__(self, other):
                if other is not self:
                    return NotImplemented
                return False

            def __ne__(self, other):
                return not self == other
        with self.assertRaises(AssertionError):
            eq.add_equality_group(NotReflexiveImplementation())

    def test_fails_when_not_commutative(self):
        """a == b disagreeing with b == a must be flagged in both orders."""
        eq = EqualsTester(self)

        class NotCommutativeImplementation(object):
            def __init__(self, x):
                self.x = x

            def __eq__(self, other):
                if not isinstance(other, type(self)):
                    return NotImplemented
                # <= is deliberately asymmetric, breaking commutativity.
                return self.x <= other.x

            def __ne__(self, other):
                return not self == other
        with self.assertRaises(AssertionError):
            eq.add_equality_group(NotCommutativeImplementation(0),
                                  NotCommutativeImplementation(1))
        with self.assertRaises(AssertionError):
            eq.add_equality_group(NotCommutativeImplementation(1),
                                  NotCommutativeImplementation(0))
class RandomInteractionOperatorTest(unittest.TestCase):
    """Checks Hermiticity and tensor symmetries of random interaction ops."""

    def test_hermiticity(self):
        """The fermion operator form is Hermitian for all real/spin combos."""
        n_orbitals = 5

        # Real, no spin
        iop = random_interaction_operator(n_orbitals, real=True)
        ferm_op = get_fermion_operator(iop)
        self.assertTrue(is_hermitian(ferm_op))

        # Real, spin
        iop = random_interaction_operator(
            n_orbitals, expand_spin=True, real=True)
        ferm_op = get_fermion_operator(iop)
        self.assertTrue(is_hermitian(ferm_op))

        # Complex, no spin
        iop = random_interaction_operator(n_orbitals, real=False)
        ferm_op = get_fermion_operator(iop)
        self.assertTrue(is_hermitian(ferm_op))

        # Complex, spin
        iop = random_interaction_operator(
            n_orbitals, expand_spin=True, real=False)
        ferm_op = get_fermion_operator(iop)
        self.assertTrue(is_hermitian(ferm_op))

    def test_symmetry(self):
        """Real two-body tensors satisfy the eight-fold index symmetry.

        Every permutation asserted below is checked against the (p, q, r, s)
        reference element for all index combinations.
        """
        n_orbitals = 5

        # Real.
        iop = random_interaction_operator(n_orbitals, expand_spin=False,
                                          real=True)
        ferm_op = get_fermion_operator(iop)
        self.assertTrue(is_hermitian(ferm_op))
        two_body_coefficients = iop.two_body_tensor
        for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[r, q, p, s])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[p, s, r, q])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[s, r, q, p])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[q, p, s, r])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[r, s, p, q])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[s, p, q, r])

            self.assertAlmostEqual(two_body_coefficients[p, q, r, s],
                                   two_body_coefficients[q, r, s, p])
class HaarRandomVectorTest(unittest.TestCase):
    """A Haar-random vector should come back normalized."""

    def test_vector_norm(self):
        dim = 15
        seed = 8317
        vector = haar_random_vector(dim, seed)
        # vdot conjugates its first argument, so this is sum(|v_i|^2).
        norm = numpy.vdot(vector, vector)
        self.assertAlmostEqual(1. + 0.j, norm)
class RandomSeedingTest(unittest.TestCase):
    """Seeded random generators must be fully reproducible."""

    def test_random_operators_are_reproducible(self):
        def pair(factory, seed):
            # Build the same object twice with an identical seed.
            return factory(5, seed=seed), factory(5, seed=seed)

        op1, op2 = pair(random_diagonal_coulomb_hamiltonian, 5947)
        numpy.testing.assert_allclose(op1.one_body, op2.one_body)
        numpy.testing.assert_allclose(op1.two_body, op2.two_body)

        op1, op2 = pair(random_interaction_operator, 8911)
        numpy.testing.assert_allclose(op1.one_body_tensor, op2.one_body_tensor)
        numpy.testing.assert_allclose(op1.two_body_tensor, op2.two_body_tensor)

        op1, op2 = pair(random_quadratic_hamiltonian, 17711)
        numpy.testing.assert_allclose(op1.combined_hermitian_part,
                                      op2.combined_hermitian_part)
        numpy.testing.assert_allclose(op1.antisymmetric_part,
                                      op2.antisymmetric_part)

        op1, op2 = pair(random_antisymmetric_matrix, 24074)
        numpy.testing.assert_allclose(op1, op2)

        op1, op2 = pair(random_hermitian_matrix, 56753)
        numpy.testing.assert_allclose(op1, op2)

        op1, op2 = pair(random_unitary_matrix, 56486)
        numpy.testing.assert_allclose(op1, op2)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 14:05:05 2016
@author: pavel
"""
#import matplotlib
#matplotlib.use('GTKAgg')
from gi.repository import Gtk#, Gdk#, Gio, GLib
from gtk_wrapper import GTK_Wrapper
import data_filter
from logger import Logger
from data_processor import DataProcessor, FileProcessor
from data_plotter import Plotter
from graph import GTK_Graph
from data_provider import RandomWalkDataProvider, SerialPortDataProvider
import global_settings as settings
logger = Logger.get_logger()  # module-wide logger shared by the GUI callbacks below
class GUI:
    """Main application window.

    Wires up the pipeline data provider -> processor -> plotter -> graph
    from the Glade layout, and owns the start/stop/export lifecycle.
    """
    GLADE_FILE = "GUI.glade"
    EXPORT_RESPONSE_OK = 1  # response id of the export dialog's OK button

    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file(GUI.GLADE_FILE)
        # order is important! (graph must exist before the processor,
        # the processor before the provider)
        self._gui_elements_init_()
        self._graph_init_()
        self._data_procesor_init_()
        self._provider_init_()
        self.is_active = False
        self.builder.connect_signals(self)
        self.builder.get_object("main_window").show_all()
        # Start in the stopped state so widgets get consistent sensitivity.
        self.stop()
    #
    def _gui_elements_init_(self):
        """Look up widgets from the builder and lay out the paned area."""
        #attach elements to paned
        graph_window = self.builder.get_object("graph_window")
        control_panel = self.builder.get_object("control_panel")
        working_area_paned = self.builder.get_object("working_area_paned")
        #graph to the left: resizable
        #control panel to the right
        working_area_paned.pack1(graph_window, resize=True, shrink=True)
        working_area_paned.pack2(control_panel, resize=False, shrink=True)
        #start button
        self.gui_start_btn = self.builder.get_object("start_btn")
        self.gui_start_label = self.builder.get_object("start_lbl")
        self.gui_stop_label = self.builder.get_object("stop_lbl")
        #provider settings
        self.gui_provider_settings_area = self.builder.get_object("provider_settings_alignment")
        #
        self.gui_provider_settings_box = self.builder.get_object("provider_settings_box")
        #filter settings
        self.gui_filter_settings_box = self.builder.get_object("filter_settings_box")
        #export dialog (created lazily on first use)
        self.gui_export_btn = self.builder.get_object("export_btn")
        self.gui_export_dialog = None
        #error message dialog (created lazily on first use)
        self.gui_error_message_dialog = None

    def _data_procesor_init_(self):
        """Create the processor with its filter chain and expose filter settings."""
        filters = [data_filter.Invertor(),
                   data_filter.SelfAdjustableNotchFilter()]
        self.data_processor = DataProcessor(self.plotter.plot_valid,
                                            self.plotter.plot_error, filters)
        for filter_ in filters:
            name = filter_.get_name()
            name_repr = GTK_Wrapper.get_wrapper(name).get_gui_object()
            self.gui_filter_settings_box.pack_start(name_repr, True, True, 0)
            filter_settings_mgr = filter_.settings_manager()
            self._add_all_params(filter_settings_mgr, self.gui_filter_settings_box)

    def _provider_init_(self):
        """Create the data source and expose its settings in the side panel."""
        self.data_provider = RandomWalkDataProvider(onData = self.data_processor.new_data,
                                                    onError = self.error_stop)
        #self.data_provider = SerialPortDataProvider(self.data_processor.new_data, self.error_stop)
        data_provider_settings_mgr = self.data_provider.settings_manager()
        self._add_all_params(data_provider_settings_mgr, self.gui_provider_settings_box)

    def _graph_init_(self):
        """Create the graph widget and the plotter that feeds it."""
        #self.graph = MatplotlibGraph(onClose=self.stop)
        self.graph = GTK_Graph(self.builder.get_object("graph_area"),
                               settings.GRAPH_COLORS,
                               settings.DATA_MIN_VALUE, settings.DATA_MAX_VALUE)
        self.plotter = Plotter(self.graph)

    def _add_all_params(self, obj_settings_mgr, gui_setting_box):
        """Append a GTK widget for every parameter of a settings manager."""
        for param in obj_settings_mgr.all_params():
            wrapper = GTK_Wrapper.get_wrapper(param)
            gui_obj = wrapper.get_gui_object()
            gui_setting_box.pack_start(gui_obj, True, True, 0)

    def close(self):
        """Stop acquisition and dispose of the graph."""
        self.stop()
        self.graph.close()

    def start(self):
        """Begin acquisition; restarts cleanly if already running."""
        if self.is_active:
            self.stop()
        logger.to_log("start")
        self.is_active = True
        # handling with gui first!!!
        #disable port settings
        self.gui_provider_settings_area.set_sensitive(False)
        #rename start button
        self.gui_start_btn.set_label(self.gui_stop_label.get_text())
        #disable export button
        self.gui_export_btn.set_sensitive(False)
        self.data_processor.enable()
        # start listening
        self.data_provider.activate()

    def error_stop(self, text):
        """Provider error callback: report the error, then stop."""
        self.error_message(text)
        self.stop()

    def error_message(self, text):
        """Log and show a modal error dialog with the given text."""
        logger.to_log(text)
        if self.gui_error_message_dialog is None:
            self.gui_error_message_dialog = self.builder.get_object("error_message_dialog")
        self.gui_error_message_dialog.set_property("secondary-text", text)
        self.gui_error_message_dialog.run()
        self.gui_error_message_dialog.hide()

    def stop(self):
        """Halt acquisition and restore the idle widget state."""
        logger.to_log("stop")
        self.data_provider.deactivate()
        self.data_processor.disable()
        self.is_active = False
        #enable settings
        self.gui_provider_settings_area.set_sensitive(True)
        #rename start button
        self.gui_start_btn.set_label(self.gui_start_label.get_text())
        #enable export button
        self.gui_export_btn.set_sensitive(True)

    def on_main_window_delete_event(self, *args):
        self.close()
        Gtk.main_quit()

    def on_start_clicked(self, *args):
        """Toggle between running and stopped."""
        if self.is_active:
            self.stop()
        else:
            self.start()

    def on_export_clicked(self, *args):
        """Ask for a target file and export the processed data to it."""
        if self.gui_export_dialog is None:
            self.gui_export_dialog = self.builder.get_object("export_filechooser_dialog")
        fproc = FileProcessor()
        self.gui_export_dialog.set_current_name(fproc.get_name())
        response = self.gui_export_dialog.run()
        # Bug fix: this compared against 'gui.EXPORT_RESPONSE_OK', relying on
        # the module-level 'gui' variable that only exists when run as a
        # script under that exact name; use the class constant instead.
        if response == GUI.EXPORT_RESPONSE_OK:
            fproc.set_name(self.gui_export_dialog.get_filename())
            fproc.do_export(self.data_processor, onError=self.error_message)
        self.gui_export_dialog.hide()
if __name__ == "__main__":
    # Build the window and hand control to the GTK main loop.
    gui = GUI()
    Gtk.main()
| |
from __future__ import print_function, with_statement
import os
import sys
import stat
import json
import etcd
from functools import wraps
from fabric.api import *
from fabric.colors import red, yellow, white, green
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
__doc__ = """\
Help Doc
"""

# A few setup steps and environment checks
curdir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(curdir, '.project_config.json')

try:
    project_config = json.loads(open(config_file, 'r').read())
except Exception:
    # Bug fix: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt. Missing/unparsable config falls back to values
    # supplied through the fabric environment.
    project_config = {
        "etcd_host": env.etcd_host,
        "docker_registry": env.registry_host
    }

hidden_output = []

# Prefer a virtualenvwrapper environment named 'omop_harvest' if one exists;
# otherwise fall back to a plain virtualenv in the parent directory.
try:
    venv_wrap_path = os.environ['WORKON_HOME']
except KeyError:
    venv_wrap_path = None

if venv_wrap_path and os.path.exists(os.path.join(venv_wrap_path, 'omop_harvest')):
    full_env_path = os.path.join(venv_wrap_path, 'omop_harvest')
else:
    full_env_path = os.path.abspath('..')
    venv_wrap_path = None
def get_hosts_settings():
    """Return the per-host settings dict from the project config file.

    Aborts the fab run when the configuration has no 'hosts' section.
    """
    # TODO: Will probably have to retain this to support legacy deploy.
    try:
        with open(config_file) as fp:
            hosts = json.load(fp)['hosts']
    except KeyError:
        abort(red('Error: No host settings are defined in the project configuration'))
    return hosts
# ** Decorators
def virtualenv(path, venv_wrap):
    """Wraps a function and prefixes the call with the virtualenv active.

    If venv_wrap is truthy, the virtualenvwrapper 'omop_harvest' env is
    activated; else if a plain venv path is given, its activate script is
    sourced; otherwise the function runs unwrapped.
    """
    if path is None:
        activate = None
    else:
        activate = os.path.join(path, 'bin/activate')

    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            if venv_wrap:
                with prefix('source /usr/local/bin/virtualenvwrapper.sh'):
                    with prefix('workon {0}'.format('omop_harvest')):
                        return func(*args, **kwargs)
            # Bug fix: this branch previously tested the undefined name
            # 'venv' (NameError at call time); 'venv_wrap' was intended.
            elif path is not None and venv_wrap is None:
                with prefix('source {0}'.format(activate)):
                    return func(*args, **kwargs)
            else:
                return func(*args, **kwargs)
        return inner
    return decorator
def host_context(func):
    "Sets the context of the setting to the current host"
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Apply the current host's settings for the duration of the call.
        host_settings = get_hosts_settings()[env.host]
        with settings(**host_settings):
            return func(*args, **kwargs)
    return wrapper
# ---------------------------------------------------------------
# Configuration Commands
# ---------------------------------------------------------------
def set_configuration(noinput=False):
    '''
    Takes the settings in .project_config.json file and writes them to the
    appropriate etcd endpoint for this application.
    fab set_configuration:noinput=True will not prompt for confirmation
    '''
    client = etcd.Client(host=project_config['etcd_host'])
    with open('.project_config.json', 'r') as fp:
        config = json.load(fp)
    if noinput or confirm("Are you sure you want to upload your local settings?"):
        client.write('/applications/omop_harvest/configuration', json.dumps(config))
def get_configuration(noinput=False):
    '''
    Retrieves the applications settings from etcd and generates a local settings file.
    fab get_configuration:noinput=True will not prompt for confirmation
    '''
    client = etcd.Client(host=project_config['etcd_host'])
    try:
        etcd_config = client.read('/applications/omop_harvest/configuration')
    except KeyError:
        abort(red('Error: No host settings found on etcd'))
    configuration = json.loads(etcd_config.value)
    if configuration == {}:
        print(red('Empty configuration found. Aborting'))
        sys.exit(1)
    # Establish the configuration locally
    if noinput or confirm('Are you sure you want to overwrite your local settings?'):
        with open('.project_config.json', 'w') as fp:
            fp.write(json.dumps(configuration, indent=4, sort_keys=True))
# ---------------------------------------------------------------
# Docker Commands
# ---------------------------------------------------------------
# TODO:
# - Continuous Integration. Automatic provisioning of services
def build_container(noinput=False):
    """Build the application image tagged with the current branch and commit."""
    # Check git status to make sure our build hash matches our git commit
    index_status = local('git status --porcelain', capture=True)
    if index_status != '':
        abort('Please commit or stash any changes to git before building your container')
    try:
        get_configuration(noinput)
    except (Exception, SystemExit):
        # Bug fix: was a bare 'except:'. SystemExit must stay caught because
        # get_configuration aborts via sys.exit/abort on failure, but
        # KeyboardInterrupt should no longer be swallowed.
        if not confirm('Unable to retrieve configuration. Would you like to attempt to build this container with locally available settings?'):
            sys.exit(1)
    git_hash = local('git rev-parse --short HEAD', capture=True)
    git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    local('docker build -t omop_harvest-{0}:{1} .'.format(git_branch, git_hash))
def test_container():
    """Run the freshly built branch/commit image in test mode."""
    commit_hash = local('git rev-parse --short HEAD', capture=True)
    branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    #local('docker run -i -t -e APP_ENV=test omop_harvest-{0}:{1} test'.format(git_branch, git_hash))
    #Temporary: Anticipating new version of ATI Template
    local('docker run --link memcache:mc -d -p :8000 -e CID_ENV={0} -e APP_ENV={1} omop_harvest-{2}:{3} test'.format(
        env.cid_env,
        env.host,
        branch,
        commit_hash)
    )
#
def build_and_test():
    """Build the image without prompting, then run it in test mode."""
    build_container(noinput=True)
    test_container()
# Remote Deployment Commands
def pull_repo():
    """Pull the current branch's application image from the registry."""
    # Bug fix: 'git_branch' was referenced without ever being defined,
    # raising NameError on every invocation.
    git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    local('docker pull {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
def push_to_repo():
    """Tag the current branch/commit image and push it to the registry."""
    git_hash = local('git rev-parse --short HEAD', capture=True)
    git_branch = local('git rev-parse --abbrev-ref HEAD', capture=True)
    try:
        with hide('output'):
            local("docker inspect --format='{{{{.id}}}}' omop_harvest-{0}:{1}".format(git_branch, git_hash))
    except (Exception, SystemExit):
        # Bug fix: was a bare 'except:'. A failed 'local' aborts with
        # SystemExit, so that must stay caught to detect a missing image;
        # KeyboardInterrupt is no longer swallowed. Also fixed the
        # duplicated word in the prompt ("most most").
        if confirm('Could not find most recent container. Would you like to build it?'):
            build_container()
    local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:{1}'.format(git_branch, git_hash, project_config['docker_registry']))
    local('docker tag omop_harvest-{0}:{1} {2}/omop_harvest-{0}:latest'.format(git_branch, git_hash, project_config['docker_registry']))
    local('docker push {0}/omop_harvest-{1}'.format(project_config['docker_registry'], git_branch))
    local('docker rmi -f {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], git_branch, git_hash))
@host_context
def deploy(commit='latest'):
    """Pull and run the branch image on the current host, then announce it.

    NOTE(review): assumes project_config contains 'hipchat' and 'etcd_host'
    entries and that a 'memcache' container already runs on the host --
    confirm before relying on this in a new environment.
    """
    run('docker pull {0}/omop_harvest-{1}:{2}'.format(project_config['docker_registry'], env.git_branch, commit))
    #container = run('docker run -d -p :8000 -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
    #    env.host,
    #    project_config['docker_registry'],
    #    env.git_branch,
    #    commit)
    #)
    #Temporary: Anticipating new version of ATI Template
    container = run('docker run --hostname=omop-harvest-{2}-{3} --link memcache:mc -d -p :8000 -e CID_ENV={4} -e APP_ENV={0} {1}/omop_harvest-{2}:{3} start'.format(
        env.host,
        project_config['docker_registry'],
        env.git_branch,
        commit,
        env.cid_env)
    )
    #
    # Ask Docker which host port was bound to the container's port 8000.
    port = run("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(container))
    commit_msg = local('git --no-pager log --oneline -1', capture = True)
    auth_token = project_config['hipchat']['auth_token']
    deploy_msg = 'omop_harvest-{0}:{1} now deployed at http://{2}:{3} <a href="http://{2}:{3}">Open</a> <a href="http://{4}:4001/v2/keys/applications/omop_harvest/status">Status</a> -- {5}'.format(env.git_branch, commit, env.host_string, port, project_config['etcd_host'], commit_msg)
    # Notifications: announce the deploy in HipChat ...
    local('curl -d "room_id=529405&from=deployservice&color=yellow" --data-urlencode message="{deploy_msg}" https://cbmi.hipchat.com/v1/rooms/message?auth_token={auth_token}'.format(
        deploy_msg=deploy_msg,
        auth_token=auth_token
    ))
    # ... and record the deployed commit/URL in etcd for status pages.
    client = etcd.Client(host=project_config['etcd_host'])
    client.write('/applications/omop_harvest/status/{0}/latest_commit'.format(env.git_branch), commit)
    client.write('/applications/omop_harvest/status/{0}/latest_deploy'.format(env.git_branch), 'http://{0}:{1}'.format(env.host_string, port))
    print(green('Now Running at http://{0}:{1}'.format(env.host_string, port)))
@host_context
def setup_env():
    "Sets up the initial environment: clones or updates the repo on the host."
    parent, project = os.path.split(env.path)
    if not exists(parent):
        # Bug fix: format string was 'mkdir -p {}}' -- the stray brace
        # created a directory whose name ended with a literal '}'.
        run('mkdir -p {}'.format(parent))
    with cd(parent):
        if not exists(project):
            # Fresh checkout of the requested branch.
            run('git clone {repo_url} {project}'.format(project=project, **env))
            with cd(project):
                run('git checkout {git_branch}'.format(**env))
                run('git pull origin {git_branch}'.format(**env))
        else:
            # Repo already present: just switch branch and update.
            with cd(project):
                run('git checkout {git_branch}'.format(**env))
                run('git pull origin {git_branch}'.format(**env))
# ---------------------------------------------------------------
# Template Bootstrap Hooks
# ---------------------------------------------------------------
@virtualenv(full_env_path, venv_wrap_path)
def harvest_bootstrap():
    """Bootstrap a fresh checkout: config, test script perms, DB, static, migrations."""
    # Handle Settings Configuration
    # TODO:
    # Perhaps at this point we go out to etcd and
    # find the relavent DB connection settings if
    # they exist then we use those here... otherwise
    # we fall back to the default sqlite stuff
    print('Setup default configuration file')
    with hide(*hidden_output):
        local('mv .project_config.json.sample .project_config.json')
    print('Make test script executable')
    # Add user/group/other execute bits to the existing mode.
    mode = stat.S_IMODE(os.stat('run-tests.sh').st_mode)
    executable = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    os.chmod('run-tests.sh', mode | executable)
    # Sync DB
    print(green('- Creating SQLiteDB.'))
    with hide(*hidden_output):
        local('./bin/manage.py syncdb --settings=omop_harvest.conf.local')
    # Collect Static
    print(green('- Collecting Static Files'))
    with hide(*hidden_output):
        local('./bin/manage.py collectstatic --noinput --settings=omop_harvest.conf.local')
    # Migrations
    print(green('- Run Migrations'))
    with hide(*hidden_output):
        local('./bin/manage.py migrate --noinput --settings=omop_harvest.conf.local')
# ---------------------------------------------------------------
# Testing and Continuous Integration Commands
# ---------------------------------------------------------------
def check_for_config(noinput):
    """Ensure project settings exist locally, offering to fetch them from etcd."""
    # Idiom fix: membership test directly on the dict instead of .keys().
    if 'project_settings' not in project_config:
        if noinput or confirm(red("No configuration found. Would you like to download this applications configuration?")):
            get_configuration(noinput=True)
def check_for_pg(database):
    '''
    Check the current Docker host for an existing instance of the specified
    database. If found returns the container ID.
    '''
    with hide('output', 'running', 'warnings'), settings(warn_only=True):
        res = local("docker ps -a | awk '/{0}/ {{ print $1 }}'".format(database), capture=True)
    # awk prints one container id per line; empty output means no match.
    return res.split("\n") if res else None
def check_for_mc():
    '''
    Check the current Docker host for an existing instance of memcache. If
    found returns the container ID.
    '''
    with hide('output', 'running', 'warnings'), settings(warn_only=True):
        res = local("docker ps | awk '/memcache/ { print $1 }'", capture=True)
    if not res:
        return None
    print(green('Found Memcache running at {0}'.format(res)))
    return res.split("\n")
def test_setup(noinput=False):
    '''
    Examine the project for a proper configuration file.
    Examine the existing environment for Harvest app's service dependencies
    (Memcache, and Postgres). If they do not exists create them as containers,
    build the application container and apply ETL command from the application
    to the Postgres DB.
    After the data load is complete, attach the application to the Postgres
    container and to Memcache. Apply normal bootstrapping procedures (syncdb,
    migrations, collectstatic) and load fixture container test user "cbmi" with
    default password "chopchop"
    '''
    # Bug fix: these modules were used below but never imported anywhere
    # in this file, causing NameError at runtime.
    import time
    import socket
    import psycopg2  # NOTE(review): third-party dependency of this task
    DB_CONTAINER_NAME = 'omop_harvest_test_db'
    check_for_config(noinput)
    dbs = check_for_pg(DB_CONTAINER_NAME)
    if dbs:
        if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
            with hide('output', 'running'):
                print(red('Stopping and removing associated Harvest application containers.'))
                local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
                local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
    mc = check_for_mc()
    if not mc:
        with hide('output', 'running'):
            print(green('Starting Memcached Container...'))
            local("docker run -d --name memcache ehazlett/memcached")
    with hide('output', 'running', 'warnings'):
        # Spin up a fresh Postgres instance:
        print(green('Starting Postgres Container...'))
        # Bug fix: format() keyword was 'hosts', so the {registry_host}
        # placeholder raised KeyError before the command ever ran.
        pg_container = local('docker run -p :5432 -d --name omop_harvest_test_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
        port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
        time.sleep(2)
        # Create DB and User in fresh DB
        print(green('Prepare Postgres DB...'))
        local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
        conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
        conn.cursor().execute("create user omop_harvest with password 'docker'; ")
        conn.commit()
        conn.close()
        # Build the Application Container to facilitate ETL:
        print(green('Building Application Container...'))
        local('docker build -t omop_harvest_test .')
        # Run ETL on attached Postgres DB
        print(green('Start ETL on attached DB'))
        local('docker run --link omop_harvest_test_db:db -e APP_ENV=test --name omop_harvest_etl omop_harvest_test etl')
        # Wait for ETL process to finish
        local('docker wait omop_harvest_etl')
        print(green('ETL Complete.'))
        local('docker rm omop_harvest_etl')
        # Start the application container
        print(green('Start Application Container...'))
        omop_harvest = local('docker run -d --link omop_harvest_test_db:db --link memcache:mc -p :8000 -e APP_ENV=test --name omop_harvest_test_app omop_harvest_test debug', capture=True)
        omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
        # Sleep to give syncdb and migrations time to run.
        time.sleep(10)
        print(red('\n***\nomop_harvest Test Instance now running on: http://{0}:{1}'.format(socket.gethostname(), omop_harvest_port)))
def ci_setup(noinput=False):
    "Copy down the production omop_harvest database to a fresh postgres container."
    # TODO
    # - Make sure configuration file exists.
    # Bug fix: these modules were used below but never imported anywhere
    # in this file, causing NameError at runtime.
    import time
    import psycopg2  # NOTE(review): third-party dependency of this task
    DB_CONTAINER_NAME = 'omop_harvest_ci_pg'
    check_for_config(noinput)
    dbs = check_for_pg(DB_CONTAINER_NAME)
    if dbs:
        if noinput or confirm(yellow('It looks like you might already have an instance running on this machine. Do you want to stop and remove the existing containers?')):
            with hide('output', 'running'):
                print(red('Stopping and removing associated Harvest application containers.'))
                local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker stop")
                local("docker ps -a | awk '/(omop_harvest_test:|omop_harvest_test_db)/ { print $1 }' | xargs docker rm")
    # Spin up a fresh postgres instance:
    with hide('output', 'running', 'warnings'):
        print(green('Starting Postgres Container...'))
        # Bug fix: format() keyword was 'hosts', so the {registry_host}
        # placeholder raised KeyError before the command ever ran.
        pg_container = local('docker run -p :5432 -d --name omop_harvest_ci_db {registry_host}:5000/postgresql'.format(registry_host=project_config['registry_host']), capture=True)
        port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(pg_container), capture=True)
        time.sleep(2)
        print(green('Dump Production DB...'))
        # NOTE(review): 'parse_db' is not defined in this file -- confirm it
        # is provided by an import elsewhere before relying on this task.
        db = parse_db(project_config['project_settings']['production']['databases']['default'])
        local('export PGPASSWORD={password} && pg_dump -h {host} -U {user} -Fc {database} > tmp.dump'.format(**db))
        time.sleep(2)
        print(green('Prepare Postgres DB...'))
        local('export PGPASSWORD=docker && createdb -h localhost -p {port} -U docker omop_harvest'.format(port=port))
        conn = psycopg2.connect(host='localhost', port=port, user='docker', password='docker', database='postgres')
        conn.cursor().execute("create user omop_harvest with password 'docker'; ")
        conn.commit()
        conn.close()
        print(green('Restoring Backup to Container...'))
        local('export PGPASSWORD=docker && pg_restore -h localhost -p {port} -U docker -d omop_harvest tmp.dump'.format(port=port))
        local('rm tmp.dump')
        print(green('Building Application Container...'))
        local('docker build -t omop_harvest_test .')
        print(green('Start Application Container...'))
        # NOTE(review): the image name here is 'omop_harvest' although the
        # build above tags 'omop_harvest_test' -- verify which is intended.
        omop_harvest = local('docker run -d --link omop_harvest_ci_db:db -p :8000 -e APP_ENV=ci --name omop_harvest_ci omop_harvest start', capture=True)
        omop_harvest_port = local("docker inspect --format='{{{{range $p, $conf := .NetworkSettings.Ports}}}}{{{{(index $conf 0).HostPort}}}} {{{{end}}}}' {0}".format(omop_harvest), capture=True)
        print(red('\n***\nomop_harvest Production Clone now running on: http://localhost:{0}'.format(omop_harvest_port)))
| |
# -*- coding: utf-8 -*-
"""
Default Controllers
"""
module = "default"  # controller name, used below for the CMS content lookup
# -----------------------------------------------------------------------------
def call():
    "Call an XMLRPC, JSONRPC or RSS service"
    # If webservices don't use sessions, avoid cluttering up the storage
    #session.forget()
    return service()
# -----------------------------------------------------------------------------
def download():
    """Download a file referenced by the first request argument.

    Redirects back to the index with an error when no filename is given.
    """
    try:
        filename = request.args[0]
    except IndexError:
        # Narrowed from a bare 'except:'; only a missing argument is the
        # expected failure here.
        session.error("Need to specify the file to download!")
        redirect(URL(f="index"))
    # Load the Model: accessing s3db[tablename] has the side effect of
    # defining the table so response.download can resolve the upload field.
    tablename = filename.split(".", 1)[0]
    table = s3db[tablename]
    return response.download(request, db)
# =============================================================================
def register_validation(form):
    """ Validate the fields in registration form """
    form_vars = form.vars
    # Mobile Phone: validate when supplied, otherwise enforce the
    # deployment's mandatory-phone policy.
    if "mobile" in form_vars and form_vars.mobile:
        import re
        regex = re.compile(single_phone_number_pattern)
        if not regex.match(form_vars.mobile):
            form.errors.mobile = T("Invalid phone number")
    elif settings.get_auth_registration_mobile_phone_mandatory():
        form.errors.mobile = T("Phone number is required")
    org = settings.get_auth_registration_organisation_id_default()
    if org:
        # Add to default organisation
        form_vars.organisation_id = org
    return
# =============================================================================
def index():
""" Main Home Page """
auth.settings.register_onvalidation = register_validation
auth.configure_user_fields()
page = request.args(0)
if page:
# Go to a custom page
# Arg 1 = function in /private/templates/<template>/controllers.py
# other Args & Vars passed through
controller = "applications.%s.private.templates.%s.controllers" % \
(appname, settings.get_template())
try:
exec("import %s as custom" % controller)
except ImportError, e:
# No Custom Page available, continue with the default
page = "private/templates/%s/controllers.py" % \
settings.get_template()
s3base.s3_debug("File not loadable: %s, %s" % (page, e.message))
else:
if "." in page:
# Remove extension
page = page.split(".", 1)[0]
if page in custom.__dict__:
exec ("output = custom.%s()()" % page)
return output
elif page != "login":
raise(HTTP(404, "Function not found: %s()" % page))
else:
output = custom.index()()
return output
elif settings.get_template() != "default":
# Try a Custom Homepage
controller = "applications.%s.private.templates.%s.controllers" % \
(appname, settings.get_template())
try:
exec("import %s as custom" % controller)
except ImportError, e:
# No Custom Page available, continue with the default
# @ToDo: cache this result in session
s3base.s3_debug("Custom homepage cannot be loaded: %s" % e.message)
else:
if "index" in custom.__dict__:
output = custom.index()()
return output
# Default Homepage
title = settings.get_system_name()
response.title = title
item = ""
has_module = settings.has_module
if has_module("cms"):
table = s3db.cms_post
ltable = s3db.cms_post_module
query = (ltable.module == module) & \
((ltable.resource == None) | (ltable.resource == "index")) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = db(query).select(table.body,
limitby=(0, 1)).first()
if item:
item = DIV(XML(item.body))
else:
item = ""
if has_module("cr"):
table = s3db.cr_shelter
SHELTERS = s3.crud_strings["cr_shelter"].title_list
else:
SHELTERS = ""
# Menu Boxes
menu_btns = [#div, label, app, function
["facility", T("Facilities"), "org", "facility"],
["facility", T("Hospitals"), "hms", "hospital"],
["facility", T("Offices"), "org", "office"],
["facility", SHELTERS, "cr", "shelter"],
["facility", T("Warehouses"), "inv", "warehouse"],
["sit", T("Staff"), "hrm", "staff"],
["sit", T("Volunteers"), "vol", "volunteer"],
["sit", T("Incidents"), "irs", "ireport"],
["sit", T("Assessments"), "survey", "series"],
["sit", T("Assets"), "asset", "asset"],
["sit", T("Inventory Items"), "inv", "inv_item"],
#["dec", T("Gap Map"), "project", "gap_map"],
#["dec", T("Gap Report"), "project", "gap_report"],
["dec", T("Requests"), "req", "req"],
["res", T("Projects"), "project", "project"],
["res", T("Commitments"), "req", "commit"],
["res", T("Sent Shipments"), "inv", "send"],
["res", T("Received Shipments"), "inv", "recv"],
]
# Change to (Mitigation)/Preparedness/Response/Recovery?
menu_divs = {"facility": DIV(H3(T("Facilities")),
_id = "facility_box",
_class = "menu_box",
),
"sit": DIV(H3(T("Situation")),
_id = "menu_div_sit",
_class = "menu_div",
),
"dec": DIV(H3(T("Decision")),
_id = "menu_div_dec",
_class = "menu_div",
),
"res": DIV(H3(T("Response")),
_id = "menu_div_res",
_class = "menu_div",
),
}
for div, label, app, function in menu_btns:
if has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A(DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app, function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
appname),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append(A(IMG(_src = "/%s/static/img/map_icon_128.png" % \
appname),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
# Check logged in AND permissions
roles = session.s3.roles
table = s3db.org_organisation
has_permission = auth.s3_has_permission
if AUTHENTICATED in roles and \
has_permission("read", table):
org_items = organisation()
datatable_ajax_source = "/%s/default/organisation.aadata" % \
appname
s3.actions = None
response.view = "default/index.html"
permission = auth.permission
permission.controller = "org"
permission.function = "site"
permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
if permitted_facilities:
facilities = s3db.org_SiteRepresent().bulk(permitted_facilities)
facility_list = [(fac, facilities[fac]) for fac in facilities]
facility_list = sorted(facility_list, key=lambda fac: fac[1])
facility_opts = [OPTION(fac[1], _value=fac[0])
for fac in facility_list]
manage_facility_box = DIV(H3(T("Manage Your Facilities")),
SELECT(_id = "manage_facility_select",
_style = "max-width:400px;",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
#_disabled = "disabled",
_id = "manage_facility_btn",
_class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft"
)
s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))})
$('#manage_facility_btn').click(function(){
if ( ($('#manage_facility_btn').attr('href').toString())===S3.Ap.concat('/default/site/None') )
{$("#manage_facility_box").append("<div class='alert alert-error'>%s</div>")
return false}})''' % (T("Please Select a Facility")))
else:
manage_facility_box = ""
if has_permission("create", table):
create = A(T("Add Organization"),
_href = URL(c="org", f="organisation",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right: 10px;")
else:
create = ""
org_box = DIV(H3(T("Organizations")),
create,
org_items,
_id = "org_box",
_class = "menu_box fleft"
)
else:
datatable_ajax_source = ""
manage_facility_box = ""
org_box = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
register_form = auth.s3_registration_form()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
output = dict(title = title,
item = item,
sit_dec_res_box = sit_dec_res_box,
facility_box = facility_box,
manage_facility_box = manage_facility_box,
org_box = org_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
output = s3_guided_tour(output)
return output
# -----------------------------------------------------------------------------
def organisation():
"""
Function to handle pagination for the org list on the homepage
"""
request = current.request
get_vars = request.get_vars
representation = request.extension
resource = current.s3db.resource("org_organisation")
totalrows = resource.count()
display_start = int(get_vars.iDisplayStart) if get_vars.iDisplayStart else 0
display_length = int(get_vars.iDisplayLength) if get_vars.iDisplayLength else 10
limit = 4 * display_length
list_fields = ["id", "name"]
default_orderby = orderby = "org_organisation.name asc"
if representation == "aadata":
query, orderby, left = resource.datatable_filter(list_fields, get_vars)
if orderby is None:
orderby = default_orderby
data = resource.select(list_fields,
start=display_start,
limit=limit,
orderby=orderby,
count=True,
represent=True)
filteredrows = data["numrows"]
rfields = data["rfields"]
data = data["rows"]
dt = S3DataTable(rfields, data)
dt.defaultActionButtons(resource)
current.response.s3.no_formats = True
if representation == "html":
items = dt.html(totalrows,
totalrows,
"org_dt",
dt_displayLength=display_length,
dt_ajax_url=URL(c="default",
f="organisation",
extension="aadata",
vars={"id": "org_dt"},
),
dt_pagination="true",
)
elif representation == "aadata":
if "sEcho" in request.vars:
echo = int(request.vars.sEcho)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
"org_dt",
echo)
else:
from gluon.http import HTTP
raise HTTP(501, resource.ERROR.BAD_FORMAT)
return items
# -----------------------------------------------------------------------------
def site():
"""
@ToDo: Avoid redirect
"""
try:
site_id = request.args[0]
except:
raise HTTP(404)
table = s3db.org_site
record = db(table.site_id == site_id).select(table.instance_type,
limitby=(0, 1)).first()
tablename = record.instance_type
table = s3db.table(tablename)
if table:
query = (table.site_id == site_id)
id = db(query).select(table.id,
limitby = (0, 1)).first().id
cf = tablename.split("_", 1)
redirect(URL(c = cf[0],
f = cf[1],
args = [id]))
# -----------------------------------------------------------------------------
def message():
""" Show a confirmation screen """
#if "verify_email_sent" in request.args:
title = T("Account Registered - Please Check Your Email")
message = T( "%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters." )\
% {"system_name": settings.get_system_name(),
"email": request.vars.email}
image = "email_icon.png"
return dict(title = title,
message = message,
image_src = "/%s/static/img/%s" % (appname, image)
)
# -----------------------------------------------------------------------------
def rapid():
""" Set/remove rapid data entry flag """
val = request.vars.get("val", True)
if val == "0":
val = False
else:
val = True
session.s3.rapid_data_entry = val
response.view = "xml.html"
return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user():
""" Auth functions based on arg. See gluon/tools.py """
arg = request.args(0)
auth.settings.on_failed_authorization = URL(f="error")
auth.configure_user_fields()
auth.settings.register_onvalidation = register_validation
_table_user = auth.settings.table_user
auth.settings.profile_onaccept = auth.s3_user_profile_onaccept
self_registration = settings.get_security_self_registration()
login_form = register_form = None
if request.args:
arg = request.args(0)
else:
arg = None
# Check for template-specific customisations
customize = settings.ui.get("customize_auth_user", None)
if customize:
customize(arg=arg)
# Needs more work to integrate our form extensions
#auth.settings.formstyle = s3_formstyle
if arg == "login":
title = response.title = T("Login")
# @ToDo: move this code to /modules/s3/s3aaa.py:def login()?
auth.messages.submit_button = T("Login")
form = auth()
#form = auth.login()
login_form = form
elif arg == "register":
title = response.title = T("Register")
# @ToDo: move this code to /modules/s3/s3aaa.py:def register()?
if not self_registration:
session.error = T("Registration not permitted")
redirect(URL(f="index"))
form = register_form = auth.s3_registration_form()
elif arg == "change_password":
title = response.title = T("Change Password")
form = auth()
# Add client-side validation
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.pstrength.2.1.0.min.js" % appname)
s3.jquery_ready.append("$('.password:eq(1)').pstrength()")
elif arg == "retrieve_password":
title = response.title = T("Retrieve Password")
form = auth()
elif arg == "profile":
title = response.title = T("User Profile")
form = auth.profile()
elif arg == "options.s3json":
# Used when adding organisations from registration form
return s3_rest_controller(prefix="auth", resourcename="user")
else:
# logout
title = ""
form = auth()
if form:
if s3.crud.submit_style:
form[0][-1][1][0]["_class"] = s3.crud.submit_style
elif s3_formstyle == "bootstrap":
form[0][-1][1][0]["_class"] = "btn btn-primary"
# Use Custom Ext views
# Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
#if request.args(0) == "login":
# response.title = T("Login")
# response.view = "auth/login.html"
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "user.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
return dict(title=title,
form=form,
login_form=login_form,
register_form=register_form,
self_registration=self_registration)
# -----------------------------------------------------------------------------
def person():
"""
Profile to show:
- User Details
- Person Details
- Staff/Volunteer Record
- Map Config
"""
# Set to current user
user_person_id = str(s3_logged_in_person())
# When request.args = [], set it as user_person_id.
# When it is not an ajax request and the first argument is not user_person_id, set it.
# If it is an json request, leave the arguments unmodified.
if not request.args or (request.args[0] != user_person_id and \
request.args[-1] != "options.s3json" and \
request.args[-1] != "validate.json"
):
request.args = [user_person_id]
set_method = s3db.set_method
# Custom Method for User
def auth_profile_method(r, **attr):
# Custom View
response.view = "update.html"
current.menu.breadcrumbs = None
# RHeader for consistency
rheader = attr.get("rheader", None)
if callable(rheader):
rheader = rheader(r)
table = auth.settings.table_user
tablename = table._tablename
next = URL(c = "default",
f = "person",
args = [user_person_id, "user"])
onaccept = lambda form: auth.s3_approve_user(form.vars),
form = auth.profile(next = next,
onaccept = onaccept)
return dict(title = T("User Profile"),
rheader = rheader,
form = form,
)
set_method("pr", "person",
method="user",
action=auth_profile_method)
# Custom Method for Contacts
set_method("pr", "person",
method="contacts",
action=s3db.pr_contacts)
#if settings.has_module("asset"):
# # Assets as component of people
# s3db.add_component("asset_asset",
# pr_person="assigned_to_id")
# CRUD pre-process
def prep(r):
if r.method in ("options", "validate"):
return True
if r.interactive and r.method != "import":
# Load default model to override CRUD Strings
tablename = "pr_person"
table = s3db[tablename]
s3.crud_strings[tablename].update(
title_display = T("Personal Profile"),
title_update = T("Personal Profile"))
# Organisation-dependent Fields
set_org_dependent_field = settings.set_org_dependent_field
set_org_dependent_field("pr_person_details", "father_name")
set_org_dependent_field("pr_person_details", "mother_name")
set_org_dependent_field("pr_person_details", "affiliations")
set_org_dependent_field("pr_person_details", "company")
if r.component:
if r.component_name == "physical_description":
# Hide all but those details that we want
# Lock all the fields
table = r.component.table
for field in table.fields:
table[field].writable = False
table[field].readable = False
# Now enable those that we want
table.ethnicity.writable = True
table.ethnicity.readable = True
table.blood_type.writable = True
table.blood_type.readable = True
table.medical_conditions.writable = True
table.medical_conditions.readable = True
table.other_details.writable = True
table.other_details.readable = True
elif r.component_name == "saved_search":
if r.method == "load":
if r.component_id:
table = db.pr_saved_search
record = db(table.id == r.component_id).select(table.url,
limitby=(0, 1)
).first()
if record:
redirect(record.url)
else:
raise HTTP(404)
elif r.component_name == "config":
ctable = s3db.gis_config
s3db.gis_config_form_setup()
# Create forms use this
# (update forms are in gis/config())
fields = ["name",
"pe_default",
"default_location_id",
"zoom",
"lat",
"lon",
#"projection_id",
#"symbology_id",
#"wmsbrowser_url",
#"wmsbrowser_name",
]
osm_table = s3db.gis_layer_openstreetmap
openstreetmap = db(osm_table.deleted == False).select(osm_table.id,
limitby=(0, 1))
if openstreetmap:
# OpenStreetMap config
s3db.add_component("auth_user_options",
gis_config=dict(joinby="pe_id",
pkey="pe_id",
multiple=False)
)
fields += ["user_options.osm_oauth_consumer_key",
"user_options.osm_oauth_consumer_secret",
]
crud_form = s3base.S3SQLCustomForm(*fields)
list_fields = ["name",
"pe_default",
]
s3db.configure("gis_config",
crud_form=crud_form,
insertable=False,
list_fields = list_fields,
)
else:
table.pe_label.readable = False
table.pe_label.writable = False
table.missing.readable = False
table.missing.writable = False
table.age_group.readable = False
table.age_group.writable = False
# Assume volunteers only between 12-81
table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
return True
else:
# Disable non-interactive & import
return False
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and r.component:
if r.component_name == "human_resource":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
if r.component_name == "experience":
# Set the minimum end_date to the same as the start_date
s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
elif r.component_name == "config":
update_url = URL(c="gis", f="config",
args="[id]")
s3_action_buttons(r, update_url=update_url)
s3.actions.append(
dict(url=URL(c="gis", f="index",
vars={"config":"[id]"}),
label=str(T("Show")),
_class="action-btn")
)
elif r.component_name == "saved_search" and r.method in (None, "search"):
s3_action_buttons(r)
s3.actions.append(
dict(url=URL(args=r.args + ["[id]", "load"]),
label=str(T("Load")),
_class="action-btn")
)
elif r.component_name == "asset":
# Provide a link to assign a new Asset
# @ToDo: Proper Widget to do this inline
output["add_btn"] = A(T("Assign Asset"),
_href=URL(c="asset", f="asset"),
_id="add-btn",
_class="action-btn")
return output
s3.postp = postp
if settings.get_hrm_staff_experience() == "experience":
experience_tab = (T("Experience"), "experience")
else:
experience_tab = None
if settings.get_hrm_use_certificates():
certificates_tab = (T("Certificates"), "certificate")
else:
certificates_tab = None
if settings.get_hrm_use_credentials():
credentials_tab = (T("Credentials"), "credential")
else:
credentials_tab = None
if settings.get_hrm_use_description():
description_tab = (T("Description"), "physical_description")
else:
description_tab = None
if settings.get_hrm_use_education():
education_tab = (T("Education"), "education")
else:
education_tab = None
if settings.get_hrm_use_id():
id_tab = (T("ID"), "identity")
else:
id_tab = None
if settings.get_hrm_use_skills():
skills_tab = (T("Skills"), "competency")
else:
skills_tab = None
teams = settings.get_hrm_teams()
if teams:
teams_tab = (T(teams), "group_membership")
else:
teams_tab = None
if settings.get_hrm_use_trainings():
trainings_tab = (T("Trainings"), "training")
else:
trainings_tab = None
if settings.get_search_save_widget():
searches_tab = (T("Saved Searches"), "saved_search")
else:
searches_tab = None
tabs = [(T("Person Details"), None),
(T("User Account"), "user"),
(T("Staff/Volunteer Record"), "human_resource"),
id_tab,
description_tab,
(T("Address"), "address"),
(T("Contacts"), "contacts"),
education_tab,
trainings_tab,
certificates_tab,
skills_tab,
credentials_tab,
experience_tab,
teams_tab,
#(T("Assets"), "asset"),
(T("My Maps"), "config"),
searches_tab,
]
output = s3_rest_controller("pr", "person",
rheader = lambda r: \
s3db.pr_rheader(r, tabs=tabs))
return output
# -----------------------------------------------------------------------------
def group():
"""
RESTful CRUD controller
- needed when group add form embedded in default/person
- only create method is allowed, when opened in a inline form.
"""
# Check if it is called from a inline form
if auth.permission.format != "popup":
return ""
# Pre-process
def prep(r):
if r.method != "create":
return False
return True
s3.prep = prep
output = s3_rest_controller("pr", "group")
return output
# -----------------------------------------------------------------------------
def skill():
"""
RESTful CRUD controller
- needed when skill add form embedded in default/person
- only create method is allowed, when opened in a inline form.
"""
# Check if it is called from a inline form
if auth.permission.format != "popup":
return ""
# Pre-process
def prep(r):
if r.method != "create":
return False
return True
s3.prep = prep
output = s3_rest_controller("hrm", "skill")
return output
# -----------------------------------------------------------------------------
def facebook():
""" Login using Facebook """
if not auth.settings.facebook:
redirect(URL(f="user", args=request.args, vars=request.vars))
from s3oauth import FaceBookAccount
auth.settings.login_form = FaceBookAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
def google():
""" Login using Google """
if not auth.settings.google:
redirect(URL(f="user", args=request.args, vars=request.vars))
from s3oauth import GooglePlusAccount
auth.settings.login_form = GooglePlusAccount()
form = auth()
return dict(form=form)
# -----------------------------------------------------------------------------
# About Sahana
def apath(path=""):
""" Application path """
from gluon.fileutils import up
opath = up(request.folder)
# @ToDo: This path manipulation is very OS specific.
while path[:3] == "../": opath, path=up(opath), path[3:]
return os.path.join(opath,path).replace("\\", "/")
def about():
"""
The About page provides details on the software dependencies and
versions available to this instance of Sahana Eden.
"""
response.title = T("About")
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "about.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
import sys
import subprocess
import string
python_version = sys.version
web2py_version = open(apath("../VERSION"), "r").read()[8:]
sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
# Database
sqlite_version = None
mysql_version = None
mysqldb_version = None
pgsql_version = None
psycopg_version = None
if db_string.find("sqlite") != -1:
try:
import sqlite3
sqlite_version = sqlite3.version
except:
sqlite_version = T("Unknown")
elif db_string.find("mysql") != -1:
try:
import MySQLdb
mysqldb_version = MySQLdb.__revision__
except:
mysqldb_version = T("Not installed or incorrectly configured.")
mysql_version = T("Unknown")
else:
#mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
con = MySQLdb.connect(host=settings.database.get("host", "localhost"),
port=settings.database.get("port", None) or 3306,
db=settings.database.get("database", "sahana"),
user=settings.database.get("username", "sahana"),
passwd=settings.database.get("password", "password")
)
cur = con.cursor()
cur.execute("SELECT VERSION()")
mysql_version = cur.fetchone()
else:
# Postgres
try:
import psycopg2
psycopg_version = psycopg2.__version__
except:
psycopg_version = T("Not installed or incorrectly configured.")
pgsql_version = T("Unknown")
else:
#pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
#pgsql_version = string.split(pgsql_reply)[2]
con = psycopg2.connect(host=settings.database.get("host", "localhost"),
port=settings.database.get("port", None) or 5432,
database=settings.database.get("database", "sahana"),
user=settings.database.get("username", "sahana"),
password=settings.database.get("password", "password")
)
cur = con.cursor()
cur.execute("SELECT version()")
pgsql_version = cur.fetchone()
# Libraries
try:
import reportlab
reportlab_version = reportlab.Version
except:
reportlab_version = T("Not installed or incorrectly configured.")
try:
import xlwt
xlwt_version = xlwt.__VERSION__
except:
xlwt_version = T("Not installed or incorrectly configured.")
return dict(
python_version=python_version,
sahana_version=sahana_version,
web2py_version=web2py_version,
sqlite_version=sqlite_version,
mysql_version=mysql_version,
mysqldb_version=mysqldb_version,
pgsql_version=pgsql_version,
psycopg_version=psycopg_version,
reportlab_version=reportlab_version,
xlwt_version=xlwt_version
)
# -----------------------------------------------------------------------------
def help():
""" Custom View """
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "help.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
response.title = T("Help")
return dict()
# -----------------------------------------------------------------------------
def privacy():
""" Custom View """
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "privacy.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
response.title = T("Privacy")
return dict()
# -----------------------------------------------------------------------------
def tos():
""" Custom View """
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "tos.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
response.title = T("Terms of Service")
return dict()
# -----------------------------------------------------------------------------
def video():
""" Custom View """
if settings.get_template() != "default":
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
settings.get_template(), "views", "video.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
response.title = T("Video Tutorials")
return dict()
# -----------------------------------------------------------------------------
def contact():
"""
Give the user options to contact the site admins.
Either:
An internal Support Requests database
or:
Custom View
"""
if auth.is_logged_in() and settings.has_module("support"):
# Provide an internal Support Requests ticketing system.
prefix = "support"
resourcename = "req"
tablename = "%s_%s" % (prefix, resourcename)
table = s3db[tablename]
# Pre-processor
def prep(r):
if r.interactive:
# Only Admins should be able to update ticket status
status = table.status
actions = table.actions
if not auth.s3_has_role(ADMIN):
status.writable = False
actions.writable = False
if r.method != "update":
status.readable = False
status.writable = False
actions.readable = False
actions.writable = False
return True
s3.prep = prep
output = s3_rest_controller(prefix, resourcename)
return output
template = settings.get_template()
if template != "default":
# Try a Custom Page
controller = "applications.%s.private.templates.%s.controllers" % \
(appname, template)
try:
exec("import %s as custom" % controller) in globals(), locals()
except ImportError, e:
# No Custom Page available, try a custom view
pass
else:
if "contact" in custom.__dict__:
output = custom.contact()()
return output
# Try a Custom View
view = os.path.join(request.folder, "private", "templates",
template, "views", "contact.html")
if os.path.exists(view):
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
response.title = T("Contact us")
return dict()
if settings.has_module("cms"):
# Use CMS
return s3db.cms_index("default", "contact", page_name=T("Contact Us"))
# Just use default HTML View
return dict()
# -----------------------------------------------------------------------------
def load_all_models():
"""
Controller to load all models in web browser
- to make it easy to debug in Eclipse
"""
s3db.load_all_models()
return "ok"
# -----------------------------------------------------------------------------
def audit():
"""
RESTful CRUD Controller for Audit Logs
- used e.g. for Site Activity
"""
return s3_rest_controller("s3", "audit")
# END =========================================================================
| |
import re
from decimal import Decimal
from setman.exceptions import ValidationError
from setman.utils import importlib, logger
from setman.utils.common import force_bool, load_from_path
from setman.utils.validators import decimal_places_validator, \
max_digits_validator, max_length_validator, max_value_validator, \
min_length_validator, min_value_validator, regex_validator
__all__ = ('SetmanSetting', 'BooleanSetting', 'ChoiceSetting',
'DecimalSetting', 'FloatSetting', 'IntSetting', 'StringSetting')
class SetmanSetting(object):
    """
    Base class for setting values that can provided in configuration definition
    file.

    The class has next attributes:

    * ``name``
    * ``type``
    * ``default``
    * ``required``
    * ``label``
    * ``help_text``
    * ``validators``
    * ``field``
    * ``field_args``
    * ``field_kwargs``

    The last three attributes can be provided only in Python module, when all
    other attrs can read from configuration definition file.
    """
    app_name = None
    default = None
    help_text = None
    field_args = ('label', 'help_text', 'initial', 'required', 'validators')
    field_klass = None
    field_kwargs = {}
    label = None
    name = None
    required = True
    type = None
    validators = None  # NOTE: shadowed by the ``validators`` property below

    def __init__(self, **kwargs):
        """
        Initialize setting.
        """
        self.app_name = kwargs.pop('app_name', None)
        self.update(**kwargs)

    def __repr__(self):
        return u'<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    def __unicode__(self):
        return u'%s = %r' % (self.name, self.initial)

    @property
    def builtin_validators(self):
        """
        List of builtin validators to use cause of setting attributes.

        Subclasses override this to derive validators from attributes such
        as ``max_value`` or ``max_length``.
        """
        return None

    @property
    def initial(self):
        """
        Read real setting value from database or if impossible - just send
        default setting value.
        """
        from setman import settings
        if self.app_name:
            settings = getattr(settings, self.app_name)
        return getattr(settings, self.name, self.default)

    def get_field_args(self):
        """
        Return list of all available setting field keyword arguments keys.
        """
        return self.field_args

    def get_field_kwargs(self):
        """
        Return dict of all available setting field keyword arguments.
        """
        return self.field_kwargs

    def to_python(self, value):
        """
        Convert setting value to necessary Python type. By default, returns
        same value without any conversion.
        """
        return value

    def update(self, **kwargs):
        """
        Update attributes for current setting instance.

        Unknown attributes and the restricted ones below are silently
        discarded before merging into the instance ``__dict__``.
        """
        self._validators = kwargs.pop('validators', None)
        restricted = ('field_klass', 'field_args', 'field_kwargs',
                      'validators')
        # Iterate over a snapshot of the keys. The previous implementation
        # popped keys while iterating ``kwargs.items()`` (which raises
        # RuntimeError on Python 3) and could pop the same key twice,
        # raising an unexpected KeyError.
        for key in list(kwargs):
            if not hasattr(self, key) or key in restricted:
                kwargs.pop(key)
        self.__dict__.update(kwargs)
        self.required = force_bool(self.required)

    def validate(self, value):
        """
        Run all available validators for current setting and raise
        ``ValidationError`` if some checkin ended with error.

        As result return validated value.
        """
        has_value = bool(value)
        value = self.to_python(value)
        # Optional settings may stay empty without running any validator.
        if not self.required and not value:
            return value
        if value is None:
            # ``to_python`` rejected a non-empty input.
            if has_value:
                raise ValidationError('Enter a valid value.')
            if self.required:
                raise ValidationError('This setting is required.')
            return value
        for validator in self.validators:
            value = validator(value)
        return value

    @property
    def validators(self):
        """
        Lazy loaded validators.

        Combines subclass-provided builtin validators with the validators
        loaded from dotted paths listed in the configuration file.
        """
        cache_key = '_validators_cache'
        if not hasattr(self, cache_key):
            setattr(self, cache_key, self._parse_validators(self._validators))
        builtin_validators = list(self.builtin_validators or [])
        loaded_validators = getattr(self, cache_key)
        return builtin_validators + loaded_validators

    def _parse_validators(self, value):
        """
        Parse validators string and try to convert it to list with actual
        validator functions.
        """
        if not value:
            return []
        items = map(lambda item: item.strip(), value.split(','))
        validators = []
        for item in items:
            try:
                validator = load_from_path(item)
            except (AttributeError, ImportError):
                # Log and skip: a bad validator path must not break loading
                # of the whole setting definition.
                logger.exception('Cannot load %r validator for %s setting.',
                                 item, self.name)
                continue
            validators.append(validator)
        return validators
class BooleanSetting(SetmanSetting):
    """
    Setting holding a true/false value.

    Boolean settings are optional by default: an absent value simply
    means ``False``.
    """
    type = 'boolean'
    required = False

    def to_python(self, value):
        """
        Coerce any raw value (typically a string) to a ``bool``.
        """
        coerced = force_bool(value)
        return coerced

    def update(self, **kwargs):
        """
        Apply ``kwargs`` via the base class, then normalize the default
        value to a boolean.
        """
        super(BooleanSetting, self).update(**kwargs)
        self.default = self.to_python(self.default)
class ChoiceSetting(SetmanSetting):
    """
    Choice setting.

    The raw ``choices`` value read from the configuration file is parsed
    lazily into a tuple on first access of the ``choices`` property.
    """
    choices = None  # NOTE: shadowed by the ``choices`` property below
    field_args = SetmanSetting.field_args + ('choices', )
    type = 'choice'
    @property
    def choices(self):
        """
        Lazy loaded choices.
        """
        cache_key = '_choices_cache'
        if not hasattr(self, cache_key):
            # Parse once and cache the result on the instance.
            setattr(self, cache_key, self._parse_choices(self._choices))
        return getattr(self, cache_key)
    def _parse_choices(self, value):
        """
        Convert string value to valid choices tuple.

        **Supported formats:**

        * a, b, c
        * (a, A), (b, B), (c, C)
        * a { b, c }, d { e, f }
        * A { (b, B), (c, C) }, D { (e, E), (f, F) }
        * path.to.CHOICES
        * path.to.Model.CHOICES

        Returns an empty tuple when the value cannot be loaded or parsed.
        """
        # Start parsing with internal choices
        if not ',' in value and '.' in value:
            # Choices tuple should be last part of value
            path, attr = value.rsplit('.', 1)
            # Load choices from module
            try:
                module = importlib.import_module(path)
            except ImportError:
                # Or from module class
                try:
                    module = load_from_path(path)
                except (AttributeError, ImportError):
                    logger.exception('Cannot load choices from %r path',
                                     value)
                    return ()
            # Try to get choices attr in module
            try:
                choices = getattr(module, attr)
            except AttributeError:
                logger.exception('Cannot load choices from %r path', value)
                return ()
        elif not '{' in value and not '}' in value:
            # Parse choice with labels
            label_re = re.compile(r'\(([^,]+),\s+([^\)]+)\)', re.M)
            found = label_re.findall(value)
            if found:
                choices = found
            # If nothing found by regex, just split value by comma and
            # duplicate resulted items
            else:
                choices = map(lambda item: (item.strip(), item.strip()),
                              value.split(','))
        else:
            # Parse groups
            groups_re = re.compile(r'([^{]+){([^}]+)},?', re.M)
            found = groups_re.findall(value)
            if found:
                choices = []
                for group, data in found:
                    group = group.strip()
                    # Recurse to parse the comma-separated items inside braces.
                    choices.append((group, self._parse_choices(data.strip())))
            else:
                logger.error('Cannot parse choices from %r', value)
                return ()
        return tuple(choices)
    def update(self, **kwargs):
        """
        Stash the raw ``choices`` string, then apply the base update.
        """
        self._choices = kwargs.pop('choices', None)
        super(ChoiceSetting, self).update(**kwargs)
class DecimalSetting(SetmanSetting):
    """
    Setting holding a ``decimal.Decimal`` value with optional precision
    and range constraints.
    """
    decimal_places = None
    field_args = SetmanSetting.field_args + ('decimal_places', 'max_digits',
                                             'max_value', 'min_value')
    max_digits = None
    max_value = None
    min_value = None
    type = 'decimal'

    @property
    def builtin_validators(self):
        """
        Validators derived from whichever decimal constraints were set.
        """
        constraint_factories = (
            (self.decimal_places, decimal_places_validator),
            (self.max_digits, max_digits_validator),
            (self.max_value, max_value_validator),
            (self.min_value, min_value_validator),
        )
        return [factory(limit)
                for limit, factory in constraint_factories
                if limit is not None]

    def to_python(self, value):
        """
        Convert ``value`` to ``Decimal``; ``None`` passes through untouched.
        """
        if value is None:
            return value
        return Decimal(str(value))

    def update(self, **kwargs):
        """
        Apply base updates, then coerce constraint attributes to their
        proper numeric types.
        """
        super(DecimalSetting, self).update(**kwargs)
        as_int = IntSetting().to_python
        self.decimal_places = as_int(self.decimal_places)
        self.max_digits = as_int(self.max_digits)
        for attr in ('default', 'max_value', 'min_value'):
            setattr(self, attr, self.to_python(getattr(self, attr)))
class IntSetting(SetmanSetting):
    """
    Setting holding an integer value with optional min/max bounds.
    """
    field_args = SetmanSetting.field_args + ('max_value', 'min_value')
    max_value = None
    min_value = None
    type = 'int'

    @property
    def builtin_validators(self):
        """
        Range validators implied by ``max_value``/``min_value``.
        """
        implied = []
        if self.max_value is not None:
            implied.append(max_value_validator(self.max_value))
        if self.min_value is not None:
            implied.append(min_value_validator(self.min_value))
        return implied

    def to_python(self, value):
        """
        Convert ``value`` to ``int``, or ``None`` when conversion fails.
        """
        try:
            converted = int(value)
        except (TypeError, ValueError):
            converted = None
        return converted

    def update(self, **kwargs):
        """
        Apply base updates, then coerce numeric attributes to ints.
        """
        super(IntSetting, self).update(**kwargs)
        for attr in ('default', 'max_value', 'min_value'):
            setattr(self, attr, self.to_python(getattr(self, attr)))
class FloatSetting(IntSetting):
    """
    Setting holding a floating point value.

    Reuses the min/max handling of ``IntSetting``; only the conversion
    to Python differs.
    """
    type = 'float'

    def to_python(self, value):
        """
        Convert ``value`` to ``float``, or ``None`` when conversion fails.
        """
        try:
            converted = float(value)
        except (TypeError, ValueError):
            converted = None
        return converted
class StringSetting(SetmanSetting):
    """
    Setting holding a text value with optional length and regex limits.
    """
    max_length = None
    min_length = None
    regex = None
    type = 'string'

    @property
    def builtin_validators(self):
        """
        Validators implied by the length limits and the ``regex`` attr.
        """
        implied = []
        if self.max_length is not None:
            implied.append(max_length_validator(self.max_length))
        if self.min_length is not None:
            implied.append(min_length_validator(self.min_length))
        if self.regex is not None:
            implied.append(regex_validator(self.regex))
        return implied

    def get_field_args(self):
        """
        Use ``RegexField`` for string setting if ``regex`` was filled in
        configuration definition file.
        """
        if self.regex and 'regex' not in self.field_args:
            return self.field_args + ('regex', )
        return super(StringSetting, self).get_field_args()

    def update(self, **kwargs):
        """
        Apply base updates, then coerce the length limits to ints.
        """
        super(StringSetting, self).update(**kwargs)
        to_int = IntSetting().to_python
        self.max_length = to_int(self.max_length)
        self.min_length = to_int(self.min_length)
| |
"""
This code is used to rotate the images given some angles between [0,1].
Obliging License, credit and conditions for Lasagne: Some part of the file was
directly reproduced from the Lasagne code base.
Author: Anchit Agarwal
LICENSE
=======
Copyright (c) 2014-2015 Lasagne contributors
Lasagne uses a shared copyright model: each contributor holds copyright over
their contributions to Lasagne. The project versioning records all such
contribution and copyright details.
By contributing to the Lasagne repository through pull-request, comment,
or otherwise, the contributor releases their content to the license and
copyright terms herein.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from abstract import layer, _activate, _dropout
import numpy
import numpy.random as nprnd
import theano
import theano.tensor as T
class rotate_layer (layer):
    """
    This is a rotate layer. This takes a layer and an angle (rotation normalized in [0,1]) as input
    and rotates the batch of images by the specified rotation parameter.

    Args:
        input: An input ``theano.tensor`` variable. Even ``theano.shared`` will work as long as they
               are in the following shape ``mini_batch_size, channels, height, width``
        verbose: similar to the rest of the toolbox.
        input_shape: ``(mini_batch_size, input_size)``
        angle: value from [0,1]
        borrow: ``theano`` borrow, typically ``True``.
        input_params: Supply params or initializations from a pre-trained system.
    """
    def __init__ (self,
                  input,
                  input_shape,
                  id,
                  angle = None,
                  borrow = True,
                  verbose = 2 ):
        super(rotate_layer,self).__init__(id = id, type = 'rotate', verbose = verbose)
        if verbose >= 3:
            print("... Creating rotate layer")

        # Only 4D image batches (batch, channels, height, width) are handled;
        # for any other shape the layer is created without an ``output``
        # attribute (same as before).
        if len(input_shape) == 4:
            if verbose >= 3:
                print("... Creating the rotate layer")

            if angle is None:
                # One random rotation per image in the mini-batch.
                angle = nprnd.uniform(size = (input_shape[0],1), low = 0, high = 1)

            # ``angle`` is normalized to [0,1] and represents a fraction of a
            # quarter turn.  ``T.sin``/``T.cos`` operate in radians, so map
            # [0,1] -> [0, pi/2].  (The previous code multiplied by 90, i.e.
            # fed degree values into radian trigonometric ops, which wrapped
            # the circle many times and produced arbitrary rotations.)
            rad = angle[:,0] * (numpy.pi / 2)

            # Per-image 2x3 affine matrices [[cos, -sin, 0], [sin, cos, 0]]
            # stacked and flattened to 6 parameters each.
            theta = T.stack([T.cos(rad).reshape([angle.shape[0],1]),
                            -T.sin(rad).reshape([angle.shape[0],1]),
                            T.zeros((input_shape[0],1),dtype='float32'),
                            T.sin(rad).reshape([angle.shape[0],1]),
                            T.cos(rad).reshape([angle.shape[0],1]),
                            T.zeros((input_shape[0],1),dtype='float32')], axis=1)
            theta = theta.reshape((-1, 6))

            self.output = self._transform_affine(theta, input)
            self.output_shape = input_shape
            self.angle = angle
            self.inference = self.output

    def _transform_affine(self, theta, input):
        """
        Apply the per-image affine transforms ``theta`` to ``input``.

        Args:
            theta: tensor of shape ``(batch, 6)``; one flattened 2x3 affine
                   matrix per image.
            input: tensor of shape ``(batch, channels, height, width)``.

        Returns:
            Transformed tensor with the same shape as ``input``.
        """
        num_batch, num_channels, height, width = input.shape
        theta = T.reshape(theta, (-1, 2, 3))

        # grid of (x_t, y_t, 1)
        out_height = T.cast(height, 'int64')
        out_width = T.cast(width, 'int64')
        grid = self._meshgrid(out_height, out_width)

        # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
        T_g = T.dot(theta, grid)
        x_s = T_g[:, 0]
        y_s = T_g[:, 1]
        x_s_flat = x_s.flatten()
        y_s_flat = y_s.flatten()

        # dimshuffle input to (bs, height, width, channels)
        input_dim = input.dimshuffle(0, 2, 3, 1)
        input_transformed = self._interpolate(
            input_dim, x_s_flat, y_s_flat,
            out_height, out_width)

        output = T.reshape(
            input_transformed, (num_batch, out_height, out_width, num_channels))
        output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
        return output

    def _interpolate(self, im, x, y, out_height, out_width):
        """
        Bilinearly sample ``im`` at the normalized coordinates ``x``/``y``.

        ``im`` is expected in ``(batch, height, width, channels)`` layout;
        ``x`` and ``y`` are flat vectors of coordinates in [-1, 1].
        """
        # *_f are floats
        num_batch, height, width, channels = im.shape
        height_f = T.cast(height, theano.config.floatX)
        width_f = T.cast(width, theano.config.floatX)

        # clip coordinates to [-1, 1]
        x = T.clip(x, -1, 1)
        y = T.clip(y, -1, 1)

        # scale coordinates from [-1, 1] to [0, width/height - 1]
        x = (x + 1) / 2 * (width_f - 1)
        y = (y + 1) / 2 * (height_f - 1)

        # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
        # we need those in floatX for interpolation and in int64 for indexing. for
        # indexing, we need to take care they do not extend past the image.
        x0_f = T.floor(x)
        y0_f = T.floor(y)
        x1_f = x0_f + 1
        y1_f = y0_f + 1
        x0 = T.cast(x0_f, 'int64')
        y0 = T.cast(y0_f, 'int64')
        x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
        y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

        # The input is [num_batch, height, width, channels]. We do the lookup in
        # the flattened input, i.e [num_batch*height*width, channels]. We need
        # to offset all indices to match the flat version
        dim2 = width
        dim1 = width*height
        base = T.repeat(
            T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
        base_y0 = base + y0*dim2
        base_y1 = base + y1*dim2
        idx_a = base_y0 + x0
        idx_b = base_y1 + x0
        idx_c = base_y0 + x1
        idx_d = base_y1 + x1

        # use indices to lookup pixels for all samples
        im_flat = im.reshape((-1, channels))
        Ia = im_flat[idx_a]
        Ib = im_flat[idx_b]
        Ic = im_flat[idx_c]
        Id = im_flat[idx_d]

        # calculate interpolated values
        wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
        wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
        wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
        wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
        output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
        return output

    def _linspace(self, start, stop, num):
        """
        Theano linspace. Behaves similar to np.linspace.
        """
        start = T.cast(start, theano.config.floatX)
        stop = T.cast(stop, theano.config.floatX)
        num = T.cast(num, theano.config.floatX)
        step = (stop-start)/(num-1)
        return T.arange(num, dtype=theano.config.floatX)*step+start

    def _meshgrid(self, height, width):
        """
        Generate the sampling grid of homogeneous coordinates.
        """
        # This function is the grid generator.
        # It is equivalent to the following numpy code:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        # It is implemented in Theano instead to support symbolic grid sizes.
        # Note: If the image size is known at layer construction time, we could
        # compute the meshgrid offline in numpy instead of doing it dynamically
        # in Theano. However, it hardly affected performance when we tried.
        x_t = T.dot(T.ones((height, 1)),
                    self._linspace(-1.0, 1.0, width).dimshuffle('x', 0))
        y_t = T.dot(self._linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
                    T.ones((1, width)))

        x_t_flat = x_t.reshape((1, -1))
        y_t_flat = y_t.reshape((1, -1))
        ones = T.ones_like(x_t_flat)
        grid = T.concatenate([x_t_flat, y_t_flat, ones], axis=0)
        return grid
class dropout_rotate_layer (rotate_layer):
    """
    This is a rotate layer combined with dropout. It rotates the batch of
    images exactly like ``rotate_layer`` and then applies dropout to the
    rotated output. Called by the ``add_layer`` method in network class.

    Args:
        input: An input ``theano.tensor`` variable. Even ``theano.shared`` will work as long as they
               are in the following shape ``mini_batch_size, channels, height, width``
        verbose: similar to the rest of the toolbox.
        input_shape: ``(mini_batch_size, input_size)``
        angle: value from [0,1]
        borrow: ``theano`` borrow, typically ``True``.
        rng: typically ``numpy.random``.
        dropout_rate: ``0.5`` is the default.

    Notes:
        Use ``dropout_rotate_layer.output`` and ``dropout_rotate_layer.output_shape`` from
        this class.
    """
    def __init__ (self,
                  input,
                  input_shape,
                  id,
                  rng = None,
                  dropout_rate = 0.5,
                  angle = None,
                  borrow = True,
                  verbose = 2):
        if verbose >= 3:
            print("... set up the dropout rotate layer")
        if rng is None:
            rng = numpy.random
        # Forward ``angle`` to the parent constructor: previously the
        # parameter was accepted but silently dropped, so an explicitly
        # supplied angle had no effect and a random one was drawn instead.
        super(dropout_rotate_layer, self).__init__ (
                                input = input,
                                input_shape = input_shape,
                                id = id,
                                angle = angle,
                                borrow = borrow,
                                verbose = verbose
                                )
        if not dropout_rate == 0:
            # Mask the rotated output with the requested dropout rate.
            self.output = _dropout(rng = rng,
                                params = self.output,
                                dropout_rate = dropout_rate)
        self.dropout_rate = dropout_rate
        if verbose >=3:
            print("... Dropped out")
if __name__ == '__main__':#pragma: no cover
    # Library-only module; nothing to run when executed directly.
    pass
| |
#!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Account authorization tests for Facebook and Facebook accounts.
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
'andy@emailscrubbed.com (Andrew Kimball)']
import json
import mock
import os
import time
import unittest
import urllib
from functools import partial
from tornado import httpclient, ioloop
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test_timeout, MockAsyncHTTPClient
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.device import Device
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.user import User
from viewfinder.backend.www.test import auth_test, facebook_utils, service_base_test
@unittest.skip("needs facebook credentials")
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AuthFacebookTestCase(service_base_test.ServiceBaseTestCase):
  """Tests authentication via the Facebook OAuth service."""
  def setUp(self):
    super(AuthFacebookTestCase, self).setUp()
    # Canned Facebook graph user profiles (a full one plus a minimal second
    # user for link tests) and a mobile device description reused by the
    # register/login/link tests below.
    self._facebook_user_dict = {'first_name': 'Andrew', 'last_name': 'Kimball', 'name': 'Andrew Kimball',
                                'id': 'id', 'link': 'http://www.facebook.com/andrew.kimball.50',
                                'timezone':-7, 'locale': 'en_US', 'email': 'andy@facebook.com',
                                'picture': {'data': {'url': 'http://foo.com/pic.jpg',
                                                     'is_silhouette': False}},
                                'verified': True}
    self._facebook_user2_dict = {'name': 'Spencer Kimball', 'id': 'id2'}
    self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S',
                                'os': 'iOS 5.0.1', 'push_token': 'push_token'}
  def testRegisterWebUser(self):
    """Test successful register of web user."""
    # Register as web user, register as mobile user (2nd attempt is error).
    self._tester.RegisterFacebookUser(self._facebook_user_dict)
    self.assertRaisesHttpError(403,
                               self._tester.RegisterFacebookUser,
                               self._facebook_user_dict,
                               self._mobile_device_dict)
  def testRegisterMobileUser(self):
    """Test successful register of mobile user."""
    # Register as mobile user, register as web user (2nd attempt is error).
    self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
    self.assertRaisesHttpError(403,
                               self._tester.RegisterFacebookUser,
                               self._facebook_user_dict)
  def testLoginWebUser(self):
    """Test successful login of web user."""
    # Register as web user, login as web user.
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
    user2, device_id2 = self._tester.LoginFacebookUser(self._facebook_user_dict)
    # Login with the same identity must map to the same user and device.
    self.assertEqual(user.user_id, user2.user_id)
    self.assertEqual(device_id, device_id2)
    # And login again as mobile user.
    self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
  def testLoginMobileUser(self):
    """Test successful login of mobile user."""
    # Register as web user, login as mobile user.
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
    user2, device_id2 = self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
    # Same user, but a mobile login allocates a new device id.
    self.assertEqual(user.user_id, user2.user_id)
    self.assertNotEqual(device_id, device_id2)
    # And login again as web user.
    self._tester.LoginFacebookUser(self._facebook_user_dict)
  def testLinkWebUser(self):
    """Test successful link of web user."""
    # Register as mobile user, link as web user
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
    cookie = self._GetSecureUserCookie(user, device_id)
    user2, device_id2 = self._tester.LinkFacebookUser(self._facebook_user2_dict, user_cookie=cookie)
    self.assertEqual(user.user_id, user2.user_id)
    self.assertNotEqual(device_id, device_id2)
    # And link again as mobile user.
    self._tester.LinkFacebookUser(self._facebook_user2_dict, self._mobile_device_dict, user_cookie=cookie)
    # Both Facebook identities should now be linked to the one account.
    self.assertEqual(len(self._tester.ListIdentities(cookie)), 2)
  def testLinkMobileUser(self):
    """Test successful link of mobile user."""
    # Register as web user, link as mobile user.
    user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
    cookie = self._GetSecureUserCookie(user, device_id)
    self._tester.LinkFacebookUser(self._facebook_user2_dict, self._mobile_device_dict, user_cookie=cookie)
    # And link again as web user.
    self._tester.LinkFacebookUser(self._facebook_user2_dict, user_cookie=cookie)
    self.assertEqual(len(self._tester.ListIdentities(cookie)), 2)
  def testLoginNoExist(self):
    """ERROR: Try to login with Facebook identity that is not linked to a Viewfinder account."""
    self.assertRaisesHttpError(403, self._tester.LoginFacebookUser, self._facebook_user_dict)
    self.assertRaisesHttpError(403, self._tester.LoginFacebookUser, self._facebook_user_dict,
                               self._mobile_device_dict)
  def testAuthenticationFailed(self):
    """ERROR: Fail Facebook authentication (which returns None user_dict)."""
    # Make the mocked Facebook /me endpoint answer with HTTP 400.
    with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client:
      mock_client.map(r'https://graph.facebook.com/me\?',
                      lambda request: httpclient.HTTPResponse(request, 400))
      url = self.get_url('/register/facebook?access_token=access_token')
      self.assertRaisesHttpError(401,
                                 auth_test._SendAuthRequest,
                                 self._tester,
                                 url,
                                 'POST',
                                 user_cookie=self._cookie,
                                 request_dict=auth_test._CreateRegisterRequest(self._mobile_device_dict))
  def testMissingAccessToken(self):
    """ERROR: Test error on missing facebook access token."""
    self.assertRaisesHttpError(400,
                               auth_test._SendAuthRequest,
                               self._tester,
                               self.get_url('/register/facebook'),
                               'POST',
                               request_dict=auth_test._CreateRegisterRequest(self._mobile_device_dict))
  @async_test_timeout(timeout=30)
  def testFacebookRegistration(self):
    """Test end-end Facebook registration scenario using a test Facebook
    account.
    """
    self._validate = False
    # Get one facebook test user by querying facebook.
    fu = facebook_utils.FacebookUtils()
    users = fu.QueryFacebookTestUsers(limit=1)
    assert len(users) == 1, users
    def _VerifyAccountStatus(cookie, results):
      # Check the user, device and identity rows created by registration.
      u = results['user']
      dev = results['device']
      ident = results['identity']
      self.assertEqual(ident.user_id, u.user_id)
      self.assertTrue(u.name)
      self.assertTrue(u.given_name)
      self.assertTrue(u.family_name)
      self.assertIsNotNone(u.webapp_dev_id)
      [self.assertEqual(getattr(dev, k), v) for k, v in self._mobile_device_dict.items()]
      # Keep querying until notifications are found.
      # NOTE(review): busy-wait poll; relies on notifications arriving
      # before the 30s test timeout expires.
      while True:
        response_dict = self._SendRequest('query_notifications', cookie, {})
        if len(response_dict['notifications']) > 2:
          break
        time.sleep(0.100)
      self.assertEqual(response_dict['notifications'][1]['name'], 'register friend')
      notification = response_dict['notifications'][2]
      self.assertEqual(notification['name'], 'fetch_contacts')
      sort_key = Contact.CreateSortKey(None, notification['timestamp'])
      self.assertEqual(notification['invalidate']['contacts']['start_key'], sort_key)
      self.stop()
    def _VerifyResponse(response):
      """Verify successful registration. Query the identity and
      contacts and verify against the actual test data in facebook.
      """
      self.assertEqual(response.code, 200)
      cookie = self._tester.GetCookieFromResponse(response)
      user_dict = self._tester.DecodeUserCookie(cookie)
      response_dict = json.loads(response.body)
      self.assertTrue('user_id' in user_dict)
      self.assertTrue('device_id' in user_dict)
      self.assertEqual(user_dict['device_id'], response_dict['device_id'])
      # Fetch identity, user and device in parallel; the barrier invokes
      # _VerifyAccountStatus once all three queries complete.
      with util.DictBarrier(partial(_VerifyAccountStatus, cookie)) as b:
        identity_key = 'FacebookGraph:%s' % users[0]['id']
        Identity.Query(self._client, hash_key=identity_key, col_names=None,
                       callback=b.Callback('identity'))
        User.Query(self._client, hash_key=user_dict['user_id'], col_names=None,
                   callback=b.Callback('user'))
        Device.Query(self._client, hash_key=user_dict['user_id'], range_key=user_dict['device_id'],
                     col_names=None, callback=b.Callback('device'))
    url = self.get_url('/link/facebook') + '?' + \
        urllib.urlencode({'access_token': users[0]['access_token']})
    self.http_client.fetch(url, method='POST',
                           headers={'Content-Type': 'application/json',
                                    'X-Xsrftoken': 'fake_xsrf',
                                    'Cookie': 'user=%s;_xsrf=fake_xsrf' % self._cookie},
                           body=json.dumps(auth_test._CreateRegisterRequest(self._mobile_device_dict)),
                           callback=_VerifyResponse)
  def get_new_ioloop(self):
    """Override get_io_loop() to return IOLoop.instance(). The global IOLoop instance is used
    by self.http_client.fetch in the testFacebookRegistration test.
    """
    return ioloop.IOLoop.instance()
def _TestAuthFacebookUser(action, tester, user_dict, device_dict=None, user_cookie=None):
  """Called by the ServiceTester in order to test login/facebook, link/facebook, and
  register/facebook calls.
  """
  identity = {'key': 'FacebookGraph:%s' % user_dict['id'],
              'authority': 'Facebook',
              'access_token': 'access_token'}
  # Devices created through this path never carry client-generated ids.
  if device_dict:
    for stale_key in ('device_uuid', 'test_udid'):
      device_dict.pop(stale_key, None)
  # Mock every Facebook endpoint the auth flow touches.
  with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client:
    # The OAuth token exchange.
    mock_client.map(r'https://graph.facebook.com/oauth/access_token',
                    'access_token=%s&expires=3600' % identity['access_token'])
    # The user-info lookup.
    auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me\?', user_dict)
    # Empty photo and friend lists.
    auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me/photos\?', {'data': []})
    auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me/friends\?', {'data': []})
    response = auth_test._AuthFacebookOrGoogleUser(tester, action, user_dict, identity, device_dict, user_cookie)
    return auth_test._ValidateAuthUser(tester, action, user_dict, identity, device_dict, user_cookie, response)
| |
"""Support for Amcrest IP camera binary sensors."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import TYPE_CHECKING, Callable
from amcrest import AmcrestError
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SOUND,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.const import CONF_BINARY_SENSORS, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
from .const import (
BINARY_SENSOR_SCAN_INTERVAL_SECS,
DATA_AMCREST,
DEVICES,
SERVICE_EVENT,
SERVICE_UPDATE,
)
from .helpers import log_update_error, service_signal
if TYPE_CHECKING:
from . import AmcrestDevice
@dataclass
class AmcrestSensorEntityDescription(BinarySensorEntityDescription):
    """Describe Amcrest sensor entity."""
    # Amcrest event code this sensor listens for (None for e.g. "online").
    event_code: str | None = None
    # True for sensors refreshed by polling instead of pushed camera events.
    should_poll: bool = False
_LOGGER = logging.getLogger(__name__)
# Poll cadence for polled sensors.
SCAN_INTERVAL = timedelta(seconds=BINARY_SENSOR_SCAN_INTERVAL_SECS)
# Throttle for the connectivity check; offset from SCAN_INTERVAL so the
# online probe effectively runs about once per minute.
_ONLINE_SCAN_INTERVAL = timedelta(seconds=60 - BINARY_SENSOR_SCAN_INTERVAL_SECS)
# Sensor keys, display names and Amcrest event codes. Each sensor exists
# in an event-driven flavor and a "*_polled" flavor that polls instead.
_AUDIO_DETECTED_KEY = "audio_detected"
_AUDIO_DETECTED_POLLED_KEY = "audio_detected_polled"
_AUDIO_DETECTED_NAME = "Audio Detected"
_AUDIO_DETECTED_EVENT_CODE = "AudioMutation"
_CROSSLINE_DETECTED_KEY = "crossline_detected"
_CROSSLINE_DETECTED_POLLED_KEY = "crossline_detected_polled"
_CROSSLINE_DETECTED_NAME = "CrossLine Detected"
_CROSSLINE_DETECTED_EVENT_CODE = "CrossLineDetection"
_MOTION_DETECTED_KEY = "motion_detected"
_MOTION_DETECTED_POLLED_KEY = "motion_detected_polled"
_MOTION_DETECTED_NAME = "Motion Detected"
_MOTION_DETECTED_EVENT_CODE = "VideoMotion"
_ONLINE_KEY = "online"
# All supported sensor descriptions; filtered by the user's configuration
# in async_setup_platform.
BINARY_SENSORS: tuple[AmcrestSensorEntityDescription, ...] = (
    AmcrestSensorEntityDescription(
        key=_AUDIO_DETECTED_KEY,
        name=_AUDIO_DETECTED_NAME,
        device_class=DEVICE_CLASS_SOUND,
        event_code=_AUDIO_DETECTED_EVENT_CODE,
    ),
    AmcrestSensorEntityDescription(
        key=_AUDIO_DETECTED_POLLED_KEY,
        name=_AUDIO_DETECTED_NAME,
        device_class=DEVICE_CLASS_SOUND,
        event_code=_AUDIO_DETECTED_EVENT_CODE,
        should_poll=True,
    ),
    AmcrestSensorEntityDescription(
        key=_CROSSLINE_DETECTED_KEY,
        name=_CROSSLINE_DETECTED_NAME,
        device_class=DEVICE_CLASS_MOTION,
        event_code=_CROSSLINE_DETECTED_EVENT_CODE,
    ),
    AmcrestSensorEntityDescription(
        key=_CROSSLINE_DETECTED_POLLED_KEY,
        name=_CROSSLINE_DETECTED_NAME,
        device_class=DEVICE_CLASS_MOTION,
        event_code=_CROSSLINE_DETECTED_EVENT_CODE,
        should_poll=True,
    ),
    AmcrestSensorEntityDescription(
        key=_MOTION_DETECTED_KEY,
        name=_MOTION_DETECTED_NAME,
        device_class=DEVICE_CLASS_MOTION,
        event_code=_MOTION_DETECTED_EVENT_CODE,
    ),
    AmcrestSensorEntityDescription(
        key=_MOTION_DETECTED_POLLED_KEY,
        name=_MOTION_DETECTED_NAME,
        device_class=DEVICE_CLASS_MOTION,
        event_code=_MOTION_DETECTED_EVENT_CODE,
        should_poll=True,
    ),
    AmcrestSensorEntityDescription(
        key=_ONLINE_KEY,
        name="Online",
        device_class=DEVICE_CLASS_CONNECTIVITY,
        should_poll=True,
    ),
)
BINARY_SENSOR_KEYS = [description.key for description in BINARY_SENSORS]
# Each set pairs the event-driven and polled flavor of the same sensor;
# a configuration may select at most one key from each set.
_EXCLUSIVE_OPTIONS = [
    {_AUDIO_DETECTED_KEY, _AUDIO_DETECTED_POLLED_KEY},
    {_MOTION_DETECTED_KEY, _MOTION_DETECTED_POLLED_KEY},
    {_CROSSLINE_DETECTED_KEY, _CROSSLINE_DETECTED_POLLED_KEY},
]
_UPDATE_MSG = "Updating %s binary sensor"
def check_binary_sensors(value: list[str]) -> list[str]:
    """Validate binary sensor configurations."""
    for mutually_exclusive in _EXCLUSIVE_OPTIONS:
        # Reject configs that pick both the event-driven and polled flavor.
        chosen = set(value) & mutually_exclusive
        if len(chosen) > 1:
            raise vol.Invalid(
                f"must contain at most one of {', '.join(mutually_exclusive)}."
            )
    return value
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up a binary sensor for an Amcrest IP Camera."""
    # This platform is discovery-only.
    if discovery_info is None:
        return
    camera_name = discovery_info[CONF_NAME]
    amcrest_device = hass.data[DATA_AMCREST][DEVICES][camera_name]
    requested_keys = discovery_info[CONF_BINARY_SENSORS]
    # Instantiate only the sensors the user configured for this camera.
    entities = [
        AmcrestBinarySensor(camera_name, amcrest_device, description)
        for description in BINARY_SENSORS
        if description.key in requested_keys
    ]
    async_add_entities(entities, True)
class AmcrestBinarySensor(BinarySensorEntity):
    """Binary sensor for Amcrest camera."""
    def __init__(
        self,
        name: str,
        device: AmcrestDevice,
        entity_description: AmcrestSensorEntityDescription,
    ) -> None:
        """Initialize entity."""
        # The camera name doubles as the dispatcher signal name.
        self._signal_name = name
        self._api = device.api
        self.entity_description: AmcrestSensorEntityDescription = entity_description
        self._attr_name = f"{name} {entity_description.name}"
        self._attr_should_poll = entity_description.should_poll
        # Dispatcher unsubscribe callbacks, released on entity removal.
        self._unsub_dispatcher: list[Callable[[], None]] = []
    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        # The "online" sensor must stay available to report the outage itself.
        return self.entity_description.key == _ONLINE_KEY or self._api.available
    def update(self) -> None:
        """Update entity."""
        # The online sensor has its own (throttled) connectivity probe.
        if self.entity_description.key == _ONLINE_KEY:
            self._update_online()
        else:
            self._update_others()
    @Throttle(_ONLINE_SCAN_INTERVAL)
    def _update_online(self) -> None:
        # Refresh connectivity state; skipped while the camera is already
        # known to be both unavailable and off.
        if not (self._api.available or self.is_on):
            return
        _LOGGER.debug(_UPDATE_MSG, self.name)
        if self._api.available:
            # Send a command to the camera to test if we can still communicate with it.
            # Override of Http.command() in __init__.py will set self._api.available
            # accordingly.
            with suppress(AmcrestError):
                self._api.current_time # pylint: disable=pointless-statement
        self._attr_is_on = self._api.available
    def _update_others(self) -> None:
        # Poll the camera's event channel for this sensor's event code.
        if not self.available:
            return
        _LOGGER.debug(_UPDATE_MSG, self.name)
        event_code = self.entity_description.event_code
        if event_code is None:
            _LOGGER.error("Binary sensor %s event code not set", self.name)
            return
        try:
            self._attr_is_on = len(self._api.event_channels_happened(event_code)) > 0
        except AmcrestError as error:
            log_update_error(_LOGGER, "update", self.name, "binary sensor", error)
    async def async_on_demand_update(self) -> None:
        """Update state."""
        if self.entity_description.key == _ONLINE_KEY:
            # Availability changed; reflect it immediately without polling.
            _LOGGER.debug(_UPDATE_MSG, self.name)
            self._attr_is_on = self._api.available
            self.async_write_ha_state()
        else:
            self.async_schedule_update_ha_state(True)
    @callback
    def async_event_received(self, state: bool) -> None:
        """Update state from received event."""
        _LOGGER.debug(_UPDATE_MSG, self.name)
        self._attr_is_on = state
        self.async_write_ha_state()
    async def async_added_to_hass(self) -> None:
        """Subscribe to signals."""
        self._unsub_dispatcher.append(
            async_dispatcher_connect(
                self.hass,
                service_signal(SERVICE_UPDATE, self._signal_name),
                self.async_on_demand_update,
            )
        )
        # Event-driven sensors additionally subscribe to their event signal.
        if (
            self.entity_description.event_code
            and not self.entity_description.should_poll
        ):
            self._unsub_dispatcher.append(
                async_dispatcher_connect(
                    self.hass,
                    service_signal(
                        SERVICE_EVENT,
                        self._signal_name,
                        self.entity_description.event_code,
                    ),
                    self.async_event_received,
                )
            )
    async def async_will_remove_from_hass(self) -> None:
        """Disconnect from update signal."""
        for unsub_dispatcher in self._unsub_dispatcher:
            unsub_dispatcher()
| |
#! /usr/bin/env python
"""Londiste setup and sanity checker.
"""
import sys, os, re, skytools
from pgq.cascade.admin import CascadeAdmin
from londiste.exec_attrs import ExecAttrs
from londiste.util import find_copy_source
import londiste.handler
__all__ = ['LondisteSetup']
class LondisteSetup(CascadeAdmin):
    """Londiste-specific admin commands."""
    # Config section key that names the local node database.
    initial_db_name = 'node_db'
    # Cached connect string of the current copy/provider source.
    provider_location = None
    # These read-only commands may run without taking the pidfile lock.
    commands_without_pidfile = CascadeAdmin.commands_without_pidfile + [
        'tables', 'seqs', 'missing', 'show-handlers']
    def install_code(self, db):
        # Schema objects installed/upgraded on the node before base-class install.
        self.extra_objs = [
            skytools.DBSchema("londiste", sql_file = 'londiste.sql'),
            skytools.DBFunction("londiste.global_add_table", 2, sql_file = 'londiste.upgrade_2.1_to_3.1.sql'),
        ]
        CascadeAdmin.install_code(self, db)
    def __init__(self, args):
        """Londiste setup init."""
        CascadeAdmin.__init__(self, 'londiste3', 'db', args, worker_setup = True)
        # compat
        self.queue_name = self.cf.get('pgq_queue_name', '')
        # real
        if not self.queue_name:
            self.queue_name = self.cf.get('queue_name')
        # Replication set name equals the queue name.
        self.set_name = self.queue_name
        # Seconds; converted to a statement_timeout in set_lock_timeout().
        self.lock_timeout = self.cf.getfloat('lock_timeout', 10)
        londiste.handler.load_handler_modules(self.cf)
    def init_optparse(self, parser=None):
        """Add londiste switches to CascadeAdmin ones."""
        p = CascadeAdmin.init_optparse(self, parser)
        p.add_option("--expect-sync", action="store_true", dest="expect_sync",
                    help = "no copy needed", default=False)
        p.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
                    help = "do not delete old data", default=False)
        p.add_option("--find-copy-node", action="store_true", dest="find_copy_node",
                    help = "add: find table source for copy by walking upwards")
        p.add_option("--copy-node", metavar = "NODE", dest="copy_node",
                    help = "add: use NODE as source for initial copy")
        p.add_option("--force", action="store_true",
                    help="force", default=False)
        p.add_option("--all", action="store_true",
                    help="include all tables", default=False)
        # NOTE(review): the trailing comma below turns this statement into a
        # discarded 1-tuple.  Harmless at runtime, but almost certainly a typo.
        p.add_option("--wait-sync", action="store_true",
                    help = "add: wait until all tables are in sync"),
        p.add_option("--create", action="store_true",
                    help="create, minimal", default=False)
        p.add_option("--create-full", action="store_true",
                    help="create, full")
        p.add_option("--trigger-flags",
                    help="set trigger flags (BAIUDLQ)")
        p.add_option("--trigger-arg", action="append",
                    help="custom trigger arg")
        p.add_option("--no-triggers", action="store_true",
                    help="no triggers on table")
        p.add_option("--handler", action="store",
                    help="add: custom handler for table")
        p.add_option("--handler-arg", action="append",
                    help="add: argument to custom handler")
        p.add_option("--merge-all", action="store_true",
                    help="merge tables from all source queues", default=False)
        p.add_option("--no-merge", action="store_true",
                    help="do not merge tables from source queues", default=False)
        p.add_option("--max-parallel-copy", metavar = "NUM", type = "int",
                    help="max number of parallel copy processes")
        p.add_option("--dest-table", metavar = "NAME",
                    help="add: name for actual table")
        p.add_option("--skip-non-existing", action="store_true",
                    help="add: skip object that does not exist")
        return p
    def extra_init(self, node_type, node_db, provider_db):
        """Callback from CascadeAdmin init.

        Copies the provider's global table and sequence registrations onto
        the new node so both sides agree on set membership.
        """
        if not provider_db:
            return
        pcurs = provider_db.cursor()
        ncurs = node_db.cursor()
        # sync tables
        q = "select table_name from londiste.get_table_list(%s)"
        pcurs.execute(q, [self.set_name])
        for row in pcurs.fetchall():
            tbl = row['table_name']
            q = "select * from londiste.global_add_table(%s, %s)"
            ncurs.execute(q, [self.set_name, tbl])
        # sync seqs
        q = "select seq_name, last_value from londiste.get_seq_list(%s)"
        pcurs.execute(q, [self.set_name])
        for row in pcurs.fetchall():
            seq = row['seq_name']
            val = row['last_value']
            q = "select * from londiste.global_update_seq(%s, %s, %s)"
            ncurs.execute(q, [self.set_name, seq, val])
        # done
        node_db.commit()
        provider_db.commit()
    def is_root(self):
        # True when the local node is the cascade root (origin of data).
        return self.queue_info.local_node.type == 'root'
    def set_lock_timeout(self, curs):
        # Convert the configured lock_timeout (seconds) into a transaction-local
        # statement_timeout so DDL/registration cannot block indefinitely.
        ms = int(1000 * self.lock_timeout)
        if ms > 0:
            q = "SET LOCAL statement_timeout = %d" % ms
            self.log.debug(q)
            curs.execute(q)
    def cmd_add_table(self, *args):
        """Attach table(s) to local node."""
        self.load_local_info()
        # On non-root nodes the provider's table list is needed for sanity checks.
        src_db = self.get_provider_db()
        if not self.is_root():
            src_curs = src_db.cursor()
            src_tbls = self.fetch_set_tables(src_curs)
            src_db.commit()
        dst_db = self.get_database('db')
        dst_curs = dst_db.cursor()
        dst_tbls = self.fetch_set_tables(dst_curs)
        if self.is_root():
            src_tbls = dst_tbls
        else:
            # Bring local global registrations in line with the provider's.
            self.sync_table_list(dst_curs, src_tbls, dst_tbls)
        dst_db.commit()
        needs_tbl = self.handler_needs_table()
        args = self.expand_arg_list(dst_db, 'r', False, args, needs_tbl)
        # pick proper create flags
        if self.options.create_full:
            create_flags = skytools.T_ALL
        elif self.options.create:
            create_flags = skytools.T_TABLE | skytools.T_PKEY
        else:
            create_flags = 0
        # search for usable copy node if requested & needed
        if (self.options.find_copy_node and create_flags != 0
                and needs_tbl and not self.is_root()):
            src_name, src_loc, _ = find_copy_source(self, self.queue_name, args, None, self.provider_location)
            self.options.copy_node = src_name
            # Reconnect provider handle so it points at the found copy node.
            self.close_database('provider_db')
            src_db = self.get_provider_db()
            src_curs = src_db.cursor()
            src_tbls = self.fetch_set_tables(src_curs)
            src_db.commit()
        # dont check for exist/not here (root handling)
        if not self.is_root() and not self.options.expect_sync and not self.options.find_copy_node:
            problems = False
            for tbl in args:
                tbl = skytools.fq_name(tbl)
                if (tbl in src_tbls) and not src_tbls[tbl]['local']:
                    if self.options.skip_non_existing:
                        self.log.warning("Table %s does not exist on provider", tbl)
                    else:
                        self.log.error("Table %s does not exist on provider, need to switch to different provider", tbl)
                        problems = True
            if problems:
                self.log.error("Problems, canceling operation")
                sys.exit(1)
        # sanity check
        if self.options.dest_table and len(args) > 1:
            self.log.error("--dest-table can be given only for single table")
            sys.exit(1)
        # seems ok
        for tbl in args:
            self.add_table(src_db, dst_db, tbl, create_flags, src_tbls)
        # wait
        if self.options.wait_sync:
            self.wait_for_sync(dst_db)
    def add_table(self, src_db, dst_db, tbl, create_flags, src_tbls):
        # Register one table locally, optionally creating its structure from
        # the provider's copy first.
        # use full names
        tbl = skytools.fq_name(tbl)
        dest_table = self.options.dest_table or tbl
        dest_table = skytools.fq_name(dest_table)
        src_curs = src_db.cursor()
        dst_curs = dst_db.cursor()
        tbl_exists = skytools.exists_table(dst_curs, dest_table)
        dst_db.commit()
        # Bound the time structure creation may wait on locks.
        self.set_lock_timeout(dst_curs)
        if dest_table == tbl:
            desc = tbl
        else:
            desc = "%s(%s)" % (tbl, dest_table)
        if create_flags:
            if tbl_exists:
                self.log.info('Table %s already exist, not touching', desc)
            else:
                src_dest_table = src_tbls[tbl]['dest_table']
                if not skytools.exists_table(src_curs, src_dest_table):
                    # table not present on provider - nowhere to get the DDL from
                    self.log.warning('Table %s missing on provider, cannot create, skipping', desc)
                    return
                schema = skytools.fq_name_parts(dest_table)[0]
                if not skytools.exists_schema(dst_curs, schema):
                    q = "create schema %s" % skytools.quote_ident(schema)
                    dst_curs.execute(q)
                s = skytools.TableStruct(src_curs, src_dest_table)
                src_db.commit()
                # create, using rename logic only when necessary
                newname = None
                if src_dest_table != dest_table:
                    newname = dest_table
                s.create(dst_curs, create_flags, log = self.log, new_table_name = newname)
        elif not tbl_exists and self.options.skip_non_existing:
            self.log.warning('Table %s does not exist on local node, skipping', desc)
            return
        tgargs = self.build_tgargs()
        attrs = {}
        if self.options.handler:
            attrs['handler'] = self.build_handler(tbl, tgargs, self.options.dest_table)
        if self.options.find_copy_node:
            # '?' means "resolve copy source later, at copy time".
            attrs['copy_node'] = '?'
        elif self.options.copy_node:
            attrs['copy_node'] = self.options.copy_node
        if not self.options.expect_sync:
            if self.options.skip_truncate:
                attrs['skip_truncate'] = 1
        if self.options.max_parallel_copy:
            attrs['max_parallel_copy'] = self.options.max_parallel_copy
        # actual table registration
        args = [self.set_name, tbl, tgargs, None, None]
        if attrs:
            args[3] = skytools.db_urlencode(attrs)
        if dest_table != tbl:
            args[4] = dest_table
        q = "select * from londiste.local_add_table(%s, %s, %s, %s, %s)"
        self.exec_cmd(dst_curs, q, args)
        dst_db.commit()
    def build_tgargs(self):
        """Build trigger args"""
        tgargs = []
        if self.options.trigger_arg:
            tgargs = self.options.trigger_arg
        tgflags = self.options.trigger_flags
        if tgflags:
            tgargs.append('tgflags='+tgflags)
        if self.options.no_triggers:
            tgargs.append('no_triggers')
        if self.options.merge_all:
            tgargs.append('merge_all')
        if self.options.no_merge:
            tgargs.append('no_merge')
        if self.options.expect_sync:
            tgargs.append('expect_sync')
        return tgargs
    def build_handler(self, tbl, tgargs, dest_table=None):
        """Build handler and return handler string"""
        hstr = londiste.handler.create_handler_string(
            self.options.handler, self.options.handler_arg)
        p = londiste.handler.build_handler(tbl, hstr, dest_table)
        # Handler may contribute extra trigger args (mutates tgargs in place).
        p.add(tgargs)
        return hstr
    def handler_needs_table(self):
        # Some handlers (e.g. queue-only ones) replicate without a real table.
        if self.options.handler:
            hstr = londiste.handler.create_handler_string(
                self.options.handler, self.options.handler_arg)
            p = londiste.handler.build_handler('unused.string', hstr, None)
            return p.needs_table()
        return True
    def sync_table_list(self, dst_curs, src_tbls, dst_tbls):
        # Make subscriber's global table registrations match the provider's:
        # add missing entries, drop ones that no longer exist upstream.
        for tbl in src_tbls.keys():
            q = "select * from londiste.global_add_table(%s, %s)"
            if tbl not in dst_tbls:
                self.log.info("Table %s info missing from subscriber, adding", tbl)
                self.exec_cmd(dst_curs, q, [self.set_name, tbl])
                dst_tbls[tbl] = {'local': False, 'dest_table': tbl}
        for tbl in dst_tbls.keys():
            q = "select * from londiste.global_remove_table(%s, %s)"
            if tbl not in src_tbls:
                self.log.info("Table %s gone but exists on subscriber, removing", tbl)
                self.exec_cmd(dst_curs, q, [self.set_name, tbl])
                del dst_tbls[tbl]
    def fetch_set_tables(self, curs):
        # Return {table_name: row} for all tables registered in the set.
        q = "select table_name, local, "\
            " coalesce(dest_table, table_name) as dest_table "\
            " from londiste.get_table_list(%s)"
        curs.execute(q, [self.set_name])
        res = {}
        for row in curs.fetchall():
            res[row[0]] = row
        return res
    def cmd_remove_table(self, *args):
        """Detach table(s) from local node."""
        db = self.get_database('db')
        args = self.expand_arg_list(db, 'r', True, args)
        q = "select * from londiste.local_remove_table(%s, %s)"
        self.exec_cmd_many(db, q, [self.set_name], args)
    def cmd_change_handler(self, tbl):
        """Change handler (table_attrs) of the replicated table."""
        self.load_local_info()
        tbl = skytools.fq_name(tbl)
        db = self.get_database('db')
        curs = db.cursor()
        q = "select table_attrs, dest_table "\
            " from londiste.get_table_list(%s) "\
            " where table_name = %s and local"
        curs.execute(q, [self.set_name, tbl])
        if curs.rowcount == 0:
            self.log.error("Table %s not found on this node", tbl)
            sys.exit(1)
        attrs, dest_table = curs.fetchone()
        attrs = skytools.db_urldecode(attrs or '')
        old_handler = attrs.get('handler')
        tgargs = self.build_tgargs()
        if self.options.handler:
            new_handler = self.build_handler(tbl, tgargs, dest_table)
        else:
            # No --handler given: request removal of the current handler.
            new_handler = None
        if old_handler == new_handler:
            self.log.info("Handler is already set to desired value, nothing done")
            sys.exit(0)
        if new_handler:
            attrs['handler'] = new_handler
        elif 'handler' in attrs:
            del attrs['handler']
        args = [self.set_name, tbl, tgargs, None]
        if attrs:
            args[3] = skytools.db_urlencode(attrs)
        q = "select * from londiste.local_change_handler(%s, %s, %s, %s)"
        self.exec_cmd(curs, q, args)
        db.commit()
    def cmd_add_seq(self, *args):
        """Attach seqs(s) to local node."""
        dst_db = self.get_database('db')
        dst_curs = dst_db.cursor()
        src_db = self.get_provider_db()
        src_curs = src_db.cursor()
        src_seqs = self.fetch_seqs(src_curs)
        dst_seqs = self.fetch_seqs(dst_curs)
        src_db.commit()
        self.sync_seq_list(dst_curs, src_seqs, dst_seqs)
        dst_db.commit()
        args = self.expand_arg_list(dst_db, 'S', False, args)
        # pick proper create flags
        if self.options.create_full:
            create_flags = skytools.T_SEQUENCE
        elif self.options.create:
            create_flags = skytools.T_SEQUENCE
        else:
            create_flags = 0
        # seems ok
        for seq in args:
            seq = skytools.fq_name(seq)
            self.add_seq(src_db, dst_db, seq, create_flags)
        dst_db.commit()
    def add_seq(self, src_db, dst_db, seq, create_flags):
        # Register one sequence locally, optionally creating it from the
        # provider's definition first.
        src_curs = src_db.cursor()
        dst_curs = dst_db.cursor()
        seq_exists = skytools.exists_sequence(dst_curs, seq)
        if create_flags:
            if seq_exists:
                self.log.info('Sequence %s already exist, not creating', seq)
            else:
                if not skytools.exists_sequence(src_curs, seq):
                    # sequence not present on provider - nowhere to get the DDL from
                    self.log.warning('Sequence "%s" missing on provider, skipping', seq)
                    return
                s = skytools.SeqStruct(src_curs, seq)
                src_db.commit()
                s.create(dst_curs, create_flags, log = self.log)
        elif not seq_exists:
            if self.options.skip_non_existing:
                self.log.warning('Sequence "%s" missing on local node, skipping', seq)
                return
            else:
                raise skytools.UsageError("Sequence %r missing on local node", seq)
        q = "select * from londiste.local_add_seq(%s, %s)"
        self.exec_cmd(dst_curs, q, [self.set_name, seq])
    def fetch_seqs(self, curs):
        # Return {seq_name: row} for all sequences registered in the set.
        q = "select seq_name, last_value, local from londiste.get_seq_list(%s)"
        curs.execute(q, [self.set_name])
        res = {}
        for row in curs.fetchall():
            res[row[0]] = row
        return res
    def sync_seq_list(self, dst_curs, src_seqs, dst_seqs):
        # Mirror of sync_table_list() for sequences.
        for seq in src_seqs.keys():
            q = "select * from londiste.global_update_seq(%s, %s, %s)"
            if seq not in dst_seqs:
                self.log.info("Sequence %s info missing from subscriber, adding", seq)
                self.exec_cmd(dst_curs, q, [self.set_name, seq, src_seqs[seq]['last_value']])
                tmp = src_seqs[seq].copy()
                tmp['local'] = False
                dst_seqs[seq] = tmp
        for seq in dst_seqs.keys():
            q = "select * from londiste.global_remove_seq(%s, %s)"
            if seq not in src_seqs:
                self.log.info("Sequence %s gone but exists on subscriber, removing", seq)
                self.exec_cmd(dst_curs, q, [self.set_name, seq])
                del dst_seqs[seq]
    def cmd_remove_seq(self, *args):
        """Detach seqs(s) from local node."""
        q = "select * from londiste.local_remove_seq(%s, %s)"
        db = self.get_database('db')
        args = self.expand_arg_list(db, 'S', True, args)
        self.exec_cmd_many(db, q, [self.set_name], args)
    def cmd_resync(self, *args):
        """Reload data from provider node."""
        db = self.get_database('db')
        args = self.expand_arg_list(db, 'r', True, args)
        if not self.options.find_copy_node:
            # Verify that every requested table is actually live on the provider.
            self.load_local_info()
            src_db = self.get_provider_db()
            src_curs = src_db.cursor()
            src_tbls = self.fetch_set_tables(src_curs)
            src_db.commit()
            problems = 0
            for tbl in args:
                tbl = skytools.fq_name(tbl)
                if tbl not in src_tbls or not src_tbls[tbl]['local']:
                    self.log.error("Table %s does not exist on provider, need to switch to different provider", tbl)
                    problems += 1
            if problems > 0:
                self.log.error("Problems, cancelling operation")
                sys.exit(1)
        if self.options.find_copy_node or self.options.copy_node:
            # Record the requested copy source in each table's attrs.
            q = "select table_name, table_attrs from londiste.get_table_list(%s) where local"
            cur = db.cursor()
            cur.execute(q, [self.set_name])
            for row in cur.fetchall():
                if row['table_name'] not in args:
                    continue
                attrs = skytools.db_urldecode (row['table_attrs'] or '')
                if self.options.find_copy_node:
                    attrs['copy_node'] = '?'
                elif self.options.copy_node:
                    attrs['copy_node'] = self.options.copy_node
                attrs = skytools.db_urlencode (attrs)
                q = "select * from londiste.local_set_table_attrs (%s, %s, %s)"
                self.exec_cmd(db, q, [self.set_name, row['table_name'], attrs])
        # Resetting merge_state to null triggers a fresh copy by the worker.
        q = "select * from londiste.local_set_table_state(%s, %s, null, null)"
        self.exec_cmd_many(db, q, [self.set_name], args)
    def cmd_tables(self):
        """Show attached tables."""
        q = """select table_name, merge_state, table_attrs
        from londiste.get_table_list(%s) where local
        order by table_name"""
        db = self.get_database('db')
        def show_attr(a):
            # Pretty-print urlencoded attrs; empty string for NULL.
            if a:
                return skytools.db_urldecode(a)
            return ''
        self.display_table(db, "Tables on node", q, [self.set_name],
                           fieldfmt = {'table_attrs': show_attr})
    def cmd_seqs(self):
        """Show attached seqs."""
        q = "select seq_name, local, last_value from londiste.get_seq_list(%s)"
        db = self.get_database('db')
        self.display_table(db, "Sequences on node", q, [self.set_name])
    def cmd_missing(self):
        """Show missing tables on local node."""
        q = "select * from londiste.local_show_missing(%s)"
        db = self.get_database('db')
        self.display_table(db, "Missing objects on node", q, [self.set_name])
    def cmd_check(self):
        """TODO: check if structs match"""
        pass
    def cmd_fkeys(self):
        """TODO: show removed fkeys."""
        pass
    def cmd_triggers(self):
        """TODO: show removed triggers."""
        pass
    def cmd_show_handlers(self, *args):
        """Show help about handlers."""
        londiste.handler.show(args)
    def cmd_execute(self, *files):
        # Run SQL script files through londiste's EXECUTE machinery so they
        # are applied once here and replicated to other nodes via the queue.
        db = self.get_database('db')
        curs = db.cursor()
        tables = self.fetch_set_tables(curs)
        seqs = self.fetch_seqs(curs)
        # generate local maps
        local_tables = {}
        local_seqs = {}
        for tbl in tables.values():
            if tbl['local']:
                local_tables[tbl['table_name']] = tbl['dest_table']
        for seq in seqs.values():
            if seq['local']:
                local_seqs[seq['seq_name']] = seq['seq_name']
        # set replica role for EXECUTE transaction
        if db.server_version >= 80300:
            curs.execute("set local session_replication_role = 'local'")
        for fn in files:
            fname = os.path.basename(fn)
            # NOTE(review): file handle is never closed explicitly; relies on
            # CPython refcounting.  A with-block would be safer.
            sql = open(fn, "r").read()
            attrs = ExecAttrs(sql = sql)
            q = "select * from londiste.execute_start(%s, %s, %s, true, %s)"
            res = self.exec_cmd(db, q, [self.queue_name, fname, sql, attrs.to_urlenc()], commit = False)
            ret = res[0]['ret_code']
            # ret_code > 200 means already executed / not applicable here.
            if ret > 200:
                self.log.warning("Skipping execution of '%s'", fname)
                continue
            if attrs.need_execute(curs, local_tables, local_seqs):
                self.log.info("%s: executing sql", fname)
                xsql = attrs.process_sql(sql, local_tables, local_seqs)
                for stmt in skytools.parse_statements(xsql):
                    curs.execute(stmt)
            else:
                self.log.info("%s: This SQL does not need to run on this node.", fname)
            q = "select * from londiste.execute_finish(%s, %s)"
            self.exec_cmd(db, q, [self.queue_name, fname], commit = False)
        db.commit()
    def get_provider_db(self):
        # Resolve and open the database this node should copy/compare against:
        # an explicit --copy-node if given, otherwise the cascade provider.
        if self.options.copy_node:
            # use custom node for copy
            source_node = self.options.copy_node
            m = self.queue_info.get_member(source_node)
            if not m:
                raise skytools.UsageError("Cannot find node <%s>", source_node)
            if source_node == self.local_node:
                raise skytools.UsageError("Cannot use itself as provider")
            self.provider_location = m.location
        if not self.provider_location:
            db = self.get_database('db')
            q = 'select * from pgq_node.get_node_info(%s)'
            res = self.exec_cmd(db, q, [self.queue_name], quiet = True)
            self.provider_location = res[0]['provider_location']
        return self.get_database('provider_db', connstr = self.provider_location, profile = 'remote')
    def expand_arg_list(self, db, kind, existing, args, needs_tbl=True):
        # Expand user-supplied object names (possibly with glob patterns or
        # --all) into a concrete list, split by already-attached ('existing')
        # vs. missing objects.  kind: 'S' = sequence, 'r' = table.
        curs = db.cursor()
        if kind == 'S':
            q1 = "select seq_name, local from londiste.get_seq_list(%s) where local"
        elif kind == 'r':
            q1 = "select table_name, local from londiste.get_table_list(%s) where local"
        else:
            raise Exception("bug")
        q2 = "select obj_name from londiste.local_show_missing(%%s) where obj_kind = '%s'" % kind
        lst_exists = []
        map_exists = {}
        curs.execute(q1, [self.set_name])
        for row in curs.fetchall():
            lst_exists.append(row[0])
            map_exists[row[0]] = 1
        lst_missing = []
        map_missing = {}
        curs.execute(q2, [self.set_name])
        for row in curs.fetchall():
            lst_missing.append(row[0])
            map_missing[row[0]] = 1
        db.commit()
        if not args and self.options.all:
            if existing:
                return lst_exists
            else:
                return lst_missing
        # Non-table handlers may reference objects that do not exist locally.
        allow_nonexist = not needs_tbl
        if existing:
            res = self.solve_globbing(args, lst_exists, map_exists, map_missing, allow_nonexist)
        else:
            res = self.solve_globbing(args, lst_missing, map_missing, map_exists, allow_nonexist)
        if not res:
            self.log.info("what to do ?")
        return res
    def solve_globbing(self, args, full_list, full_map, reverse_map, allow_nonexist):
        # Match each arg against full_list, supporting shell-style '*'/'?' globs.
        # reverse_map holds objects in the opposite state (already processed).
        def glob2regex(s):
            s = s.replace('.', '[.]').replace('?', '.').replace('*', '.*')
            return '^%s$' % s
        res_map = {}
        res_list = []
        err = 0
        for a in args:
            if a.find('*') >= 0 or a.find('?') >= 0:
                # Glob pattern: default to 'public' schema if none given.
                if a.find('.') < 0:
                    a = 'public.' + a
                rc = re.compile(glob2regex(a))
                for x in full_list:
                    if rc.match(x):
                        if not x in res_map:
                            res_map[x] = 1
                            res_list.append(x)
            else:
                a = skytools.fq_name(a)
                if a in res_map:
                    continue
                elif a in full_map:
                    res_list.append(a)
                    res_map[a] = 1
                elif a in reverse_map:
                    self.log.info("%s already processed", a)
                elif allow_nonexist:
                    res_list.append(a)
                    res_map[a] = 1
                elif self.options.force:
                    self.log.warning("%s not available, but --force is used", a)
                    res_list.append(a)
                    res_map[a] = 1
                else:
                    self.log.warning("%s not available", a)
                    err = 1
        if err:
            raise skytools.UsageError("Cannot proceed")
        return res_list
    def load_extra_status(self, curs, node):
        """Fetch extra info."""
        # must be thread-safe (!)
        CascadeAdmin.load_extra_status(self, curs, node)
        curs.execute("select * from londiste.get_table_list(%s)", [self.queue_name])
        n_ok = n_half = n_ign = 0
        for tbl in curs.fetchall():
            if not tbl['local']:
                n_ign += 1
            elif tbl['merge_state'] == 'ok':
                n_ok += 1
            else:
                n_half += 1
        # Format: in-sync / copying / not-local.
        node.add_info_line('Tables: %d/%d/%d' % (n_ok, n_half, n_ign))
    def cmd_wait_sync(self):
        self.load_local_info()
        dst_db = self.get_database('db')
        self.wait_for_sync(dst_db)
    def wait_for_sync(self, dst_db):
        # Poll the local table list until every local table reaches
        # merge_state 'ok', logging progress as tables finish.
        self.log.info("Waiting until all tables are in sync")
        q = "select table_name, merge_state, local"\
            " from londiste.get_table_list(%s) where local"
        dst_curs = dst_db.cursor()
        partial = {}
        startup_info = 0
        while 1:
            dst_curs.execute(q, [self.queue_name])
            rows = dst_curs.fetchall()
            dst_db.commit()
            total_count = 0
            cur_count = 0
            done_list = []
            for row in rows:
                if not row['local']:
                    continue
                total_count += 1
                tbl = row['table_name']
                if row['merge_state'] != 'ok':
                    # Still copying/catching up.
                    partial[tbl] = 0
                    cur_count += 1
                elif tbl in partial:
                    if partial[tbl] == 0:
                        # Transitioned to 'ok' since last poll: report once.
                        partial[tbl] = 1
                        done_list.append(tbl)
            done_count = total_count - cur_count
            if not startup_info:
                self.log.info("%d/%d table(s) to copy", cur_count, total_count)
                startup_info = 1
            for done in done_list:
                self.log.info("%s: finished (%d/%d)", done, done_count, total_count)
            if cur_count == 0:
                break
            self.sleep(2)
        self.log.info("All done")
    def resurrect_dump_event(self, ev, stats, batch_info):
        """Collect per-table stats."""
        super(LondisteSetup, self).resurrect_dump_event(ev, stats, batch_info)
        ROLLBACK = 'can rollback'
        NO_ROLLBACK = 'cannot rollback'
        if ev.ev_type == 'TRUNCATE':
            if 'truncated_tables' not in stats:
                stats['truncated_tables'] = []
            tlist = stats['truncated_tables']
            tbl = ev.ev_extra1
            if tbl not in tlist:
                tlist.append(tbl)
        elif ev.ev_type[:2] in ('I:', 'U:', 'D:', 'I', 'U', 'D'):
            # DML event: count inserts/updates/deletes per table and track
            # whether backup data (ev_extra3) allows rolling the change back.
            op = ev.ev_type[0]
            tbl = ev.ev_extra1
            bak = ev.ev_extra3
            tblkey = 'table: %s' % tbl
            if tblkey not in stats:
                stats[tblkey] = [0,0,0,ROLLBACK]
            tinfo = stats[tblkey]
            if op == 'I':
                tinfo[0] += 1
            elif op == 'U':
                tinfo[1] += 1
                if not bak:
                    tinfo[3] = NO_ROLLBACK
            elif op == 'D':
                tinfo[2] += 1
                if not bak and ev.ev_type == 'D':
                    tinfo[3] = NO_ROLLBACK
| |
"""
PyBusyInfo constructs a busy info window and displays a message in it.
Description
===========
PyBusyInfo constructs a busy info window and displays a message in it.
This class makes it easy to tell your user that the program is temporarily busy.
Just create a PyBusyInfo object, and within the current scope, a message window
will be shown.
For example::
busy = PyBusyInfo("Please wait, working...")
for i in xrange(10000):
DoACalculation()
del busy
It works by creating a window in the constructor, and deleting it in the destructor.
You may also want to call `wx.Yield()` to refresh the window periodically (in case
it had been obscured by other windows, for example).
Supported Platforms
===================
PyBusyInfo has been tested on the following platforms:
* Windows (Windows XP).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
PyBusyInfo is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 03 Dec 2009, 09.00 GMT
Version 0.1
"""
import wx
_ = wx.GetTranslation
class PyInfoFrame(wx.Frame):
    """ Base class for L{PyBusyInfo}. """
    def __init__(self, parent, message, title, icon):
        """
        Default class constructor.

        :param `parent`: the frame parent;
        :param `message`: the message to display in the L{PyBusyInfo};
        :param `title`: the main L{PyBusyInfo} title;
        :param `icon`: an icon to draw as the frame icon, an instance of `wx.Bitmap`.
        """
        # Borderless, shaped, always-on-top tool window: no caption, no taskbar entry.
        wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition,
                          wx.DefaultSize, wx.NO_BORDER|wx.FRAME_TOOL_WINDOW|wx.FRAME_SHAPED|wx.STAY_ON_TOP)
        panel = wx.Panel(self)
        panel.SetCursor(wx.HOURGLASS_CURSOR)
        self._message = message
        self._title = title
        self._icon = icon
        # Size the frame to fit the message, with a 340x40 minimum plus padding.
        dc = wx.ClientDC(self)
        textWidth, textHeight, dummy = dc.GetMultiLineTextExtent(self._message)
        sizeText = wx.Size(textWidth, textHeight)
        self.SetClientSize((max(sizeText.x, 340) + 60, max(sizeText.y, 40) + 60))
        # need to size the panel correctly first so that text.Centre() works
        panel.SetSize(self.GetClientSize())
        # Bind the events to draw ourselves
        panel.Bind(wx.EVT_PAINT, self.OnPaint)
        panel.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
        self.Centre(wx.BOTH)
        # Create a non-rectangular region to set the frame shape
        # (rounded rectangle rendered into an off-screen bitmap, then converted
        # to a region by treating black as the "inside" colour).
        size = self.GetSize()
        bmp = wx.EmptyBitmap(size.x, size.y)
        dc = wx.BufferedDC(None, bmp)
        dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0), wx.SOLID))
        dc.Clear()
        dc.SetPen(wx.Pen(wx.Colour(0, 0, 0), 1))
        dc.DrawRoundedRectangle(0, 0, size.x, size.y, 12)
        r = wx.RegionFromBitmapColour(bmp, wx.Colour(0, 0, 0))
        # Store the non-rectangular region
        self.reg = r
        # On GTK the window shape can only be set once the native window exists.
        if wx.Platform == "__WXGTK__":
            self.Bind(wx.EVT_WINDOW_CREATE, self.SetBusyShape)
        else:
            self.SetBusyShape()
        # Add a custom bitmap at the top (if any)
    def SetBusyShape(self, event=None):
        """
        Sets L{PyInfoFrame} shape using the region created from the bitmap.

        :param `event`: a `wx.WindowCreateEvent` event (GTK only, as GTK supports setting
         the window shape only during window creation).
        """
        self.SetShape(self.reg)
        if event:
            # GTK only
            event.Skip()
    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{PyInfoFrame}.

        :param `event`: a `wx.PaintEvent` to be processed.
        """
        panel = event.GetEventObject()
        dc = wx.BufferedPaintDC(panel)
        dc.Clear()
        # Fill the background with a gradient shading
        startColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
        endColour = wx.WHITE
        rect = panel.GetRect()
        dc.GradientFillLinear(rect, startColour, endColour, wx.SOUTH)
        # Draw the label
        font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        dc.SetFont(font)
        # Draw the message
        # NOTE(review): ALIGN_CENTER is OR'ed with itself below - redundant,
        # probably meant as horizontal|vertical (which ALIGN_CENTER already is).
        rect2 = wx.Rect(*rect)
        rect2.height += 20
        dc.DrawLabel(self._message, rect2, alignment=wx.ALIGN_CENTER|wx.ALIGN_CENTER)
        # Draw the top title
        font.SetWeight(wx.BOLD)
        dc.SetFont(font)
        dc.SetPen(wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT)))
        dc.SetTextForeground(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT))
        if self._icon.IsOk():
            # Icon present: draw it top-left and centre the title beside it.
            iconWidth, iconHeight = self._icon.GetWidth(), self._icon.GetHeight()
            dummy, textHeight = dc.GetTextExtent(self._title)
            textXPos, textYPos = iconWidth + 10, (iconHeight-textHeight)/2
            dc.DrawBitmap(self._icon, 5, 5, True)
        else:
            textXPos, textYPos = 5, 0
        dc.DrawText(self._title, textXPos, textYPos+5)
        # Separator line under the title area, then a rounded outline border.
        dc.DrawLine(5, 25, rect.width-5, 25)
        size = self.GetSize()
        dc.SetPen(wx.Pen(startColour, 1))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawRoundedRectangle(0, 0, size.x, size.y-1, 12)
    def OnErase(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{PyInfoFrame}.

        :param `event`: a `wx.EraseEvent` event to be processed.

        :note: This method is intentionally empty to reduce flicker.
        """
        # This is empty on purpose, to avoid flickering
        pass
# -------------------------------------------------------------------- #
# The actual PyBusyInfo implementation
# -------------------------------------------------------------------- #
class PyBusyInfo(object):
    """
    Constructs a busy info window as child of parent and displays a message in it.
    """
    def __init__(self, message, parent=None, title=_("Busy"), icon=wx.NullBitmap):
        """
        Default class constructor.

        :param `parent`: the L{PyBusyInfo} parent;
        :param `message`: the message to display in the L{PyBusyInfo};
        :param `title`: the main L{PyBusyInfo} title;
        :param `icon`: an icon to draw as the frame icon, an instance of `wx.Bitmap`.

        :note: If `parent` is not ``None`` you must ensure that it is not closed
         while the busy info is shown.
        """
        self._infoFrame = PyInfoFrame(parent, message, title, icon)
        if parent and parent.HasFlag(wx.STAY_ON_TOP):
            # we must have this flag to be in front of our parent if it has it
            self._infoFrame.SetWindowStyleFlag(wx.STAY_ON_TOP)
        # Show immediately and force a paint so the message is visible even if
        # the caller never returns to the event loop.
        self._infoFrame.Show(True)
        self._infoFrame.Refresh()
        self._infoFrame.Update()
    def __del__(self):
        """ Overloaded method, for compatibility with wxWidgets. """
        # The window lives exactly as long as this object: destroying the
        # PyBusyInfo (e.g. `del busy`) tears down the frame.
        self._infoFrame.Show(False)
        self._infoFrame.Destroy()
| |
import numpy
import theano
import theano.tensor as tensor
import cPickle as pkl
from copy import copy
import utils
import time
import warnings
from network import Network
from Optimizers import RMSPropOptimizer
from utils import HomogeneousData
from utils import TheanoFunctionWrapper as TFW
from sklearn.cross_validation import KFold
class Trainer:
    def __init__(self,
                 train=None,
                 validate=None,
                 test=None,
                 worddict=None,
                 use_noise=None,
                 nn_params=None,
                 f_init=None,
                 f_next=None,
                 f_grad=None,
                 f_update=None,
                 f_probs=None,
                 kf_valid=None,
                 kf_test=None,
                 save_network=None,
                 options=None):
        # Dataset splits and vocabulary (name-mangled: private to Trainer).
        self.__train = train
        self.__validate = validate
        self.__test = test
        self.__worddict = worddict
        # Theano shared variable toggling dropout noise on/off.
        self.use_noise=use_noise
        # index 0 and 1 always code for the end of sentence and unknown token
        # wworddict arranges words/vocab in the order of frequency. highest frequent words come first.
        # Build the inverse mapping index -> word for decoding samples.
        self.__word_idict = dict()
        for kk, vv in worddict.iteritems():
            self.__word_idict[vv] = kk
        self.__word_idict[0] = '<eos>'
        self.__word_idict[1] = 'UNK'
        self.options=options
        self.nn_params=nn_params
        # we need to pass in reference to model to invoke save_network
        self.save_network=save_network
        # theano functions
        # kf_valid/kf_test: minibatch index folds for validation/test scoring.
        self.kf_valid = kf_valid
        self.kf_test = kf_test
        self.f_init=f_init
        self.f_next = f_next
        self.f_update = f_update
        self.f_grad = f_grad
        self.f_probs= f_probs
        # history_errs is a bare-bones training log that holds the validation and test error
        self.history_errs=[]
        # Best parameter snapshot (by validation error) and early-stop counter.
        self.best_p = None
        self.bad_counter = 0
        # [See note in section 4.3 of paper]
        self.caption_batch_iterator = HomogeneousData(self.__train,
                                                      batch_size=options['batch_size'],
                                                      maxlen=options['maxlen'])
        # Global update counter; drives sampleFreq/validFreq scheduling.
        self.update_index=0
        self.epoch_stop = False
def print_sample(self, x, mask, ctx, captions):
if numpy.mod(self.update_index, self.options['sampleFreq']) == 0:
# turn off dropout first
self.use_noise.set_value(0.)
x_s = x
mask_s = mask
ctx_s = ctx
# generate and decode the a subset of the current training batch
for jj in xrange(numpy.minimum(10, len(captions))):
sample, score = Model.generate_sample(self.f_init, self.f_next, ctx_s[jj], self.options, k=5, maxlen=30)
# Decode the sample from encoding back to words
print 'Truth ', jj, ': ',
for vv in x_s[:, jj]:
if vv == 0:
break
if vv in self.__word_idict:
print self.__word_idict[vv],
else:
print 'UNK',
print
for kk, ss in enumerate([sample[0]]):
print 'Sample (', kk, ') ', jj, ': ',
for vv in ss:
if vv == 0:
break
if vv in self.__word_idict:
print self.__word_idict[vv],
else:
print 'UNK',
print
# Return false to break the loop
def log_validation_loss(self, epoch_index):
options = self.options
if numpy.mod(self.update_index, self.options['validFreq']) == 0:
self.use_noise.set_value(0.)
train_err = 0
valid_err = 0
test_err = 0
if self.__validate:
valid_err = -Model.predict_probs(self.f_probs, self.options, self.__worddict,
utils.extract_input, self.__validate, self.kf_valid).mean()
if self.__test:
test_err = -Model.predict_probs(self.f_probs, self.options, self.__worddict,
utils.extract_input, self.__test, self.kf_test).mean()
self.history_errs.append([valid_err, test_err])
# the model with the best validation long likelihood is saved seperately with a different name
if self.update_index == 0 or valid_err <= numpy.array(self.history_errs)[:, 0].min():
self.best_p = utils.get_nn_params(self.nn_params)
print 'Saving model with best validation ll'
self.save_network(name='_bestll',best_p=self.best_p,update_index=self.update_index,history_errs=self.history_errs)
self.bad_counter = 0
# abort training if perplexity has been increasing for too long
if epoch_index > options['patience'] and len(self.history_errs) > options['patience'] and valid_err >= numpy.array(self.history_errs)[
:-options['patience'], 0].min():
self.bad_counter += 1
if self.bad_counter > options['patience']:
print 'Early Stop!'
self.epoch_stop = True
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
def iterate(self, epoch_index, captions):
options = self.options
# preprocess the caption, recording the
# time spent to help detect bottlenecks
x, mask, ctx = utils.extract_input(captions,
self.__train[1],
self.__worddict,
maxlen=options['maxlen'],
n_words=options['n_words'])
if x is None:
print 'Minibatch with zero sample under length ', self.options['maxlen']
return False
# get the cost for the minibatch, and update the weights
ud_start = time.time()
cost = self.f_grad(x, mask, ctx) # cost here is scalar value.
self.f_update(options['lrate'])
ud_duration = time.time() - ud_start # some monitoring for each mini-batch
# Numerical stability check
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
self.epoch_stop = True
return False
if numpy.mod(self.update_index, options['dispFreq']) == 0:
print 'Epoch ', epoch_index, 'Update ', self.update_index, 'Cost ', cost, 'Time taken ', ud_duration
# Checkpoint
self.save_network(best_p=self.best_p,update_index=self.update_index,history_errs=self.history_errs)
# Print a generated sample as a sanity check
self.print_sample(x, mask, ctx, captions)
# Log validation loss + checkpoint the model with the best validation log likelihood
self.log_validation_loss(epoch_index)
return True
def run(self):
options = self.options
for epoch_index in xrange(options['max_epochs']):
n_samples = 0
print 'Epoch ', epoch_index
for captions in self.caption_batch_iterator:
n_samples += len(captions)
self.update_index += 1
# turn on dropout
self.use_noise.set_value(1.)
self.iterate(epoch_index, captions)
if self.epoch_stop:
break
print 'Seen %d samples' % n_samples
if self.epoch_stop:
break
class Model:
    """Attention-based image-captioning model (Show-Attend-and-Tell style).

    Owns the option dictionary, builds the theano computation graph via
    Network, and exposes train()/infer() entry points plus static helpers
    for beam-search sampling and log-probability scoring.
    """
    def __init__(self,
                 train=None,
                 validate=None,
                 test=None,
                 worddict=None,
                 options=None):
        # Defaults below are overridden by any caller-supplied options.
        default_options = {
            "alpha_c":1.0, # doubly stochastic coeff
            "alpha_entropy_c":0.002, # hard attn param
            "attn_type":'deterministic', # [see section 4 from paper]
            "batch_size":64,
            "ctx2out":True, # Feed attention weighted ctx into logit
            "ctx_dim":512, # context vector dimensionality
            "dataset":'flickr8k',
            "decay_c":0.0, # weight decay coeff
            "dictionary":None, # word dictionary
            "dim":1800, # the number of LSTM units
            "dim_word":512, # word vector dimensionality
            "dispFreq":1,
            "lrate":0.01, # used only for SGD
            "lstm_encoder":False, # if True, run bidirectional LSTM on input units
            "max_epochs":5000,
            "maxlen":100, # maximum length of the description
            "n_layers_att":2, # number of layers used to compute the attention weights
            "n_layers_init":2, # number of layers to initialize LSTM at time 0
            "n_layers_lstm":1, # number of lstm layers
            "n_layers_out":1, # number of layers used to compute logit
            "n_words":10000, # vocab size
            "optimizer":'rmsprop',
            "patience":10,
            "prev2out":True, # Feed previous word into logit
            "RL_sumCost":True, # hard attn param
            "sampleFreq":250, # generate some samples after every sampleFreq updates
            "save_per_epoch":False, # this saves down the model every epoch
            "saveFreq":1000, # save the parameters after every saveFreq updates
            "saveto":'caption_model', # relative path of saved model file
            "semi_sampling_p":0.5, # hard attn param
            "temperature":1.0, # hard attn param
            "use_dropout":True, # setting this true turns on dropout at various points
            "use_dropout_lstm":False, # dropout on lstm gates
            "valid_batch_size":64,
            "validFreq":2000
        }
        self.__options = default_options
        self.__options.update(options)
        self.validate_options()
        self.__train = train
        self.__validate = validate
        self.__test = test
        self.__worddict = worddict
        # indices 0 and 1 are reserved for end-of-sentence / unknown token
        self.__worddict[0] = '<eos>'
        self.__worddict[1] = 'UNK'
    @staticmethod
    def generate_sample(f_init, f_next, ctx0, options, k=1, maxlen=30):
        """Beam search over the decoder.

        Starting from context ctx0, repeatedly scores all vocabulary
        continuations of the live hypotheses and keeps the k best by
        cumulative negative log-likelihood. Returns (samples, scores);
        each sample is a list of word indices, ending in 0 (<eos>) when
        it terminated naturally before maxlen.
        """
        sample = []
        sample_score = []
        dead_k = 0
        # start with a single empty hypothesis of score 0
        prev_samples = [[]] * 1
        prev_scores = numpy.zeros(1).astype('float32')
        # init = f_init(ctx0)
        # ctx0 = init[0]
        # next_state = [init[1]]
        # next_memory = [init[2]]
        init_results = f_init(ctx0)
        ctx0 = init_results['context']
        next_state = [init_results['state']]
        next_memory = [init_results['memory']]
        # reminder: if next_w = -1, the switch statement
        # in build_sampler is triggered -> (empty word embeddings)
        next_w = -1 * numpy.ones((1,)).astype('int64')
        for ii in xrange(maxlen):
            # our "next" state/memory in our previous step is now our "initial" state and memory
            # next = f_next(*([next_w, ctx0] + [next_state] + [next_memory]))
            # next_p = next[0]
            # next_w = next[1]
            next_results = f_next(*([next_w, ctx0] + [next_state] + [next_memory]))
            next_p = next_results['probs']
            next_w = next_results['sample']
            # extract all the states and memories
            next_state = next_results['state'] #next[2]
            next_memory = next_results['memory'] #next[3]
            # cumulative negative log-likelihood of every (hypothesis, word) pair
            cand_scores = prev_scores[:, None] - numpy.log(next_p)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k - dead_k)]  # (k-dead_k) numpy array of with min nll
            dict_size = next_p.shape[1]
            # integer division (Python 2) recovers the hypothesis index
            # each flat candidate index came from
            trans_indices = ranks_flat / dict_size
            word_indices = ranks_flat % dict_size
            costs = cand_flat[ranks_flat]  # extract costs from top hypothesis
            # a bunch of lists to hold future hypothesis
            curr_samples = []
            curr_scores = []
            curr_states = []
            curr_memories = []
            # get the corresponding hypothesis and append the predicted word
            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                prev_sample = prev_samples[ti] + [wi]
                # word index 0 == <eos>: the hypothesis is complete
                if(prev_sample[-1] == 0):
                    sample.append(prev_sample)
                    sample_score.append(copy(costs[idx]))
                    dead_k += 1  # completed sample!
                else:
                    curr_samples.append(prev_samples[ti] + [wi])
                    curr_scores.append(copy(costs[idx]))
                    curr_states.append(copy(next_state[ti]))
                    curr_memories.append(copy(next_memory[ti]))
            live_k = k - dead_k
            # generated all the k best samples
            if live_k < 1 or dead_k >= k:
                break
            prev_samples = curr_samples
            prev_scores = numpy.array(curr_scores)
            next_w = numpy.array([w[-1] for w in curr_samples])
            next_state = numpy.array(curr_states)
            next_memory = numpy.array(curr_memories)
        # dump every remaining one
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(prev_samples[idx])
                sample_score.append(prev_scores[idx])
        return sample, sample_score
    @staticmethod
    def predict_probs(f_probs, options, worddict, prepare_data, data, iterator):
        """Return per-caption log-probabilities for a dataset split as an
        (n_samples, 1) float32 array; iterator yields (fold, index) batches."""
        # Get log probabilities of captions
        n_samples = len(data[0])
        probs = numpy.zeros((n_samples, 1)).astype('float32')
        for _, index in iterator:
            x, mask, ctx = prepare_data([data[0][t] for t in index], data[1],
                                        worddict, maxlen=None, n_words=options['n_words'])
            pred_probs = f_probs(x, mask, ctx)['pred_probs']
            probs[index] = pred_probs[:, None]
        return probs
    def validate_options(self):
        """Sanity-check the merged option dict; warns on dubious settings
        and raises ValueError for unsupported attention types."""
        # Put friendly reminders here
        if self.__options['dim_word'] > self.__options['dim']:
            warnings.warn('dim_word should only be as large as dim.')
        if self.__options['use_dropout_lstm']:
            warnings.warn('dropout in the lstm seems not to help')
        # Other checks:
        if self.__options['attn_type'] not in ['deterministic']:
            raise ValueError("specified attention type is not correct")
    def build_network(self):
        """Construct the Network and return its training graph
        (use_noise, inputs, alphas, cost)."""
        print 'Building network...'
        self.nn_network = Network(self.__options)
        return self.nn_network.build_training_graph(self.__options)
    def add_l2_regularization(self):
        """Add an L2 weight-decay term (scaled by decay_c) to self.__cost."""
        # add L2 regularization costs
        # NOTE(review): this local is unused; the loop below re-queries params()
        nn_params = self.nn_network.params()
        self.__decay_c = theano.shared(numpy.float32(self.__options['decay_c']), name='decay_c')
        self.__weight_decay = 0.
        for vv in self.nn_network.params():
            self.__weight_decay += (vv ** 2).sum()
        self.__weight_decay *= self.__decay_c
        self.__cost += self.__weight_decay
    def add_doubly_stochastic_regularization(self, alphas):
        """Add the doubly-stochastic attention penalty (scaled by alpha_c),
        encouraging attention weights to sum to 1 over time."""
        # Doubly stochastic regularization
        self.__alpha_c = theano.shared(numpy.float32(self.__options['alpha_c']), name='alpha_c')
        alpha_reg = self.__alpha_c * ((1. - alphas.sum(0)) ** 2).sum(0).mean()
        self.__cost += alpha_reg
    def optimize(self, input=None):
        """Compile and return (f_grad, f_update) via RMSProp on self.__cost."""
        # f_grad computes the cost and updates adaptive learning rate variables
        # f_update updates the weights of the model
        nn_params = self.nn_network.params()
        self.__lr = tensor.scalar(name='lr')
        rmsopt = RMSPropOptimizer(nn_params, self.__lr)
        return rmsopt.minimize(input, self.__cost)
    def save_network(self, name ='_snapshot', best_p=None,update_index=0,history_errs=[]):
        """Checkpoint parameters every saveFreq updates to
        '<saveto><name>.npz' plus a pickled copy of the options.

        NOTE(review): the mutable default for history_errs is safe here
        because the argument is only read, never mutated.
        """
        if numpy.mod(update_index, self.__options['saveFreq']) == 0:
            print 'Saving...',
            if best_p is not None:
                params = copy(best_p)
            else:
                params = utils.get_nn_params(self.nn_network.params())
            numpy.savez(self.__options['saveto'] +name, history_errs=history_errs, **params)
            pkl.dump(self.__options, open('%s.pkl' % self.__options['saveto'], 'wb'))
            print 'Done'
    def train(self):
        """Build the graph, attach regularizers, compile the theano
        functions and hand everything to a Trainer for the epoch loop."""
        options = self.__options
        use_noise, inps, alphas, cost = self.build_network()
        print('Buliding a sample inference')
        f_init, f_next = self.nn_network.infer()
        # we want the cost without any the regularizers
        f_probs = TFW(inps, {'pred_probs':-cost}, profile=False, updates=None)
        self.__cost = cost.mean()
        if self.__options['decay_c'] > 0.:
            self.add_l2_regularization()
        if self.__options['alpha_c'] > 0.:
            self.add_doubly_stochastic_regularization(alphas)
        # f_grad computes the cost and updates adaptive learning rate variables
        # f_update updates the weights of the model
        f_grad, f_update = self.optimize(input=inps)
        print 'Optimization'
        kf_test=None
        kf_valid=None
        if self.__validate:
            kf_valid = KFold(len(self.__validate[0]),
                             n_folds=len(self.__validate[0]) / options['valid_batch_size'],
                             shuffle=False)
        if self.__test:
            kf_test = KFold(len(self.__test[0]),
                            n_folds=len(self.__test[0]) / options['valid_batch_size'],
                            shuffle=False)
        trainer = Trainer(
            train=self.__train,
            validate=self.__validate,
            test=self.__test,
            worddict=self.__worddict,
            nn_params=self.nn_network.params(),
            use_noise=use_noise,
            f_init=f_init,
            f_next=f_next,
            f_grad=f_grad,
            f_update=f_update,
            f_probs=f_probs,
            kf_valid=kf_valid,
            kf_test=kf_test,
            save_network=self.save_network,
            options=options)
        trainer.run()
    def infer(self, path, feat_maps):
        """Load the best-validation weights from `path` and print a
        beam-search caption for each feature map.

        NOTE(review): each feature map is reshaped to (196, 512), i.e. a
        14x14 conv grid of 512-d vectors -- confirm against the extractor.
        """
        self.build_network()
        f_init, f_next = self.nn_network.infer()
        nn_params = self.nn_network.params()
        params = numpy.load(path+'caption_model_bestll.npz')
        utils.set_nn_params(params, nn_params)
        # inverse dictionary (index -> word) for decoding
        word_idict = dict()
        for kk, vv in self.__worddict.iteritems():
            word_idict[vv] = kk
        word_idict[0] = '<eos>'
        word_idict[1] = 'UNK'
        for ctx_s in feat_maps:
            ctx_s = ctx_s.reshape(196, 512)
            sample, score = self.generate_sample(f_init, f_next, ctx_s, self.__options, k=5, maxlen=30)
            for kk, ss in enumerate([sample[0]]):
                print 'Sample (', kk, ') '
                for vv in ss:
                    if vv == 0:
                        break
                    if vv in word_idict:
                        print word_idict[vv],
                    else:
                        print 'UNK',
                print
| |
import re
import csv
import xlrd
import unicodecsv
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
"""
North Carolina elections have a mixture of CSV, tab-delimited text and Excel files for results. These files contain precinct-level data for each of the state's
counties, and includes all contests in that county.
Although some of the CSV files have a `district` column, the district information is contained in the `contest` column and needs to be parsed out. The
Excel files cover separate offices and have sheets for individual contests. CSV files also have totals for one-stop, absentee, provisional and
transfer votes, which appear as "precincts" in the data.
"""
class LoadResults(object):
    """Entry point for data loading.
    Picks the loader class that matches an election id and delegates the
    load to an instance of it.
    """
    def run(self, mapping):
        loader_cls = self._loader_class(mapping['election'])
        loader_cls().run(mapping)
    def _loader_class(self, election_id):
        # Return the loader class for this election id. Order matters:
        # the 2008-11-04 general and the 2008-05-06 primary must match
        # before the generic '2008' substring test.
        if '2014' in election_id:
            return NCTsv2014Loader
        if any(tag in election_id for tag in ('nc-2008-11-04-general', '2010', '2012')):
            return NCCsvLoader
        if election_id == 'nc-2008-05-06-primary':
            return NCTsv2008Loader
        if any(tag in election_id for tag in ('2004', '2006', '2008')):
            return NCTextLoader
        if any(tag in election_id for tag in ('2002', '2000-11-07')):
            return NCTsv20022000Loader
        return NCXlsLoader
class NCBaseLoader(BaseLoader):
    """Common base for all North Carolina result loaders.

    Holds the shared Datasource, the set of offices whose results are
    kept, and small parsing helpers used by the per-format subclasses.
    """
    datasource = Datasource()
    # Offices (statewide and district) whose rows are loaded; everything
    # else is dropped by the subclasses' _skip_row implementations.
    target_offices = set([
        'PRESIDENT AND VICE PRESIDENT OF THE UNITED STATES',
        'PRESIDENT-VICE PRESIDENT',
        'STRAIGHT PARTY',
        'US HOUSE OF REPRESENTATIVES',
        'US HOUSE OF REP.',
        'US CONGRESS',
        'US CONGRESS DISTRICT',
        'US SENATE',
        'NC GOVERNOR',
        'GOVERNOR',
        'NC LIEUTENANT GOVERNOR',
        'LIEUTENANT GOVERNOR',
        'NC SECRETARY OF STATE',
        'NC ATTORNEY GENERAL',
        'ATTORNEY GENERAL',
        'NC AUDITOR',
        'AUDITOR',
        'NC COMMISSIONER OF AGRICULTURE',
        'COMMISSIONER OF AGRICULTURE',
        'NC COMMISSIONER OF INSURANCE',
        'NC COMMISSIONER OF LABOR',
        'COMMISSIONER OF LABOR',
        'NC SUPERINTENDENT OF PUBLIC INSTRUCTION',
        'SUPER. OF PUBLIC INSTRUCTION',
        'NC TREASURER',
        'TREASURER',
        'NC HOUSE OF REPRESENTATIVES',
        'NC STATE SENATE',
        'SENATE',
        'NC STATE HOUSE',
        'HOUSE',
    ])
    # Offices elected per-district (a district number must be parsed
    # out of the contest name for these).
    district_offices = set([
        'US HOUSE OF REPRESENTATIVES',
        'US CONGRESS',
        'NC HOUSE OF REPRESENTATIVES',
        'NC STATE SENATE',
        'NC STATE HOUSE',
        'HOUSE',
    ])
    def _skip_row(self, row):
        """
        Should this row be skipped?
        This should be implemented in subclasses.
        """
        return False
    def _votes(self, val):
        """
        Returns cleaned version of votes or 0 if it's a non-numeric value.
        """
        # Values arrive as unicode (unicodecsv), str, or float (xlrd).
        # The previous `type(val) is str` fast-path never matched the
        # unicode strings unicodecsv yields, and a non-string, non-numeric
        # value such as None raised an uncaught TypeError from float().
        # Route everything through float() and treat anything that cannot
        # be converted as zero votes.
        try:
            return int(float(val))
        except (ValueError, TypeError):
            # Couldn't convert value to a number
            return 0
    def _base_kwargs(self, row):
        "Build base set of kwargs for RawResult"
        # TODO: Can this just be called once?
        kwargs = self._build_common_election_kwargs()
        return kwargs
class NCTsv2014Loader(NCBaseLoader):
    """
    Loads North Carolina results in tab-delimited format.
    Absentee, provisional and 'transfer' vote totals are also included, but as "precincts" so need to be handled.
    """
    def load(self):
        """Parse the mapped TSV file and bulk-insert RawResult instances.

        Pseudo-precinct rows (CURBSIDE/PROVISIONAL/ABSENTEE BY MAIL/
        ONESTOP/TRANSFER) are stored as county-level results; everything
        else is precinct-level.
        """
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = []
        with self._file_handle as tsvfile:
            tsv = [x.replace('\0', '') for x in tsvfile] # remove NULL bytes
            reader = unicodecsv.DictReader(tsv, delimiter='\t', encoding='latin-1')
            for row in reader:
                if self._skip_row(row):
                    continue
                if row['Precinct'] in ('CURBSIDE', 'PROVISIONAL', 'ABSENTEE BY MAIL', 'ONESTOP', 'TRANSFER'):
                    results.append(self._prep_county_result(row))
                else:
                    results.append(self._prep_precinct_result(row))
        RawResult.objects.insert(results)
    def _skip_row(self, row):
        """Keep only rows whose contest mentions a targeted office."""
        if any(o in row['Contest Name'] for o in self.target_offices):
            return False
        else:
            return True
    def _build_contest_kwargs(self, row):
        """Split office and (optional) district out of 'Contest Name'."""
        if 'DISTRICT' in row['Contest Name']:
            office = row['Contest Name'].split(' DISTRICT ')[0].strip()
            district = row['Contest Name'].split(' DISTRICT ')[1].split(' - ')[0].split(' ')[0]
        else:
            office = row['Contest Name'].split(' - ')[0].strip()
            district = None
        kwargs = {
            'office': office,
            'district': district,
            'primary_party': row['Choice Party'].strip()
        }
        return kwargs
    def _build_candidate_kwargs(self, row):
        """Candidate name fields for this row."""
        full_name = row['Choice'].strip()
        slug = slugify(full_name, substitute='-')
        kwargs = {
            'full_name': full_name,
            #TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slug,
        }
        return kwargs
    def _prep_precinct_result(self, row):
        """Build a precinct-level RawResult, including vote breakdowns."""
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        precinct = str(row['Precinct']).strip()
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['County'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['Choice Party'].strip(),
            'votes': self._votes(row['Total Votes']),
            'vote_breakdowns': self._breakdowns(row)
        })
        return RawResult(**kwargs)
    def _prep_county_result(self, row):
        """Build a county-level RawResult for pseudo-precinct rows; the
        pseudo-precinct label is preserved in votes_type."""
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['County'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'county',
            'jurisdiction': row['County'],
            'ocd_id': county_ocd_id,
            'party': row['Choice Party'].strip(),
            'votes_type': row['Precinct'],
            'votes': self._votes(row['Total Votes'])
        })
        return RawResult(**kwargs)
    def _breakdowns(self, row):
        """Per-method vote splits reported alongside the precinct total."""
        return { 'election_day': self._votes((row['Election Day'])), 'absentee_by_mail': self._votes(row['Absentee by Mail']), 'one_stop': self._votes(row['One Stop']), 'provisional': self._votes(row['Provisional'])}
class NCCsvLoader(NCBaseLoader):
"""
Parse North Carolina election results in CSV format.
"""
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile, encoding='latin-1')
for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
results.append(self._prep_precinct_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
if " ".join(row['contest'].split(' ')[:3]) in self.target_offices:
return False
else:
return True
def _build_contest_kwargs(self, row):
if 'DISTRICT' in row['contest']:
try:
office, district = row['contest'].split(' DISTRICT ')
except:
print row['contest']
raise
else:
office = row['contest'].strip()
district = None
if 'primary' in self.source:
party = row['party'].strip()
else:
party = None
kwargs = {
'office': office,
'district': district,
'primary_party': party
}
return kwargs
def _build_candidate_kwargs(self, row):
full_name = row['choice'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
'name_slug': slug,
}
return kwargs
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
kwargs.update(self._build_contest_kwargs(row))
kwargs.update(self._build_candidate_kwargs(row))
precinct = str(row['precinct'])
county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
'party': row['party'].strip(),
'votes': self._votes(row['total votes']),
'vote_breakdowns': self._breakdowns(row, kwargs),
})
return RawResult(**kwargs)
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'] == row['CountyName']][0]['ocd_id']
kwargs.update({
'reporting_level': 'county',
'jurisdiction': row['CountyName'],
'ocd_id': county_ocd_id,
'party': row['PartyName'].strip(),
'votes': self._votes(row['Votes']),
'vote_breakdowns': {},
})
return RawResult(**kwargs)
def _breakdowns(self, row, kwargs):
if any(s in kwargs['election_id'] for s in ['2010', '2012']):
breakdows = { 'election_day': self._votes((row['Election Day'])), 'one_stop': self._votes(row['One Stop']), 'absentee_mail': self._votes(row['Absentee by Mail']), 'provisional': self._votes(row['Provisional'])}
else:
breakdows = { 'election_day': self._votes((row['Election Day'])), 'absentee_onestop': self._votes(row['Absentee / One Stop']), 'provisional': self._votes(row['Provisional'])}
return breakdows
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class NCTsv2008Loader(NCBaseLoader):
    """
    Loads North Carolina 2008 primary tab-delimited results.

    The source files carry no header row, so column names are supplied
    to the reader explicitly.
    """
    def load(self):
        """Read the tab-delimited file and bulk-insert precinct results."""
        column_names = [
            'county',
            'date',
            'precinct',
            'contest',
            'choice',
            'party',
            'election_day',
            'absentee',
            'provisional',
            'total_votes'
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        with self._file_handle as tsvfile:
            reader = unicodecsv.DictReader(tsvfile, delimiter='\t', fieldnames=column_names, encoding='latin-1')
            # Collect everything first so the database gets one bulk insert.
            raw_results = [self._prep_precinct_result(row)
                           for row in reader if not self._skip_row(row)]
            RawResult.objects.insert(raw_results)
    def _skip_row(self, row):
        """A row is kept only when its contest starts with a target office."""
        office_prefix = " ".join(row['contest'].split(' ')[:3])
        return office_prefix not in self.target_offices
    def _build_contest_kwargs(self, row):
        """Pull office, optional district and party out of the contest text."""
        contest = row['contest']
        if 'DISTRICT' in contest:
            pieces = contest.split(' DISTRICT ')
            office = pieces[0]
            district = pieces[1].split(' - ')[0]
        else:
            office = contest.split(' - ')[0]
            district = None
        return {
            'office': office,
            'district': district,
            'primary_party': row['party'].strip()
        }
    def _build_candidate_kwargs(self, row):
        """Name fields for the candidate on this row."""
        candidate_name = row['choice'].strip()
        return {
            'full_name': candidate_name,
            #TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slugify(candidate_name, substitute='-'),
        }
    def _prep_precinct_result(self, row):
        """Assemble one precinct-level RawResult from a parsed row."""
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        precinct = str(row['precinct'])
        county_name = row['county'].upper()
        matches = [j for j in self.datasource._jurisdictions()
                   if j['county'].upper() == county_name]
        county_ocd_id = matches[0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['party'].strip(),
            'votes': self._votes(row['total_votes']),
            'vote_breakdowns': self._breakdowns(row, kwargs)
        })
        return RawResult(**kwargs)
    def _breakdowns(self, row, kwargs):
        """Per-method vote splits reported next to the precinct total."""
        return {
            'election_day': self._votes(row['election_day']),
            'absentee_mail': self._votes(row['absentee']),
            'provisional': self._votes(row['provisional'])
        }
class NCTextLoader(NCBaseLoader):
    """
    Loads North Carolina results in tab-delimited format (although 2004 are CSV, but same headers).
    Absentee, provisional and 'transfer' vote totals are also included, but as "precincts" so need to be handled.
    """
    def load(self):
        """Parse the mapped file and bulk-insert RawResult instances.

        ABSENTEE/PROV pseudo-precinct rows are stored as county-level
        results; everything else is precinct-level.
        """
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = []
        with self._file_handle as csvfile:
            # 2004 files are comma-delimited; later years are tab-delimited.
            if '2004' in self.mapping['election']:
                reader = unicodecsv.DictReader(csvfile, delimiter=',', encoding='latin-1')
            else:
                reader = unicodecsv.DictReader(csvfile, delimiter='\t', encoding='latin-1')
            for row in reader:
                if self._skip_row(row):
                    continue
                if row['precinct'] == 'ABSENTEE' or row['precinct'] == 'PROV':
                    results.append(self._prep_county_result(row))
                else:
                    results.append(self._prep_precinct_result(row))
        RawResult.objects.insert(results)
    def _skip_row(self, row):
        """Keep only rows whose contest mentions a targeted office."""
        if any(o in row['contest_name'] for o in self.target_offices):
            return False
        else:
            return True
    def _build_contest_kwargs(self, row):
        """Split office and (optional) district out of 'contest_name'."""
        if 'DISTRICT' in row['contest_name']:
            office = row['contest_name'].split(' DISTRICT ')[0].strip()
            district = row['contest_name'].split(' DISTRICT ')[1].split(' - ')[0].strip()
        else:
            office = row['contest_name'].split(' - ')[0].strip()
            district = None
        kwargs = {
            'office': office,
            'district': district,
            'primary_party': row['party_cd'].strip()
        }
        return kwargs
    def _build_candidate_kwargs(self, row):
        """Candidate name fields for this row."""
        full_name = row['name_on_ballot'].strip()
        slug = slugify(full_name, substitute='-')
        kwargs = {
            'full_name': full_name,
            #TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slug,
        }
        return kwargs
    def _prep_precinct_result(self, row):
        """Build a precinct-level RawResult from one row."""
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        precinct = str(row['precinct']).strip()
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['party_cd'].strip(),
            'votes': self._votes(row['ballot_count'])
        })
        return RawResult(**kwargs)
    def _prep_county_result(self, row):
        """Build a county-level RawResult for ABSENTEE/PROV pseudo-precinct rows.

        Bug fix: this format's columns are 'party_cd' and 'ballot_count'
        (the keys every other method in this loader uses); the previous
        'party'/'total_votes' lookups raised KeyError for every
        ABSENTEE/PROV row. Contest and candidate fields are now included
        as well, matching _prep_precinct_result and the other NC loaders.
        """
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'county',
            'jurisdiction': row['county'],
            'ocd_id': county_ocd_id,
            'party': row['party_cd'].strip(),
            'votes': self._votes(row['ballot_count'])
        })
        return RawResult(**kwargs)
class NCTsv20022000Loader(NCBaseLoader):
    """
    Loads North Carolina 2002 primary and general, plus 2000 general tab-delimited precinct-level results. Absentee/provisional totals are
    at the county level.
    """
    def load(self):
        """Parse the header-less tab-delimited file; 'absentee/provisional'
        pseudo-precinct rows become county-level results."""
        headers = [
            'county',
            'date',
            'precinct_abbrev',
            'precinct',
            'contest',
            'choice',
            'party',
            'total_votes',
            'timestamp'
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = []
        with self._file_handle as csvfile:
            reader = unicodecsv.DictReader(csvfile, delimiter='\t', fieldnames = headers, encoding='latin-1')
            for row in reader:
                if self._skip_row(row):
                    continue
                if row['precinct'] == 'absentee/provisional':
                    results.append(self._prep_county_result(row))
                else:
                    results.append(self._prep_precinct_result(row))
        RawResult.objects.insert(results)
    def _skip_row(self, row):
        """Keep only rows whose contest begins with a targeted office."""
        if " ".join(row['contest'].split(' ')[:3]) in self.target_offices:
            return False
        else:
            return True
    def _build_contest_kwargs(self, row):
        """Office/district extraction; the 2000 general and the 2002 files
        encode the district differently."""
        if 'DISTRICT' in row['contest'] and row['date'] == '11/07/2000':
            # 2000 general: 'OFFICE DISTRICT n'
            office = row['contest'].split(' DISTRICT ')[0].strip()
            district = row['contest'].split(' DISTRICT ')[1]
        elif 'DISTRICT' in row['contest']:
            # 2002 files: 'OFFICE (n ...)'
            office = row['contest'].split('(')[0].strip()
            district = row['contest'].split('(')[1].split(' ')[0]
        elif row['contest'][0:2] == 'NC':
            # State legislative contests: 'NC OFFICE (n)'
            office = row['contest'].split('(')[0].strip()
            district = row['contest'].split('(')[1].split(')')[0]
        else:
            office = row['contest'].strip()
            district = None
        kwargs = {
            'office': office,
            'district': district,
            'primary_party': row['party'].strip()
        }
        return kwargs
    def _build_candidate_kwargs(self, row):
        """Candidate name fields for this row."""
        full_name = row['choice'].strip()
        slug = slugify(full_name, substitute='-')
        kwargs = {
            'full_name': full_name,
            #TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slug,
        }
        return kwargs
    def _prep_precinct_result(self, row):
        """Build a precinct-level RawResult.

        Fix: the contest/candidate kwargs builders defined above were
        never invoked, so results lacked the office, district, party and
        candidate fields that every other NC loader records.
        """
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        precinct = str(row['precinct']).strip()
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': row['party'].strip(),
            'votes': self._votes(row['total_votes'])
        })
        return RawResult(**kwargs)
    def _prep_county_result(self, row):
        """Build a county-level RawResult for absentee/provisional rows.

        Fix: contest and candidate kwargs are now included, matching
        _prep_precinct_result and the sibling loaders.
        """
        kwargs = self._base_kwargs(row)
        kwargs.update(self._build_contest_kwargs(row))
        kwargs.update(self._build_candidate_kwargs(row))
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == row['county'].upper()][0]['ocd_id']
        # load() only routes 'absentee/provisional' rows here, but keep the
        # guard so a direct caller gets a sensible votes_type.
        if row['precinct'] == 'absentee/provisional':
            votes_type = 'absentee_provisional'
        else:
            votes_type = None
        kwargs.update({
            'reporting_level': 'county',
            'jurisdiction': row['county'],
            'ocd_id': county_ocd_id,
            'party': row['party'].strip(),
            'votes': self._votes(row['total_votes']),
            'votes_type': votes_type,
        })
        return RawResult(**kwargs)
class NCXlsLoader(NCBaseLoader):
    """
    Loads North Carolina 2000 primary results, which are contained in office-specific Excel files. For district-level
    offices, each district is represented on a separate worksheet.
    """
    def load(self):
        """Walk the workbook's sheets, detect each sheet's office/district
        and candidate header layout, and bulk-insert RawResults."""
        # NOTE(review): 'headers' is never used in this loader.
        headers = [
            'county',
            'precinct',
            'contest',
            'choice',
            'party',
            'total_votes',
        ]
        self._common_kwargs = self._build_common_election_kwargs()
        self._common_kwargs['reporting_level'] = 'precinct'
        # Store result instances for bulk loading
        results = []
        xlsfile = xlrd.open_workbook(self._xls_file_path)
        # Legislative files have one sheet per district; statewide files use
        # the first sheet (sheet 5 for the GOP lieutenant governor file).
        if 'house' in self.source or 'state_senate' in self.source:
            sheets = xlsfile.sheets()
        elif 'republican__primary__lieutenant_governor' in self.source:
            sheets = [xlsfile.sheets()[5]]
        else:
            sheets = [xlsfile.sheets()[0]]
        for sheet in sheets:
            office, district = self._detect_office(sheet)
            # Candidate names and their parties live on different header
            # rows depending on each sheet's (inconsistent) layout.
            if sheet.name == '83rd NC House':
                cands = [c for c in sheet.row_values(1)[2:] if c != '']
                parties = [x.replace('(','').replace(')','') for x in sheet.row_values(2)[2:] if x != '']
                start_row = 3
            elif sheet.name == '97th NC House':
                cands = [c for c in sheet.row_values(2)[2:] if c != '']
                parties = [x.replace('(','').replace(')','') for x in sheet.row_values(3)[2:] if x != '']
                start_row = 4
            elif sheet.row_values(0)[1].upper() == 'PRECINCT' or sheet.row_values(0)[2] == 'John Cosgrove' or sheet.row_values(0)[1].upper() == 'PRECINCTS' or sheet.row_values(0)[2] == 'Paul Luebke' or sheet.row_values(0)[1] == 'Precinct Name':
                cands = [c for c in sheet.row_values(0)[2:] if c != '']
                parties = [x.replace('(','').replace(')','') for x in sheet.row_values(1)[2:] if x != '']
                start_row = 2
            else:
                # NOTE(review): start_row=2 here overlaps the header rows
                # read just above (rows 2 and 3) -- confirm this is intended.
                cands = [c for c in sheet.row_values(2)[2:] if c != '']
                parties = [x.replace('(','').replace(')','') for x in sheet.row_values(3)[2:] if x != '']
                start_row = 2
            candidates = zip(cands, parties)
            for i in xrange(start_row, sheet.nrows):
                row = [r for r in sheet.row_values(i)]
                if self._skip_row(row):
                    continue
                for idx, cand in enumerate(candidates):
                    # Rows with an empty precinct cell (row[1]) are county
                    # summary rows; they also establish which county the
                    # following precinct rows belong to.
                    # NOTE(review): if a sheet's first data row is
                    # precinct-level, 'county' is referenced before
                    # assignment -- confirm every sheet leads with a
                    # county summary row.
                    if row[1] == '':
                        county = row[0]
                        results.append(self._prep_county_result(row, office, district, cand, county, row[idx+2]))
                    else:
                        results.append(self._prep_precinct_result(row, office, district, cand, county, row[idx+2]))
        RawResult.objects.insert(results)
    def _skip_row(self, row):
        """Skip blank rows, repeated header rows and TOTAL summary rows."""
        if row == []:
            return True
        elif row[0] == '' and row[1] == '':
            return True
        elif row[0] == ' ' and row[1] == '':
            return True
        elif row[0].upper() == 'TOTAL':
            return True
        elif row[0] == 'County':
            return True
        else:
            return False
    def _detect_office(self, sheet):
        """Infer [office, district] from the source filename and, for
        legislative offices, the sheet name (district number prefix)."""
        district = None
        if 'state_house' in self.source:
            office = 'NC HOUSE OF REPRESENTATIVES'
            district = sheet.name.split(' ')[0]
        elif 'state_senate' in self.source:
            office = 'NC STATE SENATE'
            district = sheet.name.split(' ')[0]
        elif 'house' in self.source:
            office = 'US HOUSE OF REPRESENTATIVES'
            district = sheet.name.split(' ')[0]
        elif 'lieutenant_governor' in self.source:
            office = 'LIEUTENANT GOVERNOR'
        elif 'governor' in self.source:
            office = 'GOVERNOR'
        elif 'auditor' in self.source:
            office = 'AUDITOR'
        elif 'commissioner_of_agriculture' in self.source:
            office = 'COMMISSIONER OF AGRICULTURE'
        elif 'commissioner_of_labor' in self.source:
            office = 'COMMISSIONER OF LABOR'
        elif 'treasurer' in self.source:
            office = 'TREASURER'
        elif 'president' in self.source:
            office = 'PRESIDENT-VICE PRESIDENT'
        return [office, district]
    def _build_contest_kwargs(self, office, district, party):
        """Contest fields shared by county and precinct results."""
        kwargs = {
            'office': office,
            'district': district,
            'primary_party': party,
        }
        return kwargs
    def _build_candidate_kwargs(self, candidate):
        """Candidate name fields; candidate is a (name, party) pair."""
        full_name = candidate[0]
        slug = slugify(full_name, substitute='-')
        kwargs = {
            'full_name': full_name,
            #TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
            'name_slug': slug,
        }
        return kwargs
    def _prep_precinct_result(self, row, office, district, candidate, county, votes):
        """Build a precinct-level RawResult for one candidate column cell."""
        kwargs = self._base_kwargs(row, office, district, candidate)
        precinct = str(row[1]).strip()
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == county.upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'precinct',
            'jurisdiction': precinct,
            'ocd_id': "{}/precinct:{}".format(county_ocd_id, ocd_type_id(precinct)),
            'party': candidate[1],
            'votes': self._votes(votes)
        })
        return RawResult(**kwargs)
    def _prep_county_result(self, row, office, district, candidate, county, votes):
        """Build a county-level RawResult for one candidate column cell."""
        kwargs = self._base_kwargs(row, office, district, candidate)
        county_ocd_id = [c for c in self.datasource._jurisdictions() if c['county'].upper() == county.upper()][0]['ocd_id']
        kwargs.update({
            'reporting_level': 'county',
            'jurisdiction': county,
            'ocd_id': county_ocd_id,
            'party': candidate[1],
            'votes': self._votes(votes)
        })
        return RawResult(**kwargs)
    def _base_kwargs(self, row, office, district, candidate):
        "Build base set of kwargs for RawResult"
        # TODO: Can this just be called once?
        kwargs = self._build_common_election_kwargs()
        contest_kwargs = self._build_contest_kwargs(office, district, candidate[1])
        candidate_kwargs = self._build_candidate_kwargs(candidate)
        kwargs.update(contest_kwargs)
        kwargs.update(candidate_kwargs)
        return kwargs
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Updates generated docs from Python doc comments.
Updates the documentation files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
import sys
# Matches an "arg_name:" item line inside a docstring section (allows a
# leading * or ** for varargs/kwargs).
_arg_re = re.compile(" *([*]{0,2}[a-zA-Z][a-zA-Z0-9_]*):")
# Matches a docstring section header such as "Args:" or "Returns:".
_section_re = re.compile("([A-Z][a-zA-Z ]*):$")
# Symbols whose name starts with an underscore are never documented.
_always_drop_symbol_re = re.compile("_[_a-zA-Z0-9]")
# Characters permitted in a Markdown anchor (word characters and dots).
_anchor_re = re.compile(r"^[\w.]+$")
# Marker used in module docstrings to call out a member to document inline.
_member_mark = "@@"
class Document(object):
  """Base class for an automatically generated document."""

  def write_markdown_to_file(self, f):
    """Writes a Markdown-formatted version of this document to file `f`.

    Args:
      f: The output file.

    Raises:
      NotImplementedError: always; subclasses must override this method.
    """
    # Fix: the original message referenced a nonexistent method name
    # ("Document.WriteToFile"); name the actual abstract method instead.
    raise NotImplementedError("Document.write_markdown_to_file")
class Index(Document):
  """An automatically generated index for a collection of documents."""

  def __init__(self, module_to_name, members, filename_to_library_map,
               path_prefix):
    """Creates a new Index.

    Args:
      module_to_name: Dictionary mapping modules to short names.
      members: Dictionary mapping member name to (fullname, member).
      filename_to_library_map: A list of (filename, Library) pairs. The order
        corresponds to the order in which the libraries appear in the index.
      path_prefix: Prefix to add to links in the index.
    """
    self._module_to_name = module_to_name
    self._members = members
    self._filename_to_library_map = filename_to_library_map
    self._path_prefix = path_prefix

  def write_markdown_to_file(self, f):
    """Writes this index to file `f` as an unordered Markdown list.

    Each list element contains the title of the library, followed by a
    list of symbols in that library hyperlinked to the corresponding
    anchor in that library.

    Args:
      f: The output file.
    """
    print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
    print("", file=f)
    print("# TensorFlow Python reference documentation", file=f)
    print("", file=f)

    def _anchor(short_name):
      # Resolve the short name to its full name, then to its page anchor.
      fullname = self._members[short_name][0]
      return _get_anchor(self._module_to_name, fullname)

    for filename, library in self._filename_to_library_map:
      ordered = sorted(library.mentioned, key=lambda x: (str.lower(x), x))
      documented = [n for n in ordered if n in self._members]
      # TODO: This is a hack that should be removed as soon as the website code
      # allows it.
      target = self._path_prefix + filename
      entries = ["[`%s`](%s#%s)" % (n, target, _anchor(n))
                 for n in documented]
      if entries:
        print("* **[%s](%s)**:" % (library.title, target), file=f)
        for entry in entries:
          print(" * %s" % entry, file=f)
        print("", file=f)
def collect_members(module_to_name, exclude=()):
  """Collect all symbols from a list of modules.

  Args:
    module_to_name: Dictionary mapping modules to short names.
    exclude: Set of fully qualified names to exclude.

  Returns:
    Dictionary mapping name to (fullname, member) pairs.

  Raises:
    RuntimeError: if two distinct members collide on a short name, or if
      the same member is reachable under two full names of equal length.
  """
  members = {}
  for module, module_name in module_to_name.items():
    all_names = getattr(module, "__all__", None)
    for name, member in inspect.getmembers(module):
      # Only functions and classes are documented; private names and
      # names outside __all__ (when present) are dropped.
      if not (inspect.isfunction(member) or inspect.isclass(member)):
        continue
      if _always_drop_symbol_re.match(name):
        continue
      if all_names is not None and name not in all_names:
        continue
      fullname = '%s.%s' % (module_name, name)
      if fullname in exclude:
        continue
      if name in members:
        other_fullname, other_member = members[name]
        if member is not other_member:
          raise RuntimeError("Short name collision between %s and %s" %
                             (fullname, other_fullname))
        if len(fullname) == len(other_fullname):
          raise RuntimeError("Can't decide whether to use %s or %s for %s: "
                             "both full names have length %d" %
                             (fullname, other_fullname, name, len(fullname)))
        if len(fullname) > len(other_fullname):
          continue  # Use the shorter full name
      members[name] = fullname, member
  return members
def _get_anchor(module_to_name, fullname):
  """Turn a full member name into an anchor.

  The longest module-name prefix of `fullname` is stripped off to form
  the anchor.

  Args:
    module_to_name: Dictionary mapping modules to short names.
    fullname: Fully qualified name of symbol.

  Returns:
    HTML anchor string.

  Raises:
    ValueError: If fullname uses characters invalid in an anchor.
  """
  if not _anchor_re.match(fullname):
    raise ValueError("'%s' is not a valid anchor" % fullname)
  shortest = fullname
  for module_name in module_to_name.values():
    prefix = module_name + "."
    if fullname.startswith(prefix):
      remainder = fullname[len(prefix):]
      # The shortest remainder corresponds to the longest prefix stripped.
      if len(remainder) < len(shortest):
        shortest = remainder
  return shortest
class Library(Document):
  """An automatically generated document for a set of functions and classes."""

  def __init__(self,
               title,
               module,
               module_to_name,
               members,
               documented,
               exclude_symbols=(),
               prefix=None):
    """Creates a new Library.

    Args:
      title: A human-readable title for the library.
      module: Module to pull high level docstring from (for table of contents,
        list of Ops to document, etc.).
      module_to_name: Dictionary mapping modules to short names.
      members: Dictionary mapping member name to (fullname, member).
      documented: Set of documented names to update.
      exclude_symbols: A list of specific symbols to exclude.
      prefix: A string to include at the beginning of the page.
    """
    self._title = title
    self._module = module
    self._module_to_name = module_to_name
    self._members = dict(members)  # Copy since we mutate it below
    self._exclude_symbols = frozenset(exclude_symbols)
    # Excluded symbols are marked documented up front so they are never
    # reported as leftovers.
    documented.update(exclude_symbols)
    self._documented = documented
    self._mentioned = set()
    self._prefix = prefix or ""

  @property
  def title(self):
    """The human-readable title for this library."""
    return self._title

  @property
  def mentioned(self):
    """Set of names mentioned in this library."""
    return self._mentioned

  @property
  def exclude_symbols(self):
    """Set of excluded symbols."""
    return self._exclude_symbols

  def _should_include_member(self, name, member):
    """Returns True if this member should be included in the document."""
    # Always exclude symbols matching _always_drop_symbol_re.
    if _always_drop_symbol_re.match(name):
      return False
    # Finally, exclude any specifically-excluded symbols.
    if name in self._exclude_symbols:
      return False
    return True

  def get_imported_modules(self, module):
    """Returns the list of modules imported from `module`."""
    for name, member in inspect.getmembers(module):
      if inspect.ismodule(member):
        yield name, member

  def get_class_members(self, cls_name, cls):
    """Returns the list of class members to document in `cls`.

    This function filters the class member to ONLY return those
    defined by the class. It drops the inherited ones.

    NOTE(review): the body does not actually check cls.__dict__, so
    inherited methods that pass _should_include_member are yielded too --
    confirm whether the docstring or the code reflects the intent.

    Args:
      cls_name: Qualified name of `cls`.
      cls: An inspect object of type 'class'.

    Yields:
      name, member tuples.
    """
    for name, member in inspect.getmembers(cls):
      # Only show methods and properties presently. In Python 3,
      # methods register as isfunction.
      is_method = inspect.ismethod(member) or inspect.isfunction(member)
      if not (is_method or isinstance(member, property)):
        continue
      # __init__ is documented even though its leading underscore would
      # normally drop it via _should_include_member.
      if ((is_method and member.__name__ == "__init__")
          or self._should_include_member(name, member)):
        yield name, ("%s.%s" % (cls_name, name), member)

  def _generate_signature_for_function(self, func):
    """Given a function, returns a string representing its args."""
    # NOTE(review): inspect.getargspec was removed in Python 3.11; this
    # module predates that and would need getfullargspec there.
    args_list = []
    argspec = inspect.getargspec(func)
    # Index of the first argument that has a default value.
    first_arg_with_default = (
        len(argspec.args or []) - len(argspec.defaults or []))
    for arg in argspec.args[:first_arg_with_default]:
      if arg == "self":
        # Python documentation typically skips `self` when printing method
        # signatures.
        continue
      args_list.append(arg)
    # TODO(mrry): This is a workaround for documenting signature of
    # functions that have the @contextlib.contextmanager decorator.
    # We should do something better.
    if argspec.varargs == "args" and argspec.keywords == "kwds":
      original_func = func.__closure__[0].cell_contents
      return self._generate_signature_for_function(original_func)
    if argspec.defaults:
      for arg, default in zip(
          argspec.args[first_arg_with_default:], argspec.defaults):
        if callable(default):
          # Render callable defaults by name rather than repr.
          args_list.append("%s=%s" % (arg, default.__name__))
        else:
          args_list.append("%s=%r" % (arg, default))
    if argspec.varargs:
      args_list.append("*" + argspec.varargs)
    if argspec.keywords:
      args_list.append("**" + argspec.keywords)
    return "(" + ", ".join(args_list) + ")"

  def _remove_docstring_indent(self, docstring):
    """Remove indenting.

    We follow Python's convention and remove the minimum indent of the lines
    after the first, see:
    https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    preserving relative indentation.

    Args:
      docstring: A docstring.

    Returns:
      A list of strings, one per line, with the minimum indent stripped.
    """
    docstring = docstring or ""
    lines = docstring.strip().split("\n")
    # Find the minimum indentation among non-empty lines after the first.
    min_indent = len(docstring)
    for l in lines[1:]:
      l = l.rstrip()
      if l:
        i = 0
        while i < len(l) and l[i] == " ":
          i += 1
        if i < min_indent: min_indent = i
    # Strip that indent from every line after the first.
    for i in range(1, len(lines)):
      l = lines[i].rstrip()
      if len(l) >= min_indent:
        l = l[min_indent:]
      lines[i] = l
    return lines

  def _print_formatted_docstring(self, docstring, f):
    """Formats the given `docstring` as Markdown and prints it to `f`."""
    lines = self._remove_docstring_indent(docstring)

    # Output the lines, identifying "Args" and other section blocks.
    i = 0

    def _at_start_of_section():
      """Returns the header if lines[i] is at start of a docstring section."""
      l = lines[i]
      match = _section_re.match(l)
      # A header only counts if the following line is indented content.
      if match and i + 1 < len(
          lines) and lines[i + 1].startswith(" "):
        return match.group(1)
      else:
        return None

    while i < len(lines):
      l = lines[i]

      section_header = _at_start_of_section()
      if section_header:
        if i == 0 or lines[i-1]:
          print("", file=f)
        # Use at least H4 to keep these out of the TOC.
        print("##### " + section_header + ":", file=f)
        print("", file=f)
        i += 1
        outputting_list = False
        while i < len(lines):
          l = lines[i]
          # A new section header terminates the section.
          if _at_start_of_section():
            break
          match = _arg_re.match(l)
          if match:
            if not outputting_list:
              # We need to start a list. In Markdown, a blank line needs to
              # precede a list.
              print("", file=f)
              outputting_list = True
            suffix = l[len(match.group()):].lstrip()
            print("* <b>`" + match.group(1) + "`</b>: " + suffix, file=f)
          else:
            # For lines that don't start with _arg_re, continue the list if it
            # has enough indentation.
            outputting_list &= l.startswith(" ")
            print(l, file=f)
          i += 1
      else:
        print(l, file=f)
        i += 1

  def _print_function(self, f, prefix, fullname, func):
    """Prints the given function to `f`."""
    heading = prefix + " `" + fullname
    # Properties have no call signature.
    if not isinstance(func, property):
      heading += self._generate_signature_for_function(func)
    heading += "` {#%s}" % _get_anchor(self._module_to_name, fullname)
    print(heading, file=f)
    print("", file=f)
    self._print_formatted_docstring(inspect.getdoc(func), f)
    print("", file=f)

  def _write_member_markdown_to_file(self, f, prefix, name, member):
    """Print `member` to `f`."""
    if (inspect.isfunction(member) or inspect.ismethod(member) or
        isinstance(member, property)):
      print("- - -", file=f)
      print("", file=f)
      self._print_function(f, prefix, name, member)
      print("", file=f)
    elif inspect.isclass(member):
      print("- - -", file=f)
      print("", file=f)
      print("%s `class %s` {#%s}" % (prefix, name,
                                     _get_anchor(self._module_to_name, name)),
            file=f)
      print("", file=f)
      self._write_class_markdown_to_file(f, name, member)
      print("", file=f)
    else:
      raise RuntimeError("Member %s has unknown type %s" % (name, type(member)))

  def _write_docstring_markdown_to_file(self, f, prefix, docstring, members,
                                        imports):
    """Writes `docstring` to `f`, expanding @@member marks inline.

    Lines beginning with _member_mark name a member (or imported module)
    whose documentation is emitted at that point; other lines pass
    through verbatim.
    """
    for l in self._remove_docstring_indent(docstring):
      if l.startswith(_member_mark):
        name = l[len(_member_mark):].strip(" \t")
        if name in members:
          self._documented.add(name)
          self._mentioned.add(name)
          self._write_member_markdown_to_file(f, prefix, *members[name])
          # Remove it so the leftover bookkeeping sees it as handled.
          del members[name]
        elif name in imports:
          self._write_module_markdown_to_file(f, imports[name])
        else:
          raise ValueError("%s: unknown member `%s`" % (self._title, name))
      else:
        print(l, file=f)

  def _write_class_markdown_to_file(self, f, name, cls):
    """Write the class doc to `f`.

    Args:
      f: File to write to.
      name: name to use.
      cls: class object.
    """
    # Build the list of class methods to document.
    methods = dict(self.get_class_members(name, cls))
    # Used later to check if any methods were called out in the class
    # docstring.
    num_methods = len(methods)
    try:
      self._write_docstring_markdown_to_file(f, "####", inspect.getdoc(cls),
                                             methods, {})
    except ValueError as e:
      raise ValueError(str(e) + " in class `%s`" % cls.__name__)
    # If some methods were not described, describe them now if they are
    # defined by the class itself (not inherited). If NO methods were
    # described, describe all methods.
    #
    # TODO(touts): when all methods have been categorized make it an error
    # if some methods are not categorized.
    any_method_called_out = (len(methods) != num_methods)
    if any_method_called_out:
      other_methods = {n: m for n, m in methods.items() if n in cls.__dict__}
      if other_methods:
        print("\n#### Other Methods", file=f)
    else:
      other_methods = methods
    for name in sorted(other_methods):
      self._write_member_markdown_to_file(f, "####", *other_methods[name])

  def _write_module_markdown_to_file(self, f, module):
    """Writes the module docstring to `f`, expanding @@member marks."""
    imports = dict(self.get_imported_modules(module))
    self._write_docstring_markdown_to_file(f, "###", inspect.getdoc(module),
                                           self._members, imports)

  def write_markdown_to_file(self, f):
    """Prints this library to file `f`.

    Args:
      f: File to write to.
    """
    print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
    print("", file=f)
    # TODO(touts): Do not insert these. Let the doc writer put them in
    # the module docstring explicitly.
    print("#", self._title, file=f)
    if self._prefix:
      print(self._prefix, file=f)
    print("[TOC]", file=f)
    print("", file=f)
    if self._module is not None:
      self._write_module_markdown_to_file(f, self._module)

  def write_other_members(self, f, catch_all=False):
    """Writes the leftover members to `f`.

    Args:
      f: File to write to.
      catch_all: If true, document all missing symbols from any module.
        Otherwise, document missing symbols from just this module.
    """
    if catch_all:
      names = self._members.items()
    else:
      names = inspect.getmembers(self._module)
      all_names = getattr(self._module, "__all__", None)
      if all_names is not None:
        names = [(n, m) for n, m in names if n in all_names]
    leftovers = []
    for name, _ in names:
      if name in self._members and name not in self._documented:
        leftovers.append(name)
    if leftovers:
      # Status line for whoever is running the generator (stdout, not f).
      print("%s: undocumented members: %d" % (self._title, len(leftovers)))
      print("\n## Other Functions and Classes", file=f)
      for name in sorted(leftovers):
        # NOTE(review): this prints the name to stdout rather than to `f`;
        # presumably console progress logging -- confirm it is intentional.
        print(" %s" % name)
        self._documented.add(name)
        self._mentioned.add(name)
        self._write_member_markdown_to_file(f, "###", *self._members[name])

  def assert_no_leftovers(self):
    """Generate an error if there are leftover members."""
    leftovers = []
    for name in self._members.keys():
      if name in self._members and name not in self._documented:
        leftovers.append(name)
    if leftovers:
      raise RuntimeError("%s: undocumented members: %s" %
                         (self._title, ", ".join(leftovers)))
def write_libraries(dir, libraries):
  """Write a list of libraries to disk.

  Args:
    dir: Output directory.  (Parameter name kept for backward
      compatibility even though it shadows the builtin.)
    libraries: List of (filename, library) pairs.
  """
  files = [open(os.path.join(dir, k), "w") for k, _ in libraries]
  try:
    # Document mentioned symbols for all libraries
    for f, (_, v) in zip(files, libraries):
      v.write_markdown_to_file(f)
    # Document symbols that no library mentioned.  We do this after writing
    # out all libraries so that earlier libraries know what later libraries
    # documented.
    for f, (_, v) in zip(files, libraries):
      v.write_other_members(f)
  finally:
    # Close every file even if a write raises; the original only closed
    # the file bound to the loop variable, leaking the rest on error.
    for f in files:
      f.close()
| |
# -*- coding: utf-8 -*-
import os
import sys
import fileinput
import shutil
# Directory names that are pruned entirely while walking the template tree.
IGNORE_DIRS = [
    'tools',
    'build',
    '.git',
    '.svn'
]
# File extensions that are never rewritten (editor swap files).
IGNORE_EXTENSIONS = [
    '.swp'
]
# This function will take the generic "template" files and will adapt them
# to the current projet (i.e putting the user copyright header etc.)
def adapt_files(
    rootDir,
    replacements,
    ignoreDirs = None,
    ignoreExtensions = None
):
    """Walk `rootDir` and replace placeholders in every file, in place.

    Args:
        rootDir: Root of the tree to adapt.
        replacements: dict mapping placeholder text -> replacement text.
        ignoreDirs: directory names to prune from the walk (default: none).
        ignoreExtensions: file extensions (e.g. '.swp') to leave untouched.
    """
    # Fix: the original signature used mutable default arguments
    # (ignoreDirs=[]), which are shared across calls.
    if ignoreDirs is None:
        ignoreDirs = []
    if ignoreExtensions is None:
        ignoreExtensions = []
    # we do a recursive walk
    for path, dirs, files in os.walk(rootDir):
        # prune ignored directories in place so os.walk skips them
        for ignoreDir in ignoreDirs:
            if ignoreDir in dirs:
                dirs.remove(ignoreDir)
        for fileName in files:
            # if the file is not a source file, we do not try to modify it.
            # os.path.splitext handles extensions of any length (the old
            # fileName[-4:] check only matched 4-character extensions).
            if os.path.splitext(fileName)[1] in ignoreExtensions:
                continue
            fullPathFile = os.path.join(path, fileName)
            print(fullPathFile)
            # in all the files we search and replace for the placeholders
            for line in fileinput.FileInput(fullPathFile, inplace=1):
                for find, replace in replacements.items():
                    line = line.replace(find, replace)
                # note FileInput redirects STDOUT into the file, that's why
                # we use print and nothing is displayed on the console
                print(line, end='')
def create_template_local_copy(
    originTemplateRoot,
    localTemplateRoot,
):
    # Make a private copy of the generic templates so they can be adapted
    # to the project (and hand-customised for a given app if needed)
    # without touching the originals.
    shutil.copytree(originTemplateRoot, localTemplateRoot)
# generic function to move a file
def move_file(
    localTemplateRoot,
    appRoot,
    fileName
):
    # Build both paths, log them, then move the file.
    source = "{}/{}".format(localTemplateRoot, fileName)
    destination = "{}/{}".format(appRoot, fileName)
    print(source)
    print(destination)
    os.rename(source, destination)
# to move the CMake configuration file
# that will compile the website
def move_cmake_file(
    localTemplateRoot,
    appRoot
):
    move_file(localTemplateRoot, appRoot, 'CMakeLists.txt')
# move the main.cpp file from the template's files
# to the application's src folder
def move_main(
    localTemplateRoot,
    appRoot
):
    move_file(localTemplateRoot, appRoot + "/src", 'main.cpp')
# The application class files have by default a generic placeholder name;
# rename them with the actual application name.
def rename_app_files(
    localTemplateRoot,
    placeholderName,
    appName
):
    for ext in ('.cpp', '.h'):
        os.rename(
            localTemplateRoot + "/" + placeholderName + ext,
            localTemplateRoot + "/" + appName + ext,
        )
# move the application class files (.cpp and .h) from the template
# directory to the actual application folder
def move_app_files(
    localTemplateRoot,
    appRoot,
    appName
):
    for ext in ('.cpp', '.h'):
        move_file(
            localTemplateRoot,
            appRoot + '/src',
            appName + ext
        )
# By default the main application contains a sub-application named "Pages"
# that is used to display "nearly static" web pages, i.e homepage/credit
# pages etc.
def move_pages_files(
    localTemplateRoot,
    appRoot
):
    appName = "Pages"
    # The controller sources (.cpp/.h) live under /controllers/webs.
    subdir = '/controllers/webs'
    for ext in ('.cpp', '.h'):
        move_file(
            localTemplateRoot + subdir,
            appRoot + '/src' + subdir,
            appName + ext
        )
    # The content header lives under /contents.
    subdir = '/contents'
    move_file(
        localTemplateRoot + subdir,
        appRoot + '/src' + subdir,
        appName + '.h'
    )
# move the configuration file of the website
# from the template directory to the application's one
def move_config_js(
    localTemplateRoot,
    appRoot
):
    move_file(localTemplateRoot, appRoot, 'config.js')
def move_template(
    localTemplateRoot,
    appRoot,
    templateName
):
    """Install `templateName` into every skin directory of the app.

    Fix: the original used os.rename inside the loop, so the source file
    disappeared after the first skin and any additional skin directory
    raised FileNotFoundError.  We copy into each skin instead and remove
    the source once at least one copy has been made.
    """
    #TODO replace by a constant in constants.py
    websRoot = appRoot + '/src/views/webs'
    localFile = localTemplateRoot + '/views/webs/' + templateName
    installed = False
    for skinDir in os.listdir(websRoot):
        dst = os.path.join(websRoot, skinDir, templateName)
        print(localFile + " => " + dst)
        shutil.copyfile(localFile, dst)
        installed = True
    if installed:
        os.remove(localFile)
# move homepage.tmpl, the file that contains the html of
# the main page shown when you go on domain.tld/
def move_homeplage(
    localTemplateRoot,
    appRoot
):
    move_template(localTemplateRoot, appRoot, 'pages/homepage.tmpl')
# move the master layout template into every skin directory
def move_master_layout(
    localTemplateRoot,
    appRoot
):
    move_template(localTemplateRoot, appRoot, 'layouts/master.tmpl')
def move_and_renamed_files(
    localTemplateRoot,
    appRoot,
    replacements,
    mainAppPlaceholder,
):
    """Move the adapted template files into the application tree and
    rename the placeholder application class files."""
    # Build files first: CMake configuration and config.js.
    move_cmake_file(localTemplateRoot, appRoot)
    move_config_js(localTemplateRoot, appRoot)
    # Entry point and homepage template.
    move_main(localTemplateRoot, appRoot)
    move_homeplage(localTemplateRoot, appRoot)
    # Rename <mainAppPlaceholder>.h/cpp into <mainApp>.h/cpp, then move
    # them into the application sources.
    mainApp = replacements[mainAppPlaceholder]
    rename_app_files(localTemplateRoot, mainAppPlaceholder, mainApp)
    move_app_files(localTemplateRoot, appRoot, mainApp)
    # Finally the "Pages" sub-application and the master layout.
    move_pages_files(localTemplateRoot, appRoot)
    move_master_layout(localTemplateRoot, appRoot)
| |
# -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Common module class form Dafs* modules
"""
import time
import re
import os
from urlparse import urlparse
from classes.Registry import Registry
from classes.kernel.WSModule import WSModule
from classes.kernel.WSException import WSException
from classes.kernel.WSCounter import WSCounter
from classes.models.HostsModel import HostsModel
from classes.models.UrlsBaseModel import UrlsBaseModel
from classes.models.UrlsModel import UrlsModel
from classes.jobs.DafsJob import DafsJob
from classes.threads.DafsThread import DafsThread
from classes.threads.SDafsThread import SDafsThread
class DafsModules(WSModule):
    """ Common module class form Dafs* modules """
    # Logger configuration read by the WSModule base class.
    logger_enable = True
    logger_name = 'dafs'
    logger_have_items = True

    def load_objects(self, queue):
        """ Method for prepare check objects, here abstract """
        pass

    def _insert_urls(self, urls):
        """ Add found urls in db """
        UrlsBase = UrlsBaseModel()
        pid = Registry().get('pData')['id']
        host_id = HostsModel().get_id_by_name(pid, self.options['host'].value)
        Urls = UrlsModel()
        added = 0
        for url in urls:
            if Urls.add(pid, host_id, url['url'], '', url['code'], url['time'], 'dafs'):
                added += 1
            # Also register every parent directory of the found URL so the
            # stored URL tree is complete.
            paths = urlparse(url['url']).path.split("/")
            while len(paths) != 1:
                del paths[-1]
                if Urls.add(pid, host_id, "/".join(paths) + "/", '', 0, 0, 'dafs'):
                    added += 1
            UrlsBase.add_url(host_id, url['url'])
        return added

    def validate_main(self):
        """ Check users params """
        super(DafsModules, self).validate_main()
        # The scan template is an absolute path on the target host.
        if self.options['template'].value[0] != '/':
            raise WSException("Template must start from the root ('/') !")

    def scan_action(self):
        """ Scan action of module """
        self.enable_logger()
        self.validate_main()
        self.pre_start_inf()
        if self.options['proxies'].value:
            Registry().get('proxies').load(self.options['proxies'].value)
        # The template must contain the mutation symbol that marks where
        # dictionary words are substituted.
        if self.options['template'].value.find(self.options['msymbol'].value) == -1:
            raise WSException(
                "Symbol of object position ({0}) not found in URL ({1}) ".
                format(self.options['msymbol'].value, self.options['template'].value)
            )
        result = []
        q = DafsJob()
        loaded = self.load_objects(q)
        # Report either the slice loaded (when scanning in parts) or the
        # whole dictionary size.
        self.logger.log(
            "Loaded {0} words ({1}-{2}) from all {3}.".format(
                (loaded['end'] - loaded['start']), loaded['start'], loaded['end'], loaded['all'])
            if (int(self.options['parts'].value) and int(self.options['part'].value)) else
            "Loaded {0} words from source.".format(loaded['all'])
        )
        counter = WSCounter(5, 300, loaded['all'] if not loaded['end'] else loaded['end']-loaded['start'])
        w_thrds = []
        for _ in range(int(self.options['threads'].value)):
            # Selenium-driven and plain HTTP workers take different options.
            if self.options['selenium'].value:
                worker = SDafsThread(
                    q,
                    self.options['protocol'].value,
                    self.options['host'].value,
                    self.options['template'].value,
                    self.options['method'].value.lower(),
                    self.options['msymbol'].value,
                    self.options['not-found-re'].value,
                    self.options['delay'].value,
                    self.options['ddos-detect-phrase'].value,
                    self.options['ddos-human-action'].value,
                    self.options['browser-recreate-re'].value,
                    self.options['ignore-words-re'].value,
                    counter,
                    result
                )
            else:
                worker = DafsThread(
                    q,
                    self.options['protocol'].value,
                    self.options['host'].value,
                    self.options['template'].value,
                    self.options['method'].value.lower(),
                    self.options['msymbol'].value,
                    self.options['not-found-re'].value,
                    self.options['not-found-size'].value,
                    self.options['not-found-codes'].value.lower(),
                    self.options['retest-codes'].value.lower(),
                    self.options['delay'].value,
                    self.options['ignore-words-re'].value,
                    counter,
                    result
                )
            worker.setDaemon(True)
            worker.start()
            w_thrds.append(worker)
            # Stagger thread start-up.
            time.sleep(1)
        timeout_threads_count = 0
        while len(w_thrds):
            if Registry().get('proxy_many_died'):
                self.logger.log("Proxy many died, stop scan")
            if Registry().get('proxy_many_died') or Registry().get('positive_limit_stop'):
                # NOTE(review): only the most recently bound `worker` is
                # flagged done here -- confirm whether every thread in
                # w_thrds should be stopped instead.
                worker.done = True
            time.sleep(3)
            for worker in w_thrds:
                # NOTE(review): w_thrds is mutated while being iterated;
                # deleting entries can skip the following item -- verify.
                if worker.done:
                    del w_thrds[w_thrds.index(worker)]
                # Kill (and, up to the configured limit, resurrect) threads
                # that have been silent longer than kill_thread_after_secs.
                if int(time.time()) - worker.last_action > int(Registry().get('config')['main']['kill_thread_after_secs']):
                    self.logger.log(
                        "Thread killed by time, resurected {0} times from {1}".format(
                            timeout_threads_count,
                            Registry().get('config')['main']['timeout_threads_resurect_max_count']
                        )
                    )
                    del w_thrds[w_thrds.index(worker)]
                    if timeout_threads_count <= int(Registry().get('config')['main']['timeout_threads_resurect_max_count']):
                        # Recreate the worker with the same options as at
                        # start-up (duplicated construction code).
                        if self.options['selenium'].value:
                            worker = SDafsThread(
                                q,
                                self.options['protocol'].value,
                                self.options['host'].value,
                                self.options['template'].value,
                                self.options['method'].value.lower(),
                                self.options['msymbol'].value,
                                self.options['not-found-re'].value,
                                self.options['delay'].value,
                                self.options['ddos-detect-phrase'].value,
                                self.options['ddos-human-action'].value,
                                self.options['browser-recreate-re'].value,
                                self.options['ignore-words-re'].value,
                                counter,
                                result
                            )
                        else:
                            worker = DafsThread(
                                q,
                                self.options['protocol'].value,
                                self.options['host'].value,
                                self.options['template'].value,
                                self.options['method'].value.lower(),
                                self.options['msymbol'].value,
                                self.options['not-found-re'].value,
                                self.options['not-found-size'].value,
                                self.options['not-found-codes'].value.lower(),
                                self.options['retest-codes'].value.lower(),
                                self.options['delay'].value,
                                self.options['ignore-words-re'].value,
                                counter,
                                result
                            )
                        worker.setDaemon(True)
                        worker.start()
                        w_thrds.append(worker)
                        timeout_threads_count += 1
            time.sleep(2)
            # Abort the whole scan when too many positive detections were
            # seen (likely a broken not-found filter).
            if Registry().get('positive_limit_stop'):
                self.logger.log("\nMany positive detections. Please, look items logs")
                self.logger.log("Last items:")
                for i in range(1, 5):
                    print result[-i]
                exit(0)
        if int(Registry().get('config')['main']['put_data_into_db']):
            self.logger.log("\nInsert links in DB...")
            added = self._insert_urls(result)
            for result_row in result:
                self.logger.log("{0} {1}".format(result_row['code'], result_row['url']))
            self.logger.log("\nFound {0} URLs, inserted in database (new) - {1}.".format(len(result), added))
        else:
            self.logger.log("\n")
            for result_row in result:
                self.logger.log("{0} {1}".format(result_row['code'], result_row['url']))
        self.done = True
| |
import os
import os.path as osp
import sys
import google.protobuf as pb
from argparse import ArgumentParser
import numpy as np
import caffe
from caffe.proto import caffe_pb2
def _get_include(phase):
    """Return a NetStateRule restricting a layer to 'train' or 'test'."""
    inc = caffe_pb2.NetStateRule()
    phase_map = {'train': caffe_pb2.TRAIN, 'test': caffe_pb2.TEST}
    if phase not in phase_map:
        raise ValueError("Unknown phase {}".format(phase))
    inc.phase = phase_map[phase]
    return inc
def _get_param(num_param):
    """Return ParamSpec list for a layer: [weight] or [weight, bias]."""
    def _spec(lr, decay):
        spec = caffe_pb2.ParamSpec()
        spec.lr_mult = lr
        spec.decay_mult = decay
        return spec
    if num_param == 1:
        # only weight
        return [_spec(1, 1)]
    if num_param == 2:
        # weight, plus bias with doubled learning rate and no decay
        return [_spec(1, 1), _spec(2, 0)]
    raise ValueError("Unknown num_param {}".format(num_param))
def Input(name, top, shape):
    """Build an Input layer producing blob `top` with the given shape."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name
    layer.type = 'Input'
    layer.top.append(top)
    layer.input_param.shape.add().dim.extend(shape)
    return layer
def Conv(name, bottom, num_output, kernel_size, stride = 1, pad = 0, nobias = False):
    """Build a Convolution layer with MSRA weight initialisation."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name
    layer.type = 'Convolution'
    layer.bottom.append(bottom)
    layer.top.append(name)
    conv = layer.convolution_param
    conv.num_output = num_output
    conv.kernel_size.append(kernel_size)
    conv.stride.append(stride)
    conv.pad.append(pad)
    conv.weight_filler.type = 'msra'
    conv.bias_term = not nobias
    layer.param.extend(_get_param(1 if nobias else 2))
    return layer
def DeConv(name, bottom, num_output, kernel_size, stride = 1, pad = 0, nobias = False):
    """Build a Deconvolution layer with MSRA weight initialisation."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name
    layer.type = 'Deconvolution'
    layer.bottom.append(bottom)
    layer.top.append(name)
    conv = layer.convolution_param
    conv.num_output = num_output
    conv.kernel_size.append(kernel_size)
    conv.stride.append(stride)
    conv.pad.append(pad)
    conv.weight_filler.type = 'msra'
    conv.bias_term = not nobias
    layer.param.extend(_get_param(1 if nobias else 2))
    return layer
def Sigmoid(name, bottom):
    """Build a Sigmoid activation layer named '<name>_sigmoid'."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name + '_sigmoid'
    layer.type = 'Sigmoid'
    layer.bottom.append(bottom)
    layer.top.append(layer.name)
    return layer
def Relu(name, bottom):
    """Build a ReLU activation layer named '<name>_relu'."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name + '_relu'
    layer.type = 'ReLU'
    layer.bottom.append(bottom)
    layer.top.append(layer.name)
    return layer
def LeakyRelu(name, bottom, negative_slope):
    """Build a leaky ReLU layer (ReLU with a negative_slope) named '<name>_relu'."""
    layer = caffe_pb2.LayerParameter()
    layer.name = name + '_relu'
    layer.type = 'ReLU'
    layer.relu_param.negative_slope = negative_slope
    layer.bottom.append(bottom)
    layer.top.append(layer.name)
    return layer
def ConvLeakyRelu(name, bottom, num_output, kernel_size, stride = 1, pad = 0, negative_slope = 0.1):
    """Convolution followed by a LeakyReLU; returns both layers in order."""
    conv = Conv(name, bottom, num_output, kernel_size, stride, pad)
    act = LeakyRelu(name, conv.top[0], negative_slope)
    return [conv, act]
def GlobalAvgPool(name, bottom, stride = 1, pad = 0):
    """Build a global average Pooling layer; top blob is '<name>_globalavgpool'."""
    top_name = name + '_globalavgpool'
    pool = caffe_pb2.LayerParameter()
    pool.name = top_name
    pool.type = 'Pooling'
    pool.bottom.append(bottom)
    pool.top.append(top_name)
    pp = pool.pooling_param
    pp.pool = caffe_pb2.PoolingParameter.AVE
    pp.stride = stride
    pp.pad = pad
    # global_pooling makes the kernel span the whole feature map
    pp.global_pooling = True
    pp.engine = caffe_pb2.PoolingParameter.CAFFE
    return pool
def Linear(name, bottom, num_output):
    """Build an InnerProduct (fully connected) layer with MSRA weight init."""
    fc = caffe_pb2.LayerParameter()
    fc.name = name
    fc.type = 'InnerProduct'
    fc.bottom.append(bottom)
    fc.top.append(name)
    ip = fc.inner_product_param
    ip.num_output = num_output
    ip.weight_filler.type = 'msra'
    ip.bias_filler.value = 0
    # two param specs: weights and bias
    fc.param.extend(_get_param(2))
    return fc
def Crop(name, bottom, crop_size):
    """Build a CropCenter layer (custom type) with the given crop_size list."""
    crop = caffe_pb2.LayerParameter()
    crop.name = name
    crop.type = 'CropCenter'
    crop.bottom.append(bottom)
    crop.top.append(name)
    crop.crop_center_param.crop_size.extend(crop_size)
    return crop
def Add(name, bottoms):
    """Build an Eltwise layer combining the given bottom blobs element-wise."""
    elt = caffe_pb2.LayerParameter()
    elt.name = name
    elt.type = 'Eltwise'
    elt.bottom.extend(bottoms)
    elt.top.append(name)
    return elt
def Axpy(name, bottoms):
    """Build an Axpy layer (custom type) over the given bottoms.

    Per its use in SEResBlock, bottoms are [a, x, y] yielding a*x + y.
    """
    axpy = caffe_pb2.LayerParameter()
    axpy.name = name
    axpy.type = 'Axpy'
    axpy.bottom.extend(bottoms)
    axpy.top.append(name)
    return axpy
def SEResBlock(name, bottom, in_channels, out_channels, r=16, slope=0.1):
    """Squeeze-and-Excitation residual block.

    Two 3x3 conv+LeakyReLU layers produce the main path ``h``; a global
    average pool -> FC(out/r) -> ReLU -> FC(out) -> Sigmoid branch produces
    the channel attention ``se``; the (center-cropped) input is the residual
    ``x``. The block output is ``h * se + x`` via the Axpy layer.

    Args:
        name: prefix for every layer name in the block.
        bottom: input blob name.
        in_channels: input channel count; a bridge conv is inserted on the
            residual path when it differs from out_channels.
        out_channels: output channel count.
        r: squeeze reduction ratio for the first FC layer.
        slope: LeakyReLU negative slope.

    Returns:
        list of LayerParameter objects in topological order.
    """
    layers = []
    res_bottom = bottom
    layers.extend(ConvLeakyRelu(name + '/conv1', res_bottom, out_channels, 3, 1, 0, slope))
    layers.extend(ConvLeakyRelu(name + '/conv2', layers[-1].top[0], out_channels, 3, 1, 0, slope))
    h = layers[-1].top[0]
    layers.append(GlobalAvgPool(name + '/fc1', h))
    layers.append(Linear(name + '/fc1', layers[-1].top[0], out_channels // r))
    layers.append(Relu(name + '/fc1', layers[-1].top[0]))
    layers.append(Linear(name + '/fc2', layers[-1].top[0], out_channels))
    layers.append(Sigmoid(name + '/fc2', layers[-1].top[0]))
    se = layers[-1].top[0]
    # Crop the residual path so its spatial size matches h (the two unpadded
    # 3x3 convs shrink the main path) — exact CropCenter semantics come from
    # the custom layer implementation; TODO confirm against it.
    layers.append(Crop(name + '/crop', res_bottom, [0, 0, 2, 2]))
    x = layers[-1].top[0]
    if in_channels != out_channels:
        layers.extend(ConvLeakyRelu(name + '/conv_bridge', x, out_channels, 3, 1, 0, slope))
        x = layers[-1].top[0]
    # h * se + x
    layers.append(Axpy(name + '/axpy', [se, h, x]))
    return layers
def create_model(ch):
    """Assemble the UpResNet10 NetParameter for ``ch`` input channels.

    Structure: input -> pre-conv -> five SE residual blocks (r=4) ->
    bridge conv -> long skip connection (center-cropped, element-wise add)
    -> stride-2 deconvolution back to ``ch`` channels.
    """
    model = caffe_pb2.NetParameter()
    model.name = 'UpResNet10_{}'.format(ch)
    layers = []
    # Fixed 90x90 input patch; unpadded convs shrink it along the way.
    layers.append(Input('data', 'input', [1, ch, 90, 90]))
    layers.extend(ConvLeakyRelu('/conv_pre', layers[-1].top[0], 64, 3, 1, 0, 0.1))
    skip = layers[-1].top[0]
    layers.extend(SEResBlock('/res1', layers[-1].top[0], 64, 64, 4))
    layers.extend(SEResBlock('/res2', layers[-1].top[0], 64, 64, 4))
    layers.extend(SEResBlock('/res3', layers[-1].top[0], 64, 64, 4))
    layers.extend(SEResBlock('/res4', layers[-1].top[0], 64, 64, 4))
    layers.extend(SEResBlock('/res5', layers[-1].top[0], 64, 64, 4))
    layers.extend(ConvLeakyRelu('/conv_bridge', layers[-1].top[0], 64, 3, 1, 0, 0.1))
    h = layers[-1].top[0]
    # Align the long skip path with h before the element-wise add.
    layers.append(Crop('/crop', skip, [0, 0, 11, 11]))
    skip = layers[-1].top[0]
    layers.append(Add('/add', [h, skip]))
    # Final upsampling deconvolution (kernel 4, stride 2, pad 3, no bias).
    layers.append(DeConv('/conv_post', layers[-1].top[0], ch, 4, 2, 3, True))
    model.layer.extend(layers)
    return model
def main(args):
    """Generate the prototxt and write it to args.output.

    When args.output is None a default path next to this script, named after
    the channel count, is chosen (and written back onto args).
    """
    if args.output is None:
        default_name = 'upresnet10_{}.prototxt'.format(args.ch)
        args.output = osp.join(osp.dirname(__file__), default_name)
    net = create_model(args.ch)
    with open(args.output, 'w') as fp:
        fp.write(pb.text_format.MessageToString(net))
if __name__ == '__main__':
    # CLI: --ch selects the number of input channels for the generated
    # network; -o/--output overrides the default prototxt output path.
    parser = ArgumentParser()
    parser.add_argument('--ch', type=int, default=3,
                        choices=[1, 3])
    parser.add_argument('-o', '--output', type=str)
    args = parser.parse_args()
    main(args)
| |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pygogo
~~~~~~
A Python logging library with super powers
Examples:
basic usage::
>>> logger = Gogo('basic').logger
>>> logger.debug('hello world')
hello world
intermediate usage::
>>> formatter = logging.Formatter('IP: %(ip)s - %(message)s')
>>> kwargs = {'low_formatter': formatter}
>>> logger = Gogo('intermediate', **kwargs).get_logger(ip='1.1.1.1')
>>> logger.debug('hello world')
IP: 1.1.1.1 - hello world
advanced usage::
>>> from io import StringIO
>>> from json import loads
>>>
>>> high = StringIO()
>>> low = StringIO()
>>> kwargs = {
... 'monolog': True, 'high_hdlr': handlers.fileobj_hdlr(high),
... 'low_hdlr': handlers.fileobj_hdlr(low)}
>>> logger = Gogo('adv', **kwargs).get_structured_logger(ip='1.1.1.1')
>>> logger.debug('debug')
>>> logger.error('error')
>>> loads(low.getvalue()) == {'ip': '1.1.1.1', 'message': 'debug'}
True
>>> loads(high.getvalue()) == {'ip': '1.1.1.1', 'message': 'error'}
True
"""
import logging
import hashlib
import sys
from copy import copy
from . import formatters, handlers, utils
# Package metadata.
__version__ = "1.2.0"
__all__ = ["formatters", "handlers", "utils"]
__title__ = "pygogo"
__author__ = "Reuben Cummings"
__description__ = "A Python logging library with super powers"
__email__ = "reubano@gmail.com"
__license__ = "MIT"
__copyright__ = "Copyright 2015 Reuben Cummings"
# This module's own logger writes to stdout. A named logging.Filter passes
# only records originating from that logger (and its children), so attaching
# the '<package>.init' filter keeps other loggers' events off this handler.
module_hdlr = logging.StreamHandler(sys.stdout)
module_fltr = logging.Filter(name="%s.init" % __name__)
# prevent handler from logging `pygogo.main` events
module_hdlr.addFilter(module_fltr)
module_logger = logging.getLogger(__name__)
module_logger.addHandler(module_hdlr)
class Gogo(object):
    """A logging class that logs events via different handlers depending on the
    severity
    http://stackoverflow.com/a/28743317/408556
    Attributes:
        loggers (set[str]): Set of existing loggers
        monolog (bool): Log high level events only to high pass handler.
        name (string): The logger name.
        levels (dict): The min levels to log to handlers.
        handlers (dict): The high/low pass log handlers.
        formatters (dict): The high/low pass log formatter.
    +------------------------------------+-----------------+
    | messages level                     | message handler |
    +====================================+=================+
    | < levels['low']                    | none            |
    +------------------------------------+-----------------+
    | >= levels['low'], < levels['high'] | low handler     |
    +------------------------------------+-----------------+
    | >= levels['high']                  | both handlers * |
    +------------------------------------+-----------------+
    * This is the case when :attr:`monolog` is `False`. If :attr:`monolog`
    is True, then :attr:`handlers['high']` will be the only
    message handler
    Args:
        name (string): The logger name.
        high_level (string): The min level to log to high_hdlr
            (default: warning).
        low_level (string): The min level to log to low_hdlr
            (default: debug).
        kwargs (dict): Keyword arguments.
    +---------------------+---------------+
    | messages level      | Numeric value |
    +=====================+===============+
    | critical            | 50            |
    +---------------------+---------------+
    | error               | 40            |
    +---------------------+---------------+
    | warning             | 30            |
    +---------------------+---------------+
    | info                | 20            |
    +---------------------+---------------+
    | debug               | 10            |
    +---------------------+---------------+
    Kwargs:
        high_hdlr (obj): The high pass log handler (a :class:`logging.handlers`
            instance, default: `stderr` StreamHandler).
        low_hdlr (obj): The low pass log handler (a :class:`logging.handlers`
            instance, default: `stdout` StreamHandler).
        high_formatter (obj): The high pass log formatter (a
            :class:`logging.Formatter` instance, default:
            :data:`pygogo.formatters.basic_formatter`).
        low_formatter (obj): The low pass log formatter (a
            :class:`logging.Formatter` instance, default:
            :data:`pygogo.formatters.basic_formatter`).
        verbose (bool): If False, set low level to `info`, if True, set low
            level to `debug`, overrides `levels['low']` if specified
            (default: None).
        monolog (bool): Log high level events only to high pass handler (
            default: False)
    Returns:
        New instance of :class:`pygogo.Gogo`
    Examples:
        >>> Gogo('name').logger.debug('message')
        message
    """
    def __init__(self, name="root", high_level=None, low_level=None, **kwargs):
        """Initialization method.
        Examples:
            >>> Gogo('name') # doctest: +ELLIPSIS
            <pygogo.Gogo object at 0x...>
        """
        verbose = kwargs.get("verbose")
        high_level = high_level or "warning"
        # `verbose`, when given, overrides any explicit low_level argument.
        if verbose is None:
            low_level = low_level or "debug"
        elif verbose:
            low_level = "debug"
        else:
            low_level = "info"
        # Resolve level names to their numeric values; getattr returns None
        # for unknown names, which the isinstance checks below reject.
        self.levels = {
            "high": getattr(logging, high_level.upper(), None),
            "low": getattr(logging, low_level.upper(), None),
        }
        if not isinstance(self.levels["high"], int):
            raise ValueError("Invalid high_level: %s" % self.levels["high"])
        elif not isinstance(self.levels["low"], int):
            raise ValueError("Invalid low_level: %s" % self.levels["low"])
        elif self.levels["high"] < self.levels["low"]:
            raise ValueError("high_level must be >= low_level")
        self.loggers = set()
        self.name = name
        self.handlers = {
            "high": kwargs.get("high_hdlr", handlers.stderr_hdlr()),
            "low": kwargs.get("low_hdlr", handlers.stdout_hdlr()),
        }
        self.formatters = {
            "high": kwargs.get("high_formatter", formatters.basic_formatter),
            "low": kwargs.get("low_formatter", formatters.basic_formatter),
        }
        self.monolog = kwargs.get("monolog")
    @property
    def logger(self):
        """The logger property.
        Returns:
            New instance of :class:`logging.Logger`
        Examples:
            >>> from io import StringIO
            >>> s = StringIO()
            >>> sys.stderr = s
            >>> logger = Gogo('default').logger
            >>> logger # doctest: +ELLIPSIS
            <...Logger...>
            >>> logger.debug('stdout')
            stdout
            >>> kwargs = {'low_level': 'info', 'monolog': True}
            >>> logger = Gogo('ignore_if_lt_info', **kwargs).logger
            >>> logger.debug('ignored')
            >>> logger.info('stdout')
            stdout
            >>> logger.warning('stderr')
            >>> s.getvalue().strip() == 'stderr'
            True
        """
        return self.get_logger()
    def update_hdlr(self, hdlr, level, formatter=None, monolog=False, **kwargs):
        """Update a handler with a formatter, level, and filters.
        Args:
            hdlr (obj): A :class:`logging.handlers` instance.
            level (int): The min level to log to to `hdlr`.
            formatter (obj): The log formatter (a :class:`logging.Formatter`
                instance, default: :data:`pygogo.formatters.basic_formatter`)
            monolog (bool): Log high level events only to high pass handler (
                default: False)
            kwargs (dict): Keyword arguments passed to
                `pygogo.utils.get_structured_filter`
        See also:
            :meth:`pygogo.Gogo.get_logger`
            :meth:`pygogo.Gogo.get_structured_logger`
            :func:`pygogo.utils.LogFilter`
            :func:`pygogo.utils.get_structured_filter`
        Examples:
            >>> low_hdlr = logging.StreamHandler(sys.stdout)
            >>> going = Gogo(low_hdlr=low_hdlr, monolog=True)
            >>> hdlr = going.handlers['low']
            >>> [hdlr.formatter, hdlr.filters, hdlr.level]
            [None, [], 0]
            >>> fmtr = going.formatters['low']
            >>> kwargs = {'formatter': fmtr, 'monolog': going.monolog}
            >>> going.update_hdlr(hdlr, going.levels['low'], **kwargs)
            >>> [hdlr.formatter, hdlr.filters, hdlr.level]
            ... # doctest: +ELLIPSIS
            [<...Formatter obj...>, [<pygogo.utils.LogFilter obj...>], 10]
        """
        hdlr.setLevel(level)
        if monolog:
            # LogFilter rejects events at/above the high level so they only
            # reach the high pass handler.
            log_filter = utils.LogFilter(self.levels["high"])
            hdlr.addFilter(log_filter)
        if kwargs:
            structured_filter = utils.get_structured_filter(**kwargs)
            hdlr.addFilter(structured_filter)
        hdlr.setFormatter(formatter)
    def zip(self, *fmtrs):
        """Format high/low handler properties so that they can be conveniently
        passed to `update_hdlr`.
        Args:
            fmtrs (seq[obj]): A sequence of :class:`logging.Formatter`
                instances.
        See also:
            :meth:`pygogo.Gogo.update_hdlr`
            :meth:`pygogo.Gogo.get_logger`
            :meth:`pygogo.Gogo.get_structured_logger`
        Returns:
            zip: An iterator of (handler, level, formatter, monolog) tuples,
                high pass first, then low pass.
        """
        hdlrs = [self.handlers["high"], self.handlers["low"]]
        levels = [self.levels["high"], self.levels["low"]]
        # The high pass handler never needs the monolog filter; only the low
        # pass handler gets it (when self.monolog is set).
        monologs = [False, self.monolog]
        return zip(hdlrs, levels, fmtrs, monologs)
    def get_logger(self, name="base", **kwargs):
        """Retrieve a named logger.
        Args:
            name (string): The logger name.
        See also:
            :func:`pygogo.copy_hdlr`
            :meth:`pygogo.Gogo.update_hdlr`
            :meth:`pygogo.Gogo.zip`
        Returns:
            New instance of :class:`logging.Logger`
        Examples:
            >>> going = Gogo()
            >>> logger = going.get_logger('default')
            >>> logger # doctest: +ELLIPSIS
            <...Logger...>
            >>> logger.info('from default')
            from default
            >>> going.get_logger('new').info('from new')
            from new
        """
        lggr_name = "%s.%s" % (self.name, name)
        logger = logging.getLogger(lggr_name)
        # Only configure each named logger once; logging.getLogger returns
        # the same object on subsequent calls.
        if lggr_name not in self.loggers:
            self.loggers.add(lggr_name)
            if kwargs:
                kwargs["name"] = lggr_name
            fmtrs = [self.formatters["high"], self.formatters["low"]]
            for zipped in self.zip(*fmtrs):
                hdlr, level, fmtr, monolog = zipped
                # Copy so per-logger filters don't accumulate on the shared
                # handler instances held in self.handlers.
                copied_hdlr = copy_hdlr(hdlr)
                self.update_hdlr(copied_hdlr, level, fmtr, monolog, **kwargs)
                logger.addHandler(copied_hdlr)
            logger.setLevel(self.levels["low"])
        return logger
    def get_structured_logger(self, name=None, **kwargs):
        """Retrieve a structured data logger
        Args:
            name (string): The logger name.
            kwargs (dict): Keyword arguments to include in every log message.
        See also:
            :func:`pygogo.copy_hdlr`
            :meth:`pygogo.Gogo.update_hdlr`
            :meth:`pygogo.Gogo.zip`
            :class:`pygogo.utils.StructuredAdapter`
        Returns:
            New instance of :class:`pygogo.utils.StructuredAdapter`
        Examples:
            >>> from io import StringIO
            >>> from json import loads
            >>> s = StringIO()
            >>> going = Gogo('structured', low_hdlr=handlers.fileobj_hdlr(s))
            >>> logger = going.get_structured_logger(all='true')
            >>> logger # doctest: +ELLIPSIS
            <...StructuredAdapter...>
            >>> logger.debug('hello')
            >>> logger.debug('extra', extra={'key': 'value'})
            >>> s.seek(0) or 0
            0
            >>> loads(next(s)) == {'all': 'true', 'message': 'hello'}
            True
            >>> loads(next(s)) == {
            ...     'all': 'true', 'message': 'extra', 'key': 'value'}
            True
        """
        # pylint: disable=dict-items-not-iterating
        # Derive a stable default name from the supplied kwargs so repeated
        # calls with the same kwargs reuse the same logger.
        values = frozenset(kwargs.items())
        name = name or hashlib.md5(str(values).encode("utf-8")).hexdigest()
        lggr_name = "%s.structured.%s" % (self.name, name)
        logger = logging.getLogger(lggr_name)
        if lggr_name not in self.loggers:
            self.loggers.add(lggr_name)
            formatter = formatters.basic_formatter
            for zipped in self.zip(formatter, formatter):
                hdlr, level, fmtr, monolog = zipped
                copied_hdlr = copy_hdlr(hdlr)
                self.update_hdlr(copied_hdlr, level, fmtr, monolog)
                logger.addHandler(copied_hdlr)
            logger.setLevel(self.levels["low"])
        return utils.StructuredAdapter(logger, kwargs)
def copy_hdlr(hdlr):
    """Safely copy a handler and its associated filters.
    Args:
        hdlr (obj): A :class:`logging.handlers` instance.
    See also:
        :meth:`pygogo.Gogo.get_logger`
        :meth:`pygogo.Gogo.get_structured_logger`
    Returns:
        New instance of :class:`logging.handlers`
    Examples:
        >>> hdlr = logging.StreamHandler(sys.stdout)
        >>> copy_hdlr(hdlr) # doctest: +ELLIPSIS
        <...StreamHandler...>
    """
    clone = copy(hdlr)
    # A shallow copy shares the filters list with the source handler;
    # rebuild it from copied filters so later addFilter calls on the clone
    # don't leak back into the original.
    clone.filters = list(map(copy, hdlr.filters))
    return clone
# Module-level convenience logger built from an all-defaults Gogo instance.
logger = Gogo().logger
| |
import unittest
import reactivex
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
# Shorthand aliases for the ReactiveTest message/subscription factories used
# throughout the tests below.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestMerge(unittest.TestCase):
def test_merge_never2(self):
scheduler = TestScheduler()
n1 = reactivex.never()
n2 = reactivex.never()
def create():
return reactivex.merge(n1, n2)
results = scheduler.start(create)
assert results.messages == []
def test_merge_never3(self):
scheduler = TestScheduler()
n1 = reactivex.never()
n2 = reactivex.never()
n3 = reactivex.never()
def create():
return reactivex.merge(n1, n2, n3)
results = scheduler.start(create)
assert results.messages == []
def test_merge_empty2(self):
scheduler = TestScheduler()
e1 = reactivex.empty()
e2 = reactivex.empty()
def create():
return reactivex.merge(e1, e2)
results = scheduler.start(create)
assert results.messages == [on_completed(200)]
def test_merge_empty3(self):
scheduler = TestScheduler()
e1 = reactivex.empty()
e2 = reactivex.empty()
e3 = reactivex.empty()
def create():
return reactivex.merge(e1, e2, e3)
results = scheduler.start(create)
assert results.messages == [on_completed(200)]
def test_merge_empty_delayed2_right_last(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_completed(240)]
r_msgs = [on_next(150, 1), on_completed(250)]
e1 = scheduler.create_hot_observable(l_msgs)
e2 = scheduler.create_hot_observable(r_msgs)
def create():
return reactivex.merge(e1, e2)
results = scheduler.start(create)
assert results.messages == [on_completed(250)]
def test_merge_empty_delayed2_left_last(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_completed(250)]
r_msgs = [on_next(150, 1), on_completed(240)]
e1 = scheduler.create_hot_observable(l_msgs)
e2 = scheduler.create_hot_observable(r_msgs)
def create():
return reactivex.merge(e1, e2)
results = scheduler.start(create)
assert results.messages == [on_completed(250)]
def test_merge_empty_delayed3_middle_last(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(245)]
msgs2 = [on_next(150, 1), on_completed(250)]
msgs3 = [on_next(150, 1), on_completed(240)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
e3 = scheduler.create_hot_observable(msgs3)
def create():
return reactivex.merge(e1, e2, e3)
results = scheduler.start(create)
assert results.messages == [on_completed(250)]
def test_merge_empty_never(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(245)]
e1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(e1, n1)
results = scheduler.start(create)
assert results.messages == []
def test_merge_never_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(245)]
e1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(n1, e1)
results = scheduler.start(create)
assert results.messages == []
def test_merge_return_never(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(245)]
r1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(r1, n1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2)]
def test_merge_never_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(245)]
r1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(n1, r1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2)]
def test_merge_error_never(self):
ex = "ex"
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_error(245, ex)]
e1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(e1, n1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2), on_error(245, ex)]
def test_merge_never_error(self):
ex = "ex"
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_error(245, ex)]
e1 = scheduler.create_hot_observable(msgs1)
n1 = reactivex.never()
def create():
return reactivex.merge(n1, e1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2), on_error(245, ex)]
def test_merge_empty_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(245)]
msgs2 = [on_next(150, 1), on_next(210, 2), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
r1 = scheduler.create_hot_observable(msgs2)
def create():
return reactivex.merge(e1, r1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2), on_completed(250)]
def test_merge_return_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(245)]
msgs2 = [on_next(150, 1), on_next(210, 2), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
r1 = scheduler.create_hot_observable(msgs2)
def create():
return reactivex.merge(r1, e1)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2), on_completed(250)]
def test_merge_lots2(self):
scheduler = TestScheduler()
msgs1 = [
on_next(150, 1),
on_next(210, 2),
on_next(220, 4),
on_next(230, 6),
on_next(240, 8),
on_completed(245),
]
msgs2 = [
on_next(150, 1),
on_next(215, 3),
on_next(225, 5),
on_next(235, 7),
on_next(245, 9),
on_completed(250),
]
o1 = scheduler.create_hot_observable(msgs1)
o2 = scheduler.create_hot_observable(msgs2)
def create():
return reactivex.merge(o1, o2)
results = scheduler.start(create).messages
assert len(results) == 9
for i, result in enumerate(results[:-1]):
assert result.value.kind == "N"
assert result.time == 210 + i * 5
assert result.value.value == i + 2
assert results[8].value.kind == "C" and results[8].time == 250
def test_merge_lots3(self):
scheduler = TestScheduler()
msgs1 = [
on_next(150, 1),
on_next(210, 2),
on_next(225, 5),
on_next(240, 8),
on_completed(245),
]
msgs2 = [
on_next(150, 1),
on_next(215, 3),
on_next(230, 6),
on_next(245, 9),
on_completed(250),
]
msgs3 = [on_next(150, 1), on_next(220, 4), on_next(235, 7), on_completed(240)]
o1 = scheduler.create_hot_observable(msgs1)
o2 = scheduler.create_hot_observable(msgs2)
o3 = scheduler.create_hot_observable(msgs3)
def create():
return reactivex.merge(o1, o2, o3)
results = scheduler.start(create).messages
assert len(results) == 9
for i, result in enumerate(results[:-1]):
assert (
results[i].value.kind == "N"
and results[i].time == 210 + i * 5
and results[i].value.value == i + 2
)
assert results[8].value.kind == "C" and results[8].time == 250
def test_merge_error_left(self):
ex = "ex"
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_error(245, ex)]
msgs2 = [on_next(150, 1), on_next(215, 3), on_completed(250)]
o1 = scheduler.create_hot_observable(msgs1)
o2 = scheduler.create_hot_observable(msgs2)
def create():
return reactivex.merge(o1, o2)
results = scheduler.start(create)
assert results.messages == [on_next(210, 2), on_next(215, 3), on_error(245, ex)]
def test_merge_error_causes_disposal(self):
ex = "ex"
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_error(210, ex)]
msgs2 = [on_next(150, 1), on_next(220, 1), on_completed(250)]
source_not_disposed = [False]
o1 = scheduler.create_hot_observable(msgs1)
def action():
source_not_disposed[0] = True
o2 = scheduler.create_hot_observable(msgs2).pipe(ops.do_action(on_next=action))
def create():
return reactivex.merge(o1, o2)
results = scheduler.start(create)
assert results.messages == [on_error(210, ex)]
assert not source_not_disposed[0]
def test_merge_observable_of_observable_data(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
300,
scheduler.create_cold_observable(
on_next(10, 101),
on_next(20, 102),
on_next(110, 103),
on_next(120, 104),
on_next(210, 105),
on_next(220, 106),
on_completed(230),
),
),
on_next(
400,
scheduler.create_cold_observable(
on_next(10, 201),
on_next(20, 202),
on_next(30, 203),
on_next(40, 200),
on_completed(50),
),
),
on_next(
500,
scheduler.create_cold_observable(
on_next(10, 301),
on_next(20, 302),
on_next(30, 303),
on_next(40, 304),
on_next(120, 305),
on_completed(150),
),
),
on_completed(600),
)
def create():
return xs.pipe(ops.merge_all())
results = scheduler.start(create)
assert results.messages == [
on_next(310, 101),
on_next(320, 102),
on_next(410, 103),
on_next(410, 201),
on_next(420, 104),
on_next(420, 202),
on_next(430, 203),
on_next(440, 200),
on_next(510, 105),
on_next(510, 301),
on_next(520, 106),
on_next(520, 302),
on_next(530, 303),
on_next(540, 304),
on_next(620, 305),
on_completed(650),
]
def test_merge_observable_of_observable_data_non_overlapped(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
300,
scheduler.create_cold_observable(
on_next(10, 101), on_next(20, 102), on_completed(230)
),
),
on_next(
400,
scheduler.create_cold_observable(
on_next(10, 201),
on_next(20, 202),
on_next(30, 203),
on_next(40, 200),
on_completed(50),
),
),
on_next(
500,
scheduler.create_cold_observable(
on_next(10, 301),
on_next(20, 302),
on_next(30, 303),
on_next(40, 304),
on_completed(50),
),
),
on_completed(600),
)
def create():
return xs.pipe(ops.merge_all())
results = scheduler.start(create)
assert results.messages == [
on_next(310, 101),
on_next(320, 102),
on_next(410, 201),
on_next(420, 202),
on_next(430, 203),
on_next(440, 200),
on_next(510, 301),
on_next(520, 302),
on_next(530, 303),
on_next(540, 304),
on_completed(600),
]
def test_merge_observable_of_observable_inner_throws(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
300,
scheduler.create_cold_observable(
on_next(10, 101), on_next(20, 102), on_completed(230)
),
),
on_next(
400,
scheduler.create_cold_observable(
on_next(10, 201),
on_next(20, 202),
on_next(30, 203),
on_next(40, 200),
on_error(50, ex),
),
),
on_next(
500,
scheduler.create_cold_observable(
on_next(10, 301),
on_next(20, 302),
on_next(30, 303),
on_next(40, 304),
on_completed(50),
),
),
on_completed(600),
)
def create():
return xs.pipe(ops.merge_all())
results = scheduler.start(create)
assert results.messages == [
on_next(310, 101),
on_next(320, 102),
on_next(410, 201),
on_next(420, 202),
on_next(430, 203),
on_next(440, 200),
on_error(450, ex),
]
def test_merge_observable_of_observable_outer_throws(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
300,
scheduler.create_cold_observable(
on_next(10, 101), on_next(20, 102), on_completed(230)
),
),
on_next(
400,
scheduler.create_cold_observable(
on_next(10, 201),
on_next(20, 202),
on_next(30, 203),
on_next(40, 200),
on_completed(50),
),
),
on_error(500, ex),
)
def create():
return xs.pipe(ops.merge_all())
results = scheduler.start(create)
assert results.messages == [
on_next(310, 101),
on_next(320, 102),
on_next(410, 201),
on_next(420, 202),
on_next(430, 203),
on_next(440, 200),
on_error(500, ex),
]
def test_mergeconcat_basic(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(200)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
320,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_completed(400),
)
def create():
return xs.pipe(ops.merge(max_concurrent=2))
results = scheduler.start(create)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 6),
on_next(440, 7),
on_next(460, 8),
on_next(670, 9),
on_next(700, 10),
on_completed(760),
]
assert xs.subscriptions == [subscribe(200, 400)]
def test_mergeconcat_basic_long(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(300)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
320,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_completed(400),
)
def create():
return xs.pipe(ops.merge(max_concurrent=2))
results = scheduler.start(create)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 6),
on_next(440, 7),
on_next(460, 8),
on_next(690, 9),
on_next(720, 10),
on_completed(780),
]
assert xs.subscriptions == [subscribe(200, 400)]
def test_mergeconcat_basic_wide(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(300)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
420,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_completed(450),
)
def create():
return xs.pipe(ops.merge(max_concurrent=3))
results = scheduler.start(create)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(280, 6),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 7),
on_next(380, 8),
on_next(630, 9),
on_next(660, 10),
on_completed(720),
]
assert xs.subscriptions == [subscribe(200, 450)]
def test_mergeconcat_basic_late(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(300)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
420,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_completed(750),
)
def create():
return xs.pipe(ops.merge(max_concurrent=3))
results = scheduler.start(create)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(280, 6),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 7),
on_next(380, 8),
on_next(630, 9),
on_next(660, 10),
on_completed(750),
]
assert xs.subscriptions == [subscribe(200, 750)]
def test_mergeconcat_disposed(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(200)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
320,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_completed(400),
)
def create():
return xs.pipe(ops.merge(max_concurrent=2))
results = scheduler.start(create, disposed=450)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 6),
on_next(440, 7),
]
assert xs.subscriptions == [subscribe(200, 400)]
def test_mergeconcat_outererror(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(
210,
scheduler.create_cold_observable(
on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
),
),
on_next(
260,
scheduler.create_cold_observable(
on_next(20, 4), on_next(70, 5), on_completed(200)
),
),
on_next(
270,
scheduler.create_cold_observable(
on_next(10, 6), on_next(90, 7), on_next(110, 8), on_completed(130)
),
),
on_next(
320,
scheduler.create_cold_observable(
on_next(210, 9), on_next(240, 10), on_completed(300)
),
),
on_error(400, ex),
)
def create():
return xs.pipe(ops.merge(max_concurrent=2))
results = scheduler.start(create)
assert results.messages == [
on_next(260, 1),
on_next(280, 4),
on_next(310, 2),
on_next(330, 3),
on_next(330, 5),
on_next(360, 6),
on_error(400, ex),
]
assert xs.subscriptions == [subscribe(200, 400)]
def test_mergeconcat_innererror(self):
    """merge(max_concurrent=2): an error raised by an INNER observable
    (the third one, erroring 140 ticks after its delayed subscription)
    terminates the merged stream with that error."""
    ex = "ex"
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(
            210,
            scheduler.create_cold_observable(
                on_next(50, 1), on_next(100, 2), on_next(120, 3), on_completed(140)
            ),
        ),
        on_next(
            260,
            scheduler.create_cold_observable(
                on_next(20, 4), on_next(70, 5), on_completed(200)
            ),
        ),
        on_next(
            270,
            # This inner errors 140 ticks after it is finally subscribed.
            scheduler.create_cold_observable(
                on_next(10, 6), on_next(90, 7), on_next(110, 8), on_error(140, ex)
            ),
        ),
        on_next(
            320,
            scheduler.create_cold_observable(
                on_next(210, 9), on_next(240, 10), on_completed(300)
            ),
        ),
        on_completed(400),
    )

    def create():
        return xs.pipe(ops.merge(max_concurrent=2))

    results = scheduler.start(create)
    # The third inner is held back by max_concurrent=2 and subscribed at
    # 350; its on_error(140) therefore surfaces at 490.
    assert results.messages == [
        on_next(260, 1),
        on_next(280, 4),
        on_next(310, 2),
        on_next(330, 3),
        on_next(330, 5),
        on_next(360, 6),
        on_next(440, 7),
        on_next(460, 8),
        on_error(490, ex),
    ]
    assert xs.subscriptions == [subscribe(200, 400)]
def test_merge_112233(self):
    """merge of two overlapping hot observables interleaves their values
    in time order; simultaneous values (same virtual tick) are both kept."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(250, 1), on_next(300, 2), on_next(350, 3), on_completed(360)
    )
    ys = scheduler.create_hot_observable(
        on_next(250, 1), on_next(300, 2), on_next(320, 3), on_completed(340)
    )

    def create():
        return xs.pipe(ops.merge(ys))

    results = scheduler.start(create)
    # Merged stream completes only when BOTH sources have completed (360).
    assert results.messages == [
        on_next(250, 1),
        on_next(250, 1),
        on_next(300, 2),
        on_next(300, 2),
        on_next(320, 3),
        on_next(350, 3),
        on_completed(360),
    ]
    assert xs.subscriptions == [subscribe(200, 360)]
| |
# -*- coding: utf-8 -*-
"Contains initialization code"
import numpy as np
import struct
import sys
import threading
import queue
import cv2
import time
from multiprocessing import Process
from crypto.aes import Aes
from image.format import PacketFormat
from image.webcam import Webcam
from image.frame import FrameDisplay
from image.encodings import JpegEncoding
from image.logging import Logging
from network.udpclient import UDPclient
from network.tcpclient import TCPclient
class MoVi:
    """Adaptive webcam streaming over UDP with per-region (tile) encoding.

    Each captured frame is split into regionSize x regionSize tiles.  A tile
    is JPEG-encoded, signed and sent only when it changed enough since the
    last acknowledged version (or on a periodic full refresh).  Received
    acks drive a simple controller that adapts the change threshold and the
    JPEG quality to the observed delivery ratio.  A TCP broker is used only
    for UDP hole punching between the two peers.
    """

    def integer_bytes_encode(self, ints):
        """Pack an iterable of ints into little-endian unsigned 16-bit bytes."""
        b = bytearray()
        for i in ints:
            # < is for little endian
            # H is for unsigned short (16 bits)
            b += struct.pack("<H", i)
        return b

    def integer_bytes_decode(self, byte_array):
        """Unpack a single little-endian unsigned 16-bit value."""
        # NOTE(review): the repr() result is discarded — this line is a no-op
        # (probably a leftover debug statement).
        repr(byte_array)
        return struct.unpack("<H", byte_array)[0]

    def __init__(self, mode, broker_ip):
        """Start the peer.

        :param mode: "SERVER" (captures and sends video) or "CLIENT"
            (receives and displays video).  Anything else exits.
        :param broker_ip: address of the rendezvous broker used for
            UDP hole punching.
        """
        if mode != "SERVER" and mode != "CLIENT":
            print("Wrong mode")
            exit(1)
        # Set this to whichever encoding you want to test
        self.jpeg_quality = 70
        self.jpeg_lower = 40   # lower bound for adaptive JPEG quality
        self.jpeg_upper = 85   # upper bound for adaptive JPEG quality
        self.img_format = JpegEncoding(self.jpeg_quality)
        self.packetFormat = PacketFormat()
        self.logging = Logging()
        self.regionSize = 150  # tile edge length in pixels
        self.time_last = 0
        self.time_waiting = 0.25  # seconds between forced full refreshes
        # Mean-difference thresholds controlling when a changed tile resends.
        self.threshold = 280
        self.lower_thresh = 250
        self.upper_thresh = 330
        self.recved_frames = 0
        # Rendezvous with the broker; the "ping" datagrams open NAT mappings
        # for the data (3000/3500) and ack (4000/4500) channels.
        broker_client = TCPclient(8500, broker_ip)
        self.network_client = UDPclient(3000)
        self.network_client.update((broker_ip, 3500))
        self.network_client.send("ping".encode())
        self.network_client_ack = UDPclient(4000)
        self.network_client_ack.update((broker_ip, 4500))
        self.network_client_ack.send("ping".encode())
        # Broker returns the other peer's public address/ports.
        (other_ip, tcp_port,
         udp_port1, udp_port2) = broker_client.udp_hole_mapping()
        self.udp_host = other_ip
        # NOTE(review): hard-coded shared signing key.
        self.key = "abcd"
        broker_client.close()
        if mode == "SERVER":
            print("Running as server")
            # tcpserver = TCPserver(port, host)
            # for connection in tcpserver.connection_information(3000):
            #     # For every connection to tcpserver
            #     print("Got connection from: {}:{}"
            #           .format(connection[0], connection[1][1]))
            self.signing = Aes(self.key)
            # Point both UDP sockets at the peer discovered via the broker.
            self.network_client.update((self.udp_host, udp_port1))
            self.network_client_ack.update((self.udp_host, udp_port2))
            # Begin sending data
            # self.sender_single()
            self.runner("SERVER")
            # Finally close TCP
            # tcpserver.close()
        else:
            print("Running as client")
            # Talk to target TCP process
            # tcpclient = TCPclient(tcp_port, self.udp_host)
            # Get the secret key and udp_port for video from TCP
            # key, udp_port = tcpclient.get_information(2000)
            # tcpclient.close()
            # Bind to a UDP port to talk
            # self.network_client = UDPclient(2000)
            self.network_client.update((self.udp_host, udp_port1))
            # self.network_client_ack = UDPclient(4000)
            self.network_client_ack.update((self.udp_host, udp_port2))
            self.signing = Aes(self.key)
            # Begin receiving
            # self.receiver_single()
            self.runner("CLIENT")

    def runner(self, frame_name):
        """Run sender and receiver loops concurrently in two processes."""
        self.frame_name = frame_name
        # t1 = threading.Thread(target=self.send_state)
        # t2 = threading.Thread(target=self.recv_state)
        t1 = Process(target=self.send_state)
        t2 = Process(target=self.recv_state)
        t1.start()
        t2.start()
        t1.join()
        t2.join()

    def sender_single(self):
        """Run only the sending loop in the current process (debug helper)."""
        self.frame_name = "SENDER"
        self.send_state()

    def receiver_single(self):
        """Run only the receiving loop in the current process (debug helper)."""
        self.frame_name = "RECEIVER"
        self.recv_state()

    def xy_mapping(self, x, y):
        """Map a tile's top-left pixel (x, y) to a flat tile index."""
        max_x = 450//self.regionSize + 1
        return (y//self.regionSize)*max_x + (x//self.regionSize)

    def send_state(self):
        """Capture frames, detect changed tiles and send them until the
        display window is closed.  Starts a background ack-receiver thread."""
        ret = True
        display = FrameDisplay('{}: Sending frame'.format(self.frame_name))
        self.cam = Webcam()
        # Per-tile sequence numbers, last acked seq, and unacked tile queues.
        self.currentSeqNo = [1]*(self.xy_mapping(450, 600) + 1)
        self.lastAck = [1]*(self.xy_mapping(450, 600) + 1)
        self.cheating = [1]*(self.xy_mapping(450, 600) + 1)
        self.queue = [queue.Queue()
                      for i in range(0, 1 + self.xy_mapping(450, 600))]
        # Last acknowledged pixel content per tile (reference for change
        # detection); starts as all-black tiles.
        self.last = [np.zeros((self.regionSize, self.regionSize, 3),
                              dtype=np.uint8)
                     for i in range(0, 1+self.xy_mapping(450, 600))]
        t1 = threading.Thread(target=self.recv_ack)
        for x in range(0, 450, self.regionSize):
            for y in range(0, 600, self.regionSize):
                self.currentSeqNo[self.xy_mapping(x, y)] = 0
                self.lastAck[self.xy_mapping(x, y)] = 0
        t1.start()
        fn = 0  # count of tiles skipped as "unchanged"
        self.frames_sent = 1
        self.ack_recvd = 1
        while ret:
            ret, frame = self.cam.getFrame()
            if ret:
                # Blur to suppress sensor noise that would trigger resends.
                frame = cv2.GaussianBlur(frame, (3, 3), 0)
                ret = display.showFrame(frame)
                # flag == 1 forces a periodic full refresh of every tile.
                flag = 0
                if time.time() - self.time_last > self.time_waiting:
                    flag = 1
                    self.time_last = time.time()
                for x in range(0, 450, self.regionSize):
                    for y in range(0, 600, self.regionSize):
                        # self.cheating[self.xy_mapping(x,y)] ^= 1
                        # if self.cheating[self.xy_mapping(x,y)]:
                        #     fn += 1
                        #     continue
                        # Bound the unacked backlog; dropped entries are
                        # treated as implicitly acknowledged.
                        while self.queue[self.xy_mapping(x, y)].qsize() > 100:
                            self.queue[self.xy_mapping(x, y)].get()
                            self.lastAck[self.xy_mapping(x, y)] += 1
                        # Send when forced (flag) or when the tile differs
                        # from the last acked version by more than threshold.
                        if (flag or
                                np.sum(np.absolute(
                                    self.last[self.xy_mapping(x, y)] -
                                    frame[x:min(x + self.regionSize, 450),
                                          y:min(y + self.regionSize, 600)])) >
                                (self.regionSize *
                                 self.regionSize *
                                 self.threshold)):
                            self.currentSeqNo[self.xy_mapping(x, y)] += 1
                            self.queue[self.xy_mapping(x, y)].put(
                                frame[x:min(x + self.regionSize, 450),
                                      y:min(y + self.regionSize, 600)])
                            frame_data = self.img_format.encode(
                                frame[x:min(x + self.regionSize, 450),
                                      y:min(y + self.regionSize, 600)])
                            packet_data = self.packetFormat.pack(
                                x, y, self.currentSeqNo[self.xy_mapping(x, y)],
                                self.signing.sign(frame_data), frame_data)
                            try:
                                self.network_client.send(packet_data)
                                self.frames_sent += 1
                            # NOTE(review): bare except silently swallows every
                            # error, not just network failures.
                            except:
                                print("network unreachable")
                            print("Current stats: ", fn, self.frames_sent,
                                  self.ack_recvd, self.threshold,
                                  self.jpeg_quality, end="\r")
                            # self.logging.log(("Sent frame ", x, " ", y,
                            #                   "of length", len(packet_data),
                            #                   self.currentSeqNo[self.xy_mapping(x, y)]))
                        else:
                            fn += 1
                            # self.logging.log(("Frame not sent ",fn, fs))

    def recv_ack(self):
        """Background thread: consume acks, advance per-tile ack state and
        adapt threshold / JPEG quality to the delivery ratio every 2 s."""
        last = 0
        print("Receiving ack")
        while 1:
            data, new_addr = self.network_client_ack.recv()
            x, y, ack, sign = self.packetFormat.unpack_ack(data)
            self.recved_frames += 1
            # self.logging.log("Received ack")
            # Ignore stale/duplicate acks.
            if(ack <= self.lastAck[self.xy_mapping(x, y)]):
                continue
            # Drop all now-acknowledged queued tiles for this region.
            for i in range(0, min(ack - self.lastAck[self.xy_mapping(x, y)],
                                  self.queue[self.xy_mapping(x, y)].qsize())):
                self.queue[self.xy_mapping(x, y)].get()
                self.ack_recvd += 1
            # The next queued tile becomes the new change-detection reference.
            # NOTE(review): Queue.get() blocks if the queue is empty — verify
            # an entry is always present here.
            self.last[self.xy_mapping(x, y)] = (
                self.queue[self.xy_mapping(x, y)].get())
            self.lastAck[self.xy_mapping(x, y)] = ack
            self.network_client_ack.update(new_addr)
            if time.time() - last > 2:
                last = time.time()
                # Good delivery (>50% acked): lower the resend threshold and
                # raise quality; poor delivery: back off.
                if self.ack_recvd > 0.5*self.frames_sent:
                    if self.threshold > self.lower_thresh:
                        self.threshold *= 0.99
                    if self.jpeg_upper > self.jpeg_quality:
                        self.jpeg_quality *= 1.01
                else:
                    if self.threshold < self.upper_thresh:
                        self.threshold *= 1.01
                    if self.jpeg_lower < self.jpeg_quality:
                        self.jpeg_quality *= 0.99
                self.img_format.set_quality(self.jpeg_quality)

    def recv_state(self):
        """Receive signed tile packets, paste valid tiles into the composite
        image, display it, and return an ack for each valid packet."""
        matrix_img = np.zeros((480, 640, 3), dtype=np.uint8)
        display = FrameDisplay('{}: Receiving frame'.format(self.frame_name))
        ret = True
        while ret:
            data, new_addr = self.network_client.recv()
            x, y, ack, sign, frame_data = self.packetFormat.unpack(data)
            # Check validity of packet
            if self.signing.check_sign(sign, frame_data):
                # self.logging.log((x, " ", y,
                #                   "Got frame of length ", len(data)))
                matrix_img[x:min(x + self.regionSize, 450),
                           y:min(y + self.regionSize, 600)] = (
                    self.img_format.decode(frame_data))
                ret = display.showFrame(matrix_img)
                packet_data = self.packetFormat.pack_ack(
                    x, y, ack, sign)
                if self.network_client_ack.send(packet_data) < 1:
                    print("could not send ack")
                # Update the latest address
                # Should be handled inside recv
                self.network_client.update(new_addr)
                # self.network_client_ack.update(new_addr)
# Begin execution.
# The program takes exactly two arguments: the mode and the broker address.
# (The usage string previously claimed "host port", which did not match the
# actual arguments consumed below.)
if len(sys.argv) < 3:
    print("Usage: python movi.py SERVER|CLIENT broker_ip")
    exit(1)
MoVi(sys.argv[1], sys.argv[2])
| |
# Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import basename, isdir, join
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
from platformio.util import get_serial_ports
def BeforeUpload(target, source, env):  # pylint: disable=W0613,W0621
    """SCons pre-upload hook: detect the upload port and perform the
    board-specific serial dance (flush, 1200-bps touch, wait for the port
    to re-enumerate) before the uploader runs.

    The statement order matters: the port list must be captured *before*
    the 1200-bps touch so the newly enumerated port can be identified.
    """
    env.AutodetectUploadPort()
    upload_options = {}
    if "BOARD" in env:
        upload_options = env.BoardConfig().get("upload", {})
    if not bool(upload_options.get("disable_flushing", False)):
        env.FlushSerialBuffer("$UPLOAD_PORT")
    before_ports = get_serial_ports()
    if bool(upload_options.get("use_1200bps_touch", False)):
        # Opening the port at 1200 baud resets SAM-BA-style bootloaders.
        env.TouchSerialPort("$UPLOAD_PORT", 1200)
    if bool(upload_options.get("wait_for_upload_port", False)):
        env.Replace(UPLOAD_PORT=env.WaitForNewSerialPort(before_ports))
    # use only port name for BOSSA
    if ("/" in env.subst("$UPLOAD_PORT") and
            env.subst("$UPLOAD_PROTOCOL") == "sam-ba"):
        env.Replace(UPLOAD_PORT=basename(env.subst("$UPLOAD_PORT")))
# --- Build-environment setup: ARM GCC toolchain and size-report patterns ---
env = DefaultEnvironment()
env.SConscript("compat.py", exports="env")
platform = env.PioPlatform()
board = env.BoardConfig()
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
# Explicit BOARD_MCU overrides the board manifest's build.mcu.
build_mcu = env.get("BOARD_MCU", board.get("build.mcu", ""))
env.Replace(
    AR="arm-none-eabi-gcc-ar",
    AS="arm-none-eabi-as",
    CC="arm-none-eabi-gcc",
    CXX="arm-none-eabi-g++",
    GDB="arm-none-eabi-gdb",
    OBJCOPY="arm-none-eabi-objcopy",
    RANLIB="arm-none-eabi-gcc-ranlib",
    SIZETOOL="arm-none-eabi-size",
    ARFLAGS=["rc"],
    # Regexes applied to `arm-none-eabi-size -A` output to compute the
    # program (flash) and data (RAM) footprints.
    SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
    SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
    SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
    SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
    PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
    env.Replace(PROGNAME="firmware")
# Builders converting the linked ELF into flashable images (.bin / .hex).
env.Append(
    BUILDERS=dict(
        ElfToBin=Builder(
            action=env.VerboseAction(" ".join([
                "$OBJCOPY",
                "-O",
                "binary",
                "$SOURCES",
                "$TARGET"
            ]), "Building $TARGET"),
            suffix=".bin"
        ),
        ElfToHex=Builder(
            action=env.VerboseAction(" ".join([
                "$OBJCOPY",
                "-O",
                "ihex",
                "-R",
                ".eeprom",
                "$SOURCES",
                "$TARGET"
            ]), "Building $TARGET"),
            suffix=".hex"
        )
    )
)
# Without a framework, fall back to the bare-metal build configuration.
if not env.get("PIOFRAMEWORK"):
    env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
# Zephyr needs its pre-build script to run before BuildProgram().
if "zephyr" in env.get("PIOFRAMEWORK", []):
    env.SConscript(
        join(platform.get_package_dir(
            "framework-zephyr"), "scripts", "platformio", "platformio-build-pre.py"),
        exports={"env": env}
    )
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
    # "nobuild": reuse previously built artifacts instead of compiling.
    target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
    # stk500v2 (avrdude) flashes Intel HEX; everything else uses raw binary.
    target_firm = join("$BUILD_DIR", "${PROGNAME}.%s" %
                       ("hex" if upload_protocol == "stk500v2" else "bin"))
else:
    target_elf = env.BuildProgram()
    if upload_protocol == "stk500v2":
        target_firm = env.ElfToHex(
            join("$BUILD_DIR", "${PROGNAME}"), target_elf)
    else:
        target_firm = env.ElfToBin(
            join("$BUILD_DIR", "${PROGNAME}"), target_elf)
    # Ensure the size check runs before any firmware image is produced.
    env.Depends(target_firm, "checkprogsize")
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
#
# Target: Print binary size
#
target_size = env.Alias(
    "size", target_elf,
    env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
# Configure UPLOADER/UPLOADERFLAGS/UPLOADCMD and the upload actions for the
# selected protocol: blackmagic, jlink, sam-ba (bossac), stk500v2 (avrdude),
# mbctool, any configured debug tool (openocd), or a user-defined command.
debug_tools = board.get("debug.tools", {})
upload_actions = []
if upload_protocol.startswith("blackmagic"):
    # Black Magic Probe: drive GDB in batch mode over the probe's port.
    env.Replace(
        UPLOADER="$GDB",
        UPLOADERFLAGS=[
            "-nx",
            "--batch",
            "-ex", "target extended-remote $UPLOAD_PORT",
            "-ex", "monitor %s_scan" %
            ("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
            "-ex", "attach 1",
            "-ex", "load",
            "-ex", "compare-sections",
            "-ex", "kill"
        ],
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
    )
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]
elif upload_protocol.startswith("jlink"):
    def _jlink_cmd_script(env, source):
        """Write a J-Link commander script (halt, loadbin, reset, quit)
        into the build dir and return its path."""
        build_dir = env.subst("$BUILD_DIR")
        if not isdir(build_dir):
            makedirs(build_dir)
        script_path = join(build_dir, "upload.jlink")
        commands = [
            "h",
            "loadbin %s, %s" % (source, env.BoardConfig().get(
                "upload.offset_address", "0x0")),
            "r",
            "q"
        ]
        with open(script_path, "w") as fp:
            fp.write("\n".join(commands))
        return script_path

    env.Replace(
        # Expanded lazily by SCons inside UPLOADCMD below.
        __jlink_cmd_script=_jlink_cmd_script,
        UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
        UPLOADERFLAGS=[
            "-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
            "-speed", env.GetProjectOption("debug_speed", "4000"),
            "-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
            "-autoconnect", "1",
            "-NoGui", "1"
        ],
        UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
    )
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
elif upload_protocol == "sam-ba":
    # BOSSA (bossac) over the native/SAM-BA bootloader.
    env.Replace(
        UPLOADER="bossac",
        UPLOADERFLAGS=[
            "--port", '"$UPLOAD_PORT"',
            "--write",
            "--verify",
            "--reset"
        ],
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
    )
    if board.get("build.core") in ("adafruit", "seeed", "sparkfun") and board.get(
            "build.mcu").startswith(("samd51", "same51")):
        # special flags for the latest bossac tool
        env.Append(
            UPLOADERFLAGS=[
                "-U", "--offset", board.get("upload.offset_address")])
    else:
        env.Append(UPLOADERFLAGS=[
            "--erase",
            "-U", "true"
            if env.BoardConfig().get("upload.native_usb", False) else "false"
        ])
    if "sam3x8e" in build_mcu:
        env.Append(UPLOADERFLAGS=["--boot"])
    if int(ARGUMENTS.get("PIOVERBOSE", 0)):
        env.Prepend(UPLOADERFLAGS=["--info", "--debug"])
    upload_actions = [
        env.VerboseAction(BeforeUpload, "Looking for upload port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]
elif upload_protocol == "stk500v2":
    env.Replace(
        UPLOADER="avrdude",
        UPLOADERFLAGS=[
            "-p", "atmega2560",  # Arduino M0/Tian upload hook
            "-C", join(
                platform.get_package_dir("tool-avrdude") or "",
                "avrdude.conf"),
            "-c", "$UPLOAD_PROTOCOL",
            "-P", '"$UPLOAD_PORT"',
            "-b", "$UPLOAD_SPEED",
            "-u"
        ],
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS -U flash:w:$SOURCES:i"
    )
    if int(ARGUMENTS.get("PIOVERBOSE", 0)):
        env.Prepend(UPLOADERFLAGS=["-v"])
    upload_actions = [
        env.VerboseAction(BeforeUpload, "Looking for upload port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]
elif upload_protocol == "mbctool":
    env.Replace(
        UPLOADER=join(
            platform.get_package_dir("tool-mbctool") or "", "bin", "mbctool"),
        UPLOADERFLAGS=[
            "--device", "samd",
            "--speed", "1500000",
            "--port", '"$UPLOAD_PORT"',
            "--upload", "$SOURCES",
        ],
        UPLOADCMD='"$UPLOADER" $UPLOADERFLAGS'
    )
    upload_actions = [
        env.VerboseAction(env.AutodetectUploadPort,
                          "Looking for upload port..."),
        env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
    ]
elif upload_protocol in debug_tools:
    # Any configured debug tool (e.g. CMSIS-DAP, ST-Link) via OpenOCD.
    openocd_args = [
        "-d%d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1)
    ]
    openocd_args.extend(
        debug_tools.get(upload_protocol).get("server").get("arguments", []))
    if env.GetProjectOption("debug_speed"):
        openocd_args.extend(
            ["-c", "adapter speed %s" % env.GetProjectOption("debug_speed")]
        )
    openocd_args.extend([
        "-c", "program {$SOURCE} %s verify reset; shutdown;" %
        board.get("upload.offset_address", "")
    ])
    openocd_args = [
        f.replace("$PACKAGE_DIR",
                  platform.get_package_dir("tool-openocd") or "")
        for f in openocd_args
    ]
    env.Replace(
        UPLOADER="openocd",
        UPLOADERFLAGS=openocd_args,
        UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
# custom upload tool
elif upload_protocol == "custom":
    upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
    sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
#
# Information about obsolete method of specifying linker scripts
#
if any("-Wl,-T" in f for f in env.get("LINKFLAGS", [])):
    print("Warning! '-Wl,-T' option for specifying linker scripts is deprecated. "
          "Please use 'board_build.ldscript' option in your 'platformio.ini' file.")
#
# Setup default targets
#
Default([target_buildprog, target_size])
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import functools
import itertools as it
class TestReduceOps(hu.HypothesisTestCase):
    """Reference checks for the multi-axis ReduceSum / ReduceMean operators
    against numpy, over 1-D through 4-D random inputs."""

    @given(
        d0=st.integers(1, 5),
        d1=st.integers(1, 5),
        d2=st.integers(1, 5),
        d3=st.integers(1, 5),
        keepdims=st.integers(0, 1),
        seed=st.integers(0, 2**32 - 1),
        **hu.gcs_cpu_only)
    def test_reduce_sum_mean(self, d0, d1, d2, d3, keepdims, seed, gc, dc):
        """Check ReduceSum/ReduceMean against np.sum/np.mean for every
        2-axis combination of a 4-D, 3-D and 2-D tensor, plus the single
        axis of a 1-D tensor."""
        def reduce_mean_ref(data, axis, keepdims):
            # numpy reference: operator output wrapped in a single-element list.
            return [np.mean(data, axis=axis, keepdims=keepdims)]

        def reduce_sum_ref(data, axis, keepdims):
            return [np.sum(data, axis=axis, keepdims=keepdims)]

        def reduce_op_test(op_name, op_ref, data, axes, keepdims, device):
            # Build the operator and compare its output to the numpy ref.
            op = core.CreateOperator(
                op_name,
                ["data"],
                ["Y"],
                axes=axes,
                keepdims=keepdims,
            )
            self.assertReferenceChecks(device, op, [data],
                                       functools.partial(
                                           op_ref,
                                           axis=axes,
                                           keepdims=keepdims))

        # Seed once; the sequence of randn() calls below depends on this order.
        np.random.seed(seed)
        for axes in it.combinations(range(4), 2):
            data = np.random.randn(d0, d1, d2, d3).astype(np.float32)
            reduce_op_test("ReduceMean", reduce_mean_ref, data, axes, keepdims,
                           gc)
            reduce_op_test("ReduceSum", reduce_sum_ref, data, axes, keepdims,
                           gc)
        for axes in it.combinations(range(3), 2):
            data = np.random.randn(d0, d1, d2).astype(np.float32)
            reduce_op_test("ReduceMean", reduce_mean_ref, data, axes, keepdims,
                           gc)
            reduce_op_test("ReduceSum", reduce_sum_ref, data, axes, keepdims,
                           gc)
        for axes in it.combinations(range(2), 2):
            data = np.random.randn(d0, d1).astype(np.float32)
            reduce_op_test("ReduceMean", reduce_mean_ref, data, axes, keepdims,
                           gc)
            reduce_op_test("ReduceSum", reduce_sum_ref, data, axes, keepdims,
                           gc)
        for axes in it.combinations(range(1), 1):
            data = np.random.randn(d0).astype(np.float32)
            reduce_op_test("ReduceMean", reduce_mean_ref, data, axes, keepdims,
                           gc)
            reduce_op_test("ReduceSum", reduce_sum_ref, data, axes, keepdims,
                           gc)
class TestReduceFrontReductions(hu.HypothesisTestCase):
    """Tests for ReduceFront*/ReduceBack* (Sum/Mean/Max) operators, their
    gradients, and their lengths-aware variants, checked against numpy."""

    def grad_variant_input_test(self, grad_op_name, X, ref, num_reduce_dim):
        """Check that the gradient op produces identical output whether it is
        given the original input blob "X" or only its shape blob."""
        workspace.ResetWorkspace()
        Y = np.array(ref(X)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        shape = np.array(X.shape).astype(np.int64)
        workspace.FeedBlob("X", X)
        workspace.FeedBlob("dY", dY)
        workspace.FeedBlob("shape", shape)
        grad_op = core.CreateOperator(
            grad_op_name, ["dY", "X"], ["dX"], num_reduce_dim=num_reduce_dim)
        grad_op1 = core.CreateOperator(
            grad_op_name, ["dY", "shape"], ["dX1"],
            num_reduce_dim=num_reduce_dim)
        workspace.RunOperatorOnce(grad_op)
        workspace.RunOperatorOnce(grad_op1)
        dX = workspace.FetchBlob("dX")
        dX1 = workspace.FetchBlob("dX1")
        np.testing.assert_array_equal(dX, dX1)

    def max_op_test(self, op_name, num_reduce_dim, gc, dc, in_data, in_names, ref_max):
        """Reference-check a Max reduction and device-check its gradient.

        Gradient correctness is not numerically checked (max gradients are
        unstable under finite differences); CPU/CUDA agreement is checked
        instead.
        """
        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dim
        )
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=in_data,
            reference=ref_max,
        )
        # Skip gradient check because it is too unreliable with max.
        # Just check CPU and CUDA have same results
        Y = np.array(ref_max(*in_data)[0]).astype(np.float32)
        dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)
        if len(in_data) == 2:
            # Lengths variant: gradient also receives the lengths blob.
            grad_in_names = ["dY", in_names[0], "Y", in_names[1]]
            grad_in_data = [dY, in_data[0], Y, in_data[1]]
        else:
            grad_in_names = ["dY", in_names[0], "Y"]
            grad_in_data = [dY, in_data[0], Y]
        grad_op = core.CreateOperator(
            op_name + "Gradient",
            grad_in_names,
            ["dX"],
            num_reduce_dim=num_reduce_dim
        )
        self.assertDeviceChecks(dc, grad_op, grad_in_data, [0])

    def reduce_op_test(self, op_name, op_ref, in_data, in_names,
                       num_reduce_dims, device):
        """Reference-check a Sum/Mean reduction and gradient-check input 0."""
        op = core.CreateOperator(
            op_name,
            in_names,
            ["outputs"],
            num_reduce_dim=num_reduce_dims
        )
        self.assertReferenceChecks(
            device_option=device,
            op=op,
            inputs=in_data,
            reference=op_ref
        )
        self.assertGradientChecks(
            device, op, in_data, 0, [0], stepsize=1e-2, threshold=1e-2)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_sum(self, num_reduce_dim, gc, dc):
        X = np.random.rand(7, 4, 3, 5).astype(np.float32)

        def ref_sum(X):
            # Reduce the first num_reduce_dim axes.
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]

        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontSumGradient", X, ref_sum, num_reduce_dim)

    @given(**hu.gcs)
    def test_reduce_front_sum_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_sum(X, lengths):
            # Per output element, sum only the first `lengths[ii]` entries
            # along the reduced (front) axis.
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceFrontSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_mean(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_mean(X):
            return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]

        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceFrontMeanGradient", X, ref_mean, num_reduce_dim)

    @given(**hu.gcs)
    def test_reduce_front_mean_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_mean(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceFrontMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_front_max(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_frontmax(X):
            return [np.max(X, axis=(tuple(range(num_reduce_dim))))]

        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X], ["X"], ref_frontmax)

    @given(**hu.gcs)
    def test_reduce_front_max_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_max(X, lengths):
            Y = X.reshape(d, lengths.size)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[:lengths[ii], ii])
            return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]

        self.max_op_test(
            "ReduceFrontMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_back_max(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_backmax(X):
            # Reduce the LAST num_reduce_dim axes.
            return [np.max(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X], ["X"], ref_backmax)

    @given(**hu.gcs)
    def test_reduce_back_max_with_length(self, gc, dc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_max(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.max(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.max_op_test(
            "ReduceBackMax", num_reduce_dim, gc, dc, [X, lengths],
            ["X", "lengths"], ref_max)

    @given(**hu.gcs)
    def test_reduce_back_sum(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_sum(X):
            return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackSumGradient", X, ref_sum, num_reduce_dim)

    @given(**hu.gcs)
    def test_reduce_back_sum_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_sum(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.sum(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.reduce_op_test(
            "ReduceBackSum", ref_sum, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)

    @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)
    def test_reduce_back_mean(self, num_reduce_dim, dc, gc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)

        def ref_mean(X):
            return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]

        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X], ["input"], num_reduce_dim, gc)
        self.grad_variant_input_test(
            "ReduceBackMeanGradient", X, ref_mean, num_reduce_dim)

    @given(**hu.gcs)
    def test_reduce_back_mean_with_length(self, dc, gc):
        num_reduce_dim = 1
        X = np.random.rand(2, 3, 4, 5).astype(np.float32)
        batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))
        d = 120 // batch_size
        lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)

        def ref_mean(X, lengths):
            Y = X.reshape(lengths.size, d)
            rv = np.zeros((lengths.size, 1)).astype(np.float32)
            for ii in range(lengths.size):
                rv[ii] = np.mean(Y[ii, :lengths[ii]])
            return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]

        self.reduce_op_test(
            "ReduceBackMean", ref_mean, [X, lengths], ["input", "lengths"],
            num_reduce_dim, gc)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import json
import time
from kubernetes_py.K8sExceptions import DrainNodeException, TimedOutException, NotFoundException
from kubernetes_py.K8sNamespace import K8sNamespace
from kubernetes_py.K8sObject import K8sObject
from kubernetes_py.K8sPod import K8sPod
from kubernetes_py.K8sConfig import K8sConfig
from kubernetes_py.models.v1.Node import Node
from kubernetes_py.models.v1.NodeCondition import NodeCondition
from kubernetes_py.utils import is_valid_string, is_valid_list
from kubernetes_py.models.v1.Taint import Taint
class K8sNode(K8sObject):
DRAIN_WAIT_TIMEOUT_SECONDS = 60
def __init__(self, config=None, name=None):
    """Create a K8sNode wrapper bound to `config`, tracking the node `name`."""
    super(K8sNode, self).__init__(config=config, name=name, obj_type="Node")
# ------------------------------------------------------------------------------------- override
def create(self):
    """Create the node via the API, refresh the local model, return self."""
    super(K8sNode, self).create()
    self.get()
    return self
def update(self):
    """Push the local model to the API, re-fetch it, return self."""
    super(K8sNode, self).update()
    self.get()
    return self
def list(self, pattern=None, labels=None):
    """Return K8sNode wrappers for all nodes, optionally filtered.

    :param pattern: substring that must appear in the node name.
    :param labels: label selector passed through to the API listing.
    :return: a list of K8sNode objects.
    """
    models = [Node(item) for item in super(K8sNode, self).list(labels=labels)]
    if pattern is not None:
        models = [m for m in models if pattern in m.name]
    # Wrap each raw model in a K8sNode bound to this object's config.
    return [
        K8sNode(config=self.config, name=m.name).from_model(m)
        for m in models
    ]
# ------------------------------------------------------------------------------------- get
def get(self):
    """Fetch the node from the API into self.model and return self."""
    self.model = Node(self.get_model())
    return self
def get_annotation(self, k=None):
    """Return the annotation value for key `k`, or None when absent."""
    annotations = self.model.metadata.annotations
    return annotations[k] if k in annotations else None
def get_label(self, k=None):
    """Return the label value for key `k`, or None when absent."""
    labels = self.model.metadata.labels
    return labels[k] if k in labels else None
# ------------------------------------------------------------------------------------- pod_cidr
@property
def pod_cidr(self):
    """The CIDR block allocated for pods on this node (spec.podCIDR)."""
    return self.model.spec.pod_cidr

@pod_cidr.setter
def pod_cidr(self, v=None):
    self.model.spec.pod_cidr = v
# ------------------------------------------------------------------------------------- external_id
@property
def external_id(self):
    """The node's external ID (spec.externalID; deprecated upstream)."""
    return self.model.spec.external_id

@external_id.setter
def external_id(self, v=None):
    self.model.spec.external_id = v
# ------------------------------------------------------------------------------------- provider_id
@property
def provider_id(self):
    """The cloud provider's ID for this node (spec.providerID)."""
    return self.model.spec.provider_id

@provider_id.setter
def provider_id(self, v=None):
    self.model.spec.provider_id = v
# ------------------------------------------------------------------------------------- unschedulable
@property
def unschedulable(self):
    """Whether new pods may be scheduled here (spec.unschedulable);
    setting this to True is the 'cordon' operation."""
    return self.model.spec.unschedulable

@unschedulable.setter
def unschedulable(self, v=None):
    self.model.spec.unschedulable = v
# ------------------------------------------------------------------------------------- status
@property
def status(self):
    """The node's current status object (read-only)."""
    return self.model.status
# ------------------------------------------------------------------------------------- name
@property
def name(self):
    """The node's metadata name."""
    return self.model.metadata.name

@name.setter
def name(self, name=None):
    self.model.metadata.name = name
# ------------------------------------------------------------------------------------- is_ready
@property
def is_ready(self):
    """Return True if the node reports a 'Ready' condition with status "True".

    Replaces the original loop, which carried a redundant `else: continue`
    and used `assert isinstance(...)` for input validation (a no-op under
    `python -O`). Behavior is unchanged for well-formed condition lists:
    missing conditions (None) or no matching condition yield False.
    """
    conditions = self.model.status.conditions
    if conditions is None:
        return False
    return any(
        c.condition_type == "Ready" and c.status == "True"
        for c in conditions
    )
# ------------------------------------------------------------------------------------- filter
@staticmethod
def get_by_name(config=None, name=None):
    """Return the first K8sNode whose name equals `name`, or None."""
    candidates = K8sNode(config=config, name=name).list()
    matches = (node for node in candidates if node.name == name)
    return next(matches, None)
@staticmethod
def get_by_labels(config=None, labels=None):
    """Return refreshed K8sNode objects matching the given label selector.

    :param config: optional K8sConfig; validated if provided.
    :param labels: dict of label key/values; required.
    :raises SyntaxError: on invalid config or labels.
    """
    if config is not None and not isinstance(config, K8sConfig):
        raise SyntaxError("K8sNode.get_by_labels(): config: [ {0} ] is invalid.".format(config))
    if not isinstance(labels, dict):
        raise SyntaxError("K8sNode.get_by_labels() labels: [ {0} ] is invalid.".format(labels))
    node_list = list()
    # The placeholder name is never used by list(); only labels filter.
    nodes = K8sNode(config=config, name="whatever").list(labels=labels)
    for n in nodes:
        try:
            # NOTE(review): `n` is already a K8sNode here (list() wraps its
            # results), so Node(n) / from_model(n) look suspect — confirm
            # what Node() accepts before relying on this path.
            model = Node(n)
            obj = K8sNode(config=config, name=model.metadata.name).from_model(n)
            node_list.append(obj.get())
        except NotFoundException:
            # Node disappeared between listing and refresh; skip it.
            pass
    return node_list
# ------------------------------------------------------------------------------------- pods
@property
def pods(self):
    """All K8sPods currently scheduled on this node (computed per access)."""
    return self._pod_inventory()

@pods.setter
def pods(self, p=None):
    raise NotImplementedError("K8sNode: pods is read-only.")
# ------------------------------------------------------------------------------------- drain
    def drain(self, ignore_daemonsets=False, delete_local_storage=False, force=False):
        """
        Removes all K8sPods from this K8sNode,
        and prevents additional K8sPods from being scheduled.
        :param ignore_daemonsets: a boolean.
            If false, will fail if a K8sDaemonSet-managed K8sPod is present.
            If true, will continue even if a K8sDaemonSet-managed K8sPod is present.
        :param delete_local_storage: a boolean.
            If false, will fail if a K8sVolume of type 'emptyDir' is found.
            If true, will continue even if an 'emptyDir' K8sVolume is found.
        :param force: a boolean.
            If false, will fail if any K8sPods unmanaged by a parent object are found.
            If true, will continue and any unmanaged K8sPods are lost.
        :raises DrainNodeException: when one of the safety checks above fails.
        :return: self.
        """
        # inventory of K8sPods found on this node.
        daemonset_pods = []
        pods = self._pod_inventory()
        # cordon the node. (Must happen before deleting pods so nothing new
        # gets scheduled here while we evict.)
        self.unschedulable = True
        self.update()
        # loop through all pods and delete them.
        for pod in pods:
            if self._is_daemonset(pod):
                if not ignore_daemonsets:
                    raise DrainNodeException("K8sNode: pod: [ {} ] is managed by a DaemonSet.".format(pod.name))
                else:
                    # DaemonSet pods are not deleted; remember them so the
                    # wait below knows how many pods will remain.
                    daemonset_pods.append(pod)
                    continue
            if self._has_local_storage(pod) and not delete_local_storage:
                raise DrainNodeException("K8sNode: pod: [ {} ] has local storage that will be lost.".format(pod.name))
            if self._is_orphan(pod) and not force:
                raise DrainNodeException("K8sNode: pod: [ {} ] is unmanaged and will be lost.".format(pod.name))
            pod.delete()
        # Block until only the (ignored) DaemonSet pods remain.
        self._wait_for_pod_deletion(daemonset_pods)
        return self
    def _pod_inventory(self):
        """
        Returns the list of all K8sPods found on this K8sNode.
        Iterates every namespace and keeps only pods whose node_name matches.
        :return: A list of K8sPods.
        """
        pods = []
        namespaces = K8sNamespace(config=self.config, name="yo").list()
        for ns in namespaces:
            # NOTE(review): `cfg` aliases self.config (no copy is made), so the
            # assignment below mutates self.config.namespace on every iteration
            # and leaves it set to the last namespace — confirm this is intended.
            cfg = self.config
            cfg.namespace = ns.name
            p = K8sPod(config=cfg, name="yo").list()
            filtered = filter(lambda x: x.node_name == self.name, p)
            pods += filtered
        return pods
def _is_daemonset(self, pod=None):
"""
Determines if a K8sPod is part of a K8sDaemonSet.
:param pod: The K8sPod we're interested in.
:return: a boolean.
"""
if "kubernetes.io/created-by" in pod.annotations:
parent = json.loads(pod.annotations["kubernetes.io/created-by"])
if parent["reference"]["kind"] == "DaemonSet":
return True
return False
def _has_local_storage(self, pod=None):
"""
Determines if a K8sPod has any local storage susceptible to be lost.
:param pod: The K8sPod we're interested in.
:return: a boolean.
"""
for vol in pod.volumes:
if vol.emptyDir is not None:
return True
return False
def _is_orphan(self, pod=None):
"""
Determines if a K8sPod is unmanaged by a parent object, and is susceptible to be lost.
:param pod: The K8sPod we're interested in.
:return: a boolean.
"""
if "kubernetes.io/created-by" not in pod.annotations:
return True
return False
    def _wait_for_pod_deletion(self, daemonset_pods=None):
        """
        Wait until this K8sNode has evicted all its K8sPods.
        :param daemonset_pods: A list of K8sPods on this K8sNode that are managed by a K8sDaemonSet.
        :raises TimedOutException: via _check_timeout when the wait budget is exhausted.
        :return: None
        """
        pods = self._pod_inventory()
        start_time = time.time()
        while len(pods) > 0:
            # Done once only the (never-evicted) DaemonSet pods remain.
            if len(pods) == len(daemonset_pods):
                break
            pods = self._pod_inventory()
            self._check_timeout(start_time)
            time.sleep(1)
        return
def _check_timeout(self, start_time=None):
elapsed_time = time.time() - start_time
if elapsed_time >= self.DRAIN_WAIT_TIMEOUT_SECONDS: # timeout
raise TimedOutException("Timed out draining K8sNode: [ {0} ]".format(self.name))
# ------------------------------------------------------------------------------------- uncordon
    def uncordon(self):
        """
        Returns this K8sNode into the pool addressable by the kube-scheduler.
        Counterpart of the cordon performed by drain().
        :return: self
        """
        self.unschedulable = False
        self.update()
        return self
# ------------------------------------------------------------------------------------- taint
@property
def taints(self):
return self.model.spec.taints
@taints.setter
def taints(self, t=None):
if not is_valid_list(t, Taint):
raise SyntaxError("K8sNode: taints: [ {} ] is invalid.".format(t))
self.model.spec.taints = t
def taint(self, key=None, value=None, effect=None):
if not (key and value and effect):
raise SyntaxError("K8sNode: taint: you must specify a key, a value and an effect.")
if not is_valid_string(key) or not is_valid_string(value):
raise SyntaxError("K8sNode: taint: key: [ {} ] or value: [ {} ] is invalid.".format(key, value))
if effect not in Taint.VALID_TAINT_EFFECTS:
raise SyntaxError("K8sNode: taint: effect must be in {}".format(Taint.VALID_TAINT_EFFECTS))
t = Taint()
t.key = key
t.value = value
t.effect = effect
exists = False
for existing_taint in self.taints:
if existing_taint.key == key and existing_taint.value == value and existing_taint.effect == effect:
exists = True
if not exists:
self.taints.append(t)
self.update()
return self
def untaint(self, key=None, value=None):
if key and value:
if not is_valid_string(key) or not is_valid_string(value):
raise SyntaxError("K8sNode: taint: key: [ {} ] or value: [ {} ] is invalid.".format(key, value))
remaining_taints = []
for t in self.taints:
if key and value:
if t.key != key and t.value != value:
remaining_taints.append(t)
self.taints = remaining_taints
self.update()
return self
| |
"""
Django settings for homeasset project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (homeasset/config/settings/base.py - 3 = homeasset/)
APPS_DIR = ROOT_DIR.path('homeasset')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'crispy_forms', # Form layouts
    'allauth', # registration
    'allauth.account', # registration
    'allauth.socialaccount', # registration
    'reversion', # Versioning of models
]
# Apps specific for this project go here.
LOCAL_APPS = [
    # custom users app
    'homeasset.users.apps.UsersConfig',
    # Your stuff: custom apps go here
    'homeasset.assets.apps.AssetsConfig',
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE: middleware order is significant; entries run top-down on requests.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'homeasset.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
    ("""Michael Anderson""", 'mikeand654@gmail.com'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///homeasset'),
}
# Wrap each HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Australia/Brisbane'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-AU'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
# Hashers are tried in order; the first entry is used for new passwords.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'homeasset.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'homeasset.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| |
'''
@author Juan
'''
import sys
import os
import time
import lib.xbee as xbee
import lib.imu as imu
import lib.variables as var
# Radio link to the ground station.
x = xbee.xbee("/dev/ttyUSB1");
challenges = ['autonomous','speed', 'follow', 'path', 'docking', 'return'];
courses = ['courseA', 'courseB', 'courseC'];
# TODO (original, translated): pull the coordinates from the IMU.
lat_long = ["29.151098","-81.016505"];
# challenge string
#challenge = 'N'
# Field indices into the message tuple received from the station.
enable_pos = 0
R_kill_switch = 1
status = 2
course = 3
challenge_pos = 4
dock = 5
dockId = 0;
def start_mission():
    """Run the mission sequence: start frames, heartbeats, docking, finish."""
    imu.init();
    # Announce mission start several times; presumably the radio link is
    # lossy — confirm against the station protocol.
    for i in range(1,5):
        send_start(lat_long)
    # Heartbeat until the current challenge becomes 'd' (docking).
    while(var.currChallenge != 'd'):
        send_heartbeat()
    # Docking happens here (translated from the original Spanish comment).
    resp = '0'
    counter = 0
    print("Receiving docking data...")
    while resp == '0':
        s = x.receive_from_station();
        resp = s[enable_pos];
        counter += 1
        print("Go to dock : ", s[dock])
        if(counter < 5):
            time.sleep(6-counter);
        else:
            # NOTE(review): after 5 failed tries this only prints (Spanish:
            # "something went wrong with the dock response") and keeps looping.
            print("Algo salio mal con la respuesta del dock")
    # TODO (original, translated): move the boat toward dock s[dock]
    # Wait for the station to send 'e' (end of mission).
    while(var.currChallenge != 'e'):
        send_heartbeat()
def send_heartbeat():
    """Send one position/challenge heartbeat frame to the station."""
    # TODO (original, translated): obtain coordinates and update the next line.
    coords = imu.get_gps_coords();
    la = str(round(coords['latitude'],6)).zfill(10)
    # NOTE(review): the key is 'longitud' (no trailing 'e') while the other key
    # is 'latitude' — confirm against lib.imu.get_gps_coords(); a typo here
    # would raise KeyError at runtime.
    lo = str(round(coords['longitud'],6)).zfill(10)
    x.set_latlong(la,lo);
    x.set_challenge(var.currChallenge)
    x.send2station()
    time.sleep(1.05)
def send_start(latlong):
    """Transmit a mission-start frame (challenge 's') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('s')
    x.send2station()
    time.sleep(1.05)
def send_end(latlong):
    """Transmit a mission-end frame (challenge 'e') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('e')
    x.send2station()
    time.sleep(1.05)
def send_return(latlong):
    """Transmit a return-challenge frame ('r') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('r')
    x.send2station()
    time.sleep(1.05)
def send_follow(latlong):
    """Transmit a follow-challenge frame ('f') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('f')
    x.send2station()
    time.sleep(1.05)
def send_docking(latlong):
    """Transmit a docking-challenge frame ('d') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('d')
    x.send2station()
    time.sleep(1.05)
def send_heart_beat(latlong):
    """Transmit a plain heartbeat frame (challenge 'N') with [lat, lon]."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_latlong(lat, lon)
    x.set_challenge('N')
    x.send2station()
    time.sleep(1.05)
def send_takeoff(latlong):
    """Transmit a takeoff frame (takeoff flag set, challenge 'd')."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("1")
    x.set_latlong(lat, lon)
    x.set_challenge('d')
    x.send2station()
    time.sleep(1.05)
def send_flying(latlong):
    """Transmit an in-flight frame (flying and takeoff set, challenge 'd')."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("1")
    x.set_takeoff("1")
    x.set_latlong(lat, lon)
    x.set_challenge('d')
    x.send2station()
    time.sleep(1.05)
def send_landing(latlong):
    """Transmit a landing frame (landing flag set, challenge 'd')."""
    lat, lon = latlong[0], latlong[1]
    x.set_flying("0")
    x.set_takeoff("0")
    x.set_landing("1")
    x.set_latlong(lat, lon)
    x.set_challenge('d')
    x.send2station()
    time.sleep(1.05)
#Send Testing OK
def send_testing():
    """Exercise the station protocol end-to-end (manual test harness)."""
    global enable_pos  # NOTE(review): only read below, so 'global' is unneeded.
    #while True:
    send_start(lat_long);
    send_start(lat_long);
    send_start(lat_long);
    # NOTE(review): range(1, 5) yields i = 1..4, so the 'elif i == 5' and
    # 'elif i == 9' branches below can never run — confirm the intended range.
    for i in range(1,5):
        send_heart_beat(lat_long);
        if i == 2:
            send_docking(lat_long);
            send_docking(lat_long);
            time.sleep(2);
            resp = '0'
            counter = 0
            # Poll until the station enables us (first field becomes non-'0').
            while resp == '0':
                print("Receiving")
                s = x.receive_from_station();
                resp = s[enable_pos];
                counter += 1;
                # NOTE(review): this assigns a local that shadows the module's
                # dockId global — confirm whether the global should be updated.
                dockId = int(s[dock]);
                print("Go to dock : ", s[dock]);
                if(counter < 5):
                    time.sleep(6-counter);
            send_takeoff(lat_long);
            send_flying(lat_long);
            send_landing(lat_long);
        elif i == 5:
            send_follow(lat_long);
        elif i == 9:
            send_return(lat_long);
    send_end(lat_long);
    send_end(lat_long);
    send_end(lat_long);
def send_testing_2():
    """Wait until the station enables us, then send start until acknowledged."""
    resp = ''
    while(resp != '1'):
        s = x.receive_from_station();
        resp = s[enable_pos];
    while(s[challenge_pos] == '0'):
        send_start(lat_long);
        s = x.receive_from_station();
    print("Starting")
    # NOTE(review): this calls send_end() on the xbee object, unlike the
    # module-level send_end(latlong) used elsewhere — confirm the xbee API.
    x.send_end();
#Receive Testing
def receive_testing():
    """Read one frame from the station, echo it, and return it."""
    frame = x.receive_from_station()
    print(frame)
    return frame
if __name__ == '__main__':
    start_mission()
# The triple-quoted block below is disabled manual test code.
# NOTE(review): it calls send_heart_beat() without the latlong argument the
# current signature requires — it would fail if re-enabled as-is.
'''
while True:
    s = receive_testing();
    if (s[enable_pos] == '1'):
        send_heart_beat()
'''
| |
"""
Main Random Variables Module.
Defines abstract random variable type.
Contains interfaces for probability space object (PSpace) as well as standard
operators, P, E, sample, density, where
See Also
========
diofant.stats.crv
diofant.stats.frv
diofant.stats.rv_interface
"""
from ..abc import x
from ..core import (Add, Eq, Equality, Expr, Integer, Lambda, Symbol, Tuple,
oo, sympify)
from ..core.logic import fuzzy_or
from ..core.relational import Relational
from ..functions import DiracDelta
from ..logic.boolalg import Boolean, false, true
from ..sets import FiniteSet, ProductSet
from ..solvers import solve
from ..utilities import lambdify
class RandomDomain(Expr):
    """
    Represents a set of variables and the values which they can take
    See Also
    ========
    diofant.stats.crv.ContinuousDomain
    diofant.stats.frv.FiniteDomain
    """
    # Subclasses flip the flag matching their kind.
    is_ProductDomain = False
    is_Finite = False
    is_Continuous = False
    def __new__(cls, symbols, *args):
        # Canonicalize the symbols into a FiniteSet; args layout is
        # (symbols, set, ...).
        symbols = FiniteSet(*symbols)
        return Expr.__new__(cls, symbols, *args)
    @property
    def set(self):
        return self.args[1]
    def __contains__(self, other):
        raise NotImplementedError
    def integrate(self, expr):
        raise NotImplementedError
class SingleDomain(RandomDomain):
    """
    A single variable and its domain
    See Also
    ========
    diofant.stats.crv.SingleContinuousDomain
    diofant.stats.frv.SingleFiniteDomain
    """
    def __new__(cls, symbol, set):
        assert symbol.is_Symbol
        # args layout: (symbol, set) — note args[0] is the bare symbol here,
        # not a FiniteSet as in the RandomDomain base.
        return Expr.__new__(cls, symbol, set)
    @property
    def symbol(self):
        return self.args[0]
    @property
    def symbols(self):
        # Single-element set, for a uniform interface with multi-symbol domains.
        return FiniteSet(self.symbol)
class ConditionalDomain(RandomDomain):
    """
    A RandomDomain with an attached condition
    See Also
    ========
    diofant.stats.crv.ConditionalContinuousDomain
    diofant.stats.frv.ConditionalFiniteDomain
    """
    def __new__(cls, fulldomain, condition):
        # Rewrite the condition in terms of the plain underlying symbols so it
        # no longer references RandomSymbol wrappers.
        condition = condition.xreplace({rs: rs.symbol
                                        for rs in random_symbols(condition)})
        return Expr.__new__(cls, fulldomain, condition)
    @property
    def symbols(self):
        return self.fulldomain.symbols
    @property
    def fulldomain(self):
        return self.args[0]
    @property
    def condition(self):
        return self.args[1]
    @property
    def set(self):  # pragma: no cover
        raise NotImplementedError('Set of Conditional Domain not Implemented')
class PSpace(Expr):
    """
    A Probability Space
    Probability Spaces encode processes that equal different values
    probabilistically. These underly Random Symbols which occur in Diofant
    expressions and contain the mechanics to evaluate statistical statements.
    See Also
    ========
    diofant.stats.crv.ContinuousPSpace
    diofant.stats.frv.FinitePSpace
    """
    # Subclasses set the flag matching their kind (None = unspecified here).
    is_Finite = None
    is_Continuous = None
    @property
    def values(self):
        # RandomSymbols pairing this space with each of its domain symbols.
        return frozenset(RandomSymbol(self, sym) for sym in self.domain.symbols)
    @property
    def symbols(self):
        return self.domain.symbols
    def where(self, condition):
        raise NotImplementedError
    def compute_density(self, expr):
        raise NotImplementedError
    def sample(self):
        raise NotImplementedError
    def probability(self, condition):
        raise NotImplementedError
    def integrate(self, expr):
        raise NotImplementedError
class SinglePSpace(PSpace):
    """
    Represents the probabilities of a set of random events that can be
    attributed to a single variable/symbol.
    """
    def __new__(cls, s, distribution):
        # Accept a plain string for convenience; normalize to a Symbol.
        if isinstance(s, str):
            s = Symbol(s)
        if not isinstance(s, Symbol):
            raise TypeError('s should have been string or Symbol')
        return Expr.__new__(cls, s, distribution)
    @property
    def value(self):
        # The RandomSymbol users actually manipulate in expressions.
        return RandomSymbol(self, self.symbol)
    @property
    def symbol(self):
        return self.args[0]
    @property
    def distribution(self):
        return self.args[1]
class RandomSymbol(Expr):
    """
    Random Symbols represent ProbabilitySpaces in Diofant Expressions
    In principle they can take on any value that their symbol can take on
    within the associated PSpace with probability determined by the PSpace
    Density.
    Random Symbols contain pspace and symbol properties.
    The pspace property points to the represented Probability Space
    The symbol is a standard Diofant Symbol that is used in that probability space
    for example in defining a density.
    You can form normal Diofant expressions using RandomSymbols and operate on
    those expressions with the Functions
    E - Expectation of a random expression
    P - Probability of a condition
    density - Probability Density of an expression
    given - A new random expression (with new random symbols) given a condition
    An object of the RandomSymbol type should almost never be created by the
    user. They tend to be created instead by the PSpace class's value method.
    Traditionally a user doesn't even do this but instead calls one of the
    convenience functions Normal, Exponential, Coin, Die, FiniteRV, etc....
    """
    def __new__(cls, pspace, symbol):
        if not isinstance(symbol, Symbol):
            raise TypeError('symbol should be of type Symbol')
        if not isinstance(pspace, PSpace):
            raise TypeError('pspace variable should be of type PSpace')
        return Expr.__new__(cls, pspace, symbol)
    # Behave like an atomic symbol inside expressions.
    is_finite = True
    is_Symbol = True
    is_Atom = True
    _diff_wrt = True
    # args layout: (pspace, symbol).
    pspace = property(lambda self: self.args[0])
    symbol = property(lambda self: self.args[1])
    name = property(lambda self: self.symbol.name)
    # Assumption queries delegate to the underlying plain symbol.
    def _eval_is_positive(self):
        return self.symbol.is_positive
    def _eval_is_integer(self):
        return self.symbol.is_integer
    def _eval_is_extended_real(self):
        return fuzzy_or([self.symbol.is_extended_real,
                         self.pspace.is_extended_real])
    def _eval_is_commutative(self):
        return self.symbol.is_commutative
    def _hashable_content(self):
        return self.pspace, self.symbol
    @property
    def free_symbols(self):
        # A RandomSymbol is itself a free symbol.
        return {self}
class ProductPSpace(PSpace):
    """
    A probability space resulting from the merger of two independent probability
    spaces.
    Often created using the function, pspace
    """
    def __new__(cls, *spaces):
        # Map each random value to its owning space, to detect symbol clashes.
        rs_space_dict = {}
        for space in spaces:
            for value in space.values:
                rs_space_dict[value] = space
        symbols = FiniteSet(*[val.symbol for val in rs_space_dict])
        # Overlapping symbols
        if len(symbols) < sum(len(space.symbols) for space in spaces):
            raise ValueError('Overlapping Random Variables')
        # Specialize the class when every component space shares one kind.
        new_cls = cls
        if all(space.is_Finite for space in spaces):
            from .frv import ProductFinitePSpace
            new_cls = ProductFinitePSpace
        if all(space.is_Continuous for space in spaces):
            from .crv import ProductContinuousPSpace
            new_cls = ProductContinuousPSpace
        obj = Expr.__new__(new_cls, *FiniteSet(*spaces))
        return obj
    @property
    def rs_space_dict(self):
        # Rebuilt on access (not cached from __new__).
        d = {}
        for space in self.spaces:
            for value in space.values:
                d[value] = space
        return d
    @property
    def symbols(self):
        return FiniteSet(*[val.symbol for val in self.rs_space_dict])
    @property
    def spaces(self):
        return FiniteSet(*self.args)
    @property
    def values(self):
        return sumsets(space.values for space in self.spaces)
    def integrate(self, expr, rvs=None, **kwargs):
        # Integrate each component space over its own random variables only.
        rvs = rvs or self.values
        rvs = frozenset(rvs)
        for space in self.spaces:
            expr = space.integrate(expr, rvs & space.values, **kwargs)
        return expr
    @property
    def domain(self):
        return ProductDomain(*[space.domain for space in self.spaces])
    @property
    def density(self):  # pragma: no cover
        raise NotImplementedError('Density not available for ProductSpaces')
    def sample(self):
        # Merge the per-space sample dictionaries into one.
        return {k: v for space in self.spaces
                for k, v in space.sample().items()}
class ProductDomain(RandomDomain):
    """
    A domain resulting from the merger of two independent domains
    See Also
    ========
    diofant.stats.crv.ProductContinuousDomain
    diofant.stats.frv.ProductFiniteDomain
    """
    is_ProductDomain = True
    def __new__(cls, *domains):
        # Flatten any product of products
        domains2 = []
        for domain in domains:
            if not domain.is_ProductDomain:
                domains2.append(domain)
            else:
                domains2.extend(domain.domains)
        domains2 = FiniteSet(*domains2)
        # Specialize the class when every component domain shares one kind.
        new_cls = cls
        if all(domain.is_Finite for domain in domains2):
            from .frv import ProductFiniteDomain
            new_cls = ProductFiniteDomain
        if all(domain.is_Continuous for domain in domains2):
            from .crv import ProductContinuousDomain
            new_cls = ProductContinuousDomain
        return Expr.__new__(new_cls, *domains2)
    @property
    def symbols(self):
        return FiniteSet(*[sym for domain in self.domains
                           for sym in domain.symbols])
    @property
    def domains(self):
        return self.args
    @property
    def set(self):
        return ProductSet(domain.set for domain in self.domains)
    def __contains__(self, other):
        # Split event into each subdomain
        for domain in self.domains:
            # Collect the parts of this event which associate to this domain
            elem = frozenset(item for item in other
                             if domain.symbols.contains(item[0]) == true)
            # Test this sub-event
            if elem not in domain:
                return False
        # All subevents passed
        return True
def random_symbols(expr):
    """Returns all RandomSymbols within a Diofant Expression."""
    try:
        found = expr.atoms(RandomSymbol)
    except AttributeError:
        # Non-Expr inputs (plain numbers, etc.) have no atoms() method.
        return []
    return list(found)
def pspace(expr):
    """
    Returns the underlying Probability Space of a random expression.
    For internal use.
    Raises ValueError when *expr* contains no random symbols.
    Examples
    ========
    >>> from diofant.stats import Normal
    >>> X = Normal('X', 0, 1)
    >>> pspace(2*X + 1) == X.pspace
    True
    """
    expr = sympify(expr)
    rvs = random_symbols(expr)
    if not rvs:
        raise ValueError(f'Expression containing Random Variable expected, not {expr}')
    # If only one space present
    if all(rv.pspace == rvs[0].pspace for rv in rvs):
        return rvs[0].pspace
    # Otherwise make a product space
    return ProductPSpace(*[rv.pspace for rv in rvs])
def sumsets(sets):
    """Union of sets, returned as a frozenset."""
    merged = frozenset()
    for each in sets:
        merged = merged.union(each)
    return merged
def rs_swap(a, b):
    """
    Build a dictionary to swap RandomSymbols based on their underlying symbol.
    i.e.
    if ``X = ('x', pspace1)``
    and ``Y = ('x', pspace2)``
    then ``X`` and ``Y`` match and the key, value pair
    ``{X:Y}`` will appear in the result
    Inputs: collections a and b of random variables which share common symbols
    Output: dict mapping RVs in a to RVs in b
    """
    # For each rv in a, pick the first rv in b carrying the same symbol
    # (IndexError if no match exists, as in the original).
    return {rsa: [rsb for rsb in b if rsb.symbol == rsa.symbol][0]
            for rsa in a}
def given(expr, condition=None, **kwargs):
r"""Conditional Random Expression.
From a random expression and a condition on that expression creates a new
probability space from the condition and returns the same expression on that
conditional probability space.
Examples
========
>>> from diofant.stats import Die
>>> X = Die('X', 6)
>>> Y = given(X, X > 3)
>>> density(Y).dict
{4: 1/3, 5: 1/3, 6: 1/3}
Following convention, if the condition is a random symbol then that symbol
is considered fixed.
>>> from diofant.stats import Normal
>>> X = Normal('X', 0, 1)
>>> Y = Normal('Y', 0, 1)
>>> pprint(density(X + Y, Y)(z), use_unicode=False)
2
-(-Y + z)
-----------
___ 2
\/ 2 *E
------------------
____
2*\/ pi
"""
if not random_symbols(condition) or pspace_independent(expr, condition):
return expr
if isinstance(condition, RandomSymbol):
condition = Eq(condition, condition.symbol)
condsymbols = random_symbols(condition)
if (isinstance(condition, Equality) and len(condsymbols) == 1 and
not isinstance(pspace(expr).domain, ConditionalDomain)):
rv = tuple(condsymbols)[0]
results = solve(condition, rv)
return sum(expr.subs(res) for res in results)
# Get full probability space of both the expression and the condition
fullspace = pspace(Tuple(expr, condition))
# Build new space given the condition
space = fullspace.conditional_space(condition, **kwargs)
# Dictionary to swap out RandomSymbols in expr with new RandomSymbols
# That point to the new conditional space
swapdict = rs_swap(fullspace.values, space.values)
# Swap random variables in the expression
expr = expr.xreplace(swapdict)
return expr
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
    """
    Returns the expected value of a random expression
    Parameters
    ==========
    expr : Expr containing RandomSymbols
        The expression of which you want to compute the expectation value
    condition : Expr containing RandomSymbols
        A conditional expression. E(X, X>0) is expectation of X given X > 0
    numsamples : int
        Enables sampling and approximates the expectation with this many samples
    evaluate : Bool (defaults to True)
        In case of continuous systems return unevaluated integral
    Examples
    ========
    >>> from diofant.stats import E, Die
    >>> X = Die('X', 6)
    >>> E(X)
    7/2
    >>> E(2*X + 1)
    8
    >>> E(X, X > 3) # Expectation of X given that it is above 3
    5
    """
    if not random_symbols(expr): # expr isn't random?
        return expr
    if numsamples: # Computing by monte carlo sampling?
        return sampling_E(expr, condition, numsamples=numsamples, **kwargs)
    # Create new expr and recompute E
    if condition is not None: # If there is a condition
        return expectation(given(expr, condition), evaluate=evaluate)
    # A few known statements for efficiency
    if expr.is_Add: # We know that E is Linear
        return Add(*[expectation(arg, evaluate=evaluate)
                     for arg in expr.args])
    # Otherwise case is simple, pass work off to the ProbabilitySpace
    result = pspace(expr).integrate(expr)
    if evaluate and hasattr(result, 'doit'):
        return result.doit(**kwargs)
    else:
        return result
def probability(condition, given_condition=None, numsamples=None,
                evaluate=True, **kwargs):
    """
    Probability that a condition is true, optionally given a second condition
    Parameters
    ==========
    condition : Combination of Relationals containing RandomSymbols
        The condition of which you want to compute the probability
    given_condition : Combination of Relationals containing RandomSymbols
        A conditional expression. P(X > 1, X > 0) is expectation of X > 1
        given X > 0
    numsamples : int
        Enables sampling and approximates the probability with this many samples
    evaluate : Bool (defaults to True)
        In case of continuous systems return unevaluated integral
    Examples
    ========
    >>> from diofant.stats import P, Die
    >>> X, Y = Die('X', 6), Die('Y', 6)
    >>> P(X > 3)
    1/2
    >>> P(Eq(X, 5), X > 2) # Probability that X == 5 given that X > 2
    1/4
    >>> P(X > Y)
    5/12
    """
    condition = sympify(condition)
    given_condition = sympify(given_condition)
    # Validate the conditioning expression before the main condition so a
    # bad second argument is reported first.
    if given_condition is not None and \
            not isinstance(given_condition, (Relational, Boolean)):
        raise ValueError('%s is not a relational or combination of relationals'
                         % (given_condition))
    if given_condition == false:
        # Conditioning on an impossible event: return zero by convention.
        return Integer(0)
    if not isinstance(condition, (Relational, Boolean)):
        raise ValueError('%s is not a relational or combination of relationals'
                         % condition)
    # Trivially true/false conditions short-circuit the computation.
    if condition == true:
        return Integer(1)
    if condition == false:
        return Integer(0)
    if numsamples:
        # Monte-carlo estimate instead of an exact computation.
        return sampling_P(condition, given_condition, numsamples=numsamples,
                          **kwargs)
    if given_condition is not None:  # If there is a condition
        # Recompute on new conditional expr
        # NOTE(review): ``evaluate`` is not forwarded on this recursion —
        # confirm whether that is intentional.
        return probability(given(condition, given_condition, **kwargs), **kwargs)
    # Otherwise pass work off to the ProbabilitySpace
    result = pspace(condition).probability(condition, **kwargs)
    if evaluate and hasattr(result, 'doit'):
        return result.doit()
    else:
        return result
class Density(Expr):
    """Probability density."""

    # The random expression whose density is represented (first argument).
    expr = property(lambda self: self.args[0])

    @property
    def condition(self):
        # Optional conditioning expression; implicitly None when absent.
        if len(self.args) > 1:
            return self.args[1]

    def doit(self, **kwargs):
        """Compute the density, applying the condition (if any) first."""
        evaluate = kwargs.pop('evaluate', True)
        expr, condition = self.expr, self.condition
        if condition is not None:
            # Recompute on new conditional expr
            expr = given(expr, condition, **kwargs)
        if not random_symbols(expr):
            # Deterministic expression: the density is a point mass at expr.
            return Lambda(x, DiracDelta(x - expr))
        if (isinstance(expr, RandomSymbol) and
                hasattr(expr.pspace, 'distribution') and
                isinstance(pspace(expr), SinglePSpace)):
            # Plain random symbol: return its distribution object directly.
            return expr.pspace.distribution
        result = pspace(expr).compute_density(expr, **kwargs)
        if evaluate and hasattr(result, 'doit'):
            return result.doit()
        else:
            return result
def density(expr, condition=None, evaluate=True, numsamples=None, **kwargs):
    """
    Probability density of a random expression, optionally given a second
    condition.
    This density will take on different forms for different types of
    probability spaces. Discrete variables produce Dicts. Continuous
    variables produce Lambdas.
    Parameters
    ==========
    expr : Expr containing RandomSymbols
        The expression of which you want to compute the density value
    condition : Relational containing RandomSymbols
        A conditional expression. density(X > 1, X > 0) is density of X > 1
        given X > 0
    numsamples : int
        Enables sampling and approximates the density with this many samples
    Examples
    ========
    >>> from diofant.stats import Die, Normal
    >>> D = Die('D', 6)
    >>> X = Normal(x, 0, 1)
    >>> density(D).dict
    {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
    >>> density(2*D).dict
    {2: 1/6, 4: 1/6, 6: 1/6, 8: 1/6, 10: 1/6, 12: 1/6}
    >>> density(X)(x)
    sqrt(2)*E**(-x**2/2)/(2*sqrt(pi))
    """
    if not numsamples:
        # Exact computation via the Density expression object.
        kwargs['evaluate'] = evaluate
        return Density(expr, condition).doit(**kwargs)
    # Monte-carlo approximation of the density.
    return sampling_density(expr, condition, numsamples=numsamples, **kwargs)
def cdf(expr, condition=None, evaluate=True, **kwargs):
    """
    Cumulative Distribution Function of a random expression.
    optionally given a second condition
    This density will take on different forms for different types of
    probability spaces.
    Discrete variables produce Dicts.
    Continuous variables produce Lambdas.
    Examples
    ========
    >>> from diofant.stats import Die, Normal
    >>> D = Die('D', 6)
    >>> X = Normal('X', 0, 1)
    >>> density(D).dict
    {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}
    >>> cdf(D)
    {1: 1/6, 2: 1/3, 3: 1/2, 4: 2/3, 5: 5/6, 6: 1}
    >>> cdf(3*D, D > 2)
    {9: 1/4, 12: 1/2, 15: 3/4, 18: 1}
    >>> cdf(X)
    Lambda(_z, erf(sqrt(2)*_z/2)/2 + 1/2)
    """
    if condition is not None:
        # Condition the expression first, then take its unconditional CDF.
        conditioned = given(expr, condition, **kwargs)
        return cdf(conditioned, **kwargs)
    # Delegate to the probability space of the expression.
    result = pspace(expr).compute_cdf(expr, **kwargs)
    if not (evaluate and hasattr(result, 'doit')):
        return result
    return result.doit()
def where(condition, given_condition=None, **kwargs):
    """
    Returns the domain where a condition is True.
    Examples
    ========
    >>> from diofant.stats import Die, Normal
    >>> D1, D2 = Die('a', 6), Die('b', 6)
    >>> a, b = D1.symbol, D2.symbol
    >>> X = Normal('x', 0, 1)
    >>> where(X**2 < 1)
    Domain: (-1 < x) & (x < 1)
    >>> where(X**2 < 1).set
    (-1, 1)
    >>> where(And(D1 <= D2, D2 < 3))
    Domain: (Eq(a, 1) & Eq(b, 1)) | (Eq(a, 1) & Eq(b, 2)) | (Eq(a, 2) & Eq(b, 2))
    """
    if given_condition is None:
        # Delegate directly to the probability space of the condition.
        return pspace(condition).where(condition, **kwargs)
    # Apply the given condition first, then ask again unconditionally.
    conditioned = given(condition, given_condition, **kwargs)
    return where(conditioned, **kwargs)
def sample(expr, condition=None, **kwargs):
    """
    A realization of the random expression
    Examples
    ========
    >>> from diofant.stats import Die
    >>> X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
    >>> die_roll = sample(X + Y + Z) # A random realization of three dice
    """
    # Draw exactly one value from the sampling iterator.
    one_sample_iter = sample_iter(expr, condition, numsamples=1)
    return next(one_sample_iter)
def sample_iter(expr, condition=None, numsamples=oo, **kwargs):
    """
    Returns an iterator of realizations from the expression given a condition
    expr: Random expression to be realized
    condition: A conditional expression (optional)
    numsamples: Length of the iterator (defaults to infinity)
    Examples
    ========
    >>> from diofant.stats import Normal
    >>> X = Normal('X', 0, 1)
    >>> expr = X*X + 3
    >>> iterator = sample_iter(expr, numsamples=3)
    >>> list(iterator) # doctest: +SKIP
    [12, 4, 7]
    See Also
    ========
    diofant.stats.sample
    diofant.stats.rv.sampling_P
    diofant.stats.rv.sampling_E
    """
    # Use the joint probability space when a condition is supplied so that
    # the condition is evaluated on the same sampled values as expr.
    if condition is not None:
        ps = pspace(Tuple(expr, condition))
    else:
        ps = pspace(expr)
    rvs = list(ps.values)
    # Compile expr (and the condition) into fast numeric callables.
    fn = lambdify(rvs, expr, **kwargs)
    if condition is not None:
        given_fn = lambdify(rvs, condition, **kwargs)
    # Check that lambdify can handle the expression
    # Some operations like Sum can prove difficult
    try:
        d = ps.sample()  # a dictionary that maps RVs to values
        args = [d[rv] for rv in rvs]
        fn(*args)
        if condition is not None:
            given_fn(*args)
    except (TypeError, ValueError):
        raise TypeError('Expr/condition too complex for lambdify')

    def return_generator():
        # Rejection sampling: keep drawing until `numsamples` draws satisfy
        # the condition (rejected draws do not advance the count).
        count = 0
        while count < numsamples:
            d = ps.sample()  # a dictionary that maps RVs to values
            args = [d[rv] for rv in rvs]
            if condition is not None:  # Check that these values satisfy the condition
                gd = given_fn(*args)
                if gd not in (True, False):
                    raise ValueError(
                        'Conditions must not contain free symbols')
                if not gd:  # If the values don't satisfy then try again
                    continue
            yield fn(*args)
            count += 1
    return return_generator()
def sampling_P(condition, given_condition=None, numsamples=1,
               evalf=True, **kwargs):
    """
    Sampling version of P.

    Estimates the probability of ``condition`` by drawing ``numsamples``
    realizations and counting the fraction that satisfy it.

    Parameters
    ==========
    condition : Relational/Boolean containing RandomSymbols
    given_condition : optional conditioning expression
    numsamples : int
        Number of samples to draw.
    evalf : Bool (defaults to True)
        If True return a numeric value, otherwise the exact Rational.

    See Also
    ========
    diofant.stats.P
    diofant.stats.rv.sampling_E
    diofant.stats.rv.sampling_density
    """
    count_true = 0
    samples = sample_iter(condition, given_condition,
                          numsamples=numsamples, **kwargs)
    for sampled_value in samples:
        if sampled_value:
            count_true += 1
    result = Integer(count_true) / numsamples
    # Bug fix: honor the documented ``evalf`` flag instead of always
    # evaluating numerically (also dropped the unused count_false local).
    if evalf:
        return result.evalf()
    return result
def sampling_E(expr, given_condition=None, numsamples=1,
               evalf=True, **kwargs):
    """
    Sampling version of E.

    Approximates the expectation of ``expr`` as the mean of ``numsamples``
    sampled realizations.

    Parameters
    ==========
    expr : random expression to average
    given_condition : optional conditioning expression
    numsamples : int
        Number of samples to draw.
    evalf : Bool (defaults to True)
        If True return a numeric value, otherwise the exact symbolic mean.

    See Also
    ========
    diofant.stats.P
    diofant.stats.rv.sampling_P
    diofant.stats.rv.sampling_density
    """
    samples = sample_iter(expr, given_condition,
                          numsamples=numsamples, **kwargs)
    result = Add(*list(samples)) / numsamples
    # Bug fix: honor the documented ``evalf`` flag instead of always
    # evaluating numerically.
    if evalf:
        return result.evalf(strict=False)
    return result
def sampling_density(expr, given_condition=None, numsamples=1, **kwargs):
    """
    Sampling version of density
    See Also
    ========
    diofant.stats.density
    diofant.stats.rv.sampling_P
    diofant.stats.rv.sampling_E
    """
    # Histogram of sampled outcomes: outcome -> occurrence count.
    counts = {}
    sample_stream = sample_iter(expr, given_condition,
                                numsamples=numsamples, **kwargs)
    for outcome in sample_stream:
        if outcome in counts:
            counts[outcome] += 1
        else:
            counts[outcome] = 1
    return counts
def dependent(a, b):
    """
    Dependence of two random expressions
    Two expressions are independent if knowledge of one does not change
    computations on the other.
    Examples
    ========
    >>> from diofant.stats import Normal
    >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    >>> dependent(X, Y)
    False
    >>> dependent(2*X + Y, -Y)
    True
    >>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
    >>> dependent(X, Y)
    True
    See Also
    ========
    diofant.stats.rv.independent
    """
    # Disjoint probability spaces guarantee independence.
    if pspace_independent(a, b):
        return False
    z = Symbol('z', extended_real=True)
    # Dependent iff conditioning either expression on the other changes its
    # density; short-circuit on the first difference found.
    if density(a, Eq(b, z)) != density(a):
        return True
    return density(b, Eq(a, z)) != density(b)
def independent(a, b):
    """
    Independence of two random expressions
    Two expressions are independent if knowledge of one does not change
    computations on the other.
    Examples
    ========
    >>> from diofant.stats import Normal
    >>> X, Y = Normal('X', 0, 1), Normal('Y', 0, 1)
    >>> independent(X, Y)
    True
    >>> independent(2*X + Y, -Y)
    False
    >>> X, Y = given(Tuple(X, Y), Eq(X + Y, 3))
    >>> independent(X, Y)
    False
    See Also
    ========
    diofant.stats.rv.dependent
    """
    # Independence is simply the negation of dependence.
    if dependent(a, b):
        return False
    return True
def pspace_independent(a, b):
    """
    Tests for independence between a and b by checking if their PSpaces have
    overlapping symbols. This is a sufficient but not necessary condition for
    independence and is intended to be used internally.

    Returns True when the symbol sets are disjoint, False otherwise.

    Notes
    =====
    pspace_independent(a, b) implies independent(a, b)
    independent(a, b) does not imply pspace_independent(a, b)
    """
    # Bug fix: the two locals were swapped (a_symbols was built from b and
    # vice versa). Intersection is symmetric so results were unaffected, but
    # the names were misleading. Also return False explicitly instead of
    # falling off the end (returning None).
    a_symbols = set(pspace(a).symbols)
    b_symbols = set(pspace(b).symbols)
    if not a_symbols.intersection(b_symbols):
        return True
    return False
def rv_subs(expr):
    """Given a random expression replace all random variables with their symbols."""
    rvs = random_symbols(expr)
    if not rvs:
        # Nothing random to replace; return the expression untouched.
        return expr
    return expr.xreplace({rv: rv.symbol for rv in rvs})
class NamedArgsMixin:
    """Helper class for named arguments.

    Subclasses set ``_argnames`` to a tuple of names; attribute access for
    one of those names then resolves to the argument at the matching index
    of ``self.args``.
    """
    _argnames = ()

    def __getattr__(self, attr):
        try:
            return self.args[self._argnames.index(attr)]
        except ValueError:
            # Bug fix: grammar of the error message ("has no attribute",
            # matching Python's standard AttributeError wording).
            raise AttributeError("'%s' object has no attribute '%s'" % (
                type(self).__name__, attr))
def _value_check(condition, message):
    """Validate a condition on an input value.

    Raises
    ======
    ValueError
        With ``message``, when ``condition`` does not equal ``true``.
    """
    if condition == true:
        return
    raise ValueError(message)
| |
#!/usr/bin/env python
import rospy
import rosbag
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointField
from std_msgs.msg import Int64
from laser_geometry import LaserProjection
import numpy as np
import matplotlib.pyplot as plt
import math
import os
BASE_DIR = os.path.dirname( os.path.abspath(__file__) )
# each scan is along x axis
# raw z is negative
def rotate_2d(angle):
    """Return the 2x2 counter-clockwise rotation matrix for *angle* (radians)."""
    c = np.cos(angle)
    s = np.sin(angle)
    return np.array([[c, -s], [s, c]])
class RotateTo3D:
    '''
    Accumulates 2D LaserScan slices into a 3D point cloud by rotating each
    scan about the sensor axis, and records the result to a rosbag.

    self.status: 'waitting' --start--> 'scanning' --stop--> 'waitting'
    '''

    def __init__(self):
        # When True, each start/stop cycle is saved as a separate model.
        self.separate_models = False
        self.auto_pub_ref_at_frame = 5
        # Sweep parameters: the platform turns pi radians over `pi_angle`
        # time units while the scanner produces `fre` scans per unit.
        # NOTE(review): exact units are not visible here — confirm.
        pi_angle = 41
        speed = 1.0 * math.pi / pi_angle
        fre = 49.5
        # Rotation angle advanced between two consecutive scans.
        self.increment_theta = 1.0 * speed / fre
        # Lever-arm offset (presumably metres) between rotation axis and
        # scan origin — TODO confirm.
        self.z0_offset = 4 * 0.01
        #self.z0_offset = 10 * 0.01
        #self.z0_offset = 0 * 0.01
        self.status = 'waiting'
        self.pcl_3d = None          # PointCloud2 built from the latest scan
        self.all_3d_points_ls = []  # accumulated 3D points across scans
        self.scanN = 0              # scans processed since the last start
        self.theta = math.pi * 0.2  # current sweep angle (radians)
        self.pcl_n = 0              # model counter (separate_models mode)
        self.pcl_3d_pub = rospy.Publisher('pcl_3d',PointCloud2,queue_size=10)
        self.fig_dif = plt.figure()
        self.ax_dif = self.fig_dif.add_subplot(111)
        self.received_n = 0
        # Output bag named after the key acquisition parameters.
        res_path = os.path.join( BASE_DIR,'3d_res' )
        if not os.path.exists( res_path ):
            os.makedirs( res_path )
        self.res_bag_name = os.path.join( res_path, 'pcl_3d-zofs_%d-piangle_%d-fre_%d.bag'%(self.z0_offset*100, pi_angle, fre*10) )
        self.pcl3d_bag = rosbag.Bag( self.res_bag_name,'w')
        rospy.loginfo( 'res path:%s'%(self.res_bag_name) )

    def start(self):
        # Begin a new sweep: reset all per-sweep accumulators.
        self.status = 'start'
        self.scanN = 0
        self.pcl_3d = None
        self.all_3d_points_ls = []
        # NOTE(review): 'sart' looks like a typo, but it is a runtime log
        # string so it is left unchanged here.
        rospy.loginfo('received sart command')

    def stop(self):
        # Mark the sweep finished; push() performs the actual teardown.
        self.status = 'stop'
        rospy.loginfo('received stop command, theta: %0.2f'%(self.theta*180.0/math.pi))

    def from_2D_to_3D( self, point_2d ):
        """Rotate one in-plane 2D point into the 3D frame of the current scan.

        point_2d is a point tuple from pc2.read_points; fields 3 and 4 are
        carried through unchanged (presumably intensity/ring — confirm).
        """
        x0 = point_2d[0]
        y0 = point_2d[1]
        # The sweep angle grows linearly with the number of scans seen.
        self.theta = theta = self.scanN * self.increment_theta
        xy = np.matmul( rotate_2d( self.theta ), np.array( [[y0],[self.z0_offset]] ) )
        point_3d = [ xy[0,0], xy[1,0], x0, point_2d[3], point_2d[4] ]
        return point_3d

    #def from_2D_to_3D( self, point_2d ):
    #    x0 = point_2d[1]
    #    y0 = point_2d[0]
    #    self.theta = theta = self.scanN * self.increment_theta
    #    x = x0 * math.cos(theta)
    #    y = -x0 * math.sin(theta)
    #    z = y0
    #    point_3d = [x, y, z, point_2d[3], point_2d[4]]
    #    return point_3d

    def add_data( self, pcl_LaserScan, dif_start=None, dif_end=None ) :
        """Convert one projected scan to 3D points and append them.

        When dif_start/dif_end are given, only points whose index lies in
        [dif_start, dif_end] are kept.
        """
        gen_data = pc2.read_points(pcl_LaserScan, field_names=None, skip_nans=True)
        curscan_points = []
        #if self.pcl_3d != None:
        #    gen_trunk = pc2.read_points(self.pcl_3d, field_names=None,skip_nans=True)
        #    for p in gen_trunk:
        #        curscan_points.append(list(p))
        for idx, p in enumerate(gen_data):
            if dif_start==None or ( idx >= dif_start and idx <= dif_end ):
                point_2d = list(p) #[ x,y,z,?,? ] z==0
                point_3d = self.from_2D_to_3D( point_2d )
                curscan_points.append(point_3d)
                #if self.scanN % 100 == 0 and idx==0:
                #    rospy.loginfo( 'scanN= %d, point_2d:%s, point_3d:%s'%( self.scanN, point_2d, point_3d ) )
        self.all_3d_points_ls += curscan_points
        # Publish-ready cloud holding only the current scan's points.
        self.pcl_3d = pc2.create_cloud(pcl_LaserScan.header, pcl_LaserScan.fields, curscan_points)

    def xyz_from_pcl(self,pcl):
        """Extract an [N,3] array of coordinates from a point cloud.

        NOTE(review): the slice [1:4] skips field 0 of each point — confirm
        the intended field layout.
        """
        gen = pc2.read_points(pcl, field_names=None, skip_nans=True)
        points = []
        for p in gen:
            xyz = np.array(list(p)[1:4])
            if points == []:
                points = xyz
            else:
                points = np.vstack((points,xyz))
        return points

    def update_scan_increment(self):
        '''
        do this at the end
        '''
        # NOTE(review): self.trunk_length is not assigned anywhere in this
        # class as shown — confirm it is set externally before calling.
        self.increment = self.trunk_length / self.scanN
        rospy.loginfo('increment = %f / %d = %f',self.trunk_length,self.scanN,self.increment)

    def push(self,data_LaserScan):
        """Handle one incoming LaserScan according to the current status.

        Returns (scanN, theta) so the caller can track sweep progress.
        """
        # rospy.loginfo('project data_LaserScan to PointCloud OK')
        pcl_LaserScan = LaserProjection().projectLaser(data_LaserScan)
        points_xyz = self.xyz_from_pcl(pcl_LaserScan) # points_xyz: [N,3] [:,1]=0
        # print "scan point N = ",points_xyz.shape[0]," / ", pcl_LaserScan.width, " rangesN = ",len(data_LaserScan.ranges)
        if self.status == 'start' or self.status == 'scanning':
            if self.status == 'start':
                self.status = 'scanning'
            # Accumulate this scan, publish it and log it to the bag.
            self.add_data( pcl_LaserScan )
            self.scanN += 1
            self.pcl_3d_pub.publish(self.pcl_3d)
            self.pcl3d_bag.write( 'pcl_3d', self.pcl_3d )
        elif self.status == 'stop':
            # NOTE(review): 'waitting' here vs 'waiting' in __init__ — the
            # inconsistency is harmless for the comparisons in this class
            # but worth confirming.
            self.status = 'waitting'
            if self.separate_models:
                self.pcl_n = self.pcl_n + 1
                # NOTE(review): self.reset() is not defined in this class as
                # shown — confirm it exists elsewhere.
                self.reset()
            self.pcl3d_bag.close()
            rospy.loginfo('stop recording, save this model: ' + self.res_bag_name )
        # Auto-stop once the sweep passes just beyond a half turn (181 deg).
        if self.status == 'scanning' and self.theta > 181.0 * math.pi / 180:
            self.stop()
        return self.scanN, self.theta

    def dif_range(self,points_xyz):
        '''
        Compare the difference between points_xyz and self.ref_points_xyz.
        Return the index of dif_start and dif_end
        '''
        # NOTE(review): self.ref_points_xyz, self.height_axis,
        # self.dif_threshold and the scan_dif* publishers are not assigned
        # in this class as shown — confirm they are set externally.
        min_N = min(points_xyz.shape[0],self.ref_points_xyz.shape[0])
        dif = points_xyz[0:min_N,self.height_axis] - self.ref_points_xyz[0:min_N,self.height_axis]
        dif = np.fabs(dif)
        threshold = self.dif_threshold
        dif_N = sum([ d > threshold for d in dif ])
        self.scan_difN_pub.publish(dif_N)
        if dif_N > 5:
            dif_start = len(dif)
            dif_end = 0
            for i,d in enumerate(dif):
                # Start: first sample above threshold confirmed by two of
                # the next three samples also exceeding it.
                if dif_start==len(dif) and d > threshold and i+3<len(dif) and dif[i+1] > threshold and dif[i+3] > threshold:
                    dif_start = i
                    self.scan_difStart_pub.publish(dif_start)
                # End: first drop below threshold after the start (or the
                # last index while still above threshold).
                if dif_start < len(dif) and i > dif_start and ( d < threshold or (d > threshold and i==len(dif)-1 ) ):
                    dif_end = i
                    self.scan_difEnd_pub.publish(dif_end)
                    if dif_end - dif_start > 3:
                        break
                    else:
                        # rospy.loginfo('short dif_range: dif_start= %d dif_end= %d dif_len= %d',dif_start,dif_end,dif_end-dif_start)
                        dif_start = len(dif)
                        dif_end = 0
            return True,dif_start,dif_end
        else:
            return False,0,0

    def volume_from_bag(self,model_bag_file):
        """Replay pcl_3d messages from a saved bag and process each one."""
        model_bag = rosbag.Bag(model_bag_file)
        msg_gen = model_bag.read_messages(topics='pcl_3d')
        for topic,msg,t in msg_gen:
            # NOTE(review): pcl_volume is not defined in this class as shown
            # (note the stray space after ``self.``) — confirm it exists
            # elsewhere.
            self. pcl_volume(msg)
if __name__ == '__main__':
    # Bug fix: use the print() function instead of the Python 2 print
    # statement so the entry point is valid under both Python 2 and 3.
    print('in main')
    #TVD = RotateTo3D()
    #TVD.volume_from_bag('model_result_new/empty.bag')
| |
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run Horovod distributed Tensorflow Training benchmark."""
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver
FLAGS = flags.FLAGS
MACHINEFILE = 'HOSTFILE'
BENCHMARK_VERSION = 0.34
BENCHMARK_NAME = 'horovod'
BENCHMARK_CONFIG = """
horovod:
description: Runs Horovod. Specify the number of VMs with --num_vms
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-highmem-96
zone: us-central1-a
image_family: tf-latest-gpu-gvnic-debian-10
image_project: deeplearning-platform-release
boot_disk_size: 300
gpu_type: v100
gpu_count: 8
AWS:
machine_type: p3dn.24xlarge
zone: us-east-1a
boot_disk_size: 300
Azure:
machine_type: Standard_NC24rs_v3
image: microsoft-dsvm:aml-workstation:ubuntu:19.11.13
zone: eastus
boot_disk_size: 300
vm_count: null
"""
# TODO(user): Use NVIDIA's repo after
# https://github.com/NVIDIA/DeepLearningExamples/pull/386 is merged
GITHUB_MODELS_URL = 'https://github.com/changlan/DeepLearningExamples.git'
BERT_BASE_URL = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip'
BERT_LARGE_URL = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip'
flags.DEFINE_enum(
'horovod_model', 'resnet-50',
['resnet-50', 'bert-base', 'bert-large', 'maskrcnn', 'resnext-101'],
'name of the model to run.')
flags.DEFINE_integer('horovod_batch_size', 64, 'Batch size per compute device.')
flags.DEFINE_integer('horovod_num_steps', 10,
'Number of steps (epochs for BERT) to train for. ')
flags.DEFINE_bool('horovod_synthetic', False,
'Whether to train with synthetic data.')
flags.DEFINE_enum('horovod_max_seq_len', '128', ['128', '384'],
'Max sequence length for BERT.')
flags.DEFINE_enum('horovod_precision', 'fp16', ['fp16', 'fp32'], 'Precision.')
flags.DEFINE_bool('horovod_bert_finetune', True,
'Pretrain or finetune a BERT model.')
flags.DEFINE_bool('horovod_timeline', False, 'Enable timeline in Horovod.')
class HorovodParseOutputError(errors.Benchmarks.RunError):
  """Raised when the Horovod benchmark output cannot be parsed."""
  pass
def GetConfig(user_config):
  """Load and return benchmark config.
  Args:
    user_config: user supplied configuration (flags and config file)
  Returns:
    loaded benchmark configuration
  """
  # Merge the user-supplied overrides into the default YAML config.
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  return config
def _CopyAndUpdateRunScripts(model, vm):
  """Copy and update all necessary run scripts on the given vm.
  Args:
    model: name of the model
    vm: vm to place and update run scripts on
  """
  # Clone the benchmark scripts once; the guard skips if already present.
  vm.RemoteCommand(
      '[ -d "DeepLearningExamples" ] || git clone --branch clan-dev %s' %
      GITHUB_MODELS_URL)
  # MaskRCNN
  if model == 'maskrcnn':
    # Pretrained ResNet-50 backbone weights plus the COCO 2017 dataset.
    vm.RemoteCommand(
        'wget -q -N http://models.tensorpack.com/FasterRCNN/ImageNet-R50-AlignPadding.npz'
    )
    vm.RemoteCommand(
        'mkdir -p coco && cd coco && '
        'wget -q -N http://images.cocodataset.org/zips/train2017.zip && '
        'wget -q -N http://images.cocodataset.org/zips/val2017.zip && '
        'wget -q -N http://images.cocodataset.org/annotations/annotations_trainval2017.zip && '
        'unzip -q -o train2017.zip && unzip -q -o val2017.zip && '
        'unzip -q -o annotations_trainval2017.zip && rm *.zip')
  # BERT
  bert_base_dir = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT'
  if model == 'bert-base' or model == 'bert-large':
    # SQuAD v1.1 training set used for fine-tuning.
    vm.RemoteCommand(
        'mkdir -p {bert}/data/download/google_pretrained_weights &&'
        'mkdir -p {bert}/data/download/squad/v1.1 && '
        'cd {bert}/data/download/squad/v1.1 && '
        'wget -q https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json'
        .format(bert=bert_base_dir))
  # Pretrained weights for the selected BERT variant.
  get_bert_data_cmd = ('cd {bert}/data/download/google_pretrained_weights/ && '
                       'wget -q {url} && unzip -o $(basename {url})')
  if model == 'bert-base':
    vm.RemoteCommand(
        get_bert_data_cmd.format(bert=bert_base_dir, url=BERT_BASE_URL))
  if model == 'bert-large':
    vm.RemoteCommand(
        get_bert_data_cmd.format(bert=bert_base_dir, url=BERT_LARGE_URL))
def PrepareHorovod(vm):
  """Install dependencies on a single vm.
  Args:
    vm: vm to operate on
  """
  logging.info('Installing Horovod on %s', vm)
  vm.AuthenticateVm()
  vm.Install('google_cloud_sdk')
  vm.Install('openmpi')
  vm.InstallPackages('wget git unzip')
  vm.Install('nccl')
  # Pick the pip that belongs to the TensorFlow environment on each cloud.
  pip = 'pip'
  if FLAGS.cloud == 'GCP':
    pip = '/opt/conda/bin/pip'
    vm.RemoteCommand(f'sudo {pip} install --force-reinstall pyarrow')
  elif FLAGS.cloud == 'AWS':
    vm.RobustRemoteCommand('. anaconda3/bin/activate tensorflow_p37')
    pip = 'anaconda3/envs/tensorflow_p37/bin/pip'
  # 10.0 -> 110
  cuda_version = cuda_toolkit.GetCudaToolkitVersion(vm).replace('.', '')
  # NVIDIA DALI data-loading packages matching the installed CUDA version.
  vm.RemoteCommand(
      f'sudo {pip} install '
      '--extra-index-url https://developer.download.nvidia.com/compute/redist/ '
      'git+https://github.com/NVIDIA/dllogger.git '
      f'nvidia-dali-cuda{cuda_version}')
  vm.RemoteCommand(
      f'sudo {pip} install '
      '--extra-index-url https://developer.download.nvidia.com/compute/redist/ '
      f'nvidia-dali-tf-plugin-cuda{cuda_version}')
  # Rebuild Horovod from source with NCCL, TensorFlow and MPI support so it
  # matches the freshly installed toolchain.
  vm.RemoteCommand(f'sudo {pip} uninstall -y horovod')
  vm.RemoteCommand(
      f'sudo HOROVOD_GPU_OPERATIONS=NCCL HOROVOD_WITH_TENSORFLOW=1 HOROVOD_WITH_MPI=1 {pip} install -U --no-cache horovod'
  )
  # Extra Python deps used by the MaskRCNN path (COCO API, tensorpack).
  vm.RemoteCommand(
      f'sudo {pip} install pynvml cython scipy \'opencv-python==3.4.2.17\'')
  vm.RemoteCommand(
      f'sudo {pip} install \'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI\''
  )
  vm.RemoteCommand(
      f'[ -d "tensorpack" ] || git clone https://github.com/tensorpack/tensorpack.git && sudo {pip} install ./tensorpack'
  )
  _CopyAndUpdateRunScripts(FLAGS.horovod_model, vm)
def Prepare(benchmark_spec):
  """Install and set up Horovod on the target vms.
  Args:
    benchmark_spec: The benchmark specification
  """
  worker_vms = benchmark_spec.vms
  # Install dependencies on all workers in parallel, then write the MPI
  # hostfile used by mpirun.
  vm_util.RunThreaded(PrepareHorovod, worker_vms)
  hpc_util.CreateMachineFile(worker_vms, nvidia_driver.QueryNumberOfGpus,
                             MACHINEFILE)
def _CreateMetadataDict(vms):
  """Create metadata dict to be used in run results.
  Args:
    vms: A list of worker VMs.
  Returns:
    metadata dict
  """
  head_vm = vms[0]
  gpus_per_node = nvidia_driver.QueryNumberOfGpus(head_vm)
  total_gpus = gpus_per_node * len(vms)
  # Start from the CUDA toolkit metadata, then layer on run parameters.
  metadata = dict(cuda_toolkit.GetMetadata(head_vm))
  metadata.update({
      'benchmark_version': BENCHMARK_VERSION,
      'num_nodes': len(vms),
      'total_gpus': int(total_gpus),
      'model': FLAGS.horovod_model,
      'batch_size': FLAGS.horovod_batch_size,
      'num_steps': FLAGS.horovod_num_steps,
      'synthetic': FLAGS.horovod_synthetic,
      'precision': FLAGS.horovod_precision,
      'max_seq_len': int(FLAGS.horovod_max_seq_len),
      'nccl_version': FLAGS.nccl_version,
      'nccl_net_plugin': FLAGS.nccl_net_plugin,
      'cuda_visible_devices': FLAGS.nccl_cuda_visible_devices,
      'nccl_extra_params': FLAGS.nccl_extra_params,
  })
  return metadata
def _ExtractResNetThroughput(output):
  """Extract throughput from Horovod output.
  Args:
    output: Horovod output
  Returns:
    A tuple of:
      Average throuput in images per second (float)
      Unit of the throughput metric (str)
  """
  # Scan from the last line backwards and use the most recent report.
  for line in reversed(output.splitlines()):
    if 'train_throughput' in line:
      tokens = line.split()
      return round(float(tokens[-1]), 1), 'images/second'
  # No throughput line found: report zero.
  return 0, 'images/second'
def _ExtractBertThroughput(output):
  """Extract throughput from Horovod output.
  Args:
    output: Horovod output
  Returns:
    A tuple of:
      Average throughput in sentences per second (float)
      Unit of the throughput metric (str)
  """
  marker = 'Throughput Average (sentences/sec) ='
  # Scan from the last line backwards and use the most recent report.
  for line in reversed(output.splitlines()):
    if marker in line:
      tokens = line.split()
      return round(float(tokens[-1]), 1), 'sentences/second'
  # No throughput line found: report zero.
  return 0, 'sentences/second'
def _ExtractMaskRCNNThroughput(output):
  """Extract throughput from Horovod output.
  Args:
    output: Horovod output
  Returns:
    A tuple of:
      Average throughput (float) over all reported values
      Unit of the throughput metric (str)
  """
  throughputs = []
  unit = None
  # Walk the output backwards, collecting every throughput report; the unit
  # keeps the value from the last line processed.
  for line in reversed(output.splitlines()):
    if 'Throughput' in line:
      tokens = line.split()
      throughputs.append(float(tokens[-1]))
      unit = tokens[-2][1:-2]
  if not throughputs:
    raise ValueError('No "Throughput" found in {}'.format(output))
  return round(sum(throughputs) / len(throughputs), 1), unit
def _MakeSamplesFromOutput(vms, stdout, stderr):
  """Create a sample containing the measured Horovod throughput.
  Args:
    vms: a list of worker VMs
    stdout: stdout
    stderr: stderr
  Returns:
    list of a Sample containing the Horovod throughput
  """
  metadata = _CreateMetadataDict(vms)
  # Dispatch to the extractor matching the benchmarked model.
  extractors = {
      'resnet-50': _ExtractResNetThroughput,
      'resnext-101': _ExtractResNetThroughput,
      'bert-base': _ExtractBertThroughput,
      'bert-large': _ExtractBertThroughput,
      'maskrcnn': _ExtractMaskRCNNThroughput,
  }
  combined_output = stdout + stderr
  throughput, unit = extractors[FLAGS.horovod_model](combined_output)
  return [sample.Sample('Training throughput', throughput, unit, metadata)]
def Run(benchmark_spec):
  """Wrapper of RunWithVMs.
  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  Returns:
    A list of sample.Sample objects.
  """
  # Delegate to the VM-list entry point used by composite benchmarks.
  vms = benchmark_spec.vms
  return RunWithVMs(vms)
def RunWithVMs(vms, extra_envs=None):
  """Run Horovod on the cluster.
  Args:
    vms: A list of worker VMs.
    extra_envs: A dictionary of environment variables.
  Returns:
    A list of sample.Sample objects.
  """
  # Clear results from any previous run on every worker.
  vm_util.RunThreaded(lambda vm: vm.RemoteCommand('rm -rf /tmp/models'), vms)
  master_vm = vms[0]
  gpus_per_node = nvidia_driver.QueryNumberOfGpus(master_vm)
  num_vms = len(vms)
  total_gpus = gpus_per_node * num_vms
  # GCP should work out of the box with the deep learning image but the AWS
  # image requires us to use the correct Tensorflow Python environment.
  if FLAGS.cloud == 'AWS':
    master_vm.RobustRemoteCommand('. anaconda3/bin/activate tensorflow_p37')
    python_interpreter = 'anaconda3/envs/tensorflow_p37/bin/python'
  else:
    python_interpreter = '/opt/conda/bin/python'
  # Environment variables exported to every MPI rank via `-x`.
  nccl_params = {
      'TF_CPP_MIN_LOG_LEVEL': 0,
      'NCCL_SOCKET_IFNAME': '^lo,docker0',
      'NCCL_DEBUG': 'INFO',
  }
  if FLAGS.horovod_timeline:
    nccl_params['HOROVOD_TIMELINE_MARK_CYCLES'] = 1
    nccl_params['HOROVOD_TIMELINE'] = f'{vm_util.VM_TMP_DIR}/timeline.json'
  if FLAGS.nccl_cuda_visible_devices:
    nccl_params['CUDA_VISIBLE_DEVICES'] = FLAGS.nccl_cuda_visible_devices
  if FLAGS.nccl_extra_params:
    for extra_param in FLAGS.nccl_extra_params:
      k, v = extra_param.split('=', 1)
      nccl_params[k] = v
  if extra_envs:
    nccl_params.update(extra_envs)
  # Base mpirun invocation; the model-specific script and its flags are
  # appended below.
  run_command = (
      '{mpi} -np {num_gpus} -hostfile {host_file} '
      '-mca plm_rsh_no_tree_spawn 1 '
      '--allow-run-as-root '
      '-bind-to socket -map-by slot '
      '{nccl_params} '
      '-mca pml ob1 -mca btl ^openib '
      '-mca btl_tcp_if_exclude lo,docker0 '
      '{python} ').format(
          mpi=FLAGS.nccl_mpi,
          num_gpus=total_gpus,
          host_file=MACHINEFILE,
          python=python_interpreter,
          nccl_params=' '.join(
              [f'-x {key}={value}' for key, value in nccl_params.items()]))
  if FLAGS.horovod_model == 'resnet-50':
    run_flags = {
        'arch': 'resnet50',
        'mode': 'training_benchmark',
        'warmup_steps': 101,
        'results_dir': '/tmp/models',
        'gpu_memory_fraction': 0.95,
        'static_loss_scale': 128,
        'lr_init': 0.016,
        'lr_warmup_epochs': 8,
        'momentum': 0.875,
        'weight_decay': 3.0517578125e-05,
        'iter_unit': 'batch'
    }
    run_flags.update({
        'batch_size': FLAGS.horovod_batch_size,
        'num_iter': FLAGS.horovod_num_steps,
    })
    # A value of None renders the flag without '=value' (bare switch).
    if FLAGS.horovod_precision == 'fp16':
      run_flags['amp'] = None
    # Load ImageNet training data from GCS if benchmark is not in synthetic mode
    if not FLAGS.horovod_synthetic:
      run_flags['data_dir'] = 'gs://cloud-ml-nas-public/classification/imagenet'
    run_command += 'DeepLearningExamples/TensorFlow/Classification/ConvNets/main.py '
    run_command += ' '.join([
        '--{}'.format(key) if value is None else '--{}={}'.format(key, value)
        for key, value in sorted(run_flags.items())
    ])
  elif FLAGS.horovod_model == 'resnext-101':
    run_flags = {
        'arch': 'resnext101-32x4d',
        'mode': 'training_benchmark',
        'warmup_steps': 101,
        'results_dir': '/tmp/models',
        'gpu_memory_fraction': 0.95,
        'use_static_loss_scaling': None,
        'loss_scale': 128,
        'lr_init': 0.016,
        'lr_warmup_epochs': 8,
        'momentum': 0.875,
        'weight_decay': 3.0517578125e-05,
        'weight_init': 'fan_in',
        'iter_unit': 'batch'
    }
    run_flags.update({
        'precision': FLAGS.horovod_precision,
        'batch_size': FLAGS.horovod_batch_size,
        'num_iter': FLAGS.horovod_num_steps,
    })
    # Load ImageNet training data from GCS if benchmark is not in synthetic mode
    if not FLAGS.horovod_synthetic:
      run_flags['data_dir'] = 'gs://cloud-ml-nas-public/classification/imagenet'
    run_command += 'DeepLearningExamples/TensorFlow/Classification/ConvNets/main.py '
    run_command += ' '.join([
        '--{}'.format(key) if value is None else '--{}={}'.format(key, value)
        for key, value in sorted(run_flags.items())
    ])
  elif FLAGS.horovod_model.startswith('bert'):  # bert
    if not FLAGS.horovod_bert_finetune:
      raise NotImplementedError('BERT pretraining is not supported.')
    # Pretrained weights directory depends on the BERT variant.
    bert_dir = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/data/download/google_pretrained_weights/{}'.format(
        'uncased_L-12_H-768_A-12' if FLAGS.horovod_model ==
        'bert-base' else 'uncased_L-24_H-1024_A-16')
    squad_train_file = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/data/download/squad/v1.1/train-v1.1.json'
    run_flags = {
        'vocab_file': '{}/vocab.txt'.format(bert_dir),
        'bert_config_file': '{}/bert_config.json'.format(bert_dir),
        'init_checkpoint': '{}/bert_model.ckpt'.format(bert_dir),
        'do_train': None,
        'train_file': squad_train_file,
        'learning_rate': 5e-6,
        'output_dir': '/tmp/models',
        'horovod': None,
        'dllog_path': '/tmp/bert_dllog.json',
        'save_checkpoints_steps': 0,
    }
    run_flags.update({
        'precision': FLAGS.horovod_precision,
        'train_batch_size': FLAGS.horovod_batch_size,
        'num_train_epochs': FLAGS.horovod_num_steps,
        'max_seq_length': FLAGS.horovod_max_seq_len,
        'doc_stride': 64 if FLAGS.horovod_max_seq_len == 128 else 128,
        'amp': FLAGS.horovod_precision == 'fp16'
    })
    run_command += 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_squad.py '
    run_command += ' '.join([
        '--{}'.format(key) if value is None else '--{}={}'.format(key, value)
        for key, value in sorted(run_flags.items())
    ])
  else:
    # MaskRCNN via tensorpack.
    run_command += (
        'tensorpack/examples/FasterRCNN/train.py --config '
        'BACKBONE.WEIGHTS=ImageNet-R50-AlignPadding.npz '
        'DATA.BASEDIR=coco '
        'TRAINER=horovod '
        'TRAIN.EVAL_PERIOD=0 '
        # LR_SCHEDULE means equivalent steps when the total batch size is 8.
        'TRAIN.LR_SCHEDULE="[{step}, {step}, {step}]" '
        '--logdir {log_dir}/maskrcnn ').format(
            log_dir=vm_util.VM_TMP_DIR,
            step=FLAGS.horovod_num_steps * total_gpus // 8)
  stdout, stderr = master_vm.RobustRemoteCommand(run_command, should_log=True)
  if FLAGS.horovod_timeline:
    # Retrieve the Horovod timeline trace for offline analysis.
    master_vm.PullFile(vm_util.GetTempDir(),
                       '{}/timeline.json'.format(vm_util.VM_TMP_DIR))
  return _MakeSamplesFromOutput(vms, stdout, stderr)
def Cleanup(benchmark_spec):
  """Cleanup Horovod on the cluster.

  Horovod leaves nothing to tear down on the VMs, so this is a no-op;
  the argument is deleted only to mark it as intentionally unused.
  """
  del benchmark_spec
| |
#!/home/ubuntu/anaconda2/envs/tensorflow/bin/python
# MIT License
# Copyright (c) 2016 Druce Vertes drucev@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pull requests gratefully accepted.
# possible improvements
# - run fast on GPU. currently pin to device("/cpu:0") which runs ~5x faster
# - optimizer with adaptive learning rate and momentum - AdamOptimizer gives error
from __future__ import print_function
import argparse
import pickle
from time import strftime
import sys
import six
import random
import pdb
import tensorflow as tf
import numpy as np
import pandas as pd
import lifetable
import matplotlib.pyplot as plt
#%matplotlib inline
# TensorFlow numeric type to use for floating point variables
# tf.float32 is 2x faster but doesn't provide necessary accuracy
# tf.float64 will run out of accuracy for high gamma (> 8)
float_type = tf.float64
############################################################
# returns 1928-2015
############################################################
# Calendar span of the historical return series defined below.
first_year = 1928
last_year = 2015
years = range(first_year, last_year+1) # pythonically yields [1928, 1929...2015]
years_history = len(years)
# length of each simulated retirement, in years
years_retired = 30
# number of complete 30-year retirement cohorts the history supports
num_cohorts = years_history - years_retired + 1
num_assets = 2
#gamma = 1.0
# annual nominal S&P 500 total returns, one value per year 1928-2015
sp500 = pd.Series([0.4381,-0.083,-0.2512,-0.4384,-0.0864,0.4998,-0.0119,0.4674,0.3194,-0.3534,0.2928,-0.011,
                   -0.1067,-0.1277,0.1917,0.2506,0.1903,0.3582,-0.0843,0.052,0.057,0.183,0.3081,0.2368,0.1815,
                   -0.0121,0.5256,0.326,0.0744,-0.1046,0.4372,0.1206,0.0034,0.2664,-0.0881,0.2261,0.1642,0.124,
                   -0.0997,0.238,0.1081,-0.0824,0.0356,0.1422,0.1876,-0.1431,-0.259,0.37,0.2383,-0.0698,0.0651,
                   0.1852,0.3174,-0.047,0.2042,0.2234,0.0615,0.3124,0.1849,0.0581,0.1654,0.3148,-0.0306,0.3023,
                   0.0749,0.0997,0.0133,0.372,0.2268,0.331,0.2834,0.2089,-0.0903,-0.1185,-0.2197,0.2836,0.1074,
                   0.0483,0.1561,0.0548,-0.3655,0.2594,0.1482,0.021,0.1589,0.3215,0.1352,0.0136], index = years)
# annual nominal bond returns, one value per year 1928-2015
bonds=pd.Series([0.0084,0.042,0.0454,-0.0256,0.0879,0.0186,0.0796,0.0447,0.0502,0.0138,0.0421,0.0441,
                 0.054,-0.0202,0.0229,0.0249,0.0258,0.038,0.0313,0.0092,0.0195,0.0466,0.0043,-0.003,
                 0.0227,0.0414,0.0329,-0.0134,-0.0226,0.068,-0.021,-0.0265,0.1164,0.0206,0.0569,0.0168,
                 0.0373,0.0072,0.0291,-0.0158,0.0327,-0.0501,0.1675,0.0979,0.0282,0.0366,0.0199,0.0361,
                 0.1598,0.0129,-0.0078,0.0067,-0.0299,0.082,0.3281,0.032,0.1373,0.2571,0.2428,-0.0496,
                 0.0822,0.1769,0.0624,0.15,0.0936,0.1421,-0.0804,0.2348,0.0143,0.0994,0.1492,-0.0825,
                 0.1666,0.0557,0.1512,0.0038,0.0449,0.0287,0.0196,0.1021,0.201,-0.1112,0.0846,0.1604,
                 0.0297,-0.091,0.1075,0.0128], index=years)
# annual CPI inflation rates, one value per year 1928-2015
cpi=pd.Series([-0.0115607,0.005848,-0.0639535,-0.0931677,-0.1027397,0.0076336,0.0151515,0.0298507,
               0.0144928,0.0285714,-0.0277778,0,0.0071429,0.0992908,0.0903226,0.0295858,0.0229885,
               0.0224719,0.1813187,0.0883721,0.0299145,-0.0207469,0.059322,0.06,0.0075472,0.0074906,
               -0.0074349,0.0037453,0.0298507,0.0289855,0.0176056,0.017301,0.0136054,0.0067114,0.0133333,
               0.0164474,0.0097087,0.0192308,0.0345912,0.0303951,0.0471976,0.0619718,0.0557029,0.0326633,
               0.0340633,0.0870588,0.1233766,0.0693642,0.0486486,0.0670103,0.0901771,0.1329394,0.125163,
               0.0892236,0.0382979,0.0379098,0.0394867,0.0379867,0.010979,0.0443439,0.0441941,0.046473,
               0.0610626,0.0306428,0.0290065,0.0274841,0.026749,0.0253841,0.0332248,0.017024,0.016119,
               0.0268456,0.0338681,0.0155172,0.0237691,0.0187949,0.0325556,0.0341566,0.0254065,0.0408127,
               0.0009141,0.0272133,0.0149572,0.0296,0.0174,0.015,0.0076,0.0073], index=years)
# Default start
# default glide path: stock allocation declines from 80% to 50% over retirement
# (original comment said "50/50 allocations"; the values below say otherwise)
stock_start_alloc = 0.8
stock_end_alloc = 0.5
# 2% each const and var spending
startval = 100
const_spend_pct = .02
const_spend = startval * const_spend_pct
var_spend_pcts = pd.Series(np.ones(years_retired) * 0.02)
# real returns approximated as nominal return minus CPI inflation
real_stocks = sp500 - cpi
real_bonds = bonds - cpi
############################################################
# generate a life table for a 65 year-old male retiree
# a 30-year retirement
# not used in this notebook example
############################################################
lt = lifetable.genLifetable(lifetable.MlivesSeries, lifetable.MLEseries, 65, 30)
#print(lt)
# survival probabilities, one per retirement year
survival = np.array(lt.survival)
#print(survival)
#print(survival.shape)
class SafeWithdrawalModel:
    """Holds the TensorFlow graph, session, and parameters shared by all
    retirement cohorts.

    Builds constant ops for the historical asset returns, Variable ops for
    the quantities being optimized (constant spending, per-year variable
    spending percentages, per-year stock allocation), and penalty ops used
    as soft constraints during optimization.  Written against the pre-1.0
    TensorFlow API (tf.sub/tf.mul/tf.div/tf.neg/tf.pack) and Python 2.
    """
    def __init__(self,
                 returns_list, # series with returns for assets
                 names_list, # names of assets
                 start_alloc, # starting stock allocation
                 end_alloc, #ending stock allocation
                 start_val, # starting portfolio value e.g. 100
                 const_spend, # constant spending (dollar amount per year)
                 var_spend_pcts, # variable spending (% of portfolio) per retirement year
                 gamma, # risk-aversion coefficient for CRRA utility
                 survival, # per-year survival probabilities, or None
                 verbose=False):
        # read params, initialize Tensorflow graph and session
        # set up ops specific to model
        self.verbose=verbose
        # NOTE(review): this reads the module-level global 'startval', not the
        # 'start_val' parameter -- probably intended start_val; confirm
        self.startval=startval
        self.returns_list = returns_list
        self.names_list = names_list
        self.start_alloc = start_alloc
        self.end_alloc = end_alloc
        self.num_assets = len(self.names_list)
        self.start_val = start_val
        self.ret_years = len(var_spend_pcts)
        self.const_spend = const_spend
        self.var_spend_pcts = var_spend_pcts
        self.survival=survival
        self.gamma = gamma
        # model will have a cohort_history object, optimizer object
        # initialize with placeholder, needs rest of model initialized first
        self.cohort_history = None
        self.optimizer = None
        # derive the historical span from the first return series' index
        self.first_year = returns_list[0].index[0]
        self.last_year = returns_list[0].index[-1]
        self.total_cohorts = len(returns_list[0])
        self.ret_cohorts = self.total_cohorts - self.ret_years + 1
        print('%s Create TensorFlow graph and session' % strftime("%H:%M:%S"))
        self.graph = tf.Graph()
        self.sess = tf.Session(graph = self.graph)
        self.return_ops = []
        self.allocation_ops = []
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                # some constants
                self.zero = tf.constant(0.0, dtype=float_type, name="zero")
                self.one = tf.constant(1.0, dtype=float_type, name="one")
                self.one_hundred = tf.constant(100.0, dtype=float_type, name="one_hundred")
                self.ten_thousand = tf.constant(10000.0, dtype=float_type, name="ten_thousand")
                # NOTE(review): the name "one_million" is used for both of the
                # next two constants; TF uniquifies duplicate op names, but the
                # duplication looks like a copy/paste slip
                self.one_hundred_thousand = tf.constant(100000.0, dtype=float_type, name="one_million")
                self.one_million = tf.constant(1000000.0, dtype=float_type, name="one_million")
                # tiny floor used to keep log() finite when spending hits zero
                self.very_small_amts = tf.constant(np.array([0.000001] * self.ret_years), dtype=float_type, name="very_small_amts")
                self.zero_years = tf.constant(np.zeros(self.ret_years), dtype=float_type, name = "zero_years")
                self.one_years = tf.constant(np.ones(self.ret_years), dtype=float_type, name="one_years")
                self.ret_years_op = tf.constant(self.ret_years, dtype=float_type, name="ret_years")
                #gamma
                self.gamma_op = tf.constant(gamma, dtype=float_type, name="gamma")
                self.one_minus_gamma = tf.sub(self.one, self.gamma, name="one_minus_gamma")
                self.inv_one_minus_gamma = tf.div(self.one, self.one_minus_gamma, name="inv_one_minus_gamma")
                # scale factor applied to every soft-constraint penalty
                self.cost_multiplier = self.ten_thousand
                # generate op for start_val
                self.start_val_op = tf.constant(100.0, dtype=float_type, name ="port_start_val")
                # generate ops for returns
                for prefix, return_series in zip(names_list, returns_list):
                    self.return_ops.append(self.gen_tf_const_list(return_series, "%s_return" % prefix,
                                                                  verbose=self.verbose))
                # only implemented for n=2 assets
                # generate ops for allocations for first n-1 assets
                # NOTE(review): both Variables below reuse the op name
                # "port_start_val" already used for start_val_op above
                self.start_alloc_op = tf.Variable(self.start_alloc, dtype=float_type, name ="port_start_val")
                self.sess.run(self.start_alloc_op.initializer)
                self.end_alloc_op = tf.Variable(self.end_alloc, dtype=float_type, name ="port_start_val")
                self.sess.run(self.end_alloc_op.initializer)
                prefix = "alloc_%s" % names_list[0]
                print("hello %d" % self.ret_years)
                stock_alloc_ops = self.gen_stock_allocs(self.start_alloc_op, self.end_alloc_op, self.ret_years, prefix, verbose=False)
                print("hello %d" % len(stock_alloc_ops))
                self.allocation_ops.append(stock_alloc_ops)
                # TODO: just constrain start and end
                # ops for soft constraints: 0 < stock allocation < 1
                self.alloc_min_0_ops = self.gen_zero_min_list(stock_alloc_ops, "alloc_min_0", verbose=self.verbose)
                self.cost_alloc_min_0_op = tf.mul(self.cost_multiplier,
                                                  tf.add_n(self.alloc_min_0_ops, name="cost_alloc_min_0"))
                self.alloc_max_1_ops = self.gen_one_max_list(stock_alloc_ops, "alloc_max_1", verbose=self.verbose)
                self.cost_alloc_max_1_op = tf.mul(self.cost_multiplier,
                                                  tf.add_n(self.alloc_max_1_ops, name = "cost_alloc_max_1"))
                # TODO: just constrain start and end
                # ops for soft constraints: declining stock allocation
                # why do we do this? for example, 1966 is the worst cohort, and 1974 is its worst stock return (-40%)
                # to maximize CE, optimization sets stock allocation at a minimum to not run out of money in worst cohort
                # it will go e.g. 80% stock alloc in year 8 and add 56% in year 9, return to 80% in year 10
                # to avoid artifacts like that, knowing stock allocation should decline over time, we add this constraint
                # add a large penalty when stock allocation increases from one year to next
                self.alloc_decrease_ops = self.gen_diff_list(stock_alloc_ops, "alloc_decrease", verbose=self.verbose)
                self.cost_alloc_decrease_op = tf.mul(self.cost_multiplier,
                                                     tf.add_n(self.alloc_decrease_ops, name="alloc_decrease_cost_op"))
                # last asset is 1-previous assets, we ignore if allocs are provided
                bond_alloc_ops = []
                var_prefix = "%s_alloc" % names_list[1]
                print ('%s Create ops for %s' % (strftime("%H:%M:%S"), var_prefix))
                for ix, op in enumerate(stock_alloc_ops):
                    var_name = "%s_%d" % (var_prefix, ix)
                    if self.verbose:
                        print("Create %s" % (var_name))
                    # bond allocation is the complement of the stock allocation
                    var_op = tf.sub(self.one, stock_alloc_ops[ix], name=var_name)
                    bond_alloc_ops.append(var_op)
                self.allocation_ops.append(bond_alloc_ops)
                # generate ops for const, var spending
                self.const_spending_op = tf.Variable(const_spend, dtype=float_type, name="const_spend")
                self.sess.run(self.const_spending_op.initializer)
                self.var_spending_ops = self.gen_tf_var_list(self.var_spend_pcts, "var_spend", verbose=self.verbose)
                # all ops to be trained
                self.all_var_ops = [self.const_spending_op] + self.var_spending_ops + self.allocation_ops[0]
                # op for soft constraint: const spending > 0
                self.cspend_min_0_op = tf.maximum(self.zero, tf.neg(self.const_spending_op,
                                                                    name="neg_cspend_min_0_op"),
                                                  name="cspend_min_0_op")
                self.cost_cspend_min_0_op = tf.mul(self.cost_multiplier, self.cspend_min_0_op, name="cost_cspend_min_0")
                # op for soft constraint: var spending > 0
                self.vspend_min_0_ops = self.gen_zero_min_list(self.var_spending_ops, "vspend_min_0", verbose=self.verbose)
                self.cost_vspend_min_0_op = tf.mul(self.cost_multiplier,
                                                   tf.add_n(self.vspend_min_0_ops, name="cost_vspend_min_0"))
                if survival is not None:
                    survival_array=np.array(survival)
                    self.survival_tensor = tf.constant(survival_array, dtype=float_type, name="survival_tensor")
                # global step counter
                self.step_count = tf.Variable(0, dtype=float_type, name="step_count", trainable=False)
                self.increment_step = self.step_count.assign_add(1)
                #init op
                self.init_op = tf.initialize_all_variables()
    def gen_stock_allocs(self, startval_op, endval_op, ret_years, prefix, verbose=False):
        "take a start op, end op, generate list of ret_years ops interpolating from startval_op to endval_op"
        print ('%s Create stock alloc ops %s' % (strftime("%H:%M:%S"), prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                stock_allocs = []
                stock_allocs.append(startval_op)
                # per-year step = (end - start) / (ret_years - 1)
                # NOTE(review): divides by ret_years-1, so ret_years must be >= 2
                increment_op = tf.div(
                    tf.sub(endval_op, startval_op, name="sub_%s" % prefix),
                    tf.constant(ret_years-1, dtype=float_type, name="divisor_%s" % prefix),
                    name="inc_%s" % prefix)
                #print(self.sess.run(increment_op))
                # interior years 1..ret_years-2 are linear interpolations;
                # endpoints are the start/end Variables themselves
                for ix in range(1, ret_years-1):
                    op = tf.add(startval_op,
                                tf.mul(increment_op, ix, name="%s_offset_%d" % (prefix, ix)),
                                name="%s_%d" % (prefix, ix))
                    #print(self.sess.run(op))
                    stock_allocs.append(op)
                stock_allocs.append(endval_op)
                return stock_allocs
    def __del__(self):
        """When deleting model, close session, clear default graph"""
        # best-effort cleanup; uses Python 2 'except Exception, e' syntax
        print("Destructor reset graph")
        try:
            with self.graph.as_default():
                tf.reset_default_graph()
        except Exception, e:
            print ("Destructor couldn't reset graph: %s" % str(e))
        try:
            print ("Destructor close Tensorflow session")
            self.sess.close()
        except Exception, e:
            print ("Destructor couldn't close session: %s" % str(e))
    def gen_tf_const_list(self, const_iter, const_prefix, start_index=0, verbose=False):
        "take a list or iterator of values, generate and return tensorflow constant ops for each"
        print ('%s Create constants %s' % (strftime("%H:%M:%S"), const_prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                const_list = []
                for ix, const in enumerate(const_iter):
                    const_name = "%s_%d" % (const_prefix, start_index + ix)
                    if verbose:
                        print("Set constant %s to %f" % (const_name, const))
                    const_list.append(tf.constant(const, dtype=float_type, name=const_name))
                return const_list
    def gen_tf_var_list(self, var_iter, var_prefix, start_index=0, verbose=False):
        "take a list or iterator of values, generate and return tensorflow Variable ops for each"
        print ('%s Create variables %s' % (strftime("%H:%M:%S"), var_prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                var_list = []
                for ix, var in enumerate(var_iter):
                    var_name = "%s_%d" % (var_prefix, start_index + ix)
                    if verbose:
                        print("Create variable %s to %f" % (var_name, var))
                    # initialize each Variable eagerly so it can be read immediately
                    var_op = tf.Variable(var, dtype=float_type, name=var_name)
                    self.sess.run(var_op.initializer)
                    var_list.append(var_op)
                return var_list
    def get_op_from_list(self, op_list, op_index):
        "take a list of ops, return value of op specified by op_index"
        # returns a 1-element list (the result of sess.run on a 1-element fetch list)
        op = op_list[op_index]
        retval = self.sess.run([op])
        return retval
    def gen_zero_min_list(self, op_iter, op_prefix, start_index=0, verbose=False):
        "take a list or iterator of ops, generate and return an op with is max(-op, 0) for soft constraints > 0"
        print ('%s Create ops for soft constraint %s > 0' % (strftime("%H:%M:%S"), op_prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                op_list = []
                for ix, op in enumerate(op_iter):
                    op_name = "%s_%d" % (op_prefix, start_index + ix)
                    if verbose:
                        print("Zero_min op %s" % (op_name))
                    # max(-op, 0): positive (a penalty) only when op < 0
                    new_op = tf.maximum(self.zero, tf.neg(op, name="neg_%s" % op_name), name=op_name)
                    op_list.append(new_op)
                return op_list
    def gen_one_max_list(self, op_iter, op_prefix, start_index=0, verbose=False):
        "take a list or iterator of ops, generate and return an op with is max(op-1, 0) for soft constraints > 0"
        print ('%s Create ops for soft constraint %s < 1' % (strftime("%H:%M:%S"), op_prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                op_list = []
                for ix, op in enumerate(op_iter):
                    op_name = "%s_%d" % (op_prefix, start_index + ix)
                    if verbose:
                        print("One_max op %s" % (op_name))
                    # max(op-1, 0): positive (a penalty) only when op > 1
                    new_op = tf.maximum(self.zero, tf.sub(op, self.one, name="one_minus_%s" % op_name), name=op_name)
                    op_list.append(new_op)
                return op_list
    def gen_diff_list(self, op_iter, op_prefix, start_index=0, verbose=False):
        "generate and return an op for declining stock alloc constraint over time, max of 0 and decrease"
        print ('%s Create ops for soft constraint, declining stock alloc %s' % (strftime("%H:%M:%S"), op_prefix))
        with self.graph.as_default():
            with tf.device("/cpu:0"):
                op_list = []
                for ix, op in enumerate(op_iter):
                    if ix == 0:
                        continue;
                    op_name = "%s_%d" % (op_prefix, start_index + ix)
                    if verbose:
                        print("diff op %s" % (op_name))
                    # max(op[ix]-op[ix-1], 0): positive only when the allocation increases
                    new_op = tf.maximum(self.zero, tf.sub(op_iter[ix], op_iter[ix-1]))
                    op_list.append(new_op)
                return op_list
    def gen_ce(self, input_tensor, prefix, survival_tensor=None):
        """Build an op computing certainty-equivalent (CE) spending for a 1-D
        tensor of spending amounts under CRRA utility with the model's gamma,
        optionally weighted by survival_tensor.
        """
        # NOTE(review): device context is entered before the graph context
        # here, the reverse of every other builder -- confirm intentional
        with tf.device("/cpu:0"):
            with self.graph.as_default():
                input_length = np.float64(input_tensor.get_shape().as_list()[0])
                print("%s Create ce op with gamma: %f" % (strftime("%H:%M:%S"), self.gamma))
                if self.gamma == 1.0:
                    # log utility (the gamma -> 1 limit of CRRA)
                    u = tf.reduce_mean(tf.log(input_tensor), name="%s_u" % (prefix))
                    #print(self.sess.run(u))
                    if survival_tensor is not None:
                        u0 = u
                        u = tf.reduce_mean(tf.mul(u0, survival_tensor, name="%s_u_surv" % (prefix)), name="%s_u" % (prefix))
                    ce = tf.mul(tf.exp(u), input_length, name="%s_ce" % (prefix))
                    print ('%s Create CE op %f' % (strftime("%H:%M:%S"), self.sess.run(ce)))
                else:
                    # for high gamma numerical error is significant, and calculation is most accurate near 1 and
                    # so divide by mean
                    input_mean = tf.reduce_mean(input_tensor, name="%s_mean" % (prefix))
                    input_conditioned = tf.div(input_tensor, input_mean, name="%s_conditioned" % (prefix))
                    u1 = tf.pow(input_conditioned, self.one_minus_gamma, name="%s_u1" % (prefix))
                    u2 = tf.sub(u1, self.one, name="%s_u2" % (prefix))
                    u3 = tf.mul(u2, self.inv_one_minus_gamma, name="%s_u3" % (prefix))
                    u = tf.reduce_mean(u3, name="%s_u" % (prefix))
                    if survival_tensor is not None:
                        u4 = u
                        u = tf.reduce_mean(tf.mul(u4, survival_tensor, name="%s_u_surv" % (prefix)), name="%s_u" % (prefix))
                    # invert the utility transform to get back to dollar units
                    ce1 = tf.mul(self.one_minus_gamma, u, name="%s_ce1" % (prefix))
                    ce2 = tf.add(ce1, self.one, name="%s_ce2" % (prefix))
                    ce3 = tf.pow(ce2, self.inv_one_minus_gamma, name="%s_ce3" % (prefix))
                    ce = tf.mul(input_mean, ce3, name="%s_ce" % (prefix))
                    print ('%s Create CE op %f' % (strftime("%H:%M:%S"), self.sess.run(ce)))
                return ce
class Cohort:
    """Cohort represents experience of an individual
    - retiring in a given year
    - using the specified SafeWithdrawal model"""
    def __init__(self, model, cohort_start_year):
        # model: shared SafeWithdrawalModel; cohort_start_year: first year of retirement
        self.model = model
        self.cohort_start_year = cohort_start_year
        self.name = "cohort_%d" % cohort_start_year
        print("%s Instantiating cohort %s" % (strftime("%H:%M:%S"), self.name))
        self.gen_tf_ops()
    def gen_tf_ops(self):
        """Build this cohort's year-by-year portfolio ops: blended return,
        pre-spend value, spend amount, end-of-year value, plus summary stats
        (mean/sd/min/max/total spending) and the certainty-equivalent op."""
        stock_returns = self.model.return_ops[0]
        bond_returns = self.model.return_ops[1]
        stock_allocs = self.model.allocation_ops[0]
        bond_allocs = self.model.allocation_ops[1]
        self.port_returns_list = []
        self.port_prespend_list = []
        self.port_end_vals_list = []
        self.spend_amts_list = []
        self.spend_amts_nonzero_list = []
        with self.model.graph.as_default():
            with tf.device("/cpu:0"):
                print ("%s Generating %d years from %d" % (strftime("%H:%M:%S"), self.model.ret_years, self.cohort_start_year))
                start_year_ix = self.cohort_start_year - self.model.first_year
                for ix in range(self.model.ret_years):
                    op_stock_return = stock_returns[start_year_ix + ix]
                    op_stock_alloc = stock_allocs[ix]
                    op_bond_return = bond_returns[start_year_ix + ix]
                    op_bond_alloc = bond_allocs[ix]
                    op_const_spend = self.model.const_spending_op
                    op_var_spend = self.model.var_spending_ops[ix]
                    # portfolio return = alloc-weighted sum of asset returns
                    op_total_real_return = tf.add(tf.mul(op_stock_alloc, op_stock_return, name="%s_stock_%d" % (self.name, ix)),
                                                  tf.mul(op_bond_alloc, op_bond_return, name="%s_bond_%d" % (self.name, ix)),
                                                  name="%s_total_return_%d" % (self.name, ix))
                    self.port_returns_list.append(op_total_real_return)
                    # first year grows the initial portfolio; later years chain
                    # off the previous year's post-spend value
                    if ix == 0:
                        prev_val = self.model.start_val_op
                    else:
                        prev_val = self.port_end_vals_list[ix-1]
                    op_port_end_val_prespend = tf.add(prev_val,
                                                      tf.mul(prev_val, self.port_returns_list[ix], name="%s_dolreturn_%d" % (self.name, ix)),
                                                      name="%s_prespend_%d" % (self.name, ix))
                    self.port_prespend_list.append(op_port_end_val_prespend)
                    # desired spend = const amount + var % of current portfolio
                    desired_spend_amt = tf.add(tf.mul(op_var_spend, op_port_end_val_prespend, name="%s_des_vspend_%d" % (self.name, ix)),
                                               op_const_spend,
                                               name="%s_desired_spend_amt_%d" % (self.name, ix))
                    #spend minimum of tmp_spend_amt, port value
                    spend_amt = tf.minimum(desired_spend_amt, op_port_end_val_prespend, name="%s_actual_spend_amt_%d" % (self.name, ix))
                    self.spend_amts_list.append(spend_amt)
                    op_port_end_val = tf.sub(op_port_end_val_prespend, spend_amt, name="%s_endval_%d" % (self.name, ix))
                    self.port_end_vals_list.append(op_port_end_val)
                #now that we've computed cohort paths we pack results into 1D Tensors to calc objective
                self.spend_amts = tf.pack(self.spend_amts_list, name="%s_spend_amts" % (self.name))
                self.port_end_vals = tf.pack(self.port_end_vals_list, name="%s_port_end_vals" % (self.name))
                self.mean_spending = tf.reduce_mean(self.spend_amts, name="%s_mean_spending" % (self.name))
                self.sd_spending = tf.sqrt(tf.reduce_mean(tf.pow(tf.sub(self.spend_amts, self.mean_spending), 2)),
                                           name="%s_sd_spending" % (self.name))
                self.min_spending = tf.reduce_min(self.spend_amts, name="%s_min_spending" % (self.name))
                self.max_spending = tf.reduce_max(self.spend_amts, name="%s_max_spending" % (self.name))
                if self.model.gamma == 1.0:
                    #spend a tiny amount even if spend is 0 so log is not NaN
                    #doesn't really seem like best practice but...
                    #0 spend years can't be in final solution
                    #and don't want divide by zero errors if optimizer attempts one
                    #chain new op off old op but keep a reference to old op around just in case
                    # NOTE(review): 'ix' below is the leaked loop variable (last
                    # year's index), so the op name always ends in the final year
                    self.spend_amts_maybe_zero = self.spend_amts
                    self.spend_amts = tf.maximum(self.spend_amts_maybe_zero,
                                                 self.model.very_small_amts,
                                                 name="%s_actual_spend_nonzero_%d" % (self.name, ix))
                    self.total_spending = tf.reduce_sum(self.spend_amts, name="%s_total_spending_nonzero" % (self.name))
                else:
                    self.total_spending = tf.reduce_sum(self.spend_amts, name="%s_total_spending" % (self.name))
                if self.model.survival is not None:
                    # NOTE(review): gen_ce_survival is not defined on the model
                    # class visible here -- presumably defined elsewhere; confirm
                    self.ce = self.model.gen_ce_survival(self.spend_amts,
                                                         self.model.survival_tensor,
                                                         "%s_ce" % (self.name))
                else:
                    self.ce = self.model.gen_ce(self.spend_amts,
                                                "%s_ce" % (self.name))
        #print (self.as_dataframe())
    def get_tf_ops(self):
        # NOTE(review): model.start_val is a plain number, not an op, despite
        # this method's name; the remaining entries are lists of ops
        return self.model.start_val, self.port_returns_list, self.port_prespend_list, self.spend_amts_list, self.port_end_vals_list, self.total_spending
    def as_dataframe(self):
        """Evaluate this cohort's path ops and return a DataFrame indexed by
        calendar year with columns portreturn/prespend/spend_amt/end_val."""
        port_returns_ops = self.port_returns_list
        port_prespend_ops = self.port_prespend_list
        spend_amts_ops = self.spend_amts_list
        port_end_vals_ops = self.port_end_vals_list
        total_spending = self.total_spending
        port_returns = self.model.sess.run(port_returns_ops)
        port_prespend = self.model.sess.run(port_prespend_ops)
        spend_amts = self.model.sess.run(spend_amts_ops)
        port_end_vals = self.model.sess.run(port_end_vals_ops)
        retlist = []
        for ix in range(self.model.ret_years):
            retlist.append([port_returns[ix],
                            port_prespend[ix],
                            spend_amts[ix],
                            port_end_vals[ix]
                        ])
        years = range(self.cohort_start_year, self.cohort_start_year+self.model.ret_years)
        return pd.DataFrame(retlist, index = years, columns=['portreturn', 'prespend', 'spend_amt', 'end_val'])
class CohortHistory:
    """represents a set of cohorts retiring in different years using a strategy,
    to enabling aggregating and summarizing their experiences"""
    def __init__(self, model, cohort_years = None):
        # model: shared SafeWithdrawalModel
        # cohort_years: retirement start years; defaults to every start year
        # for which a full ret_years retirement fits in the history
        self.model = model
        if cohort_years is None:
            cohort_years = [year for year in range(self.model.first_year,
                                                   self.model.first_year + self.model.ret_cohorts)]
        print('%s Create cohort history, years %d to %d' % (strftime("%H:%M:%S"),
                                                            cohort_years[0], cohort_years[-1]))
        self.cohort_list = [Cohort(model, year) for year in cohort_years]
        self.total_spending_ops = [cohort.total_spending for cohort in self.cohort_list]
    def as_dataframe(self):
        """report on on each cohort by year, e.g. 1928"""
        # gather the summary ops from every cohort, then evaluate each list
        # with its own sess.run call and zip the results into rows
        total_spending_ops = [cohort.total_spending for cohort in self.model.cohort_history.cohort_list]
        mean_spending_ops = [cohort.mean_spending for cohort in self.model.cohort_history.cohort_list]
        sd_spending_ops = [cohort.sd_spending for cohort in self.model.cohort_history.cohort_list]
        min_spending_ops = [cohort.min_spending for cohort in self.model.cohort_history.cohort_list]
        max_spending_ops = [cohort.max_spending for cohort in self.model.cohort_history.cohort_list]
        ce_ops = [cohort.ce for cohort in self.model.cohort_history.cohort_list]
        retlist = []
        years = range(self.model.first_year, self.model.first_year + self.model.ret_cohorts)
        for year, meanspend, sdspend, minspend, maxspend, totalspend, ce in zip(years,
                                                                                self.model.sess.run(mean_spending_ops),
                                                                                self.model.sess.run(sd_spending_ops),
                                                                                self.model.sess.run(min_spending_ops),
                                                                                self.model.sess.run(max_spending_ops),
                                                                                self.model.sess.run(total_spending_ops),
                                                                                self.model.sess.run(ce_ops)):
            retlist.append([meanspend, sdspend, minspend, maxspend, totalspend, ce])
        return pd.DataFrame(retlist, index = years, columns=['mean_spend', 'sd_spend', 'min_spend', 'max_spend', 'total_spend', 'ce'])
    def spend_by_year(self):
        """report spending by year for each cohort (ret_years rows x num_cohorts)"""
        dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
        years = range(self.model.ret_years)
        cohorts = range(len(dataframes))
        retlist = []
        for ix in years:
            spendlist = [df.spend_amt.iloc[ix] for df in dataframes]
            retlist.append(spendlist)
        # one column per cohort, labeled with its retirement start year
        colnames = ["%d" % (cohort+self.model.first_year) for cohort in cohorts]
        return pd.DataFrame(retlist, index = years, columns=colnames)
    def returns_by_year(self):
        """report returns by year for each cohort (ret_years rows x num_cohorts)"""
        dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
        years = range(self.model.ret_years)
        cohorts = range(len(dataframes))
        retlist = []
        for ix in years:
            returnlist = [df.portreturn.iloc[ix] for df in dataframes]
            retlist.append(returnlist)
        # one column per cohort, labeled with its retirement start year
        colnames = ["%d" % (cohort+self.model.first_year) for cohort in cohorts]
        return pd.DataFrame(retlist, index = years, columns=colnames)
    def summarize_by_year(self):
        """report on outcomes by retirement year, e.g. retirement year 1, 2...30"""
        dataframes = [cohort.as_dataframe() for cohort in self.model.cohort_history.cohort_list]
        years = range(self.model.ret_years)
        retlist = []
        for ix in years:
            # cross-cohort spending distribution for retirement year ix
            spendlist = np.array([df.spend_amt.iloc[ix] for df in dataframes])
            spend_mean = np.mean(spendlist)
            spend_sd = np.std(spendlist)
            spend_min = np.min(spendlist)
            spend_max = np.max(spendlist)
            retlist.append([spend_mean, spend_sd, spend_min, spend_max])
        return pd.DataFrame(retlist, index = years, columns=['spend_mean', 'spend_sd', 'spend_min', 'spend_max'])
# Optimizer
# Create an op which is the sum of spending in all years
# - negate it so it will be minimized
# - add large penalty when a stock allocation is < 0 as a soft constraint
# - add large penalty when a stock allocation is > 1 as a soft constraint
# - add large penalty when const or var spending is < 0 as a soft constraint
# - result is an op which can be minimized by gradient descent
class CohortHistoryOptimize():
    """Builds and runs the gradient-descent optimization for the model.

    Constructs a scalar cost op: the negated certainty-equivalent spending
    over all cohorts plus large soft-constraint penalties (allocations in
    [0, 1], spending >= 0, non-increasing stock allocation), then minimizes
    it step by step, remembering the best parameters seen so far.
    """
    def __init__(self, model):
        self.model = model
        self.best_objective = 0.0  # highest objective (= -cost) seen so far
        self.best_step = 0         # global step at which best objective occurred
        # BUG FIX: initialize here so run_step() works even if optimize()
        # hasn't been called yet (previously raised AttributeError)
        self.steps_ago = 0         # steps since the objective last improved
        graph = self.model.graph
        with graph.as_default():
            with tf.device("/cpu:0"):
                print ('%s Create optimizer class' % strftime("%H:%M:%S"))
                print ('%s Run variable initializers' % strftime("%H:%M:%S"))
                self.model.sess.run(model.init_op)
                print('%s Create cost ops' % strftime("%H:%M:%S"))
                print('%s Sum %d ce ops' % (strftime("%H:%M:%S"), len(self.model.cohort_history.cohort_list)))
                ce_ops = [cohort.ce for cohort in self.model.cohort_history.cohort_list]
                ce_tensor = tf.pack(ce_ops, name="all_cohorts_ce_tensor")
                # ce over ret_cohorts years
                self.total_ce_op = self.model.gen_ce(ce_tensor, "all_cohorts_ce")
                print("%s Total CE spend, all cohorts: %f" % (strftime("%H:%M:%S"), self.model.sess.run(self.total_ce_op)))
                # basic cost: maximizing CE == minimizing -CE
                cost_op_1 = tf.neg(self.total_ce_op, name="basic_cost")
                print("%s Raw cost objective: %f" % (strftime("%H:%M:%S"), self.model.sess.run(cost_op_1)))
                # layer the soft-constraint penalties on one at a time,
                # reporting the running cost after each
                cost_op_2 = tf.add(cost_op_1, model.cost_alloc_min_0_op, name="cost_add_alloc_min_0")
                print("%s Add soft constraint penalty if stock alloc < 0: %f" % (strftime("%H:%M:%S"),
                                                                                 self.model.sess.run(cost_op_2)))
                cost_op_3 = tf.add(cost_op_2, model.cost_alloc_max_1_op, name="cost_add_alloc_max_1")
                print("%s Add soft constraint penalty if stock alloc > 1: %f" % (strftime("%H:%M:%S"),
                                                                                 self.model.sess.run(cost_op_3)))
                cost_op_4 = tf.add(cost_op_3, model.cost_vspend_min_0_op, name="cost_vspend_min_0")
                print("%s Add soft constraint penalty if var spending < 0: %f" % (strftime("%H:%M:%S"),
                                                                                  self.model.sess.run(cost_op_4)))
                cost_op_5 = tf.add(cost_op_4, model.cost_cspend_min_0_op, name="cost_cspend_min_0")
                print("%s Add soft constraint if const spending < 0: %f" % (strftime("%H:%M:%S"),
                                                                            self.model.sess.run(cost_op_5)))
                self.cost_op = tf.add(cost_op_5, model.cost_alloc_decrease_op, name="cost_alloc_decrease")
                print("%s Add soft constraint if stock alloc increases in any year: %f" % (strftime("%H:%M:%S"),
                                                                                           self.model.sess.run(self.cost_op)))
                self.best_objective = -self.model.sess.run(self.cost_op)
                print("%s All inclusive objective to be minimized: %f" % (strftime("%H:%M:%S"),
                                                                          -self.best_objective))
                # snapshot the starting parameters as the best seen so far
                self.best_const_spend = self.model.sess.run(model.const_spending_op)
                self.best_var_spend = self.model.sess.run(model.var_spending_ops)
                self.best_stock_alloc = self.model.sess.run(model.allocation_ops[0])
    def run_step(self, report_steps=1):
        """Run one optimizer step.

        Applies gradients * learning rate to each variable to descend the
        gradient, increments the global step counter, and if the new
        objective is the best to date saves the parameters that produced it.
        Progress markers on stdout: '!' improved, '.' no improvement,
        'X' NaN objective.  Every report_steps steps a status line is
        printed; every 10*report_steps steps the best parameters are dumped.
        """
        _, step = self.model.sess.run([self.optimize_step,
                                       self.model.increment_step])
        self.steps_ago +=1
        cost = self.model.sess.run(self.cost_op)
        # NOTE: this assert fires before the np.isnan branch below can run,
        # so the 'X' path is only reachable when asserts are disabled (-O)
        assert not(np.isnan(cost)), "Objective is nan"
        objective = - cost
        if np.isnan(cost):
            sys.stdout.write('X')
            sys.stdout.flush()
        elif objective > self.best_objective:
            self.best_objective = objective
            # BUG FIX: read via self.model (previously used the module-level
            # global 'model', which broke when optimizing any other instance)
            self.best_const_spend = self.model.sess.run(self.model.const_spending_op)
            self.best_var_spend = self.model.sess.run(self.model.var_spending_ops)
            self.best_stock_alloc = self.model.sess.run(self.model.allocation_ops[0])
            self.best_step = step
            self.steps_ago = 0
            sys.stdout.write('!')
            sys.stdout.flush()
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
        if step % report_steps == 0:
            sys.stdout.write("\n%s step %d objective %f best %f (%d steps ago)\n" % (strftime("%H:%M:%S"),
                                                                                     step,
                                                                                     objective,
                                                                                     self.best_objective,
                                                                                     self.steps_ago))
            sys.stdout.flush()
            # every 10 report_steps show current best
            if step % (report_steps * 10) == 0:
                print ("\n#Objective: %f\n" % (self.best_objective))
                print ("const_spend = %f" % self.best_const_spend)
                print ("var_spend_pcts = pd.Series(%s)" % str(self.best_var_spend))
                print ("stock_allocations = pd.Series(%s)\n" %str(self.best_stock_alloc))
    def optimize(self, learning_rate, steps):
        """create the op for the optimizer using specified learning_rate, run for specified steps"""
        self.learning_rate = learning_rate
        self.steps = steps
        self.steps_ago = 0 # how many steps since objective improved
        print("%s Objective: %f" % (strftime("%H:%M:%S"), self.best_objective))
        print("%s Constant spending: %f" % (strftime("%H:%M:%S"), self.best_const_spend))
        print("%s Variable spending by year" % strftime("%H:%M:%S"))
        print(self.best_var_spend)
        print("%s Stock allocation by year" % strftime("%H:%M:%S"))
        print(self.best_stock_alloc)
        with self.model.graph.as_default():
            with tf.device("/cpu:0"):
                # minimize op
                print('%s Create optimizer (learning rate %.12f)' % (strftime("%H:%M:%S"), self.learning_rate))
                self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
                self.grads = self.optimizer.compute_gradients(self.cost_op)
                self.optimize_step = self.optimizer.apply_gradients(self.grads)
                # equivalent to: self.optimize_step = self.optimizer.minimize(self.cost_op)
                print('%s Create optimizer op and run %d steps' % (strftime("%H:%M:%S"), self.steps))
                # int() because the command line parses steps with type=float
                for i in range(int(self.steps)):
                    self.run_step()
if __name__ == "__main__":
    # Command line: learning_rate steps gamma fileprefix
    parser = argparse.ArgumentParser(prog='safewithdrawal.py',
                                     description='run optimization with specified learning rate, max unimproved steps',
                                     epilog="""example:
./safewithdrawal.py 0.000001 100 2.0 opt02
"""
    )
    parser.add_argument('learning_rate', type=float)
    parser.add_argument('steps', type=float)
    parser.add_argument('gamma', type=float)
    parser.add_argument('fileprefix')
    args = parser.parse_args()

    # file names for this run are derived from the prefix
    picklefile = "%s.pickle" % args.fileprefix
    csvfile = "summary_%s.csv" % args.fileprefix
    yearsfile = "years_%s.csv" % args.fileprefix
    returnsfile = "retyears_%s.csv" % args.fileprefix

    print('%s Start optimization session' % strftime("%H:%M:%S"))
    print('%s learning_rate: %.12f steps %d picklefile %s' % (strftime("%H:%M:%S"), args.learning_rate, args.steps, picklefile))

    # load the starting solution saved by a previous run
    print("opening picklefile %s" % picklefile)
    const_spend, var_spend_pcts, start_alloc, end_alloc = pickle.load(open(picklefile, "rb"))
    print("const spend: %f" % const_spend)
    print("variable spend:")
    print(var_spend_pcts)
    print("stock allocation:")
    # bug fix: the tuple was previously passed as a second argument to
    # print() instead of being %-interpolated into the format string
    print("%f to %f" % (start_alloc, end_alloc))

    model = SafeWithdrawalModel(returns_list = [real_stocks, real_bonds],
                                names_list = ["stocks","bonds"],
                                start_alloc = start_alloc,
                                end_alloc = end_alloc,
                                start_val = 100.0,
                                const_spend = const_spend,
                                var_spend_pcts = var_spend_pcts,
                                gamma = args.gamma,
                                survival=None
    )

    # generate cohorts and save per-year summaries
    model.cohort_history = CohortHistory(model)
    print('%s Summary by cohort' % strftime("%H:%M:%S"))
    print(model.cohort_history.as_dataframe())
    all_years = model.cohort_history.spend_by_year()
    all_years.to_csv(yearsfile, format="%.18f")
    ret_years = model.cohort_history.returns_by_year()
    ret_years.to_csv(returnsfile, format="%.18f")
    summary = model.cohort_history.summarize_by_year()
    print(summary)
    summary.to_csv(csvfile, format="%.18f")

    # run optimizer
    # set an initial learning rate that improves objective by a reasonable amount each step
    learning_rate = args.learning_rate
    model.optimizer = CohortHistoryOptimize(model)
    model.optimizer.optimize(learning_rate, steps=1)

    # continue optimizing without re-initializing vars or optimizer;
    # stop once the objective has gone `args.steps` iterations without
    # improvement (or after max_steps as a hard cap)
    max_steps = 100001  # add 1 to get one last iteration to print
    max_steps_unimproved = args.steps
    report_steps = 50
    learning_rate = model.optimizer.learning_rate
    for i in range(max_steps):
        model.optimizer.run_step(report_steps=report_steps)
        if model.optimizer.steps_ago >= max_steps_unimproved:  # no improvement for too long
            break

    # persist the best solution found for the next session
    const_spend = model.optimizer.best_const_spend
    var_spend_pcts = pd.Series(model.optimizer.best_var_spend)
    stock_allocations = pd.Series(model.optimizer.best_stock_alloc)
    bond_allocations = 1 - stock_allocations
    pickle_list = [const_spend, var_spend_pcts, stock_allocations, bond_allocations]
    pickle.dump(pickle_list, open(picklefile, "wb"))
| |
#
# Copyright (c) 2017 Luis F. Simoes (github: @lfsimoes)
#
# Licensed under the MIT License. See the LICENSE file for details.
from math import sqrt, exp
from functools import wraps, partial #, total_ordering
import numpy as np
import PyKEP as pk
from scipy.optimize import minimize_scalar
from .constants import MU_SUN, G0, SEC2DAY, DAY2SEC, I_sp, T_max, thrust_tol
from .multiobjective import pareto_front
# ==================================== ## ==================================== #
class obj_value(float):
    """
    `float` subclass whose numeric value is taken from the `.get_value()`
    of a freshly constructed object, which is kept available in `.obj`.

    Arithmetic and comparison operations on the object are thereby handled
    by `float`, acting on the object's reference value, without the object's
    class having to define any such methods itself.
    """
    def __new__(cls, obj_class, *args, **kwargs):
        wrapped = obj_class(*args, **kwargs)
        inst = super().__new__(cls, wrapped.get_value())
        inst.obj = wrapped
        return inst
def expand_kwargs(func):
    """
    Decorator letting keyword arguments destined for `func` arrive bundled
    in a dictionary passed as the final positional argument. Handy when a
    caller can forward *args but not **kwargs
    (e.g.: np.apply_along_axis, until 1.9.0).
    """
    @wraps(func)
    def unpacking_wrapper(*args, **kwargs):
        tail = args[-1]
        if isinstance(tail, dict):
            # last positional arg is a packed kwargs dict: unpack it
            return func(*args[:-1], **tail)
        return func(*args, **kwargs)
    return unpacking_wrapper
#@total_ordering
class lambert_eval(object):
    """
    Builds and evaluates a single Lambert-arc transfer between two bodies.

    Construction solves Lambert's problem for the given departure/arrival
    pair and time of flight, selects the solution with the smallest total
    dV, and validates it (Barker pre-check on the time of flight, then an
    acceleration feasibility check). On any failure, `fail()` marks the
    instance infeasible with a large dV penalty. `get_value()` exposes the
    selected dV, which `obj_value` uses to make instances comparable.
    """
    @expand_kwargs
    def __init__(self, leg_dT, dep_ast, arr_ast, dep_t, dep_m, *args, **kwargs):
        # leg_dT: time of flight in days (converted with DAY2SEC in solve());
        # dep_t: departure epoch in mjd (see pk.epoch call in solve());
        # dep_m: spacecraft mass at departure -- presumably kg, consistent
        # with the Tsiolkovsky update in inspect(); confirm against constants.
        self.dT = leg_dT
        self.dep_ast = dep_ast
        self.arr_ast = arr_ast
        self.dep_t = dep_t
        self.arr_t = self.dep_t + self.dT
        self.dep_m = dep_m
        # solve() returns None on failure (Barker rejection), in which case
        # fail() has already set the instance's state and we skip the rest.
        s = self.solve(*args, **kwargs)
        if s is not None:
            self.select(*(s + args), **kwargs)
            self.inspect(*args, **kwargs)
    def get_value(self):
        "Key value determining the instance's solution quality."
        return self.dV
    def solve(self, *args, validate_barker=True, verbose=False, **kwargs):
        """Solves Lambert's problem for the requested transfer.

        Returns (lambert_problem, v_dep_body, v_arr_body), or None if the
        transfer is rejected by the Barker pre-check (fail() is then called).
        """
        # departure and arrival epochs
        dep_t = pk.epoch(self.dep_t, 'mjd')
        arr_t = pk.epoch(self.arr_t, 'mjd')
        # departure and arrival positions & velocities
        # /-> (could obtain `dep_ast_eph` in `lambert_optimize_dt` and pass it as
        # | argument to avoid having it being repeatedly calculated here)
        # r1, v1 = self.dep_ast.eph(dep_t) if dep_ast_eph is None else dep_ast_eph
        r1, v1 = self.dep_ast.eph(dep_t)
        r2, v2 = self.arr_ast.eph(arr_t)
        # Barker equation used to skip useless Lambert computations:
        # transfer times below the parabolic minimum cannot be achieved
        # https://en.wikipedia.org/wiki/Parabolic_trajectory#Barker.27s_equation
        if validate_barker and self.dT < pk.barker(r1, r2, MU_SUN) * SEC2DAY:
            if verbose:
                print(self.dT, 'Fails Barker:',
                      self.dT, pk.barker(r1, r2, MU_SUN) * SEC2DAY)
            self.fail()
            return None
        l = pk.lambert_problem(r1, r2, self.dT * DAY2SEC, MU_SUN)
        # don't compute any multi-rev solutions:
        #l = pk.lambert_problem(r1, r2, self.dT * DAY2SEC, MU_SUN, False, 0)
        return l, v1, v2
    def select(self, lamb_sol, v_body1, v_body2, *args, **kwargs):
        """
        Selects one of the Lambert's problem solutions
        (in case multiple revolution solutions were found).
        Selection criterion: solution with the smallest dV.
        """
        # get, per solution, the spacecraft's velocity at each body
        v1sc = lamb_sol.get_v1()
        v2sc = lamb_sol.get_v2()
        # determine each solution's dV (Euclidean norm of the velocity
        # difference at each endpoint, summed)
        solutions = []
        for v1, v2 in zip(v1sc, v2sc):
            dV1 = sqrt(sum((a - b) * (a - b) for (a, b) in zip(v_body1, v1)))
            dV2 = sqrt(sum((a - b) * (a - b) for (a, b) in zip(v_body2, v2)))
            solutions.append((dV1 + dV2, v1, v2))
        # pick the solution with smallest dV, and log the spacecraft's
        # velocities at each body (tuple comparison: dV decides first)
        self.dV, *self.v_sc = min(solutions)
    def inspect(self, validate_acc=True, verbose=False, *args, **kwargs):
        "Validation and post-processing of the selected solution."
        if validate_acc:
            # feasibility check on the acceleration: the impulsive dV spread
            # over the leg duration must be achievable by the thruster
            leg_accel = self.dV / (self.dT * DAY2SEC)
            max_accel = thrust_tol * T_max / self.dep_m
            if leg_accel >= max_accel:
                if verbose:
                    print(self.dT, 'Fails Accel.:', leg_accel, max_accel)
                self.fail()
                return
        self.feasible = True
        # get the arrival mass (kg), given mass at departure (kg) and dV (m/s)
        # https://en.wikipedia.org/wiki/Tsiolkovsky_rocket_equation
        self.arr_m = self.dep_m * exp(self.dV / (-I_sp * G0))
    def fail(self):
        """Signals the failure to identify a feasible solution.

        Uses a large finite dV penalty rather than inf, presumably so
        downstream arithmetic/sorting stays well-behaved -- see obj_value.
        """
        # self.dV = float('inf')
        self.dV = 100000.
        self.v_sc = None
        self.feasible = False
        self.arr_m = None
# def __lt__(self, other):
# return self.get_value() < other.get_value()
#
# def __eq__(self, other):
# return self.get_value() == other.get_value()
# ==================================== ## ==================================== #
def lambert_optimize_dt(dep_ast, arr_ast, dep_t, dep_m,
                        leg_dT=None, leg_dT_bounds=None, nr_evals=50,
                        obj_fun=lambert_eval, grid=True,
                        random_pareto=False, random=None,
                        **kwargs):
    """
    Find the transfer time between two given asteroids that optimizes the
    given objective function.

    Parameters
    -----------
    dep_ast : departure asteroid
    arr_ast : arrival asteroid
    dep_t : time of departure (mjd) from asteroid `dep_ast`
    dep_m : spacecraft's mass (kg) at departure
    leg_dT : an exact leg_dT to be used in the Lambert arc
        (if specified, no optimization is then performed over leg_dT)
    leg_dT_bounds : bounds on the time of flight (used if `leg_dT=None`)
    nr_evals : number of solutions to evaluate
        (an exact amount if doing grid search; an upper bound if optimizing)
    obj_fun : objective function that creates & evaluates Lambert arcs
        (goal: minimization)
    grid : if True, finds best leg_dT over an evenly spaced grid of options;
        if False, optimizes leg_dT with minimize_scalar()
    random_pareto : whether to choose the transfer time by picking a random
        solution from the Pareto front of grid solutions (requires `grid=True`)
    random : random number generator to use if `random_pareto=True`
        (for the call to be reproducible, pass a seeded generator)
    **kwargs : extra arguments to be sent to `obj_fun`

    Returns
    -------
    The `obj_fun` instance built for the chosen transfer time.
    """
    assert (leg_dT_bounds is None) ^ (leg_dT is None), 'One (and only one) of' \
        ' leg_dT_bounds or leg_dT must be specified.'
    assert not random_pareto or (random_pareto and grid), 'random_pareto=True' \
        ' requires grid=True.'

    if leg_dT is not None:
        # a fixed transfer time degenerates into a single-point grid search
        grid = True
        random_pareto = False

    # wrap the object created by `obj_fun` in an `obj_value` instance, making
    # the objective value under optimization more accessible and simpler to
    # handle in comparisons and arithmetic
    evaluate = partial(obj_value, obj_fun)
    # positional tail forwarded to `evaluate` on every call
    eval_args = (dep_ast, arr_ast, dep_t, dep_m, kwargs)

    if not grid:
        # minimize scalar function of one variable
        # https://docs.scipy.org/doc/scipy/reference/optimize.html
        return minimize_scalar(evaluate, args=eval_args,
                               method='bounded', bounds=leg_dT_bounds,
                               options=dict(maxiter=nr_evals)
                               ).fun.obj

    # evaluate either the single requested transfer time, or `nr_evals`
    # evenly spaced candidates between the leg dT bounds
    if leg_dT is None:
        candidate_dts = np.linspace(*leg_dT_bounds, num=nr_evals)
    else:
        candidate_dts = [leg_dT]
    evaluations = [evaluate(dt, *eval_args) for dt in candidate_dts]

    if not random_pareto:
        # grid point with the minimal cost value
        return min(evaluations).obj

    # pick a random point from the Pareto front of trade-offs between
    # transfer time and leg cost (infeasible legs are excluded)
    feasible = [e.obj for e in evaluations if e.obj.feasible]
    if feasible == []:
        return evaluations[0].obj
    trade_offs = [(sol.dT, sol.get_value()) for sol in feasible]
    pf = pareto_front(trade_offs)
    return feasible[random.choice(pf)]
| |
# -*- coding: utf-8 -*-
"""
Maximum flow (and minimum cut) algorithms on capacitated graphs.
"""
import networkx as nx
# Define the default flow function for computing maximum flow.
from .edmondskarp import edmonds_karp
from .fordfulkerson import ford_fulkerson
from .preflowpush import preflow_push
from .shortestaugmentingpath import shortest_augmenting_path
from .utils import build_flow_dict
default_flow_func = preflow_push
__all__ = ['maximum_flow',
'maximum_flow_value',
'minimum_cut',
'minimum_cut_value']
def maximum_flow(G, s, t, capacity='capacity', flow_func=None, **kwargs):
    """Find a maximum single-commodity flow.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. See below for
        alternative algorithms. The choice of the default function may change
        from version to version and should not be relied on. Default value:
        None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    flow_dict : dict
        A dictionary containing the value of the flow that went through
        each edge.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`ford_fulkerson`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function used in the flow_func parameter has to return a residual
    network that follows NetworkX conventions:

    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Specific algorithms may store extra data in :samp:`R`.

    The function should support an optional boolean parameter value_only. When
    True, it can optionally terminate the algorithm as soon as the maximum flow
    value and the minimum cut can be determined.

    The legacy :meth:`ford_fulkerson` maximum flow implementation doesn't
    follow these conventions but it is supported as a valid flow_func.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity=3.0)
    >>> G.add_edge('x','b', capacity=1.0)
    >>> G.add_edge('a','c', capacity=3.0)
    >>> G.add_edge('b','c', capacity=5.0)
    >>> G.add_edge('b','d', capacity=4.0)
    >>> G.add_edge('d','e', capacity=2.0)
    >>> G.add_edge('c','y', capacity=2.0)
    >>> G.add_edge('e','y', capacity=3.0)

    maximum_flow returns both the value of the maximum flow and a
    dictionary with all flows.

    >>> flow_value, flow_dict = nx.maximum_flow(G, 'x', 'y')
    >>> flow_value
    3.0
    >>> print(flow_dict['x']['b'])
    1.0

    You can also use alternative algorithms for computing the
    maximum flow by using the flow_func parameter.

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> flow_value == nx.maximum_flow(G, 'x', 'y',
    ...                               flow_func=shortest_augmenting_path)[0]
    True

    """
    # Resolve the algorithm; extra kwargs only make sense when the caller
    # explicitly chose a flow_func that accepts them.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError("You have to explicitly set a flow_func if"
                                   " you need to pass parameters via kwargs.")
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")
    # The legacy ford_fulkerson stores the flow dict on the residual network;
    # the modern algorithms require building it from R afterwards.
    if flow_func is ford_fulkerson:
        R = flow_func(G, s, t, capacity=capacity)
        flow_dict = R.graph['flow_dict']
    else:
        R = flow_func(G, s, t, capacity=capacity, value_only=False, **kwargs)
        flow_dict = build_flow_dict(G, R)
    return (R.graph['flow_value'], flow_dict)
def maximum_flow_value(G, s, t, capacity='capacity', flow_func=None, **kwargs):
    """Find the value of maximum single-commodity flow.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. See below for
        alternative algorithms. The choice of the default function may change
        from version to version and should not be relied on. Default value:
        None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    flow_value : integer, float
        Value of the maximum flow, i.e., net outflow from the source.

    Raises
    ------
    NetworkXError
        The algorithm does not support MultiGraph and MultiDiGraph. If
        the input graph is an instance of one of these two classes, a
        NetworkXError is raised.

    NetworkXUnbounded
        If the graph has a path of infinite capacity, the value of a
        feasible flow on the graph is unbounded above and the function
        raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`minimum_cut`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`ford_fulkerson`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function used in the flow_func parameter has to return a residual
    network that follows NetworkX conventions:

    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Specific algorithms may store extra data in :samp:`R`.

    The function should support an optional boolean parameter value_only. When
    True, it can optionally terminate the algorithm as soon as the maximum flow
    value and the minimum cut can be determined.

    The legacy :meth:`ford_fulkerson` maximum flow implementation doesn't
    follow these conventions but it is supported as a valid flow_func.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity=3.0)
    >>> G.add_edge('x','b', capacity=1.0)
    >>> G.add_edge('a','c', capacity=3.0)
    >>> G.add_edge('b','c', capacity=5.0)
    >>> G.add_edge('b','d', capacity=4.0)
    >>> G.add_edge('d','e', capacity=2.0)
    >>> G.add_edge('c','y', capacity=2.0)
    >>> G.add_edge('e','y', capacity=3.0)

    maximum_flow_value computes only the value of the
    maximum flow:

    >>> flow_value = nx.maximum_flow_value(G, 'x', 'y')
    >>> flow_value
    3.0

    You can also use alternative algorithms for computing the
    maximum flow by using the flow_func parameter.

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> flow_value == nx.maximum_flow_value(G, 'x', 'y',
    ...                                     flow_func=shortest_augmenting_path)
    True

    """
    # Resolve the algorithm; extra kwargs only make sense when the caller
    # explicitly chose a flow_func that accepts them.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError("You have to explicitly set a flow_func if"
                                   " you need to pass parameters via kwargs.")
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")
    # value_only=True lets modern algorithms stop early; the legacy
    # ford_fulkerson does not accept that parameter.
    if flow_func is ford_fulkerson:
        R = flow_func(G, s, t, capacity=capacity)
    else:
        R = flow_func(G, s, t, capacity=capacity, value_only=True, **kwargs)
    return R.graph['flow_value']
def minimum_cut(G, s, t, capacity='capacity', flow_func=None, **kwargs):
    """Compute the value and the node partition of a minimum (s, t)-cut.

    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
    capacity cut is equal to the flow value of a maximum flow.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. See below for
        alternative algorithms. The choice of the default function may change
        from version to version and should not be relied on. Default value:
        None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    partition : pair of node sets
        A partitioning of the nodes that defines a minimum cut.

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut_value`
    :meth:`edmonds_karp`
    :meth:`ford_fulkerson`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function used in the flow_func parameter has to return a residual
    network that follows NetworkX conventions:

    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Specific algorithms may store extra data in :samp:`R`.

    The function should support an optional boolean parameter value_only. When
    True, it can optionally terminate the algorithm as soon as the maximum flow
    value and the minimum cut can be determined.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity = 3.0)
    >>> G.add_edge('x','b', capacity = 1.0)
    >>> G.add_edge('a','c', capacity = 3.0)
    >>> G.add_edge('b','c', capacity = 5.0)
    >>> G.add_edge('b','d', capacity = 4.0)
    >>> G.add_edge('d','e', capacity = 2.0)
    >>> G.add_edge('c','y', capacity = 2.0)
    >>> G.add_edge('e','y', capacity = 3.0)

    minimum_cut computes both the value of the
    minimum cut and the node partition:

    >>> cut_value, partition = nx.minimum_cut(G, 'x', 'y')
    >>> reachable, non_reachable = partition

    'partition' here is a tuple with the two sets of nodes that define
    the minimum cut. You can compute the cut set of edges that induce
    the minimum cut as follows:

    >>> cutset = set()
    >>> for u, nbrs in ((n, G[n]) for n in reachable):
    ...     cutset.update((u, v) for v in nbrs if v in non_reachable)
    >>> print(sorted(cutset))
    [('c', 'y'), ('x', 'b')]
    >>> cut_value == sum(G.edge[u][v]['capacity'] for (u, v) in cutset)
    True

    You can also use alternative algorithms for computing the
    minimum cut by using the flow_func parameter.

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> cut_value == nx.minimum_cut(G, 'x', 'y',
    ...                             flow_func=shortest_augmenting_path)[0]
    True

    """
    # Resolve the algorithm; extra kwargs only make sense when the caller
    # explicitly chose a flow_func that accepts them.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError("You have to explicitly set a flow_func if"
                                   " you need to pass parameters via kwargs.")
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")
    # A cutoff would stop the flow computation before the true maximum is
    # reached, invalidating the min-cut derived from it.
    if (kwargs.get('cutoff') is not None and
        flow_func in (edmonds_karp, ford_fulkerson, preflow_push,
                      shortest_augmenting_path)):
        raise nx.NetworkXError("cutoff should not be specified.")
    if flow_func is ford_fulkerson:
        R = flow_func(G, s, t, capacity=capacity)
        # legacy always removes saturated edges
        cutset = None
    else:
        R = flow_func(G, s, t, capacity=capacity, value_only=True, **kwargs)
        # Remove saturated edges from the residual network
        cutset = [(u, v, d) for u, v, d in R.edges(data=True)
                  if d['flow'] == d['capacity']]
        R.remove_edges_from(cutset)
    # Then, reachable and non reachable nodes from source in the
    # residual network form the node partition that defines
    # the minimum cut.
    non_reachable = set(nx.shortest_path_length(R, target=t))
    partition = (set(G) - non_reachable, non_reachable)
    # Finally, add the cutset edges back to the residual network to make
    # sure that it is reusable.
    if cutset is not None:
        R.add_edges_from(cutset)
    return (R.graph['flow_value'], partition)
def minimum_cut_value(G, s, t, capacity='capacity', flow_func=None, **kwargs):
    """Compute the value of a minimum (s, t)-cut.

    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
    capacity cut is equal to the flow value of a maximum flow.

    Parameters
    ----------
    G : NetworkX graph
        Edges of the graph are expected to have an attribute called
        'capacity'. If this attribute is not present, the edge is
        considered to have infinite capacity.

    s : node
        Source node for the flow.

    t : node
        Sink node for the flow.

    capacity : string
        Edges of the graph G are expected to have an attribute capacity
        that indicates how much flow the edge can support. If this
        attribute is not present, the edge is considered to have
        infinite capacity. Default value: 'capacity'.

    flow_func : function
        A function for computing the maximum flow among a pair of nodes
        in a capacitated graph. The function has to accept at least three
        parameters: a Graph or Digraph, a source node, and a target node.
        And return a residual network that follows NetworkX conventions
        (see Notes). If flow_func is None, the default maximum
        flow function (:meth:`preflow_push`) is used. See below for
        alternative algorithms. The choice of the default function may change
        from version to version and should not be relied on. Default value:
        None.

    kwargs : Any other keyword parameter is passed to the function that
        computes the maximum flow.

    Returns
    -------
    cut_value : integer, float
        Value of the minimum cut.

    Raises
    ------
    NetworkXUnbounded
        If the graph has a path of infinite capacity, all cuts have
        infinite capacity and the function raises a NetworkXUnbounded.

    See also
    --------
    :meth:`maximum_flow`
    :meth:`maximum_flow_value`
    :meth:`minimum_cut`
    :meth:`edmonds_karp`
    :meth:`ford_fulkerson`
    :meth:`preflow_push`
    :meth:`shortest_augmenting_path`

    Notes
    -----
    The function used in the flow_func parameter has to return a residual
    network that follows NetworkX conventions:

    The residual network :samp:`R` from an input graph :samp:`G` has the
    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
    in :samp:`G`.

    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
    in :samp:`G` or zero otherwise. If the capacity is infinite,
    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
    that does not affect the solution of the problem. This value is stored in
    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.

    The flow value, defined as the total flow into :samp:`t`, the sink, is
    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
    only edges :samp:`(u, v)` such that
    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
    :samp:`s`-:samp:`t` cut.

    Specific algorithms may store extra data in :samp:`R`.

    The function should support an optional boolean parameter value_only. When
    True, it can optionally terminate the algorithm as soon as the maximum flow
    value and the minimum cut can be determined.

    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.DiGraph()
    >>> G.add_edge('x','a', capacity = 3.0)
    >>> G.add_edge('x','b', capacity = 1.0)
    >>> G.add_edge('a','c', capacity = 3.0)
    >>> G.add_edge('b','c', capacity = 5.0)
    >>> G.add_edge('b','d', capacity = 4.0)
    >>> G.add_edge('d','e', capacity = 2.0)
    >>> G.add_edge('c','y', capacity = 2.0)
    >>> G.add_edge('e','y', capacity = 3.0)

    minimum_cut_value computes only the value of the
    minimum cut:

    >>> cut_value = nx.minimum_cut_value(G, 'x', 'y')
    >>> cut_value
    3.0

    You can also use alternative algorithms for computing the
    minimum cut by using the flow_func parameter.

    >>> from networkx.algorithms.flow import shortest_augmenting_path
    >>> cut_value == nx.minimum_cut_value(G, 'x', 'y',
    ...                                   flow_func=shortest_augmenting_path)
    True

    """
    # Resolve the algorithm; extra kwargs only make sense when the caller
    # explicitly chose a flow_func that accepts them.
    if flow_func is None:
        if kwargs:
            raise nx.NetworkXError("You have to explicitly set a flow_func if"
                                   " you need to pass parameters via kwargs.")
        flow_func = default_flow_func
    if not callable(flow_func):
        raise nx.NetworkXError("flow_func has to be callable.")
    # A cutoff would stop the flow computation before the true maximum is
    # reached, invalidating the min-cut value derived from it.
    if (kwargs.get('cutoff') is not None and
        flow_func in (edmonds_karp, ford_fulkerson, preflow_push,
                      shortest_augmenting_path)):
        raise nx.NetworkXError("cutoff should not be specified.")
    # By max-flow min-cut duality, the flow value is the cut value.
    if flow_func is ford_fulkerson:
        R = flow_func(G, s, t, capacity=capacity)
    else:
        R = flow_func(G, s, t, capacity=capacity, value_only=True, **kwargs)
    return R.graph['flow_value']
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Core iPOPO implementation
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import copy
import inspect
import logging
import threading
# Pelix
from pelix.constants import SERVICE_ID, BundleActivator
from pelix.framework import Bundle, BundleException
from pelix.internals.events import BundleEvent, ServiceEvent
from pelix.utilities import add_listener, remove_listener, is_string
# iPOPO constants
import pelix.ipopo.constants as constants
import pelix.ipopo.handlers.constants as handlers_const
# iPOPO beans
from pelix.ipopo.contexts import FactoryContext, ComponentContext
from pelix.ipopo.instance import StoredInstance
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Prepare the module logger
_logger = logging.getLogger("ipopo.core")
# Built-in handlers, automatically installed
# (bundle names installed and started by the activator; each one registers a
# handler factory service picked up by _IPopoService)
BUILTIN_HANDLERS = ('pelix.ipopo.handlers.properties',
                    'pelix.ipopo.handlers.provides',
                    'pelix.ipopo.handlers.requires',
                    'pelix.ipopo.handlers.requiresbest',
                    'pelix.ipopo.handlers.requiresmap',
                    'pelix.ipopo.handlers.requiresvarfilter',
                    'pelix.ipopo.handlers.temporal')
# ------------------------------------------------------------------------------
def _set_factory_context(factory_class, bundle_context):
    """
    Transforms the context data dictionary into its FactoryContext object form.

    :param factory_class: A manipulated class
    :param bundle_context: The class bundle context
    :return: The factory context, None on error
    """
    try:
        # Factory context is stored on the class by the iPOPO decorators
        factory_ctx = getattr(factory_class, constants.IPOPO_FACTORY_CONTEXT)
    except AttributeError:
        # Class was not manipulated, or the manipulation went wrong
        return None

    if factory_ctx.completed:
        # Bind the factory context to its bundle and hand it back
        factory_ctx.set_bundle_context(bundle_context)
        return factory_ctx

    # Partial context: the class manipulation did not finish
    return None
def _load_bundle_factories(bundle):
    """
    Retrieves a list of pairs (FactoryContext, factory class) with all
    readable manipulated classes found in the bundle.

    :param bundle: A Bundle object
    :return: The list of factories loaded from the bundle
    """
    factories = []
    # The Python module backing the bundle, and the bundle's own context
    module = bundle.get_module()
    bundle_context = bundle.get_bundle_context()

    # Walk every class defined in the module
    for _, candidate in inspect.getmembers(module, inspect.isclass):
        if inspect.getmodule(candidate) is not module:
            # Skip classes merely imported from other modules
            continue

        context = _set_factory_context(candidate, bundle_context)
        if context is not None:
            # Valid manipulated class: keep it with its factory context
            factories.append((context, candidate))

    return factories
# ------------------------------------------------------------------------------
class _IPopoService(object):
    """
    The iPOPO registry and service

    Keeps two registries — component factories (manipulated classes) and
    component instances (StoredInstance objects) — plus a queue of instances
    waiting for a missing handler factory. Reacts to bundle events and to
    handler-factory service events to keep everything consistent.
    """
    def __init__(self, bundle_context):
        """
        Sets up the iPOPO registry
        :param bundle_context: The iPOPO bundle context
        """
        # Store the bundle context
        self.__context = bundle_context
        # Factories registry : name -> factory class
        self.__factories = {}
        # Instances registry : name -> StoredInstance object
        self.__instances = {}
        # Event listeners
        self.__listeners = []
        # Auto-restarted components (Bundle -> [(factory, name, properties)])
        self.__auto_restart = {}
        # Service state
        self.running = False
        # Registries locks (reentrant: several public methods call each other
        # while holding them, e.g. unregister_factory() -> kill())
        self.__factories_lock = threading.RLock()
        self.__instances_lock = threading.RLock()
        self.__listeners_lock = threading.RLock()
        self.__handlers_lock = threading.RLock()
        # Handlers factories
        self._handlers_refs = set()
        self._handlers = {}
        # Instances waiting for a handler: Name -> (ComponentContext, instance)
        self.__waiting_handlers = {}
        # Register the service listener before scanning, so a handler factory
        # registered in between is not missed
        bundle_context.add_service_listener(
            self, None, handlers_const.SERVICE_IPOPO_HANDLER_FACTORY)
        self.__find_handler_factories()
    def __find_handler_factories(self):
        """
        Finds all registered handler factories and stores them
        """
        # Get the references
        svc_refs = self.__context.get_all_service_references(
            handlers_const.SERVICE_IPOPO_HANDLER_FACTORY)
        if svc_refs:
            for svc_ref in svc_refs:
                # Store each handler factory
                self.__add_handler_factory(svc_ref)
    def __add_handler_factory(self, svc_ref):
        """
        Stores a new handler factory
        :param svc_ref: ServiceReference of the new handler factory
        """
        with self.__handlers_lock:
            # Get the handler ID
            handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
            if handler_id in self._handlers:
                # Duplicated ID
                _logger.warning("Already registered handler ID: %s",
                                handler_id)
            else:
                # Store the service
                self._handlers_refs.add(svc_ref)
                self._handlers[handler_id] = \
                    self.__context.get_service(svc_ref)
                # Try to instantiate waiting components, now that a new
                # handler is available
                succeeded = set()
                for name, (context, instance) \
                        in self.__waiting_handlers.items():
                    if self.__try_instantiate(context, instance):
                        succeeded.add(name)
                # Remove instantiated component from the waiting list
                # (done after the loop to avoid mutation during iteration)
                for name in succeeded:
                    del self.__waiting_handlers[name]
    def __remove_handler_factory(self, svc_ref):
        """
        Removes a handler factory
        :param svc_ref: ServiceReference of the handler factory to remove
        """
        with self.__handlers_lock:
            # Get the handler ID
            handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
            # Check if this is the handler we use
            if svc_ref not in self._handlers_refs:
                return
            # Clean up
            self.__context.unget_service(svc_ref)
            self._handlers_refs.remove(svc_ref)
            del self._handlers[handler_id]
            # List the components using this handler
            to_stop = set()
            for factory_name in self.__factories:
                _, context = self.__get_factory_with_context(factory_name)
                if handler_id in context.get_handlers_ids():
                    to_stop.update(self.__get_stored_instances(factory_name))
            with self.__instances_lock:
                for stored_instance in to_stop:
                    # Extract information
                    context = stored_instance.context
                    name = context.name
                    instance = stored_instance.instance
                    # Clean up the stored instance (iPOPO side)
                    del self.__instances[name]
                    stored_instance.kill()
                    # Add the component to the waiting queue
                    self.__waiting_handlers[name] = (context, instance)
            # Try to find a new handler factory with the same ID, which can
            # immediately revive the components just queued
            new_ref = self.__context.get_service_reference(
                handlers_const.SERVICE_IPOPO_HANDLER_FACTORY,
                "({0}={1})".format(handlers_const.PROP_HANDLER_ID, handler_id))
            if new_ref is not None:
                self.__add_handler_factory(new_ref)
    def __get_factory_with_context(self, factory_name):
        """
        Retrieves the factory registered with the given name and its factory
        context
        :param factory_name: The name of the factory
        :return: A (factory, context) tuple
        :raise TypeError: Unknown factory, or factory not manipulated
        """
        factory = self.__factories.get(factory_name)
        if factory is None:
            raise TypeError("Unknown factory '{0}'"
                            .format(factory_name))
        # Get the factory context
        factory_context = getattr(factory, constants.IPOPO_FACTORY_CONTEXT,
                                  None)
        if factory_context is None:
            raise TypeError("Factory context missing in '{0}'"
                            .format(factory_name))
        return factory, factory_context
    def __get_handler_factories(self, handlers_ids):
        """
        Returns the list of Handler Factories for the given Handlers IDs.
        Raises a KeyError exception if a handler factory is missing.
        :param handlers_ids: List of handlers IDs
        :return: A set of handler factory services
        :raise KeyError: A handler is missing
        """
        # Look for the required handlers
        return {self._handlers[handler_id] for handler_id in handlers_ids}
    def __get_stored_instances(self, factory_name):
        """
        Retrieves the list of all stored instances objects corresponding to
        the given factory name
        :param factory_name: A factory name
        :return: All components instantiated from the given factory
        """
        with self.__instances_lock:
            return [stored_instance
                    for stored_instance in self.__instances.values()
                    if stored_instance.factory_name == factory_name]
    def __try_instantiate(self, component_context, instance):
        """
        Instantiates a component, if all of its handlers are there. Returns
        False if a handler is missing.
        :param component_context: A ComponentContext bean
        :param instance: The component instance
        :return: True if the component has started,
                 False if a handler is missing
        """
        with self.__instances_lock:
            # Extract information about the component
            factory_context = component_context.factory_context
            handlers_ids = factory_context.get_handlers_ids()
            name = component_context.name
            factory_name = factory_context.name
            try:
                # Get handlers
                handler_factories = self.__get_handler_factories(handlers_ids)
            except KeyError:
                # A handler is missing, stop here
                return False
            # Instantiate the handlers
            all_handlers = set()
            for handler_factory in handler_factories:
                handlers = handler_factory.get_handlers(component_context,
                                                        instance)
                if handlers:
                    all_handlers.update(handlers)
            # Prepare the stored instance
            stored_instance = StoredInstance(self, component_context, instance,
                                             all_handlers)
            # Manipulate the properties
            for handler in all_handlers:
                handler.manipulate(stored_instance, instance)
            # Store the instance
            self.__instances[name] = stored_instance
            # Start the manager
            stored_instance.start()
            # Notify listeners now that every thing is ready to run
            self._fire_ipopo_event(constants.IPopoEvent.INSTANTIATED,
                                   factory_name, name)
            # Try to validate it
            stored_instance.update_bindings()
            stored_instance.check_lifecycle()
            return True
    def _autorestart_store_components(self, bundle):
        """
        Stores the components of the given bundle with the auto-restart
        property
        :param bundle: A Bundle object
        """
        with self.__instances_lock:
            # Prepare the list of components
            store = self.__auto_restart.setdefault(bundle, [])
            for stored_instance in self.__instances.values():
                # Get the factory name
                factory = stored_instance.factory_name
                if self.get_factory_bundle(factory) is bundle:
                    # Factory from this bundle
                    # Test component properties
                    properties = stored_instance.context.properties
                    if properties.get(constants.IPOPO_AUTO_RESTART):
                        # Auto-restart property found
                        store.append((factory, stored_instance.name,
                                      properties))
    def _autorestart_components(self, bundle):
        """
        Restart the components of the given bundle
        :param bundle: A Bundle object
        """
        with self.__instances_lock:
            instances = self.__auto_restart.get(bundle)
            if not instances:
                # Nothing to do
                return
            for factory, name, properties in instances:
                try:
                    # Instantiate the given component
                    self.instantiate(factory, name, properties)
                except Exception as ex:
                    # Log error, but continue to work: one failing component
                    # must not block the restart of the others
                    _logger.exception("Error restarting component '%s' ('%s')"
                                      "from bundle %s (%d): %s", name, factory,
                                      bundle.get_symbolic_name(),
                                      bundle.get_bundle_id(), ex)
    def _autorestart_clear_components(self, bundle):
        """
        Clear the list of auto-restart components of the given bundle
        :param bundle: A Bundle object
        """
        with self.__instances_lock:
            # Simply delete the entry, if any
            try:
                del self.__auto_restart[bundle]
            except KeyError:
                pass
    def _fire_ipopo_event(self, kind, factory_name, instance_name=None):
        """
        Triggers an iPOPO event
        :param kind: Kind of event
        :param factory_name: Name of the factory associated to the event
        :param instance_name: Name of the component instance associated to the
                              event
        """
        with self.__listeners_lock:
            # Use a copy of the list of listeners
            listeners = self.__listeners[:]
        for listener in listeners:
            try:
                listener.handle_ipopo_event(
                    constants.IPopoEvent(kind, factory_name, instance_name))
            except:
                # A broken listener must never break iPOPO: log and continue
                _logger.exception("Error calling an iPOPO event handler")
    def _prepare_instance_properties(self, properties, factory_properties):
        """
        Prepares the properties of a component instance, based on its
        configuration, factory and framework properties
        :param properties: Component instance properties
        :param factory_properties: Component factory "default" properties
        :return: The merged properties
        """
        # Normalize given properties
        if properties is None or not isinstance(properties, dict):
            properties = {}
        # Use framework properties to fill missing ones
        framework = self.__context.get_bundle(0)
        for property_name in factory_properties:
            if property_name not in properties:
                # Missing property
                value = framework.get_property(property_name)
                if value is not None:
                    # Set the property value
                    properties[property_name] = value
        return properties
    def _register_bundle_factories(self, bundle):
        """
        Registers all factories found in the given bundle
        :param bundle: A bundle
        """
        assert isinstance(bundle, Bundle)
        # Load the bundle factories
        factories = _load_bundle_factories(bundle)
        for context, factory_class in factories:
            try:
                # Register each found factory
                self._register_factory(context.name, factory_class, False)
            except ValueError as ex:
                # Already known factory
                _logger.error("Cannot register factory '%s' of bundle %d (%s):"
                              " %s", context.name, bundle.get_bundle_id(),
                              bundle.get_symbolic_name(), ex)
                _logger.error("class: %s -- module: %s", factory_class,
                              factory_class.__module__)
            else:
                # Instantiate components declared by the @Instantiate
                # decorator on the factory
                for name, properties in context.get_instances().items():
                    self.instantiate(context.name, name, properties)
    def _register_factory(self, factory_name, factory, override):
        """
        Registers a component factory
        :param factory_name: The name of the factory
        :param factory: The factory class object
        :param override: If true, previous factory is overridden, else an
                         exception is raised if a previous factory with that
                         name already exists
        :raise ValueError: The factory name already exists or is invalid
        :raise TypeError: Invalid factory type
        """
        if not factory_name or not is_string(factory_name):
            raise ValueError("A factory name must be a non-empty string")
        if not inspect.isclass(factory):
            raise TypeError("Invalid factory class '{0}'"
                            .format(type(factory).__name__))
        with self.__factories_lock:
            if factory_name in self.__factories:
                if override:
                    _logger.info("Overriding factory '%s'", factory_name)
                else:
                    raise ValueError("'{0}' factory already exist"
                                     .format(factory_name))
            self.__factories[factory_name] = factory
            # Trigger an event
            self._fire_ipopo_event(constants.IPopoEvent.REGISTERED,
                                   factory_name)
    def _unregister_all_factories(self):
        """
        Unregisters all factories. This method should be called only after the
        iPOPO service has been unregistered (that's why it's not locked)
        """
        factories = list(self.__factories.keys())
        for factory_name in factories:
            self.unregister_factory(factory_name)
    def _unregister_bundle_factories(self, bundle):
        """
        Unregisters all factories of the given bundle
        :param bundle: A bundle
        """
        assert isinstance(bundle, Bundle)
        with self.__factories_lock:
            # Find out which factories must be removed
            to_remove = [factory_name
                         for factory_name in self.__factories
                         if self.get_factory_bundle(factory_name) is bundle]
            # Remove all of them
            for factory_name in to_remove:
                try:
                    self.unregister_factory(factory_name)
                except ValueError as ex:
                    _logger.warning("Error unregistering factory '%s': %s",
                                    factory_name, ex)
    def _stop(self):
        """
        iPOPO is stopping: clean everything up
        """
        # Running flag down
        self.running = False
        # Unregister the service listener
        self.__context.remove_service_listener(self)
        # Clean up handler factories usages
        # NOTE(review): the handler structures are guarded by
        # __handlers_lock everywhere else, but __instances_lock is taken
        # here — confirm this is intentional
        with self.__instances_lock:
            for svc_ref in self._handlers_refs:
                self.__context.unget_service(svc_ref)
            self._handlers.clear()
            self._handlers_refs.clear()
    def framework_stopping(self):
        """
        Called by the framework when it is about to stop
        """
        self._stop()
    def bundle_changed(self, event):
        """
        A bundle event has been triggered
        :param event: The bundle event
        """
        assert isinstance(event, BundleEvent)
        kind = event.get_kind()
        bundle = event.get_bundle()
        if kind == BundleEvent.STOPPING_PRECLEAN:
            # A bundle is gone, remove its factories after the deactivator has
            # been called. That way, the deactivator can kill manually started
            # components.
            self._unregister_bundle_factories(bundle)
        elif kind == BundleEvent.STARTED:
            # A bundle is staring, register its factories before its activator
            # is called. That way, the activator can use the registered
            # factories.
            self._register_bundle_factories(bundle)
        elif kind == BundleEvent.UPDATE_BEGIN:
            # A bundle will be updated, store its auto-restart component
            self._autorestart_store_components(bundle)
        elif kind == BundleEvent.UPDATED:
            # Update has finished, restart stored components
            self._autorestart_components(bundle)
            self._autorestart_clear_components(bundle)
        elif kind == BundleEvent.UPDATE_FAILED:
            # Update failed, clean the stored components
            self._autorestart_clear_components(bundle)
    def service_changed(self, event):
        """
        Called when a handler factory service is un/registered
        """
        # Call sub-methods
        kind = event.get_kind()
        svc_ref = event.get_service_reference()
        if kind == ServiceEvent.REGISTERED:
            # Service coming
            with self.__instances_lock:
                self.__add_handler_factory(svc_ref)
        elif kind == ServiceEvent.UNREGISTERING:
            # Service gone
            with self.__instances_lock:
                self.__remove_handler_factory(svc_ref)
    def instantiate(self, factory_name, name, properties=None):
        """
        Instantiates a component from the given factory, with the given name
        :param factory_name: Name of the component factory
        :param name: Name of the instance to be started
        :param properties: Initial properties of the component instance
        :return: The component instance
        :raise TypeError: The given factory is unknown
        :raise ValueError: The given name or factory name is invalid, or an
                           instance with the given name already exists
        :raise Exception: Something wrong occurred in the factory
        """
        # Test parameters
        if not factory_name or not is_string(factory_name):
            raise ValueError("Invalid factory name")
        if not name or not is_string(name):
            raise ValueError("Invalid component name")
        if not self.running:
            # Stop working if the framework is stopping
            raise ValueError("Framework is stopping")
        with self.__instances_lock:
            if name in self.__instances or name in self.__waiting_handlers:
                raise ValueError("'{0}' is an already running instance name"
                                 .format(name))
            with self.__factories_lock:
                # Can raise a TypeError exception
                factory, factory_context = \
                    self.__get_factory_with_context(factory_name)
                # Check if the factory is singleton and if a component is
                # already started
                if factory_context.is_singleton and \
                        factory_context.is_singleton_active:
                    raise ValueError("{0} is a singleton: {1} can't be "
                                     "instantiated."
                                     .format(factory_name, name))
                # Create component instance
                try:
                    instance = factory()
                except:
                    _logger.exception("Error creating the instance '%s' "
                                      "from factory '%s'", name, factory_name)
                    raise TypeError("Factory '{0}' failed to create '{1}'"
                                    .format(factory_name, name))
                # Instantiation succeeded: update singleton status
                if factory_context.is_singleton:
                    factory_context.is_singleton_active = True
            # Normalize the given properties
            properties = self._prepare_instance_properties(
                properties, factory_context.properties)
            # Set up the component instance context
            component_context = ComponentContext(factory_context, name,
                                                 properties)
            # Try to instantiate the component immediately
            if not self.__try_instantiate(component_context, instance):
                # A handler is missing, put the component in the queue
                self.__waiting_handlers[name] = (component_context, instance)
        return instance
    def retry_erroneous(self, name, properties_update=None):
        """
        Removes the ERRONEOUS state of the given component, and retries a
        validation
        :param name: Name of the component to retry
        :param properties_update: A dictionary to update the initial properties
                                  of the component
        :return: The new state of the component
        :raise ValueError: Invalid component name
        """
        with self.__instances_lock:
            try:
                stored_instance = self.__instances[name]
            except KeyError:
                raise ValueError(
                    "Unknown component instance '{0}'".format(name))
            else:
                return stored_instance.retry_erroneous(properties_update)
    def invalidate(self, name):
        """
        Invalidates the given component
        :param name: Name of the component to invalidate
        :raise ValueError: Invalid component name
        """
        with self.__instances_lock:
            try:
                stored_instance = self.__instances[name]
            except KeyError:
                raise ValueError(
                    "Unknown component instance '{0}'".format(name))
            else:
                # Call back the component during the invalidation
                stored_instance.invalidate(True)
    def is_registered_factory(self, name):
        """
        Tests if the given name is in the factory registry
        :param name: A factory name to be tested
        """
        with self.__factories_lock:
            return name in self.__factories
    def is_registered_instance(self, name):
        """
        Tests if the given name is in the instance registry or in the waiting
        queue
        NOTE(review): only the running-instances registry is actually checked
        here; a component still in the waiting queue returns False — confirm
        whether the waiting queue should be included
        :param name: A component name to be tested
        """
        with self.__instances_lock:
            return name in self.__instances
    def kill(self, name):
        """
        Kills the given component
        :param name: Name of the component to kill
        :raise ValueError: Invalid component name
        """
        if not name:
            raise ValueError("Name can't be None or empty")
        with self.__instances_lock:
            try:
                # Running instance
                stored_instance = self.__instances.pop(name)
                # Store the reference to the factory context
                factory_context = stored_instance.context.factory_context
                # Kill it
                stored_instance.kill()
                # Update the singleton state flag
                factory_context.is_singleton_active = False
            except KeyError:
                # Queued instance
                try:
                    # Extract the component context
                    context, _ = self.__waiting_handlers.pop(name)
                    # Update the singleton state flag
                    context.factory_context.is_singleton_active = False
                except KeyError:
                    raise ValueError("Unknown component instance '{0}'"
                                     .format(name))
    def register_factory(self, bundle_context, factory):
        """
        Registers a manually created factory, using decorators programmatically
        :param bundle_context: The factory bundle context
        :param factory: A manipulated class
        :return: True if the factory has been registered
        :raise ValueError: Invalid parameter, or factory already registered
        :raise TypeError: Invalid factory type (not a manipulated class)
        """
        if factory is None or bundle_context is None:
            # Invalid parameter, do nothing
            raise ValueError("Invalid parameter")
        context = _set_factory_context(factory, bundle_context)
        if not context:
            raise TypeError("Not a manipulated class (no context found)")
        self._register_factory(context.name, factory, False)
        return True
    def unregister_factory(self, factory_name):
        """
        Unregisters the given component factory
        :param factory_name: Name of the factory to unregister
        :return: True the factory has been removed, False if the factory is
                 unknown
        """
        if not factory_name or not is_string(factory_name):
            # Invalid name
            return False
        with self.__factories_lock:
            try:
                # Remove the factory from the registry
                factory_class = self.__factories.pop(factory_name)
            except KeyError:
                # Unknown factory
                return False
            # Trigger an event
            self._fire_ipopo_event(constants.IPopoEvent.UNREGISTERED,
                                   factory_name)
            # Invalidate and delete all components of this factory
            with self.__instances_lock:
                # Compute the list of __instances to remove
                to_remove = self.__get_stored_instances(factory_name)
                # Remove instances from the registry: avoids dependency
                # updates linking against a component from this factory again
                for instance in to_remove:
                    try:
                        # Kill the instance
                        self.kill(instance.name)
                    except ValueError:
                        # Unknown instance: already killed by the invalidation
                        # callback of a component killed in this loop
                        # => ignore
                        pass
                # Remove waiting component
                names = [name
                         for name, (context, _)
                         in self.__waiting_handlers.items()
                         if context.factory_context.name == factory_name]
                for name in names:
                    del self.__waiting_handlers[name]
            # Clear the bundle context of the factory
            _set_factory_context(factory_class, None)
        return True
    def add_listener(self, listener):
        """
        Register an iPOPO event listener.
        The event listener must have a method with the following prototype::
           def handle_ipopo_event(self, event):
               '''
               event: A IPopoEvent object
               '''
               # ...
        :param listener: The listener to register
        :return: True if the listener has been added to the registry
        """
        with self.__listeners_lock:
            return add_listener(self.__listeners, listener)
    def remove_listener(self, listener):
        """
        Unregister an iPOPO event listener.
        :param listener: The listener to unregister
        :return: True if the listener has been removed from the registry
        """
        with self.__listeners_lock:
            return remove_listener(self.__listeners, listener)
    def get_instances(self):
        """
        Retrieves the list of the currently registered component instances
        :return: A list of (name, factory name, state) tuples.
        """
        with self.__instances_lock:
            return sorted(
                (name, stored_instance.factory_name, stored_instance.state)
                for name, stored_instance in self.__instances.items())
    def get_waiting_components(self):
        """
        Returns the list of the instances waiting for their handlers
        :return: A list of (name, factory name, missing handlers) tuples
        """
        with self.__instances_lock:
            result = []
            for name, (context, _) in self.__waiting_handlers.items():
                # Compute missing handlers
                missing = set(context.factory_context.get_handlers_ids())
                missing.difference_update(self._handlers.keys())
                result.append((name, context.factory_context.name, missing))
            result.sort()
            return result
    def get_instance_details(self, name):
        """
        Retrieves a snapshot of the given component instance.
        The result dictionary has the following keys:
        * name: The component name
        * factory: The name of the component factory
        * bundle_id: The ID of the bundle providing the component factory
        * state: The current component state
        * services: A {Service ID -> Service reference} dictionary, with all
          services provided by the component
        * dependencies: A dictionary associating field names with the following
          dictionary:
          * handler: The name of the type of the dependency handler
          * filter (optional): The requirement LDAP filter
          * optional: A flag indicating whether the requirement is optional or
            not
          * aggregate: A flag indicating whether the requirement is a set of
            services or not
          * binding: A list of the ServiceReference the component is bound to
        * properties: A dictionary key -> value, with all properties of the
          component. The value is converted to its string representation, to
          avoid unexpected behaviors.
        :param name: The name of a component instance
        :return: A dictionary of details
        :raise ValueError: Invalid component name
        """
        if not is_string(name):
            raise ValueError("Component name must be a string")
        with self.__instances_lock:
            if name not in self.__instances:
                raise ValueError("Unknown component: {0}".format(name))
            stored_instance = self.__instances[name]
            assert isinstance(stored_instance, StoredInstance)
            with stored_instance._lock:
                result = {}
                result["name"] = stored_instance.name
                # Factory name
                result["factory"] = stored_instance.factory_name
                # Factory bundle
                result["bundle_id"] = \
                    stored_instance.bundle_context.get_bundle().get_bundle_id()
                # Component state
                result["state"] = stored_instance.state
                # Error details
                result["error_trace"] = stored_instance.error_trace
                # Provided service
                result["services"] = {}
                for handler in stored_instance.get_handlers(
                        handlers_const.KIND_SERVICE_PROVIDER):
                    svc_ref = handler.get_service_reference()
                    if svc_ref is not None:
                        svc_id = svc_ref.get_property(SERVICE_ID)
                        result["services"][svc_id] = svc_ref
                # Dependencies
                result["dependencies"] = {}
                for dependency in stored_instance.get_handlers(
                        handlers_const.KIND_DEPENDENCY):
                    # Dependency
                    info = result["dependencies"][dependency.get_field()] = {}
                    info["handler"] = type(dependency).__name__
                    # Requirement
                    req = dependency.requirement
                    info["specification"] = req.specification
                    info["filter"] = str(req.filter) if req.filter else None
                    info["optional"] = req.optional
                    info["aggregate"] = req.aggregate
                    # Bindings
                    info["bindings"] = dependency.get_bindings()
                # Properties
                properties = stored_instance.context.properties.items()
                result["properties"] = dict((str(key), str(value))
                                            for key, value in properties)
                # All done
                return result
    def get_factories(self):
        """
        Retrieves the names of the registered factories
        :return: A list of factories. Can be empty.
        """
        with self.__factories_lock:
            return sorted(self.__factories.keys())
    def get_factory_bundle(self, name):
        """
        Retrieves the Pelix Bundle object that registered the given factory
        :param name: The name of a factory
        :return: The Bundle that registered the given factory
        :raise ValueError: Invalid factory
        """
        with self.__factories_lock:
            try:
                factory = self.__factories[name]
            except KeyError:
                raise ValueError("Unknown factory '{0}'".format(name))
            else:
                # Bundle Context is stored in the Factory Context
                factory_context = getattr(
                    factory, constants.IPOPO_FACTORY_CONTEXT)
                return factory_context.bundle_context.get_bundle()
    def get_factory_details(self, name):
        """
        Retrieves a dictionary with details about the given factory
        * name: The factory name
        * bundle: The Bundle object of the bundle providing the factory
        * properties: Copy of the components properties defined by the factory
        * requirements: List of the requirements defined by the factory
          * id: Requirement ID (field where it is injected)
          * specification: Specification of the required service
          * aggregate: If True, multiple services will be injected
          * optional: If True, the requirement is optional
        * services: List of the specifications of the services provided by
          components of this factory
        * handlers: Dictionary of the non-built-in handlers required by this
          factory. The dictionary keys are handler IDs, and it contains a tuple
          with:
          * A copy of the configuration of the handler (0)
          * A flag indicating if the handler is present or not
        :param name: The name of a factory
        :return: A dictionary describing the factory
        :raise ValueError: Invalid factory
        """
        with self.__factories_lock:
            try:
                factory = self.__factories[name]
            except KeyError:
                raise ValueError("Unknown factory '{0}'".format(name))
            context = getattr(factory, constants.IPOPO_FACTORY_CONTEXT)
            assert isinstance(context, FactoryContext)
            result = {}
            # Factory name & bundle
            result["name"] = context.name
            result["bundle"] = context.bundle_context.get_bundle()
            # Configurable properties
            # Name -> Default value
            result["properties"] = {
                prop_name: context.properties.get(prop_name)
                for prop_name in context.properties_fields.values()}
            # Requirements (list of dictionaries)
            reqs = result["requirements"] = []
            handler_requires = context.get_handler(constants.HANDLER_REQUIRES)
            if handler_requires is not None:
                for field, requirement in handler_requires.items():
                    reqs.append({"id": field,
                                 "specification": requirement.specification,
                                 "aggregate": requirement.aggregate,
                                 "optional": requirement.optional,
                                 "filter": requirement.original_filter})
            # Provided services (list of list of specifications)
            handler_provides = context.get_handler(constants.HANDLER_PROVIDES)
            if handler_provides is not None:
                result["services"] = [
                    specs_controller[0]
                    for specs_controller in handler_provides]
            else:
                result["services"] = []
            # Other handlers
            handlers = set(context.get_handlers_ids())
            handlers.difference_update((constants.HANDLER_PROPERTY,
                                        constants.HANDLER_PROVIDES,
                                        constants.HANDLER_REQUIRES))
            result["handlers"] = {
                handler: copy.deepcopy(context.get_handler(handler))
                for handler in handlers}
            return result
# ------------------------------------------------------------------------------
@BundleActivator
class _IPopoActivator(object):
    """
    The iPOPO bundle activator for Pelix

    Installs the built-in handler bundles, registers the iPOPO service and
    wires it as a bundle and framework-stop listener.
    """
    def __init__(self):
        """
        Sets up the activator
        """
        # iPOPO service registration (set in start())
        self._registration = None
        # The _IPopoService instance
        self._service = None
        # Handler bundles installed in start(), uninstalled in stop()
        self._bundles = []
    def start(self, context):
        """
        The bundle has started
        :param context: The bundle context
        """
        # Automatically install handlers bundles
        for handler in BUILTIN_HANDLERS:
            try:
                bundle = context.install_bundle(handler)
                bundle.start()
                self._bundles.append(bundle)
            except BundleException as ex:
                # Best effort: a failing handler bundle does not prevent
                # iPOPO from starting
                _logger.error("Error installing handler %s: %s", handler, ex)
        # Register the iPOPO service
        self._service = _IPopoService(context)
        self._registration = context.register_service(constants.SERVICE_IPOPO,
                                                      self._service, {})
        # Register as a bundle listener
        context.add_bundle_listener(self._service)
        # Register the service as a framework stop listener
        context.add_framework_stop_listener(self._service)
        # Service enters in "run" mode; must be set before registering
        # factories, as instantiate() refuses to work while not running
        self._service.running = True
        # Get all factories
        for bundle in context.get_bundles():
            if bundle.get_state() == Bundle.ACTIVE:
                # Bundle is active, register its factories
                self._service._register_bundle_factories(bundle)
    def stop(self, context):
        """
        The bundle has stopped
        :param context: The bundle context
        """
        # The service is not in the "run" mode anymore
        self._service._stop()
        # Unregister the listener
        context.remove_bundle_listener(self._service)
        # Unregister the framework stop listener
        context.remove_framework_stop_listener(self._service)
        # Unregister the iPOPO service
        self._registration.unregister()
        # Clean up the service
        self._service._unregister_all_factories()
        # Remove handler bundles
        for bundle in self._bundles:
            bundle.uninstall()
        del self._bundles[:]
        # Clean up references
        self._registration = None
        self._service = None
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
product_bidding_category_constant,
)
from google.ads.googleads.v8.services.types import (
product_bidding_category_constant_service,
)
from .transports.base import (
ProductBiddingCategoryConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ProductBiddingCategoryConstantServiceGrpcTransport
class ProductBiddingCategoryConstantServiceClientMeta(type):
    """Metaclass for the ProductBiddingCategoryConstantService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[ProductBiddingCategoryConstantServiceTransport]]
    _transport_registry[
        "grpc"
    ] = ProductBiddingCategoryConstantServiceGrpcTransport
    def get_transport_class(
        cls, label: Optional[str] = None,
    ) -> Type[ProductBiddingCategoryConstantServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.

        Raises:
            KeyError: If ``label`` names a transport that is not registered.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class ProductBiddingCategoryConstantServiceClient(
    metaclass=ProductBiddingCategoryConstantServiceClientMeta
):
    """Service to fetch Product Bidding Categories."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a *.googleapis.com host: return as-is.
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "googleads.googleapis.com"
    # __func__ unwraps the staticmethod so it is callable at class-creation time.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ProductBiddingCategoryConstantServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(
            info
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ProductBiddingCategoryConstantServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Convenience alias kept for backward compatibility.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ProductBiddingCategoryConstantServiceTransport:
        """Return the transport used by the client instance.
        Returns:
            ProductBiddingCategoryConstantServiceTransport: The transport used by the client instance.
        """
        return self._transport
    @staticmethod
    def product_bidding_category_constant_path(
        country_code: str, level: str, id: str,
    ) -> str:
        """Return a fully-qualified product_bidding_category_constant string."""
        return "productBiddingCategoryConstants/{country_code}~{level}~{id}".format(
            country_code=country_code, level=level, id=id,
        )
    @staticmethod
    def parse_product_bidding_category_constant_path(
        path: str,
    ) -> Dict[str, str]:
        """Parse a product_bidding_category_constant path into its component segments."""
        m = re.match(
            r"^productBiddingCategoryConstants/(?P<country_code>.+?)~(?P<level>.+?)~(?P<id>.+?)$",
            path,
        )
        # Empty dict (not None) when the path does not match the template.
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Return a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Return a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Return a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Return a fully-qualified project string."""
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Return a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(
            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
        )
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[
            str, ProductBiddingCategoryConstantServiceTransport, None
        ] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the product bidding category constant service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.ProductBiddingCategoryConstantServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # strtobool accepts "true"/"false", "1"/"0", "yes"/"no", etc.
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
            )
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Caller supplied an explicit client certificate source.
                import grpc  # type: ignore

                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the environment's default client certificate,
                # if one is present.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(
            transport, ProductBiddingCategoryConstantServiceTransport
        ):
            # transport is a ProductBiddingCategoryConstantServiceTransport instance.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            # NOTE(review): this branch uses DEFAULT_ENDPOINT and ignores the
            # api_endpoint/ssl_credentials computed above — confirm intended.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = ProductBiddingCategoryConstantServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_product_bidding_category_constant(
        self,
        request: Optional[
            product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest
        ] = None,
        *,
        resource_name: Optional[str] = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> product_bidding_category_constant.ProductBiddingCategoryConstant:
        r"""Returns the requested Product Bidding Category in full detail.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Args:
            request (:class:`google.ads.googleads.v8.services.types.GetProductBiddingCategoryConstantRequest`):
                The request object. Request message for
                [ProductBiddingCategoryConstantService.GetProductBiddingCategoryConstant][google.ads.googleads.v8.services.ProductBiddingCategoryConstantService.GetProductBiddingCategoryConstant].
            resource_name (:class:`str`):
                Required. Resource name of the
                Product Bidding Category to fetch.
                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v8.resources.types.ProductBiddingCategoryConstant:
                A Product Bidding Category.
        Raises:
            ValueError: If both ``request`` and ``resource_name`` are given.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request,
            product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest,
        ):
            request = product_bidding_category_constant_service.GetProductBiddingCategoryConstantRequest(
                request
            )
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.get_product_bidding_category_constant
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource_name", request.resource_name),)
            ),
        )
        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
        # Done; return the response.
        return response
__all__ = ("ProductBiddingCategoryConstantServiceClient",)
| |
#
# Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import argparse
import os
import platform
import re
import sdk_directories
import shutil
import subprocess
def GetPlatformEnvironments():
    """Return per-platform build environment settings.

    Keys are ``platform.system()`` names; each value holds the default
    install directory, the command list that drives a full build, and the
    parallelism option template whose '??' is replaced by the job count.
    """
    def posix_environment():
        # Linux and Darwin share the same make-driven setup.
        return { 'default_install_directory' : '/tmp/AWSNativeSDK',
                 'global_build_call' : [ "make" ],
                 'parallel_option' : '-j??' }
    environments = {}
    environments['Windows'] = { 'default_install_directory' : 'C:\\temp\\AWSNativeSDK',
                                'global_build_call' : [ "msbuild", "ALL_BUILD.vcxproj" ],
                                'parallel_option' : '-m:??' }
    environments['Linux'] = posix_environment()
    environments['Darwin'] = posix_environment()
    return environments
def GetPlatformBuildTargets():
    """Return the build configuration table, keyed by target architecture.

    Each entry lists the host platforms that may build the target
    ('buildPlatforms'), the per-flavor settings ('configs': build directory,
    flavor cmake flags, extra build-tool arguments, install config name) and
    the cmake parameters shared by every flavor ('global_cmake_params').
    """
    def _config(directory, cmake_params, build_params, config):
        # One build flavor of one architecture.
        return { 'directory' : directory,
                 'cmake_params' : cmake_params,
                 'build_params' : build_params,
                 'config' : config }
    windows_configs = {
        'DebugDynamic' : _config('_build_windows_dynamic_debug', "-DSTATIC_LINKING=0",
                                 [ "-p:Configuration=Debug" ], 'Debug'),
        'DebugStatic' : _config('_build_windows_static_debug', "-DSTATIC_LINKING=1",
                                [ "-p:Configuration=Debug" ], 'Debug'),
        'ReleaseDynamic' : _config('_build_windows_dynamic_release', "-DSTATIC_LINKING=0",
                                   [ "-p:Configuration=Release" ], 'Release'),
        'ReleaseStatic' : _config('_build_windows_static_release', "-DSTATIC_LINKING=1",
                                  [ "-p:Configuration=Release" ], 'Release') }
    android_configs = {
        'DebugDynamic' : _config('_build_android_dynamic_debug',
                                 "-DSTATIC_LINKING=0 -DANDROID_STL=gnustl_shared -DCMAKE_BUILD_TYPE=Debug",
                                 [], 'Debug'),
        # NOTE(review): directory says "dynamic" for the static debug flavor;
        # name kept unchanged for backward compatibility.
        'DebugStatic' : _config('_build_android_dynamic_static',
                                "-DSTATIC_LINKING=1 -DANDROID_STL=gnustl_static -DCMAKE_BUILD_TYPE=Debug",
                                [], 'Debug'),
        'ReleaseDynamic' : _config('_build_android_dynamic_release',
                                   "-DSTATIC_LINKING=0 -DANDROID_STL=gnustl_shared -DCMAKE_BUILD_TYPE=Release",
                                   [], 'Release'),
        'ReleaseStatic' : _config('_build_android_static_release',
                                  "-DSTATIC_LINKING=1 -DANDROID_STL=gnustl_static -DCMAKE_BUILD_TYPE=Release",
                                  [], 'Release') }
    def _unix_configs(platform_tag):
        # Linux and Darwin flavors differ only in the build directory name.
        return {
            'DebugDynamic' : _config('_build_' + platform_tag + '_dynamic_debug',
                                     "-DSTATIC_LINKING=0 -DCMAKE_BUILD_TYPE=Debug", [], 'Debug'),
            'DebugStatic' : _config('_build_' + platform_tag + '_dynamic_static',
                                    "-DSTATIC_LINKING=1 -DCMAKE_BUILD_TYPE=Debug", [], 'Debug'),
            'ReleaseDynamic' : _config('_build_' + platform_tag + '_dynamic_release',
                                       "-DSTATIC_LINKING=0 -DCMAKE_BUILD_TYPE=Release", [], 'Release'),
            'ReleaseStatic' : _config('_build_' + platform_tag + '_static_release',
                                      "-DSTATIC_LINKING=1 -DCMAKE_BUILD_TYPE=Release", [], 'Release') }
    return { 'Windows' : { 'buildPlatforms' : [ 'Windows' ],
                           'configs' : windows_configs,
                           'global_cmake_params' : "-G \"Visual Studio 12 Win64\" -DCUSTOM_MEMORY_MANAGEMENT=1 -DGENERATE_VERSION_INFO=0 -DCMAKE_CONFIGURATION_TYPES=\"Debug;Release;MinSizeRel;RelWithDebInfo\" -DCMAKE_CXX_FLAGS_DEBUGOPT=\"\" -DCMAKE_EXE_LINKER_FLAGS_DEBUGOPT=\"\" -DCMAKE_SHARED_LINKER_FLAGS_DEBUGOPT=\"\"" },
             'Android' : { 'buildPlatforms' : [ 'Linux' ],
                           'configs' : android_configs,
                           # BUGFIX: "-DGENERATE_VERSION_INFO=0" previously had no
                           # trailing space, fusing it with the next flag into the
                           # malformed "...=0-DANDROID_STL_FORCE_FEATURES=OFF".
                           'global_cmake_params' : "-DCUSTOM_MEMORY_MANAGEMENT=1 "
                                                   "-DGENERATE_VERSION_INFO=0 "
                                                   "-DANDROID_STL_FORCE_FEATURES=OFF "
                                                   "-DTARGET_ARCH=ANDROID "
                                                   "-DANDROID_ABI=armeabi-v7a "
                                                   "-DANDROID_TOOLCHAIN_NAME=arm-linux-androideabi-4.9 " },
             'Linux' : { 'buildPlatforms' : [ 'Linux' ],
                         'configs' : _unix_configs('linux'),
                         'global_cmake_params' : "-DCUSTOM_MEMORY_MANAGEMENT=1 -DGENERATE_VERSION_INFO=0" },
             'Darwin' : { 'buildPlatforms' : [ 'Darwin' ],
                          'configs' : _unix_configs('darwin'),
                          'global_cmake_params' : "-DCUSTOM_MEMORY_MANAGEMENT=1 -DGENERATE_VERSION_INFO=0" } }
def ParseArguments(platformEnvironments):
    """Parse the command line into a normalized argument map.

    :param platformEnvironments: dict from GetPlatformEnvironments(); the
        entry for the current platform supplies default values.
    :return: dict with keys installDir, cmakeParams, architecture,
        configs (list of config names), parallel and generateClients.
    """
    argMap = {}
    platformName = platform.system()
    platformEnv = platformEnvironments[ platformName ]
    parser = argparse.ArgumentParser(description="AWSNativeSDK 3rdParty Install Script")
    parser.add_argument("--installdir", action="store")
    parser.add_argument("--cmake_params", action="store")
    parser.add_argument("--architecture", action="store")
    parser.add_argument("--configs", action="store")
    parser.add_argument("--parallel", action="store")
    parser.add_argument("--generateClients", action="store")
    args = vars( parser.parse_args() )
    # The re.sub calls strip one surrounding double quote from each end, so
    # both quoted and unquoted argument values are accepted.
    argMap[ "installDir" ] = args[ "installdir" ] or platformEnv['default_install_directory']
    argMap[ "cmakeParams" ] = re.sub(r'^"|"$', '', args[ "cmake_params" ] or "")
    argMap[ "architecture" ] = re.sub(r'^"|"$', '', args[ "architecture" ] or platformName)
    # BUGFIX: the pattern here was r'^"\"$', which only matches the literal
    # string '""', so quoted --configs values kept their quotes and split
    # into wrong tokens.  Use the same quote-stripping pattern as above.
    argMap[ "configs" ] = re.sub(r'^"|"$', '', args[ "configs" ] or "DebugDynamic ReleaseDynamic ReleaseStatic").split()
    argMap[ "parallel" ] = args[ "parallel" ] or "2"
    argMap[ "generateClients" ] = args[ "generateClients" ] or "0"
    return argMap
def CopyPDBs(config, libDir, installDirectoryPrefix):
    """Copy generated .pdb debug-symbol files into the install tree.

    For every immediate subdirectory <name> of the current directory, copies
    <name>/<config>/<name>.pdb (when present) into
    <installDirectoryPrefix>/<libDir>/windows/intel64/<config>.

    :param config: build configuration name (e.g. "Debug", "Release")
    :param libDir: destination subdirectory under the install root
    :param installDirectoryPrefix: root of the SDK install directory
    """
    destDirectory = os.path.join(installDirectoryPrefix, libDir, "windows", "intel64", config)
    # Only the top level of the build tree holds the project directories, so a
    # full os.walk was unnecessary.  The previous implementation also shelled
    # out to the Windows "copy" command with unquoted paths, which broke on
    # paths containing spaces; shutil.copy2 is portable and keeps metadata.
    for dirName in os.listdir("."):
        sourceFile = os.path.join(".", dirName, config, dirName + ".pdb")
        if os.path.isfile(sourceFile):
            shutil.copy2(sourceFile, destDirectory)
def CopyAndroidExternalDependencies(config, installDirectory):
    """Install the Android auxiliary dependencies (zlib, openssl, curl).

    Runs each dependency's generated cmake_install.cmake script so the
    library lands under <installDirectory>/external/<lib>.

    :param config: install configuration name (e.g. "Debug", "Release")
    :param installDirectory: root of the SDK install directory
    """
    for lib in ("zlib", "openssl", "curl"):
        libUpper = lib.upper()
        installScript = os.path.join(libUpper + "-prefix", "src", libUpper + "-build", "cmake_install.cmake")
        targetDir = '"' + os.path.join(installDirectory, "external", lib) + '"'
        command = ("cmake -DCMAKE_INSTALL_CONFIG_NAME=" + config +
                   " -DCMAKE_INSTALL_PREFIX=" + targetDir +
                   " -P " + installScript + " ..")
        print( "dependent install call = " + command )
        subprocess.check_call( command, shell = True )
def RemoveExternalAndroidDirectories():
    """Delete leftover Android dependency build directories, if any exist,
    so a fresh Android build starts from a clean slate."""
    stale_directories = ("external", "zlib", "openssl", "curl")
    for name in stale_directories:
        if os.path.exists(name):
            shutil.rmtree(name)
def Main():
    """Drive the 3rd-party build: validate platform/architecture, then for
    each requested config run cmake configure, build and install from a
    dedicated build directory.  Returns 1 on validation failure.
    """
    platformBuildTargets = GetPlatformBuildTargets()
    platformEnvironments = GetPlatformEnvironments()
    sourcePlatform = platform.system()
    if not sourcePlatform in platformEnvironments:
        print( "Platform " + sourcePlatform + " not supported as a build platform" )
        return 1
    platformEnv = platformEnvironments[ sourcePlatform ]
    arguments = ParseArguments(platformEnvironments)
    # Trailing space so flavor-specific cmake params can be appended safely.
    customCmakeParams = arguments[ "cmakeParams" ] + " "
    architecture = arguments[ "architecture" ]
    targetConfigs = arguments[ "configs" ]
    installDirectory = arguments[ "installDir" ]
    parallelJobs = arguments[ "parallel" ]
    # Quoted form for use inside shell command strings.
    quotedInstallDirectory = '"' + installDirectory + '"'
    generateClients = arguments[ "generateClients" ]
    # Start from a clean install tree.
    if os.path.exists( installDirectory ):
        shutil.rmtree( installDirectory )
    if not architecture in platformBuildTargets:
        print( "No definition for target architecture " + architecture )
        return 1
    targetPlatformDef = platformBuildTargets[ architecture ]
    if not sourcePlatform in targetPlatformDef[ 'buildPlatforms' ]:
        print( "Platform " + sourcePlatform + " does not support building for architecture " + architecture )
        return 1
    if architecture == 'Android':
        RemoveExternalAndroidDirectories()
    archConfigs = targetPlatformDef[ 'configs' ]
    if generateClients != "0":
        # Regenerate the service clients from the API models before building.
        sdk_directories.wipeGeneratedCode()
        customCmakeParams += "-DREGENERATE_CLIENTS=1 "
    for targetConfig in targetConfigs:
        if targetConfig in archConfigs:
            archConfig = archConfigs[ targetConfig ]
            buildDirectory = archConfig[ 'directory' ]
            # Fresh out-of-source build directory per config.
            if os.path.exists( buildDirectory ):
                shutil.rmtree( buildDirectory )
            os.mkdir( buildDirectory )
            os.chdir( buildDirectory )
            cmake_call_list = "cmake " + customCmakeParams + " " + archConfig[ 'cmake_params' ] + " " + targetPlatformDef[ 'global_cmake_params' ] + " " + ".."
            print( "cmake call = " + cmake_call_list )
            subprocess.check_call( cmake_call_list, shell = True )
            # '??' in the parallel option template becomes the job count.
            parallelBuildOption = platformEnv[ 'parallel_option' ].replace("??", str(parallelJobs))
            build_call_list = platformEnv[ 'global_build_call' ] + [ parallelBuildOption ] + archConfig[ 'build_params' ]
            print( "build call = " + str( build_call_list ) )
            subprocess.check_call( build_call_list )
            install_call = "cmake -DCMAKE_INSTALL_CONFIG_NAME=" + archConfig[ 'config' ] + " -DCMAKE_INSTALL_PREFIX=" + quotedInstallDirectory + " -P cmake_install.cmake .."
            print( "install call = " + install_call )
            subprocess.check_call( install_call, shell = True )
            # platform specific stuff
            # Copy Windows PDBs
            if architecture == 'Windows':
                CopyPDBs( archConfig[ 'config' ], "bin", installDirectory )
            # Install Android auxiliary dependencies (zlib, openssl, curl)
            if architecture == 'Android':
                CopyAndroidExternalDependencies( archConfig[ 'config' ], installDirectory )
            # Back to the source root for the next config.
            os.chdir( ".." )
        else:
            print("Build target config " + targetConfig + " does not exist for architecture " + architecture)
    print( "Aws SDK for C++ finished 3rd party installation into: " + installDirectory )
# On windows: Run from powershell; make sure msbuild is in PATH environment variable
if __name__ == "__main__":
    # Guard the entry point so importing this script for its helper
    # functions does not kick off a full build.
    Main()
| |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A test script which attempts to detect memory leaks by calling C
functions many times and compare process memory usage before and
after the calls. It might produce false positives.
"""
import gc
import os
import socket
import sys
import threading
import time
import psutil
import psutil._common
from psutil._compat import xrange
from test_psutil import (WINDOWS, POSIX, OSX, LINUX, SUNOS, BSD, TESTFN,
RLIMIT_SUPPORT, TRAVIS)
from test_psutil import (reap_children, supports_ipv6, safe_remove,
get_test_subprocess)
if sys.version_info < (2, 7):
import unittest2 as unittest # https://pypi.python.org/pypi/unittest2
else:
import unittest
LOOPS = 1000  # calls per measurement round in Base.execute (warm-up + LOOPS - 1)
TOLERANCE = 4096  # bytes of RSS growth tolerated between two rounds
SKIP_PYTHON_IMPL = True  # skip tests whose platform implementation is pure python
def skip_if_linux():
    """Return a skip decorator used on tests whose Linux implementation is
    pure python (no C code, so there is nothing to leak-check there)."""
    should_skip = LINUX and SKIP_PYTHON_IMPL
    return unittest.skipIf(should_skip,
                           "not worth being tested on LINUX (pure python)")
class Base(unittest.TestCase):
    """Shared leak-detection harness: call a function many times and compare
    the process RSS between rounds; a growing RSS beyond TOLERANCE suggests
    a leak in the underlying C code."""
    # Process under test; subclasses may override (e.g. with a dead process).
    proc = psutil.Process()
    def execute(self, function, *args, **kwargs):
        """Run the leak check for *function* (dispatched via self.call)."""
        def call_many_times():
            # One measurement round: LOOPS - 1 calls, then a GC pass and an
            # RSS sample (the warm-up call below completes the LOOPS total).
            for x in xrange(LOOPS - 1):
                self.call(function, *args, **kwargs)
            del x
            gc.collect()
            return self.get_mem()
        # Warm-up call; also sanity-check that nothing is uncollectable and
        # no stray threads are running before measuring.
        self.call(function, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        # RSS comparison
        # step 1
        rss1 = call_many_times()
        # step 2
        rss2 = call_many_times()
        difference = rss2 - rss1
        if difference > TOLERANCE:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase any
            # more.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            stop_at = time.time() + 3
            while True:
                self.call(function, *args, **kwargs)
                if time.time() >= stop_at:
                    break
            del stop_at
            gc.collect()
            rss3 = self.get_mem()
            difference = rss3 - rss2
            if rss3 > rss2:
                self.fail("rss2=%s, rss3=%s, difference=%s"
                          % (rss2, rss3, difference))
    def execute_w_exc(self, exc, function, *args, **kwargs):
        """Like execute(), but expect every call to raise *exc*."""
        kwargs['_exc'] = exc
        self.execute(function, *args, **kwargs)
    def get_mem(self):
        # First field of memory_info() is the resident set size (RSS).
        return psutil.Process().memory_info()[0]
    def call(self, *args, **kwargs):
        # Subclasses define how a single invocation is performed.
        raise NotImplementedError("must be implemented in subclass")
class TestProcessObjectLeaks(Base):
"""Test leaks of Process class methods and properties"""
def setUp(self):
gc.collect()
def tearDown(self):
reap_children()
def call(self, function, *args, **kwargs):
meth = getattr(self.proc, function)
if '_exc' in kwargs:
exc = kwargs.pop('_exc')
self.assertRaises(exc, meth, *args, **kwargs)
else:
try:
meth(*args, **kwargs)
except psutil.Error:
pass
@skip_if_linux()
def test_name(self):
self.execute('name')
@skip_if_linux()
def test_cmdline(self):
self.execute('cmdline')
@skip_if_linux()
def test_exe(self):
self.execute('exe')
@skip_if_linux()
def test_ppid(self):
self.execute('ppid')
@unittest.skipUnless(POSIX, "POSIX only")
@skip_if_linux()
def test_uids(self):
self.execute('uids')
@unittest.skipUnless(POSIX, "POSIX only")
@skip_if_linux()
def test_gids(self):
self.execute('gids')
@skip_if_linux()
def test_status(self):
self.execute('status')
def test_nice_get(self):
self.execute('nice')
def test_nice_set(self):
niceness = psutil.Process().nice()
self.execute('nice', niceness)
@unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
"Linux and Windows Vista only")
def test_ionice_get(self):
self.execute('ionice')
@unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
"Linux and Windows Vista only")
def test_ionice_set(self):
if WINDOWS:
value = psutil.Process().ionice()
self.execute('ionice', value)
else:
self.execute('ionice', psutil.IOPRIO_CLASS_NONE)
self.execute_w_exc(OSError, 'ionice', -1)
@unittest.skipIf(OSX or SUNOS, "feature not supported on this platform")
@skip_if_linux()
def test_io_counters(self):
self.execute('io_counters')
@unittest.skipUnless(WINDOWS, "not worth being tested on posix")
def test_username(self):
self.execute('username')
@skip_if_linux()
def test_create_time(self):
self.execute('create_time')
@skip_if_linux()
def test_num_threads(self):
self.execute('num_threads')
@unittest.skipUnless(WINDOWS, "Windows only")
def test_num_handles(self):
self.execute('num_handles')
@unittest.skipUnless(POSIX, "POSIX only")
@skip_if_linux()
def test_num_fds(self):
self.execute('num_fds')
@skip_if_linux()
def test_threads(self):
self.execute('threads')
@skip_if_linux()
def test_cpu_times(self):
self.execute('cpu_times')
@skip_if_linux()
def test_memory_info(self):
self.execute('memory_info')
@skip_if_linux()
def test_memory_info_ex(self):
self.execute('memory_info_ex')
@unittest.skipUnless(POSIX, "POSIX only")
@skip_if_linux()
def test_terminal(self):
self.execute('terminal')
@unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
"not worth being tested on POSIX (pure python)")
def test_resume(self):
self.execute('resume')
@skip_if_linux()
def test_cwd(self):
self.execute('cwd')
@unittest.skipUnless(WINDOWS or LINUX or BSD,
"Windows or Linux or BSD only")
def test_cpu_affinity_get(self):
self.execute('cpu_affinity')
@unittest.skipUnless(WINDOWS or LINUX or BSD,
"Windows or Linux or BSD only")
def test_cpu_affinity_set(self):
affinity = psutil.Process().cpu_affinity()
self.execute('cpu_affinity', affinity)
if not TRAVIS:
self.execute_w_exc(ValueError, 'cpu_affinity', [-1])
    @skip_if_linux()
    def test_open_files(self):
        safe_remove(TESTFN)  # needed after UNIX socket test has run
        # Keep one file open so open_files() has something to report.
        with open(TESTFN, 'w'):
            self.execute('open_files')
    # OSX implementation is unbelievably slow
    @unittest.skipIf(OSX, "OSX implementation is too slow")
    @skip_if_linux()
    def test_memory_maps(self):
        self.execute('memory_maps')
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_get(self):
        self.execute('rlimit', psutil.RLIMIT_NOFILE)
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_set(self):
        # Set the limit back to its current value, then verify that an
        # invalid resource id raises OSError without leaking.
        limit = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)
        self.execute('rlimit', psutil.RLIMIT_NOFILE, limit)
        self.execute_w_exc(OSError, 'rlimit', -1)
    @skip_if_linux()
    # Windows implementation is based on a single system-wide function
    @unittest.skipIf(WINDOWS, "tested later")
    def test_connections(self):
        # Open one socket per supported family/type so connections() has
        # something to enumerate, then exercise it for leaks.
        def create_socket(family, type):
            sock = socket.socket(family, type)
            sock.bind(('', 0))
            if type == socket.SOCK_STREAM:
                sock.listen(1)
            return sock
        socks = []
        socks.append(create_socket(socket.AF_INET, socket.SOCK_STREAM))
        socks.append(create_socket(socket.AF_INET, socket.SOCK_DGRAM))
        if supports_ipv6():
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_STREAM))
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_DGRAM))
        if hasattr(socket, 'AF_UNIX'):
            # TESTFN doubles as the UNIX socket path; drop any stale file.
            safe_remove(TESTFN)
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.bind(TESTFN)
            s.listen(1)
            socks.append(s)
        kind = 'all'
        # TODO: UNIX sockets are temporarily implemented by parsing
        # 'pfiles' cmd output; we don't want that part of the code to
        # be executed.
        if SUNOS:
            kind = 'inet'
        try:
            self.execute('connections', kind=kind)
        finally:
            # Always close the fixture sockets, even if the check fails.
            for s in socks:
                s.close()
# Module-level fixture: spawn a throwaway subprocess and kill it so that
# DEAD_PROC references a process which no longer exists.  It is used by
# TestProcessObjectLeaksZombie below to leak-check the NoSuchProcess paths.
p = get_test_subprocess()
DEAD_PROC = psutil.Process(p.pid)
DEAD_PROC.kill()
DEAD_PROC.wait()
del p
class TestProcessObjectLeaksZombie(TestProcessObjectLeaks):
    """Same as above but looks for leaks occurring when dealing with
    zombie processes raising NoSuchProcess exception.
    """
    # All inherited tests run against the already-terminated module-level
    # process created above.
    proc = DEAD_PROC
    def call(self, *args, **kwargs):
        # NoSuchProcess is the expected outcome here; only memory matters.
        try:
            TestProcessObjectLeaks.call(self, *args, **kwargs)
        except psutil.NoSuchProcess:
            pass
    if not POSIX:
        # These methods are (re-)defined only on non-POSIX platforms.
        def test_kill(self):
            self.execute('kill')
        def test_terminate(self):
            self.execute('terminate')
        def test_suspend(self):
            self.execute('suspend')
        def test_resume(self):
            self.execute('resume')
        def test_wait(self):
            self.execute('wait')
class TestModuleFunctionsLeaks(Base):
    """Test leaks of psutil module functions."""

    def setUp(self):
        gc.collect()

    def call(self, function, *args, **kwargs):
        # Resolve the function by name on every call so tests that swap a
        # psutil attribute (see the cpu_count tests) are honored.
        fun = getattr(psutil, function)
        fun(*args, **kwargs)

    @skip_if_linux()
    def test_cpu_count_logical(self):
        # Point psutil.cpu_count at the logical-count implementation for the
        # duration of this test only.  The original code rebound it
        # permanently, leaking the monkey patch into every later test.
        orig = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_logical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = orig

    @skip_if_linux()
    def test_cpu_count_physical(self):
        # Same as above, for the physical core count implementation.
        orig = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_physical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = orig

    @skip_if_linux()
    def test_boot_time(self):
        self.execute('boot_time')

    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_pid_exists(self):
        self.execute('pid_exists', os.getpid())

    def test_virtual_memory(self):
        self.execute('virtual_memory')

    # TODO: remove this skip when this gets fixed
    @unittest.skipIf(SUNOS,
                     "not worth being tested on SUNOS (uses a subprocess)")
    def test_swap_memory(self):
        self.execute('swap_memory')

    @skip_if_linux()
    def test_cpu_times(self):
        self.execute('cpu_times')

    @skip_if_linux()
    def test_per_cpu_times(self):
        self.execute('cpu_times', percpu=True)

    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_disk_usage(self):
        self.execute('disk_usage', '.')

    def test_disk_partitions(self):
        self.execute('disk_partitions')

    @skip_if_linux()
    def test_net_io_counters(self):
        self.execute('net_io_counters')

    @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
                     '/proc/diskstats not available on this Linux version')
    @skip_if_linux()
    def test_disk_io_counters(self):
        self.execute('disk_io_counters')

    # XXX - on Windows this produces a false positive
    @unittest.skipIf(WINDOWS, "XXX produces a false positive on Windows")
    def test_users(self):
        self.execute('users')

    @unittest.skipIf(LINUX,
                     "not worth being tested on Linux (pure python)")
    def test_net_connections(self):
        self.execute('net_connections')

    def test_net_if_addrs(self):
        self.execute('net_if_addrs')

    @unittest.skipIf(TRAVIS, "EPERM on travis")
    def test_net_if_stats(self):
        self.execute('net_if_stats')
def test_main():
    """Run every leak-test case in this module; return True on success."""
    suite = unittest.TestSuite()
    for case in (TestProcessObjectLeaksZombie,
                 TestProcessObjectLeaks,
                 TestModuleFunctionsLeaks):
        suite.addTest(unittest.makeSuite(case))
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite).wasSuccessful()
if __name__ == '__main__':
    # Propagate a failing exit code so CI notices a broken run.
    ok = test_main()
    if not ok:
        sys.exit(1)
| |
import os
import logging
from functools import partial
from capture_gui.vendor.Qt import QtCore, QtWidgets
from capture_gui import plugin, lib
from capture_gui import tokens
log = logging.getLogger("IO")
class IoAction(QtWidgets.QAction):
    """Menu action that opens a playblast file when triggered.

    The file path is stored as the action's data, its base name becomes the
    label, and the entry is disabled when the file no longer exists on disk.
    """
    def __init__(self, parent, filepath):
        super(IoAction, self).__init__(parent)
        self.setText(os.path.basename(filepath))
        self.setData(filepath)
        # Grey the entry out when the file has been removed meanwhile.
        self.setEnabled(os.path.isfile(filepath))
        # Show the operating system's icon for this file type.
        provider = QtWidgets.QFileIconProvider()
        file_info = QtCore.QFileInfo(filepath)
        self.setIcon(provider.icon(file_info))
        self.triggered.connect(self.open_object_data)
    def open_object_data(self):
        """Open the stored file path with the system default application."""
        lib.open_file(self.data())
class IoPlugin(plugin.Plugin):
    """Save widget.
    Allows to configure whether/where the playblast is saved, whether it is
    shown in a viewer when finished, and gives access to recent playblasts.
    """
    id = "IO"
    label = "Save"
    section = "app"
    order = 40
    # Maximum number of entries kept in the "Play recent playblast" menu.
    max_recent_playblasts = 5
    def __init__(self, parent=None):
        super(IoPlugin, self).__init__(parent=parent)
        self.recent_playblasts = list()
        self._layout = QtWidgets.QVBoxLayout()
        self._layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self._layout)
        # region Checkboxes
        self.save_file = QtWidgets.QCheckBox(text="Save")
        self.open_viewer = QtWidgets.QCheckBox(text="View when finished")
        self.raw_frame_numbers = QtWidgets.QCheckBox(text="Raw frame numbers")
        checkbox_hlayout = QtWidgets.QHBoxLayout()
        checkbox_hlayout.setContentsMargins(5, 0, 5, 0)
        checkbox_hlayout.addWidget(self.save_file)
        checkbox_hlayout.addWidget(self.open_viewer)
        checkbox_hlayout.addWidget(self.raw_frame_numbers)
        checkbox_hlayout.addStretch(True)
        # endregion Checkboxes
        # region Path
        self.path_widget = QtWidgets.QWidget()
        self.browse = QtWidgets.QPushButton("Browse")
        self.file_path = QtWidgets.QLineEdit()
        self.file_path.setPlaceholderText("(not set; using scene name)")
        tip = "Right click in the text field to insert tokens"
        self.file_path.setToolTip(tip)
        self.file_path.setStatusTip(tip)
        # Right-click on the line edit pops the token insertion menu.
        self.file_path.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.file_path.customContextMenuRequested.connect(self.show_token_menu)
        path_hlayout = QtWidgets.QHBoxLayout()
        path_hlayout.setContentsMargins(0, 0, 0, 0)
        path_label = QtWidgets.QLabel("Path:")
        path_label.setFixedWidth(30)
        path_hlayout.addWidget(path_label)
        path_hlayout.addWidget(self.file_path)
        path_hlayout.addWidget(self.browse)
        self.path_widget.setLayout(path_hlayout)
        # endregion Path
        # region Recent Playblast
        self.play_recent = QtWidgets.QPushButton("Play recent playblast")
        self.recent_menu = QtWidgets.QMenu()
        self.play_recent.setMenu(self.recent_menu)
        # endregion Recent Playblast
        self._layout.addLayout(checkbox_hlayout)
        self._layout.addWidget(self.path_widget)
        self._layout.addWidget(self.play_recent)
        # Signals / connections
        self.browse.clicked.connect(self.show_browse_dialog)
        self.file_path.textChanged.connect(self.options_changed)
        self.save_file.stateChanged.connect(self.options_changed)
        self.raw_frame_numbers.stateChanged.connect(self.options_changed)
        self.save_file.stateChanged.connect(self.on_save_changed)
        # Ensure state is up-to-date with current settings
        self.on_save_changed()
    def on_save_changed(self):
        """Update the visibility of the path field"""
        state = self.save_file.isChecked()
        if state:
            self.path_widget.show()
        else:
            self.path_widget.hide()
    def show_browse_dialog(self):
        """Set the filepath using a browser dialog.
        :return: None
        """
        path = lib.browse()
        if not path:
            return
        # Maya's browser return Linux based file paths to ensure Windows is
        # supported we use normpath
        path = os.path.normpath(path)
        self.file_path.setText(path)
    def add_playblast(self, item):
        """
        Add an item to the previous playblast menu
        :param item: full path to a playblast file
        :type item: str
        :return: None
        """
        # If item already in the recent playblasts remove it so we are
        # sure to add it as the new first most-recent
        try:
            self.recent_playblasts.remove(item)
        except ValueError:
            pass
        # Add as first in the recent playblasts
        self.recent_playblasts.insert(0, item)
        # Ensure the playblast list is never longer than maximum amount
        # by removing the older entries that are at the end of the list
        if len(self.recent_playblasts) > self.max_recent_playblasts:
            del self.recent_playblasts[self.max_recent_playblasts:]
        # Rebuild the actions menu
        self.recent_menu.clear()
        for playblast in self.recent_playblasts:
            action = IoAction(parent=self, filepath=playblast)
            self.recent_menu.addAction(action)
    def on_playblast_finished(self, options):
        """Take action after the play blast is done"""
        playblast_file = options['filename']
        if not playblast_file:
            return
        self.add_playblast(playblast_file)
    def get_outputs(self):
        """
        Get the output of the widget based on the user's inputs
        :return: collection of needed output values
        :rtype: dict
        """
        output = {"filename": None,
                  "raw_frame_numbers": self.raw_frame_numbers.isChecked(),
                  "viewer": self.open_viewer.isChecked()}
        save = self.save_file.isChecked()
        if not save:
            return output
        # get path, if nothing is set fall back to default
        # project/images/playblast
        path = self.file_path.text()
        if not path:
            path = lib.default_output()
        output["filename"] = path
        return output
    def get_inputs(self, as_preset):
        """Collect the widget state for persisting.
        :param as_preset: when True the recent playblast list is omitted so
            presets do not carry machine-specific history.
        :rtype: dict
        """
        inputs = {"name": self.file_path.text(),
                  "save_file": self.save_file.isChecked(),
                  "open_finished": self.open_viewer.isChecked(),
                  "recent_playblasts": self.recent_playblasts,
                  "raw_frame_numbers": self.raw_frame_numbers.isChecked()}
        if as_preset:
            inputs["recent_playblasts"] = []
        return inputs
    def apply_inputs(self, settings):
        """Restore a previously saved widget state.
        :param settings: dict as produced by `get_inputs`
        :return: None
        """
        directory = settings.get("name", None)
        save_file = settings.get("save_file", True)
        open_finished = settings.get("open_finished", True)
        raw_frame_numbers = settings.get("raw_frame_numbers", False)
        previous_playblasts = settings.get("recent_playblasts", [])
        self.save_file.setChecked(save_file)
        self.open_viewer.setChecked(open_finished)
        self.raw_frame_numbers.setChecked(raw_frame_numbers)
        # Re-add in reverse so the first stored entry ends up most recent.
        for playblast in reversed(previous_playblasts):
            self.add_playblast(playblast)
        # NOTE(review): "name" may be absent, making directory None here --
        # confirm the Qt binding in use tolerates setText(None).
        self.file_path.setText(directory)
    def token_menu(self):
        """
        Build the token menu based on the registered tokens
        :returns: Menu
        :rtype: QtWidgets.QMenu
        """
        menu = QtWidgets.QMenu(self)
        registered_tokens = tokens.list_tokens()
        for token, value in registered_tokens.items():
            label = "{} \t{}".format(token, value['label'])
            action = QtWidgets.QAction(label, menu)
            # Bind the token via partial so each action inserts its own.
            fn = partial(self.file_path.insert, token)
            action.triggered.connect(fn)
            menu.addAction(action)
        return menu
    def show_token_menu(self, pos):
        """Show the custom token menu at the given widget position."""
        menu = self.token_menu()
        globalpos = QtCore.QPoint(self.file_path.mapToGlobal(pos))
        menu.exec_(globalpos)
| |
from voussoirkit import stringtools
class ErrorTypeAdder(type):
    '''
    Metaclass that stamps every exception class with an `error_type`
    attribute: the class name converted to loudsnake casing, e.g.
    NoSuchPhoto -> NO_SUCH_PHOTO.
    This is used for serialization of the exception object and should
    basically act as a status code when displaying the error to the user.
    Thanks Unutbu
    http://stackoverflow.com/a/18126678
    '''
    def __init__(cls, name, bases, clsdict):
        super().__init__(name, bases, clsdict)
        cls.error_type = stringtools.pascal_to_loudsnakes(name)
class EtiquetteException(Exception, metaclass=ErrorTypeAdder):
    '''
    Base type for all of the Etiquette exceptions.
    Subtypes should have a class attribute `error_message`. The error message
    may contain {format} strings which will be formatted using the
    Exception's constructor arguments.
    '''
    error_message = ''

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Keep the raw constructor arguments so catchers can inspect them.
        self.given_args = args
        self.given_kwargs = kwargs
        # Resolve the class-level template into this instance's message.
        self.error_message = self.error_message.format(*args, **kwargs)
        self.args = (self.error_message, args, kwargs)

    def __str__(self):
        return '{}: {}'.format(self.error_type, self.error_message)

    def jsonify(self):
        '''Return a JSON-serializable dict describing this error.'''
        return {
            'type': 'error',
            'error_type': self.error_type,
            'error_message': self.error_message,
        }
# NO SUCH ##########################################################################################
class NoSuch(EtiquetteException):
    # Common base so callers can catch any "does not exist" lookup failure
    # with a single except clause.
    pass
class NoSuchAlbum(NoSuch):
    error_message = 'Album "{}" does not exist.'
class NoSuchBookmark(NoSuch):
    error_message = 'Bookmark "{}" does not exist.'
class NoSuchGroup(NoSuch):
    error_message = 'Group "{}" does not exist.'
class NoSuchPhoto(NoSuch):
    error_message = 'Photo "{}" does not exist.'
class NoSuchSynonym(NoSuch):
    error_message = 'Synonym "{}" does not exist.'
class NoSuchTag(NoSuch):
    error_message = 'Tag "{}" does not exist.'
class NoSuchUser(NoSuch):
    error_message = 'User "{}" does not exist.'
# EXISTS ###########################################################################################
# The following inits store a copy of the object so that the exception catcher
# can do something with it. It's not related to the string formatting.
class Exists(EtiquetteException):
    # Common base for all "already exists" conflicts.
    pass
class AlbumExists(Exists):
    error_message = 'Album "{}" already exists.'
    def __init__(self, album):
        # Keep the conflicting album so the catcher can e.g. reuse it.
        self.album = album
        EtiquetteException.__init__(self, album)
class GroupExists(Exists):
    error_message = '{member} already in group {group}.'
class PhotoExists(Exists):
    error_message = 'Photo "{}" already exists.'
    def __init__(self, photo):
        self.photo = photo
        EtiquetteException.__init__(self, photo)
class TagExists(Exists):
    error_message = 'Tag "{}" already exists.'
    def __init__(self, tag):
        self.tag = tag
        EtiquetteException.__init__(self, tag)
class UserExists(Exists):
    error_message = 'User "{}" already exists.'
    def __init__(self, user):
        self.user = user
        EtiquetteException.__init__(self, user)
# TAG ERRORS #######################################################################################
class CantGroupSelf(EtiquetteException):
    error_message = 'Cannot group {} into itself.'
class CantSynonymSelf(EtiquetteException):
    error_message = 'Cannot make {} a synonym of itself.'
class EasyBakeError(EtiquetteException):
    # The easybake layer supplies its own complete message.
    error_message = '{}'
class RecursiveGrouping(EtiquetteException):
    error_message = '{group} is an ancestor of {member}.'
class TagTooLong(EtiquetteException):
    error_message = 'Tag "{}" is too long.'
class TagTooShort(EtiquetteException):
    error_message = 'Tag "{}" has too few valid characters.'
# USER ERRORS ######################################################################################
# Messages with named {fields} must be raised with matching kwargs.
class AlreadySignedIn(EtiquetteException):
    error_message = 'You\'re already signed in.'
class CantDeleteUser(EtiquetteException):
    error_message = '{} can\'t be deleted because they still have possessions.'
class InvalidPassword(EtiquetteException):
    error_message = 'Password is invalid.'
class InvalidUsername(EtiquetteException):
    error_message = 'Username "{username}" is invalid.'
class InvalidUsernameChars(InvalidUsername):
    error_message = 'Username "{username}" contains invalid characters: {badchars}.'
class PasswordTooShort(InvalidPassword):
    error_message = 'Password is shorter than the minimum of {min_length}.'
class UsernameTooLong(InvalidUsername):
    error_message = 'Username "{username}" is longer than maximum of {max_length}.'
class UsernameTooShort(InvalidUsername):
    error_message = 'Username "{username}" is shorter than minimum of {min_length}.'
class DisplayNameTooLong(EtiquetteException):
    error_message = 'Display name "{display_name}" is longer than maximum of {max_length}.'
class Unauthorized(EtiquetteException):
    error_message = 'You\'re not allowed to do that.'
class WrongLogin(EtiquetteException):
    error_message = 'Wrong username-password combination.'
# GENERAL ERRORS ###################################################################################
class BadDataDirectory(EtiquetteException):
    '''
    Raised by PhotoDB __init__ if the requested data_directory is invalid.
    '''
    error_message = 'Bad data directory "{}"'
# Message template for DatabaseOutOfDate, kept at module level.
OUTOFDATE = '''
Database is out of date. {existing} should be {new}.
Please run utilities\\database_upgrader.py "{filepath.absolute_path}"
'''.strip()
class DatabaseOutOfDate(EtiquetteException):
    '''
    Raised by PhotoDB __init__ if the user's database is behind.
    '''
    error_message = OUTOFDATE
class FeatureDisabled(EtiquetteException):
    '''
    For when features of the system have been disabled by the configuration.
    '''
    error_message = 'This feature has been disabled. Requires {requires}.'
class MinMaxInvalid(EtiquetteException):
    '''
    For when the user searches for e.g. width=a-b but the a-b can't be parsed.
    If the values can be parsed but are backward, use MinMaxOutOfOrder.
    '''
    error_message = 'Field "{field}": "{value}" is not a valid request.'
class MinMaxOutOfOrder(EtiquetteException):
    '''
    For when a requested minmax range (a, b) has a > b, i.e. the minimum is
    greater than the maximum.
    '''
    error_message = 'Range "{range}": minimum "{min}" and maximum "{max}" are out of order.'
class NoClosestPhotoDB(EtiquetteException):
    '''
    For calls to PhotoDB.closest_photodb where none exists between cwd and
    drive root.
    '''
    error_message = 'There is no PhotoDB in "{}" or its parents.'
class NoYields(EtiquetteException):
    '''
    For when all of the yield_* arguments have been provided as False, and thus
    there is nothing for the called function to yield.
    '''
    error_message = 'At least one of {} must be selected.'
class NotExclusive(EtiquetteException):
    '''
    For when two or more mutually exclusive actions have been requested.
    '''
    error_message = 'One and only one of {} must be passed.'
class OrderByBadColumn(EtiquetteException):
    '''
    For when the user tries to orderby a column that does not exist or is
    not allowed.
    '''
    error_message = '"{column}" is not a sortable column.'
class OrderByBadDirection(EtiquetteException):
    '''
    For when the user tries to orderby a direction that is not asc or desc.
    '''
    error_message = 'You can\'t order "{column}" by "{direction}". Should be asc or desc.'
class OrderByInvalid(EtiquetteException):
    '''
    For when the orderby request cannot be parsed into column and direction.
    For example, it contains too many hyphens like a-b-c.
    If the column and direction can be parsed but are invalid, use
    OrderByBadColumn or OrderByBadDirection
    '''
    error_message = 'Invalid orderby request "{request}".'
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the samples from the models."""
import logging
import os
import subprocess
import tempfile
import time
import absl.app as app
import absl.flags as flags
import absl.logging as absl_logging
import constants
import task_specific
import tensorflow as tf
import tf_utils
import tqdm
import transformers
import utils
LOGGER = logging.getLogger(__name__)

# Approach types this script knows how to generate from.
_ACCEPTABLE_APPROACHES = frozenset([
    constants.ApproachTypeChoices.naked_lm,
    constants.ApproachTypeChoices.cached_pretok
])

_FLAG_DB_PATH = flags.DEFINE_string(
    "db_path",
    None,
    "Path to the dataset. Can be on Google Cloud."
)
_FLAG_MODEL_PATH = flags.DEFINE_string(
    "model_path",
    None,
    "Path to the model save."
)
_FLAG_APPROACH_TYPE = flags.DEFINE_enum(
    "approach_type",
    None,
    _ACCEPTABLE_APPROACHES,
    # Fixed help text: it was copy-pasted from --model_path.
    "Which approach type to generate with."
)
_FLAG_OUTPUT_PATH = flags.DEFINE_string(
    "output_path",
    None,
    "Where to save the generations. A json file. Can be on Google Cloud."
)
# NOTE(review): the odd capitalization of the flag name ("dataset_Type") is
# kept for command-line compatibility; renaming it would break callers.
_FLAG_DATASET_TYPE = flags.DEFINE_enum(
    "dataset_Type",
    "tfr",
    constants.DatasetTypeChoices.choices(),
    "Whether to use the hdf5 or the tfr pipeline."
)
_FLAG_TFR_PREFIX = flags.DEFINE_string(
    "tfr_prefix",
    None,
    "Glob prefix of the tfr files."
)
_FLAG_BATCH_SIZE = flags.DEFINE_integer(
    "batch_size",
    None,
    "Size of the batch PER DEVICE."
)
_FLAG_SPLIT = flags.DEFINE_enum(
    "split",
    "test",
    {"eval", "test"},
    "Which split to generate from."
)
_FLAG_GENERATION_LENGTH_LIMIT = flags.DEFINE_integer(
    "generation_length_limit",
    None,
    "Number of tokens to reserve for generation at the end."
)
def main(argv):
    """Loads the model and dataset, generates samples and saves them as JSON.

    Args:
        argv: positional command-line arguments; anything beyond the program
            name is an error (all configuration comes from flags).
    Raises:
        RuntimeError: on unexpected positional arguments or device types.
    """
    if len(argv) > 1:
        raise RuntimeError(argv[1:])
    absl_logging.use_python_logging()
    utils.check_contained(_FLAG_APPROACH_TYPE.value, _ACCEPTABLE_APPROACHES)

    db_path = _FLAG_DB_PATH.value
    model_path = _FLAG_MODEL_PATH.value

    # Pick the distribution strategy from the available hardware.
    tpu_config = tf_utils.init_tpus()
    device_type = tf_utils.devices_to_use()[0].device_type
    if device_type == "TPU":
        assert isinstance(tpu_config, tf_utils.TpuConfigType)
        strategy = tf.distribute.TPUStrategy(tpu_config.resolver)
    elif device_type in ("GPU", "CPU"):
        # Bug fix: the original condition was `device_type == "GPU" or "CPU"`,
        # which is always truthy and made the `else` branch unreachable.
        # MirroredStrategy automatically becomes OneDeviceStrategy if there is
        # just one device, like one GPU or only CPUs.
        strategy = tf.distribute.MirroredStrategy()
    else:
        raise RuntimeError("Unsupported device type: {}".format(device_type))

    ##############################################################################
    # Load Model
    ##############################################################################
    with utils.log_duration(LOGGER, main.__name__, "All of model preparation"):
        def make_model_tf(path):
            # Build a TFGPT2LMHeadModel either from a local checkpoint
            # directory or from a name resolvable by `transformers`.
            with utils.log_duration(LOGGER, make_model_tf.__name__, "Load model."):
                if os.path.exists(path):
                    config_path = os.path.join(path, "config.json")
                    model_path = os.path.join(path, "tf_model.h5")
                    utils.check_exists(config_path)
                    utils.check_exists(model_path)
                    config = transformers.GPT2Config.from_pretrained(config_path)
                    return transformers.TFGPT2LMHeadModel.from_pretrained(
                        model_path,
                        config=config
                    )
                else:
                    return transformers.TFGPT2LMHeadModel.from_pretrained(
                        path,
                    )

        with strategy.scope():
            if model_path.startswith("gs://"):
                # transformers can't read gs:// paths; copy the checkpoint
                # locally with gsutil first.
                with utils.log_duration(
                    LOGGER,
                    main.__name__,
                    "Download model from GS"
                ):
                    with tempfile.TemporaryDirectory() as td:
                        td += os.path.sep
                        if os.path.exists("/root/google-cloud-sdk/bin/gsutil"):
                            exec_ = "/root/google-cloud-sdk/bin/gsutil"
                        else:
                            exec_ = "gsutil"
                        command = [
                            exec_,
                            "-m",
                            "cp",
                            "-r",
                            os.path.join(model_path, "*"),
                            td,
                        ]
                        LOGGER.debug("Running bash command: %s", " ".join(command))
                        subprocess.check_call(command)
                        LOGGER.debug(
                            "Files at the temp dir(%s): %s", td, str(os.listdir(td))
                        )
                        model = make_model_tf(td)
            else:
                model = make_model_tf(model_path)

    # Compile the forward pass with XLA.
    model.__call__ = tf.function(
        model.__call__,
        experimental_relax_shapes=True,
        experimental_compile=True,
    )

    ##############################################################################
    # Load Dataset Pipeline
    ##############################################################################
    # Bug fix: the original set here listed `naked_lm` twice, which silently
    # rejected `cached_pretok` even though it was accepted above.
    utils.check_contained(_FLAG_APPROACH_TYPE.value, _ACCEPTABLE_APPROACHES)

    devices = tf_utils.devices_to_use()
    num_replicas = (
        len(devices) if devices[0].device_type in {"GPU", "TPU"} else 1
    )
    # Only a batch size of 1 is currently supported. We need attention masks
    utils.check_equal(_FLAG_BATCH_SIZE.value, 1)
    batch_size = _FLAG_BATCH_SIZE.value * num_replicas
    approach_type = _FLAG_APPROACH_TYPE.value

    # Things that will never change:
    random_seed = 0
    use_helper_words = True
    retrieval_temperature = 1
    context_window_size = 1024

    logging.debug("Loading dataset.")
    tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl")
    ds = task_specific.create_lm_ds_kilt_eli5(
        tokenizer=tokenizer,
        context_window_size=context_window_size,
        dataset_name="kilt_eli5",
        batch_size=1,  # >> We set our own batch size elsewhere
        db_path=db_path,
        random_seed=random_seed,
        use_subset=False,
        subset_size=-1,
        use_helper_words=use_helper_words,
        approach_type=approach_type,
        num_retrievals=5,  # Will never change
        retrieval_temperature=retrieval_temperature,
        retriever=None,  # Cached retrievals don't need a retriever
        repeat=False,  # Will never change
        split=_FLAG_SPLIT.value,
        enable_debug_checks=False,
        retrieval_bank_size=5,  # Will never change
        dataset_type=_FLAG_DATASET_TYPE.value,
        tfr_prefix=_FLAG_TFR_PREFIX.value,
        qty_shuffle=1,  # Will never change
        max_length_generation=_FLAG_GENERATION_LENGTH_LIMIT.value
    )

    @tf.function  # Added for symmetry with the test-split variant below.
    def further_prep_generate_not_test(batch):
        # Keep only the context tokens (label == -100) and drop the padding.
        batch = tf.boolean_mask(
            batch["input_ids"],
            tf.logical_and(batch["label_ids"] == -100,
                           batch["input_ids"] != tokenizer.eos_token_id)
        )
        return batch

    @tf.function
    def further_prep_generate_test(batch):
        # The test split has no labels; just drop the padding.
        batch = tf.boolean_mask(
            batch["input_ids"], batch["input_ids"] != tokenizer.eos_token_id
        )
        return batch

    if _FLAG_SPLIT.value == constants.SplitChoices.test:
        ds = ds.map(further_prep_generate_test)
    else:
        ds = ds.map(further_prep_generate_not_test)

    # Pad every sample back to a rectangular batch using eos as filler.
    ds = ds.padded_batch(
        batch_size=batch_size, padding_values=tokenizer.eos_token_id
    )
    ds = strategy.experimental_distribute_dataset(ds)

    ##############################################################################
    # Generate
    ##############################################################################
    LOGGER.debug("Generating.")
    generations = []
    counter = tqdm.tqdm(
        ds,
        total=task_specific.DATASET_CARDINALITIES["kilt_eli5"][_FLAG_SPLIT.value]
    )
    for batch_no, batch in enumerate(counter):
        # Bug fix: the attention mask was inverted (`==`), which marked only
        # the eos padding as attendable. A 1 must mark the real tokens.
        output = strategy.run(model.generate, kwargs=dict(
            input_ids=batch,
            max_length=_FLAG_GENERATION_LENGTH_LIMIT.value,
            use_cache=True,
            attention_mask=batch != tokenizer.eos_token_id
        ))
        LOGGER.debug("INPUT: %s", tokenizer.decode(batch[0]))
        output = tf_utils.process_strat_output(
            strategy_outputs=output,
            current_batch_size=batch_size,
            strategy=strategy,
            name="generations"
        )
        with utils.log_duration(
            LOGGER, "main", "all of tokenizer.decode for a batch."
        ):
            for i in range(batch_size):
                text = tokenizer.decode(output.numpy()[i])
                LOGGER.debug("Batch %d Generation %d", batch_no, i)
                LOGGER.debug(text.replace("\n", " <\\n> "))
                generations.append(text)
        counter.update(batch.shape[0])

    # Save the generations alongside the flag values that produced them.
    utils.to_json_file(
        os.path.join(
            _FLAG_OUTPUT_PATH.value, _FLAG_SPLIT.value, _FLAG_APPROACH_TYPE.value,
            time.strftime("%Y%m%d-%H%M%S.json")
        ),
        dict(
            flags={
                flag.name: flag.value
                for flag in flags.FLAGS.flags_by_module_dict()[argv[0]]
            },
            generations=generations
        )
    )
    logging.debug("Saved to: %s", _FLAG_OUTPUT_PATH.value)
if __name__ == "__main__":
    # absl's app.run parses the flags and then invokes main(argv).
    app.run(main)
| |
"""
Tests for zipline.utils.validate.
"""
from operator import attrgetter
from types import FunctionType
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import arange, dtype
import pytz
from six import PY3
from zipline.utils.preprocess import call, preprocess
from zipline.utils.input_validation import (
ensure_timezone,
expect_element,
expect_dtypes,
expect_types,
optional,
optionally,
)
def noop(func, argname, argvalue):
    """Pass-through processor: sanity-check its inputs, return the value."""
    assert isinstance(func, FunctionType)
    assert isinstance(argname, str)
    return argvalue
if PY3:
    # Python 3 functions already carry a fully qualified name.
    qualname = attrgetter('__qualname__')
else:
    # Python 2 fallback: synthesize "<module>.<name>" by hand.
    def qualname(ob):
        return '{0}.{1}'.format(__name__, ob.__name__)
class PreprocessTestCase(TestCase):
    @parameterized.expand([
        ('too_many', (1, 2, 3), {}),
        ('too_few', (1,), {}),
        ('collision', (1,), {'a': 1}),
        ('unexpected', (1,), {'q': 1}),
    ])
    def test_preprocess_doesnt_change_TypeErrors(self, name, args, kwargs):
        """
        Verify that the validate decorator doesn't swallow typeerrors that
        would be raised when calling a function with invalid arguments
        """
        def undecorated(x, y):
            return x, y
        decorated = preprocess(x=noop, y=noop)(undecorated)
        # Call both versions with the same bad arguments and check that the
        # decorated one raises the exact same TypeError message.
        with self.assertRaises(TypeError) as e:
            undecorated(*args, **kwargs)
        undecorated_errargs = e.exception.args
        with self.assertRaises(TypeError) as e:
            decorated(*args, **kwargs)
        decorated_errargs = e.exception.args
        self.assertEqual(len(decorated_errargs), 1)
        self.assertEqual(len(undecorated_errargs), 1)
        self.assertEqual(decorated_errargs[0], undecorated_errargs[0])
    def test_preprocess_co_filename(self):
        # The generated wrapper should report the original definition site.
        def undecorated():
            pass
        decorated = preprocess()(undecorated)
        self.assertEqual(
            undecorated.__code__.co_filename,
            decorated.__code__.co_filename,
        )
    def test_preprocess_preserves_docstring(self):
        @preprocess()
        def func():
            "My awesome docstring"
        self.assertEqual(func.__doc__, "My awesome docstring")
    def test_preprocess_preserves_function_name(self):
        @preprocess()
        def arglebargle():
            pass
        self.assertEqual(arglebargle.__name__, 'arglebargle')
    @parameterized.expand([
        ((1, 2), {}),
        ((1, 2), {'c': 3}),
        ((1,), {'b': 2}),
        ((), {'a': 1, 'b': 2}),
        ((), {'a': 1, 'b': 2, 'c': 3}),
    ])
    def test_preprocess_no_processors(self, args, kwargs):
        # With no processors, preprocess must be a transparent pass-through.
        @preprocess()
        def func(a, b, c=3):
            return a, b, c
        self.assertEqual(func(*args, **kwargs), (1, 2, 3))
    def test_preprocess_bad_processor_name(self):
        a_processor = preprocess(a=int)
        # Should work fine.
        @a_processor
        def func_with_arg_named_a(a):
            pass
        @a_processor
        def func_with_default_arg_named_a(a=1):
            pass
        # Decorating a function with no 'a' argument must fail loudly.
        message = "Got processors for unknown arguments: %s." % {'a'}
        with self.assertRaises(TypeError) as e:
            @a_processor
            def func_with_no_args():
                pass
        self.assertEqual(e.exception.args[0], message)
        with self.assertRaises(TypeError) as e:
            @a_processor
            def func_with_arg_named_b(b):
                pass
        self.assertEqual(e.exception.args[0], message)
    @parameterized.expand([
        ((1, 2), {}),
        ((1, 2), {'c': 3}),
        ((1,), {'b': 2}),
        ((), {'a': 1, 'b': 2}),
        ((), {'a': 1, 'b': 2, 'c': 3}),
    ])
    def test_preprocess_on_function(self, args, kwargs):
        # Each processor should be applied no matter how the argument was
        # supplied (positionally, by keyword, or via its default).
        decorators = [
            preprocess(a=call(str), b=call(float), c=call(lambda x: x + 1)),
        ]
        for decorator in decorators:
            @decorator
            def func(a, b, c=3):
                return a, b, c
            self.assertEqual(func(*args, **kwargs), ('1', 2.0, 4))
    @parameterized.expand([
        ((1, 2), {}),
        ((1, 2), {'c': 3}),
        ((1,), {'b': 2}),
        ((), {'a': 1, 'b': 2}),
        ((), {'a': 1, 'b': 2, 'c': 3}),
    ])
    def test_preprocess_on_method(self, args, kwargs):
        # Same as above but on instance methods and classmethods; 'self' /
        # 'cls' must be left untouched by the processors.
        decorators = [
            preprocess(a=call(str), b=call(float), c=call(lambda x: x + 1)),
        ]
        for decorator in decorators:
            class Foo(object):
                @decorator
                def method(self, a, b, c=3):
                    return a, b, c
                @classmethod
                @decorator
                def clsmeth(cls, a, b, c=3):
                    return a, b, c
            self.assertEqual(Foo.clsmeth(*args, **kwargs), ('1', 2.0, 4))
            self.assertEqual(Foo().method(*args, **kwargs), ('1', 2.0, 4))
    def test_expect_types(self):
        @expect_types(a=int, b=int)
        def foo(a, b, c):
            return a, b, c
        # Unchecked argument 'c' may be anything.
        self.assertEqual(foo(1, 2, 3), (1, 2, 3))
        self.assertEqual(foo(1, 2, c=3), (1, 2, 3))
        self.assertEqual(foo(1, b=2, c=3), (1, 2, 3))
        self.assertEqual(foo(1, 2, c='3'), (1, 2, '3'))
        for not_int in (str, float):
            with self.assertRaises(TypeError) as e:
                foo(not_int(1), 2, 3)
            self.assertEqual(
                e.exception.args[0],
                "{qualname}() expected a value of type "
                "int for argument 'a', but got {t} instead.".format(
                    qualname=qualname(foo),
                    t=not_int.__name__,
                )
            )
            with self.assertRaises(TypeError):
                foo(1, not_int(2), 3)
            with self.assertRaises(TypeError):
                foo(not_int(1), not_int(2), 3)
    def test_expect_types_with_tuple(self):
        # A tuple of types means "any of these".
        @expect_types(a=(int, float))
        def foo(a):
            return a
        self.assertEqual(foo(1), 1)
        self.assertEqual(foo(1.0), 1.0)
        with self.assertRaises(TypeError) as e:
            foo('1')
        expected_message = (
            "{qualname}() expected a value of "
            "type int or float for argument 'a', but got str instead."
        ).format(qualname=qualname(foo))
        self.assertEqual(e.exception.args[0], expected_message)
    def test_expect_optional_types(self):
        @expect_types(a=optional(int))
        def foo(a=None):
            return a
        # None is accepted both implicitly (default) and explicitly.
        self.assertIs(foo(), None)
        self.assertIs(foo(None), None)
        self.assertIs(foo(a=None), None)
        self.assertEqual(foo(1), 1)
        self.assertEqual(foo(a=1), 1)
        with self.assertRaises(TypeError) as e:
            foo('1')
        expected_message = (
            "{qualname}() expected a value of "
            "type int or NoneType for argument 'a', but got str instead."
        ).format(qualname=qualname(foo))
        self.assertEqual(e.exception.args[0], expected_message)
    def test_expect_element(self):
        set_ = {'a', 'b'}
        @expect_element(a=set_)
        def f(a):
            return a
        self.assertEqual(f('a'), 'a')
        self.assertEqual(f('b'), 'b')
        with self.assertRaises(ValueError) as e:
            f('c')
        expected_message = (
            "{qualname}() expected a value in {set_!r}"
            " for argument 'a', but got 'c' instead."
        ).format(set_=set_, qualname=qualname(f))
        self.assertEqual(e.exception.args[0], expected_message)
def test_expect_dtypes(self):
    """@expect_dtypes validates array dtypes and ignores unconstrained args."""
    @expect_dtypes(a=dtype(float), b=dtype('datetime64[ns]'))
    def foo(a, b, c):
        return a, b, c

    float_arr = arange(3, dtype=float)
    datetime_arr = arange(3).astype('datetime64[ns]')
    passthrough = object()

    # Matching dtypes: the exact same objects come back out.
    returned = foo(float_arr, datetime_arr, passthrough)
    for got, sent in zip(returned, (float_arr, datetime_arr, passthrough)):
        self.assertIs(got, sent)

    with self.assertRaises(TypeError) as ctx:
        foo(float_arr, arange(3), passthrough)
    self.assertEqual(
        ctx.exception.args[0],
        "{qualname}() expected a value with dtype 'datetime64[ns]'"
        " for argument 'b', but got 'int64' instead.".format(
            qualname=qualname(foo)
        ),
    )

    with self.assertRaises(TypeError) as ctx:
        foo(arange(3, dtype='uint32'), passthrough, passthrough)
    self.assertEqual(
        ctx.exception.args[0],
        "{qualname}() expected a value with dtype 'float64'"
        " for argument 'a', but got 'uint32' instead.".format(
            qualname=qualname(foo)
        ),
    )
def test_expect_dtypes_with_tuple(self):
    """A tuple of dtypes is accepted; both options are named in the error."""
    accepted_dtypes = (dtype('datetime64[ns]'), dtype('float'))

    @expect_dtypes(a=accepted_dtypes)
    def foo(a, b):
        return a, b

    for d in accepted_dtypes:
        arr = arange(3).astype(d)
        marker = object()
        out_arr, out_marker = foo(arr, marker)
        self.assertIs(arr, out_arr)
        self.assertIs(marker, out_marker)

    with self.assertRaises(TypeError) as ctx:
        foo(arange(3, dtype='uint32'), object())
    self.assertEqual(
        ctx.exception.args[0],
        "{qualname}() expected a value with dtype 'datetime64[ns]' "
        "or 'float64' for argument 'a', but got 'uint32' instead.".format(
            qualname=qualname(foo)
        ),
    )
def test_ensure_timezone(self):
    """ensure_timezone coerces strings to tzinfo and passes tzinfo through."""
    @preprocess(tz=ensure_timezone)
    def f(tz):
        return tz

    known_names = {'utc', 'EST', 'US/Eastern'}
    # Unfortunately, these are not actually timezones (yet).
    bogus_names = {'ayy', 'lmao'}

    for name in known_names:
        # Strings are coerced into pytz timezone objects ...
        self.assertEqual(f(name), pytz.timezone(name))
    for tzinfo_obj in map(pytz.timezone, known_names):
        # ... while tzinfo instances pass through unchanged.
        self.assertEqual(f(tzinfo_obj), tzinfo_obj)
    for name in bogus_names:
        self.assertRaises(pytz.UnknownTimeZoneError, f, name)
def test_optionally(self):
    """optionally() skips the wrapped preprocessor when the arg is None."""
    sentinel_error = TypeError('arg must be int')

    def require_int(func, argname, arg):
        if not isinstance(arg, int):
            raise sentinel_error
        return arg

    @preprocess(a=optionally(require_int))
    def f(a):
        return a

    self.assertIs(f(1), 1)
    # None bypasses require_int entirely.
    self.assertIsNone(f(None))
    with self.assertRaises(TypeError) as ctx:
        f('a')
    # The exact error object raised by the preprocessor propagates.
    self.assertIs(ctx.exception, sentinel_error)
| |
class OBJECTIVE ( object ) :
    """Code-generation model for one generated runtime type.

    An OBJECTIVE knows how to emit the C struct declaration, constructor,
    dump function and message-dispatch ("objective") function for the type
    it describes.  Instances register themselves with CORE on construction
    and take part in a single-inheritance flattening pass driven by fix().
    """

    # Class-wide counter handing out a unique, monotonically increasing
    # "pix" id to every objective created (see updatePix).
    current_pix = 0

    def __init__( self, name, attributes = None, objective = '', dump = None, inherit = None ) :
        # FIX: the defaults used to be mutable ([] / D() / []), which shares
        # a single list/instance across every call that omits the argument.
        # None-sentinels restore the intended fresh-per-call defaults.
        self.name = name
        self.attributes = [] if attributes is None else attributes
        self.objective = objective
        self.dump = D() if dump is None else dump
        inherit = [] if inherit is None else inherit
        # Every type implicitly inherits from ANY; never from itself.
        self.inherit = [ e for e in ( [ 'ANY' ] + inherit ) if e != name ]
        self.children = []
        self.updatePix()
        CORE.register_objective( self )

    def updatePix( self ) :
        # Grab the next free id and advance the shared counter.
        self.pix = OBJECTIVE.current_pix
        OBJECTIVE.current_pix += 1

    def create_objective_function( self ) :
        # C entry point invoked when a message is dispatched to this type.
        return FUNCTION( 'void ' + self.name + '_objective( ANY CONTEXT )', self.build_objective() )

    def create_dump_function( self ) :
        # Emits <name>_dump(), rendering "[<addr>:<name> <pattern>]" with the
        # dump format/values supplied at construction time (ANSI-colored).
        return FUNCTION( 'n_string ' + self.name + '_dump( ANY o )', '\n'.join( [
            self.name + ' object = $CAST(' + self.name + ',o) ;',
            '(void)object ;',
            'return nom_format_string( ' + ', '.join( [ '"[\033[0;34m%04zx:\033[0m%s' + ( ' ' if ( self.dump.pattern != '' ) else '' ) + self.dump.pattern + ']"', '(n_integer)o', '"' + self.name + '"' ] + list( self.dump.values ) ) + ' ) ;',
        ] ) )

    def build_base_allocation( self, n ) :
        # Common constructor prologue: allocate the struct and wire the
        # objective/pix/dump bookkeeping fields of the new object `n`.
        return """
        """ + self.name + """ """ + n + """ = $CAST(""" + self.name + """,nom_malloc( sizeof( struct """ + self.name + """_struct ) )) ;
        """ + n + """->objective = """ + self.name + """_objective ;
        """ + n + """->pix = """ + str( self.pix ) + """ ;
        """ + n + """->dump = """ + self.name + """_dump ;
        """

    def create_native_constructor_function( self ) :
        # <name>_new(...): base allocation followed by one setter call per
        # declared attribute, mirroring the constructor signature.
        return FUNCTION( self.name + ' ' + self.name + '_new( ' + self.build_native_constructor_signature() + ' )', '\n'.join( [
            self.build_base_allocation( 'new_object' ),
            '\n'.join( [
                a.build_set( 'new_object', a.name ) + ' ;'
                for a in self.attributes
            ] ),
            'return new_object ;',
        ] ) )

    def build_declare_object_struct( self ) :
        # struct <name>_struct { ... } with the three bookkeeping fields
        # first, so every generated struct shares a common prefix layout.
        return '\n'.join( [
            'struct ' + self.name + '_struct {',
            '\n'.join( [
                ' ' + a.type + ' ' + a.name + ' ;'
                for a in ( [
                    A( 'n_objective', 'objective' ),
                    A( 'n_integer', 'pix' ),
                    A( 'n_dump', 'dump' ),
                ] + self.attributes )
            ] ),
            '} ;'
        ] )

    def build_native_constructor_signature( self ) :
        # C requires an explicit `void` for an empty parameter list.
        if ( self.attributes == [] ) :
            return "void"
        else :
            return ', '.join( [
                a.type + ' ' + a.name
                for a in self.attributes
            ] )

    def get_action_function( self, pattern ) :
        # Plain objectives carry no methods: dispatch goes to the fallback.
        return 'JUMP__' + pattern.build_function_name() + '__fallback'

    def build_objective( self ) :
        return self.build_objective_box( self.objective )

    def build_objective_box( self, objective ) :
        # Wraps the objective body in the standard task-unpacking
        # do/while($FALSE) shell with a failure path for non-task contexts.
        return '\n'.join( [
            'do {',
            '$IFLET( task, FRAME__TASK, CONTEXT ) ;',
            '$IFLET( ACTION, ' + self.name + ', task->action ) ;',
            'ANY THAT = task->that ; (void)THAT ;',
            objective.strip(),
            'return ;',
            '} while ( $FALSE ) ;',
            '$OUT( OBJECTIVE applied on non-task context ) ;',
            'nom_fail( CONTEXT, "Unhandled message.", $NONE ) ;',
            'return ;',
        ] ).strip()

    def isOk( self ) :
        # "Ok" once every inherited ancestor has been merged in by fix().
        return self.inherit == []

    def fix( self ) :
        # Fold in the most recently listed unresolved ancestor; only an
        # ancestor that is itself fully resolved may be merged, then recurse.
        if not self.isOk() :
            obj = CORE.get_object( self.inherit[ -1 ] )
            if obj.isOk() :
                self.merge_inherit( obj )
                del self.inherit[ -1 ]
                self.fix()

    def merge_inherit( self, obj ) :
        # Inherit attributes: the parent's come first (common struct
        # prefix), followed by our own that the parent does not define.
        obj.children.append( self )
        self.attributes = obj.attributes + [ a for a in self.attributes if a not in obj.attributes ]

    def is_substruct_of( self, obj ) :
        # True when our attribute list starts with obj's attribute list,
        # i.e. our struct layout is a compatible extension of obj's.
        # NOTE(review): assumes len(self.attributes) >= len(obj.attributes);
        # an IndexError would follow otherwise -- confirm callers guarantee it.
        for i, a in enumerate( obj.attributes ) :
            if a != self.attributes[ i ] :
                return False
        return True

    def get_all_substructs_of( self, obj ) :
        # Depth-first collection of self plus all children that remain
        # layout-compatible with obj.
        if self.is_substruct_of( obj ) :
            substructs = [ self ]
            for sub in self.children :
                substructs += sub.get_all_substructs_of( obj )
            return substructs
        else :
            return []
class OBJECT ( OBJECTIVE ) :
    """An OBJECTIVE that additionally carries methods and dispatches
    incoming messages to them instead of falling back."""

    def __init__( self, name, attributes = None, methods = None, dump = None, inherit = None ) :
        # FIX: None-sentinels instead of mutable defaults (previously
        # [] / D(), shared across calls that omitted them).
        self.name = name
        self.methods = [] if methods is None else methods
        super( OBJECT, self ).__init__( name,
            inherit = [] if inherit is None else inherit,
            attributes = [] if attributes is None else attributes,
            dump = D() if dump is None else dump
        )
        CORE.register_object( self )

    def get_action_function( self, pattern ) :
        # Dispatch to the matching method's jump wrapper, or to the
        # generated failure stub when no method matches the pattern.
        method = self.get_method( pattern )
        if ( method ) :
            return method.this.name + '__' + method.pattern.build_function_name() + '__jump_wrapper'
        else :
            return 'JUMP__' + pattern.build_function_name() + '__fail'

    def build_primitive_dispatch( self ) :
        # Methods are emitted in buoyancy order (lower values first).
        return '\n'.join( [
            m.build_primitive_dispatch()
            for m in sorted( self.methods, key = lambda x : x.buoyancy )
        ] )

    def build_objective( self ) :
        # Collect every matching method into a reification array, then hand
        # the union of candidates back to the parent task.
        return self.build_objective_box( """
        ARRAY_MUTABLE_NOLOCK reification = nom_array_mutable_nolock_new() ;
        """ + self.build_primitive_dispatch() + """
        nom_do_sync( FRAME__TASK_new( task->parent, $CA(UNION_new( $CA(reification) )), THAT ) ) ;
        """ )

    def fix( self ) :
        # Same ancestor-folding as OBJECTIVE.fix, but once fully resolved
        # the merged methods are (re)bound to this object.
        if not self.isOk() :
            obj = CORE.get_object( self.inherit[ -1 ] )
            if obj.isOk() :
                self.merge_inherit( obj )
                del self.inherit[ -1 ]
                if self.isOk() :
                    self.attach_methods()
                else :
                    self.fix()

    def merge_inherit( self, obj ) :
        # Attributes merge as in OBJECTIVE; inherited methods are
        # deep-copied so re-attaching them here cannot mutate the parent's.
        obj.children.append( self )
        self.attributes = obj.attributes + [ a for a in self.attributes if a not in obj.attributes ]
        self.methods = self.methods + [ copy.deepcopy( m ) for m in obj.methods if m not in self.methods ]

    def attach_methods( self ) :
        # Bind every (possibly inherited) method to this object.
        for m in self.methods :
            m.attach( self )

    def get_method( self, pattern ) :
        # Linear scan; returns False (not None) when nothing matches.
        for m in self.methods :
            if ( m.has_signature( pattern ) ) :
                return m
        return False
class FRAME ( OBJECT ) :
    """An OBJECT representing a frame: its name is prefixed with FRAME__,
    it always inherits FRAME, always carries the default frame method MF(),
    and forwards unmatched messages instead of failing."""

    def __init__( self, name, attributes = None, methods = None, dump = None ) :
        # FIX: None-sentinels instead of mutable defaults (previously [] / D()).
        super( FRAME, self ).__init__( 'FRAME__' + name,
            inherit = [ 'FRAME' ],
            attributes = [] if attributes is None else attributes,
            methods = ( [] if methods is None else methods ) + [ MF() ],
            dump = D() if dump is None else dump
        )

    def get_action_function( self, pattern ) :
        # Like OBJECT, but unmatched messages are forwarded up the frame
        # chain rather than failing outright.
        method = self.get_method( pattern )
        if ( method ) :
            return method.this.name + '__' + method.pattern.build_function_name() + '__jump_wrapper'
        else :
            return 'JUMP__' + pattern.build_function_name() + '__forward'
class TYPE ( OBJECT ) :
    """An OBJECT describing a runtime type object: contributes the standard
    'test', 'consume' and 'tid' methods on top of any user-supplied ones."""

    def __init__( self, name, methods = None, dump = None, primitive = None ) :
        # FIX: None-sentinels instead of mutable defaults (previously [] / D()).
        methods = [] if methods is None else methods
        # Name of the backing C primitive, or None; enables the fast
        # consume path emitted by build_consume_opt().
        self.primitive = primitive
        super( TYPE, self ).__init__( name,
            inherit = [ 'TYPE' ],
            methods = [
                MS( ARG( CW( 'test' ), CG( 'ANY', 'object' ) ), """
                JUMP__produce_TID__""" + name + """_single( CONTEXT, PARAM_object, """ + name + """_single() ) ;
                """ ),
                MS( ARG( CW( 'consume' ), CG( 'LIST', 'phrase' ) ), '\n'.join( [
                    self.build_consume_opt(),
                    'nom_clause_consume( CONTEXT, $CA(ACTION), PARAM_phrase ) ;',
                ] ) ),
                MS( ARG( CW( 'tid' ) ), """
                JUMP__return_ANY( CONTEXT, CONTEXT, $CA(TID_new( """ + name + """_single() )) ) ;
                """ ),
            ] + methods,
            dump = D() if dump is None else dump
        )

    def build_consume_opt( self ) :
        # Primitive-backed types get a fast path: if the phrase is a
        # non-empty array whose head value already is the primitive,
        # return the phrase directly instead of running clause consumption.
        if self.primitive :
            return """
            $OPT(
            $IFLET( array, ARRAY_MUTABLE_NOLOCK, PARAM_phrase ) ;
            $ARRAY_MUTABLE_NOLOCK__NONEMPTY( CONTEXT, array ) ;
            ANY value = nom_array_mutable_nolock_value( array ) ;
            $IFLET_SUBSTRUCT( object, """ + self.primitive + """, value ) ;
            JUMP__return_ANY( CONTEXT, CONTEXT, PARAM_phrase );
            )
            """
        else :
            return ''
class CLASS ( OBJECT ) :
    """A user-visible class: an OBJECT plus a TID-identity method and an
    automatically created companion <name>_FACTORY type."""

    def __init__( self, name, attributes = None, methods = None, factory_methods = None, dump = None, inherit = None ) :
        # FIX: None-sentinels instead of mutable defaults (previously [] / D()).
        super( CLASS, self ).__init__( name,
            inherit = [] if inherit is None else inherit,
            attributes = [] if attributes is None else attributes,
            methods = [
                MTID_IS( name )
            ] + ( [] if methods is None else methods ),
            dump = D() if dump is None else dump
        )
        # Side effect: constructing the TYPE registers the factory with CORE.
        TYPE( name + '_FACTORY', [] if factory_methods is None else factory_methods, primitive = name )
class PRIMITIVE ( CLASS ) :
    """A CLASS backed by a C primitive: adds the TID-extract method and an
    automatically created companion <name>_EXTRACT_TYPE."""

    def __init__( self, name, attributes = None, methods = None, factory_methods = None, dump = None, inherit = None ) :
        # FIX: None-sentinels instead of mutable defaults (previously [] / D()).
        super( PRIMITIVE, self ).__init__( name,
            inherit = [] if inherit is None else inherit,
            attributes = [] if attributes is None else attributes,
            methods = [
                MTID_EXTRACT( name ),
            ] + ( [] if methods is None else methods ),
            factory_methods = [] if factory_methods is None else factory_methods,
            dump = D() if dump is None else dump
        )
        # Side effect: registers the extract type with CORE via TYPE().
        TYPE( name + '_EXTRACT_TYPE', primitive = name )
| |
#!/usr/bin/env python
###############
# @file geometry2D.py
# @author Douglas Appleegate
# @date 1/24/08
#
# @brief Some basic geometry tools
###############
__cvs_id__ = "$Id: geometry2D.py,v 1.3 2008-01-25 00:21:53 dapple Exp $"
###############
import numpy
from math import sqrt
import unittest
#####################################
#Geometry
#
# Note that this is only working for 2D geometry!
#
# Note that isOverlap function returns:
# 2 if reg is contained completely within self
# 1 if reg overlaps with self partially
# 0 if there is no overlap
class Region(object):
    """Base class for 2D regions, dispatching overlap tests by operand type.

    isOverlap(reg) returns 2 when reg lies completely inside self, 1 for a
    partial overlap, and 0 when the regions do not overlap at all.
    """
    def isOverlap(self, reg):
        if isinstance(reg, Circle):
            result = self.isOverlapWithCircle(reg)
        elif isinstance(reg, Polygon):
            result = self.isOverlapWithPolygon(reg)
        else:
            raise TypeError('Unsupported Type')
        return result
################
class Circle(Region):
    """A circle described by a 2D center point and a radius."""

    def __init__(self, center, radius):
        # Normalize the center to an ndarray so vector arithmetic works.
        if isinstance(center, numpy.ndarray):
            self.center = center
        else:
            self.center = numpy.array(center)
        self.radius = radius

    def containsPoint(self, point):
        """Return True when `point` lies inside or on the circle."""
        offset = self.center - point
        return sqrt(numpy.dot(offset, offset)) <= self.radius

    def isOverlapWithCircle(self, circ):
        """Overlap code vs. another circle: 0 none, 1 partial, 2 contains."""
        separation = self.center - circ.center
        center_distance = sqrt(numpy.dot(separation, separation))
        if center_distance >= (self.radius + circ.radius):
            return 0
        # circ fits entirely inside self when its far edge stays within us.
        if (center_distance + circ.radius) <= self.radius:
            return 2
        return 1

    def isOverlapWithPolygon(self, poly):
        """Overlap code vs. a polygon; 'circle inside polygon' counts as 1."""
        overlap = testPolygonCircleOverlap(poly=poly, circ=self)
        return 1 if overlap == -2 else overlap
#########
class Polygon(Region):
    '''An arbitrary polygon.

    `points` is a sequence of 2D vertices; the list does not repeat the
    first point at the end -- the closing edge is implicit.
    '''

    def __init__(self, points):
        self.points = points

    def containsPoint(self, point):
        '''Ray-casting point-in-polygon test (Jordan Curve Theorem). See
        http://tog.acm.org/editors/erich/ptinpoly/ and
        http://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/pnpoly.html
        for more details.
        '''
        inside = False
        # range() replaces the Python-2-only xrange(); j trails i so each
        # (j, i) pair is one edge, including the implicit closing edge.
        for i in range(len(self.points)):
            j = i - 1
            if ((self.points[i][1] <= point[1] < self.points[j][1]) or
                    (self.points[j][1] <= point[1] < self.points[i][1])) and \
                    (point[0] < ((self.points[j][0] - self.points[i][0]) *
                                 (point[1] - self.points[i][1]) /
                                 (self.points[j][1] - self.points[i][1]) +
                                 self.points[i][0])):
                inside = not inside
        return inside

    def isOverlapWithCircle(self, circ):
        '''Overlap code vs. a circle: 0 none, 1 partial, 2 circle-in-self.'''
        overlap = testPolygonCircleOverlap(poly=self, circ=circ)
        # testPolygonCircleOverlap reports from the polygon's point of view:
        # remap "polygon in circle" (2) to partial overlap (1) and
        # "circle in polygon" (-2) to containment (2).
        if overlap == 2:
            overlap = 1
        if overlap == -2:
            overlap = 2
        return overlap

    def isOverlapWithPolygon(self, poly):
        '''Overlap code vs. another polygon: 2 when poly is fully inside
        self, 1 for partial overlap or self inside poly, 0 otherwise.'''
        numPointsInside = 0
        for point in poly.points:
            if self.containsPoint(point):
                numPointsInside += 1
        if numPointsInside == len(poly.points):
            return 2
        numPointsInsideOther = 0
        for point in self.points:
            if poly.containsPoint(point):
                numPointsInsideOther += 1
        if numPointsInsideOther == len(self.points):
            return 1

        def linesIntersect(line1, line2):
            # Segment-intersection via cross-product sign tests, lifted to
            # 3D so numpy.cross yields a z component; identical segments
            # (in either orientation) do not count as intersecting.
            p1 = numpy.zeros(3)
            p2 = numpy.zeros(3)
            p3 = numpy.zeros(3)
            p4 = numpy.zeros(3)
            p1[0:2] = line1[0]
            p2[0:2] = line1[1]
            p3[0:2] = line2[0]
            p4[0:2] = line2[1]
            if ((p1 == p3).all() and (p2 == p4).all()) or \
                    ((p1 == p4).all() and (p2 == p3).all()):
                return False
            vec1 = p2 - p1
            vec2 = p4 - p3
            if numpy.cross(vec1, p3 - p1)[2] * numpy.cross(vec1, p4 - p1)[2] < 0 and \
                    numpy.cross(vec2, p2 - p3)[2] * numpy.cross(vec2, p1 - p3)[2] < 0:
                return True
            return False

        # No containment: the polygons still overlap if any pair of edges
        # crosses.  range() replaces the Python-2-only xrange().
        for i in range(len(self.points)):
            j = i - 1
            for k in range(len(poly.points)):
                l = k - 1
                if linesIntersect((self.points[i], self.points[j]),
                                  (poly.points[k], poly.points[l])):
                    return 1
        return 0
#######
def _edgeFormsChord(point1, point2, circ):
vert1 = numpy.array([point1[0], point1[1], 0])
vert2 = numpy.array([point2[0], point2[1], 0])
center = numpy.array([circ.center[0], circ.center[1], 0])
vec1 = vert2 - vert1
vec2 = center - vert1
vec3 = center - vert2
d1 = sqrt(numpy.dot(vec1, vec1))
dist2Line = numpy.cross(vec1,vec2)[2]/d1
if abs(dist2Line) > circ.radius:
return False
if numpy.dot(vec1, vec2) * numpy.dot(vec1, vec3) < 0:
return True
return False
def testPolygonCircleOverlap(poly, circ):
''' returns: 2 if polygon is in circ
1 if overlap
0 for no overlap
-2 if circ in polygon
'''
numVertexsInside = 0
for point in poly.points:
if circ.containsPoint(point):
numVertexsInside += 1
if numVertexsInside == len(poly.points):
return 2
if numVertexsInside > 0:
return 1
if numVertexsInside == 0 and poly.containsPoint(circ.center):
return -2
for i in xrange(len(poly.points)):
j = i-1
if _edgeFormsChord(poly.points[i], poly.points[j], circ):
return 1
return 0
##########
class Rectangle(Polygon):
    """Axis-aligned rectangle given as coord = [[xmin, xmax], [ymin, ymax]]."""

    def __init__(self, coord):
        self.coord = coord
        # Expand the two intervals into the four corner vertices:
        # (xmin,ymin), (xmax,ymin), (xmax,ymax), (xmin,ymax).
        vertexList = numpy.zeros((4, 2))
        vertexList[0][0] = vertexList[3][0] = self.coord[0][0]
        vertexList[1][0] = vertexList[2][0] = self.coord[0][1]
        vertexList[0][1] = vertexList[1][1] = self.coord[1][0]
        vertexList[2][1] = vertexList[3][1] = self.coord[1][1]
        Polygon.__init__(self, vertexList)

    def containsPoint(self, point):
        """Per-axis interval test; cheaper than the generic polygon test."""
        # range() replaces the Python-2-only xrange().
        for axis in range(len(self.coord)):
            if point[axis] < self.coord[axis][0] or point[axis] > self.coord[axis][1]:
                return False
        return True
#########
class TestIsOverlapCircle(unittest.TestCase):
    """Overlap codes reported by a unit circle centered at the origin."""

    def setUp(self):
        self.c1 = Circle(numpy.zeros(2), 1)

    def testNoOverlapCircle(self):
        far_circle = Circle(numpy.array([5, 5]), 1)
        self.assertEqual(self.c1.isOverlap(far_circle), 0)

    def testSomeOverlapCircle(self):
        clipping = Circle(numpy.array([1.2, 0]), .4)
        self.assertEqual(self.c1.isOverlap(clipping), 1)

    def testContainsCircle(self):
        inner = Circle(numpy.array([.3, 0]), .1)
        self.assertEqual(self.c1.isOverlap(inner), 2)

    def testNoOverlapPolygon(self):
        far_triangle = Polygon(numpy.array([[5, 5], [6, 6], [7, 0]]))
        self.assertEqual(self.c1.isOverlap(far_triangle), 0)

    def testSomeOverlapPolygon(self):
        crossing_quad = Polygon(numpy.array([[.5, 3], [.5, -3], [5, -4], [5, 4]]))
        self.assertEqual(self.c1.isOverlap(crossing_quad), 1)

    def testContainsPolygon(self):
        inner_triangle = Polygon(numpy.array([[.2, 0], [0, .2], [-.1, -.1]]))
        self.assertEqual(self.c1.isOverlap(inner_triangle), 2)

    def testNoOverlapPolygonCornerCase1(self):
        # A polygon whose nearest edge passes close to, but outside, the circle.
        near_miss = Polygon(numpy.array([[0, 5], [5, 5], [0, 4]]))
        self.assertEqual(self.c1.isOverlap(near_miss), 0)

    def testIsContainedInPolygon(self):
        # A circle inside a polygon is reported as partial overlap (1).
        big_square = Polygon(numpy.array([[10, 10], [-10, 10], [-10, -10], [10, -10]]))
        self.assertEqual(self.c1.isOverlap(big_square), 1)
###########
class TestIsOverlapSimplePolygon(unittest.TestCase):
    """Overlap codes reported by a small convex triangle."""

    def setUp(self):
        self.poly = Polygon(numpy.array([[0, 1], [1, -1], [-1, -1]]))

    def testNoOverlapCircle(self):
        distant = Circle(numpy.array([5, 0]), 2)
        self.assertEqual(self.poly.isOverlap(distant), 0)

    def testContainsCircle(self):
        inner = Circle(numpy.array([.1, 0]), .1)
        self.assertEqual(self.poly.isOverlap(inner), 2)

    def testSomeOverlapCircle1(self):
        # Circle clipping the polygon near the top vertex.
        clipping_top = Circle(numpy.array([0, 1.2]), .5)
        self.assertEqual(self.poly.isOverlap(clipping_top), 1)

    def testSomeOverlapCircle2(self):
        # Circle clipping the polygon near the bottom edge.
        clipping_bottom = Circle(numpy.array([0, -1.2]), .5)
        self.assertEqual(self.poly.isOverlap(clipping_bottom), 1)

    def testIsContainedInCircle(self):
        # Polygon fully inside the circle still reports partial overlap (1).
        surrounding = Circle(numpy.array([.1, 0]), 6)
        self.assertEqual(self.poly.isOverlap(surrounding), 1)

    def testNoOverlapPolygon(self):
        disjoint = Polygon(numpy.array([[0, 1], [1, -1], [2, 2]]))
        self.assertEqual(self.poly.isOverlap(disjoint), 0)

    def testContainsPolygon(self):
        inner = Polygon(numpy.array([[0, .5], [.15, 0], [-.15, 0]]))
        self.assertEqual(self.poly.isOverlap(inner), 2)

    def testIsContainedInPolygon(self):
        surrounding = Polygon(numpy.array([[0, 10], [5, -3], [-5, -3]]))
        self.assertEqual(self.poly.isOverlap(surrounding), 1)

    def testSomeOverlapPolygon1(self):
        crossing = Polygon(numpy.array([[0, 0], [5, 0], [5, 1]]))
        self.assertEqual(self.poly.isOverlap(crossing), 1)

    def testSomeOverlapPolygon2(self):
        crossing = Polygon(numpy.array([[1, 1], [0, -1.5], [-1, 1]]))
        self.assertEqual(self.poly.isOverlap(crossing), 1)
class TestIsOverlapConcavePolygon(unittest.TestCase):
    """Overlap codes reported by a concave polygon (vertex at the origin
    pushes a wedge into the bottom of the square)."""

    def setUp(self):
        self.poly = Polygon(numpy.array([[0, 0], [1, -1], [1, 1], [-1, 1], [-1, -1]]))

    def testNoOverlapCircle(self):
        # Small circle sitting inside the concave notch.
        in_notch = Circle(numpy.array([0, -.1]), .05)
        self.assertEqual(self.poly.isOverlap(in_notch), 0)

    def testContainsCircle(self):
        inner = Circle(numpy.array([.5, -.5]), .05)
        self.assertEqual(self.poly.isOverlap(inner), 2)

    def testSomeOverlapCircle1(self):
        clipping = Circle(numpy.array([1, -1.1]), .3)
        self.assertEqual(self.poly.isOverlap(clipping), 1)

    def testSomeOverlapCircle2(self):
        clipping = Circle(numpy.array([-.5, -.6]), .15)
        self.assertEqual(self.poly.isOverlap(clipping), 1)

    def testNoOverlapPolygon(self):
        # Triangle nestled into the notch without touching the polygon.
        in_notch = Polygon(numpy.array([[0, -.05], [.9, -1], [-.9, -1]]))
        self.assertEqual(self.poly.isOverlap(in_notch), 0)
###########
if __name__ == '__main__':
    # Gather every overlap test case into one master suite and run it.
    loader = unittest.TestLoader()
    case_classes = (
        TestIsOverlapCircle,
        TestIsOverlapSimplePolygon,
        TestIsOverlapConcavePolygon,
    )
    masterSuite = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case) for case in case_classes])
    unittest.TextTestRunner(verbosity=2).run(masterSuite)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslcertkey_sslvserver_binding(base_resource) :
    """ Binding class showing the sslvserver that can be bound to sslcertkey.

    Note: the generated accessors originally wrapped every one-line body in
    a no-op ``try: ... except Exception as e: raise e`` block; those dead
    wrappers have been removed throughout -- behavior is unchanged.
    """
    def __init__(self) :
        # Fields are populated by the nitro payload formatter on GET.
        self._servername = ""
        self._data = 0
        self._version = 0
        self._certkey = ""
        self._vservername = ""
        self._vserver = False
        self._ca = False
        self._crlcheck = ""
        self.___count = 0

    @property
    def vserver(self) :
        """Specify this option to bind the certificate to an SSL virtual server.
        Note: The default option is -vServer.
        """
        return self._vserver

    @vserver.setter
    def vserver(self, vserver) :
        """Specify this option to bind the certificate to an SSL virtual server.
        Note: The default option is -vServer.
        """
        self._vserver = vserver

    @property
    def crlcheck(self) :
        """The rule for use of CRL corresponding to this CA certificate during client authentication. If crlCheck is set to Mandatory, the system will deny all SSL clients if the CRL is missing, expired - NextUpdate date is in the past, or is incomplete with remote CRL refresh enabled. If crlCheck is set to optional, the system will allow SSL clients in the above error cases.However, in any case if the client certificate is revoked in the CRL, the SSL client will be denied access.<br/>Default value: CRLCHECK_OPTIONAL<br/>Possible values = Mandatory, Optional.
        """
        return self._crlcheck

    @crlcheck.setter
    def crlcheck(self, crlcheck) :
        """The rule for use of CRL corresponding to this CA certificate during client authentication.<br/>Default value: CRLCHECK_OPTIONAL<br/>Possible values = Mandatory, Optional (see the Crlcheck nested class).
        """
        self._crlcheck = crlcheck

    @property
    def ca(self) :
        """If this option is specified, it indicates that the certificate-key pair being bound to the SSL virtual server is a CA certificate. If this option is not specified, the certificate-key pair is bound as a normal server certificate.
        Note: In case of a normal server certificate, the certificate-key pair should consist of both the certificate and the private-key.
        """
        return self._ca

    @ca.setter
    def ca(self, ca) :
        """If this option is specified, it indicates that the certificate-key pair being bound to the SSL virtual server is a CA certificate. If this option is not specified, the certificate-key pair is bound as a normal server certificate.
        Note: In case of a normal server certificate, the certificate-key pair should consist of both the certificate and the private-key.
        """
        self._ca = ca

    @property
    def vservername(self) :
        """The name of the SSL virtual server name to which the certificate-key pair needs to be bound.
        """
        return self._vservername

    @vservername.setter
    def vservername(self, vservername) :
        """The name of the SSL virtual server name to which the certificate-key pair needs to be bound.
        """
        self._vservername = vservername

    @property
    def servername(self) :
        """Vserver name to which the certificate key pair is bound.
        """
        return self._servername

    @servername.setter
    def servername(self, servername) :
        """Vserver name to which the certificate key pair is bound.
        """
        self._servername = servername

    @property
    def certkey(self) :
        """Name of the certificate-key pair.<br/>Minimum length = 1.
        """
        return self._certkey

    @certkey.setter
    def certkey(self, certkey) :
        """Name of the certificate-key pair.<br/>Minimum length = 1
        """
        self._certkey = certkey

    @property
    def version(self) :
        """Version. (read-only)
        """
        return self._version

    @property
    def data(self) :
        """Vserver Id. (read-only)
        """
        return self._data

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        result = service.payload_formatter.string_to_resource(sslcertkey_sslvserver_binding_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            if (result.errorcode == 444) :
                # Session expired on the appliance: drop the stale session.
                service.clear_session(self)
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # No severity reported: treat any non-zero errorcode as fatal.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.sslcertkey_sslvserver_binding

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        if (self.certkey) :
            return str(self.certkey)
        return None

    @classmethod
    def get(cls, service, certkey) :
        """ Use this API to fetch sslcertkey_sslvserver_binding resources.
        """
        obj = sslcertkey_sslvserver_binding()
        obj.certkey = certkey
        response = obj.get_resources(service)
        return response

    @classmethod
    def get_filtered(cls, service, certkey, filter_) :
        """ Use this API to fetch filtered set of sslcertkey_sslvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = sslcertkey_sslvserver_binding()
        obj.certkey = certkey
        option_ = options()
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        return response

    @classmethod
    def count(cls, service, certkey) :
        """ Use this API to count sslcertkey_sslvserver_binding resources configured on NetScaler.
        """
        obj = sslcertkey_sslvserver_binding()
        obj.certkey = certkey
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response :
            # The appliance reports the total in the '___count' attribute set
            # by the payload formatter; read via __dict__ to bypass the name
            # mangling a direct triple-underscore attribute access would get.
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, certkey, filter_) :
        """ Use this API to count the filtered set of sslcertkey_sslvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = sslcertkey_sslvserver_binding()
        obj.certkey = certkey
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response :
            return response[0].__dict__['___count']
        return 0

    class Crlcheck:
        # Allowed values for the crlcheck property.
        Mandatory = "Mandatory"
        Optional = "Optional"
class sslcertkey_sslvserver_binding_response(base_response) :
    """Typed response envelope for sslcertkey_sslvserver_binding GET calls."""
    def __init__(self, length=1) :
        # FIX: the binding list was assigned twice (first to [], then to the
        # pre-sized list); the dead first assignment has been removed.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the result list so the payload formatter can fill
        # `length` slots in place.
        self.sslcertkey_sslvserver_binding = [sslcertkey_sslvserver_binding() for _ in range(length)]
| |
"""Chain is a tiny tool for performing data transformation and data analysis
by successive function calls and successive generator consumption. For example:
>>> from chain import given, ANS
>>> given("abcd")(reversed)(c.upper() for c in ANS)(list).end
['D', 'C', 'B', 'A']
The 'reversed' function runs with "abcd" as its argument. Then the generator
expression iterates over the 'ANS' constant. 'ANS' stores the result returned
by 'reversed'. Next, the generator turns each character in the string to
uppercase. Then the 'list' function is called with the generator. Finally,
looking up the '.end' property yields the result of the execution.
"""
import collections.abc
import dis
import functools
import types
__all__ = ["ANS", "Link", "given", "unpack", "Cascade"]
# Check if the generator have more than one "for statement".
def _have_nested_for_statement(generator):
matched = [True for instruction in dis.get_instructions(generator)
if instruction.opname == "FOR_ITER"]
return True if len(matched) > 1 else False
# Replace each `ANS` item in the tuple with the given `obj`
def _replace_ans_in_args(obj, args):
    """Lazily yield `args` with every ANS sentinel swapped for `obj`."""
    for item in args:
        yield obj if item is ANS else item
# Replace each `ANS` value in the dict with the given `obj`
def _replace_ans_in_kwargs(obj, kwargs):
    """Return a copy of `kwargs` with every ANS sentinel value swapped for
    `obj`."""
    replaced = {}
    for key, value in kwargs.items():
        replaced[key] = obj if value is ANS else value
    return replaced
# Transforms a method into a single-dispatch generic method.
def _single_dispatch_method(method):
dispatcher = functools.singledispatch(method)
def wrapper(*args, **kwargs):
if len(args) > 1:
return dispatcher.dispatch(args[1].__class__)(*args, **kwargs)
else:
# An args tuple with only one element means that the user
# is trying to lookup a property of the last answer.
return args[0]
wrapper.register = dispatcher.register
functools.update_wrapper(wrapper, method)
return wrapper
# This class will be used to store the output of the
# previous call if ANS is used in a generator expression.
#
# See the NOTE [1]
class _PreviousGenerator:
def __init__(self):
self.ANS = None
def __next__(self):
return next(self.ANS)
# Implements the minimun protocol to have an iterable.
class _LastAnswer:
"""This constant will be used to collect the output of the previous
function or store the previous generator defined in the chain.
"""
def __iter__(self):
return _PreviousGenerator()
def __repr__(self):
return "ANS"
ANS = _LastAnswer()
class Link:
    """Implements the successive call pattern. Always returns itself.

    >>> link = Link("abcd")
    >>> link(reversed)
    <Link object at 0x7fe2a91b6f28>
    >>> link(list) is link
    True

    The running result is kept in the `end` attribute; every call
    replaces it with the newest answer.
    """
    def __init__(self, obj):
        # `end` always holds the latest answer in the chain.
        self.end = obj

    # Raises an error if the instruction is not a callable or generator.
    # `__call__` dispatches on the type of `instruction` (the second
    # positional argument); typed implementations attach via `register`.
    @_single_dispatch_method
    def __call__(self, instruction, *args, **kwargs):
        description = "Expected 'callable' or 'generator'. Got '%s'"
        raise TypeError(description % instruction.__class__.__name__)

    # Evaluates the function instruction.
    @__call__.register(collections.abc.Callable)
    def _(self, function, *args, **kwargs):
        """Apply `function`.  Any ANS placeholder among args/kwargs is
        replaced with the previous answer; when no ANS is present, the
        previous answer is passed as the first positional argument."""
        has_ans_constant = False
        if ANS in args:
            has_ans_constant = True
            args = _replace_ans_in_args(self.end, args)
        if ANS in kwargs.values():
            has_ans_constant = True
            kwargs = _replace_ans_in_kwargs(self.end, kwargs)
        # Now the result of this function is the
        # input of the next instruction in the chain.
        if has_ans_constant:
            self.end = function(*args, **kwargs)
        else:
            self.end = function(self.end, *args, **kwargs)
        return self

    # Creates a Generator with the last answer as iterable.
    @__call__.register(collections.abc.Generator)
    def _(self, generator, *args, **kwargs):
        """Chain a generator expression that iterates over the ANS sentinel."""
        if args or kwargs:
            description = "Can not accept arguments if you pass "\
                          "a generator at first (%d given)."
            count = len(args) + len(kwargs)
            raise TypeError(description % count)
        if _have_nested_for_statement(generator):
            raise SyntaxError("Multiple for statement are not allowed.")
        # NOTE: In CPython, all generator expressions stores the iterable of
        # the first "for statement" in a local constant called ".0", the
        # "dot zero" constant.  (CPython implementation detail -- this class
        # is not expected to work on interpreters without that frame local.)
        if isinstance(generator.gi_frame.f_locals[".0"], _PreviousGenerator):
            # NOTE [1]: Now the current generator can iterate
            # over the output of the previous call
            generator.gi_frame.f_locals[".0"].ANS = iter(self.end)
            # Now the result of this function is the
            # input of the next instruction in the chain.
            self.end = generator
        else:
            description = "Can not iterate over '%s', 'ANS' constant only."
            class_name = generator.gi_frame.f_locals[".0"].__class__.__name__
            raise ValueError(description % class_name)
        return self

    # lookup the property of the last answer
    def __getattr__(self, attribute):
        """Treat attribute access as a method call on the last answer:
        `link.foo(...)` invokes `self.end.foo(...)` and chains the result."""
        method = getattr(self.end, attribute)
        def wrapper(*args, **kwargs):
            # Now the result of this function is the
            # input of the next instruction in the chain.
            self.end = method(*args, **kwargs)
            return self
        functools.update_wrapper(wrapper, method)
        return wrapper
def given(obj):
    """Return an object that implements the successive-calls pattern.

    >>> given("abcd")(reversed)(list).end
    ['d', 'c', 'b', 'a']
    """
    return Link(obj)
class Cascade:
    """ An adapter class which turns any object
    into one with methods that can be chained.
    >>> from chain import Cascade
    >>> result = Cascade([]).append(2).append(1).reverse().append(3).end
    >>> result
    [1, 2, 3]
    """

    def __init__(self, obj):
        self.end = obj

    def __getattr__(self, name):
        target = getattr(self.end, name)
        if not callable(target):
            # Plain attributes pass straight through.
            return target

        def selfie(*args, **kwargs):
            # Call the method just for its side effects; returning self
            # lets the next call be chained off the same Cascade.
            target(*args, **kwargs)
            return self

        return selfie
def unpack(obj, function):
    """Call *function* with the unpacked object and return the result.

    Mappings are splatted as keyword arguments, other sequences as
    positional arguments, and anything else is passed through as a single
    argument.

    >>> add = lambda a, b: a + b
    >>> args = (1, 2)
    >>> assert unpack(args, add) == add(*args)
    >>> kwargs = dict(a=1, b=2)
    >>> assert unpack(kwargs, add) == add(**kwargs)
    """
    if isinstance(obj, collections.abc.Mapping):
        return function(**obj)
    if isinstance(obj, collections.abc.Sequence):
        return function(*obj)
    return function(obj)
| |
"""Test different accessory types: Fans."""
from pyhap.const import HAP_REPR_AID, HAP_REPR_CHARS, HAP_REPR_IID, HAP_REPR_VALUE
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PERCENTAGE_STEP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN,
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_PRESET_MODE,
SUPPORT_SET_SPEED,
)
from homeassistant.components.homekit.const import ATTR_VALUE, PROP_MIN_STEP
from homeassistant.components.homekit.type_fans import Fan
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry
from tests.common import async_mock_service
async def test_fan_basic(hass, hk_driver, events):
    """Test fan with char state."""
    entity_id = "fan.demo"
    hass.states.async_set(entity_id, STATE_ON, {ATTR_SUPPORTED_FEATURES: 0})
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)

    def _payload(iid, value):
        # Minimal HAP set-characteristics request for this accessory.
        return {
            HAP_REPR_CHARS: [
                {HAP_REPR_AID: acc.aid, HAP_REPR_IID: iid, HAP_REPR_VALUE: value}
            ]
        }

    assert acc.aid == 1
    assert acc.category == 3  # Fan
    assert acc.char_active.value == 1
    # If there are no speed_list values, then HomeKit speed is unsupported
    assert acc.char_speed is None

    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_active.value == 1

    hass.states.async_set(entity_id, STATE_OFF, {ATTR_SUPPORTED_FEATURES: 0})
    await hass.async_block_till_done()
    assert acc.char_active.value == 0

    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    assert acc.char_active.value == 0

    hass.states.async_remove(entity_id)
    await hass.async_block_till_done()
    assert acc.char_active.value == 0

    # Set from HomeKit
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]

    hk_driver.set_characteristics(_payload(char_active_iid, 1), "mock_addr")
    await hass.async_block_till_done()
    assert call_turn_on
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None

    hass.states.async_set(entity_id, STATE_ON)
    await hass.async_block_till_done()

    hk_driver.set_characteristics(_payload(char_active_iid, 0), "mock_addr")
    await hass.async_block_till_done()
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
async def test_fan_direction(hass, hk_driver, events):
    """Test fan with direction."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_SUPPORTED_FEATURES: SUPPORT_DIRECTION, ATTR_DIRECTION: DIRECTION_FORWARD},
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)

    def _payload(iid, value):
        # Minimal HAP set-characteristics request for this accessory.
        return {
            HAP_REPR_CHARS: [
                {HAP_REPR_AID: acc.aid, HAP_REPR_IID: iid, HAP_REPR_VALUE: value}
            ]
        }

    assert acc.char_direction.value == 0

    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_direction.value == 0

    hass.states.async_set(entity_id, STATE_ON, {ATTR_DIRECTION: DIRECTION_REVERSE})
    await hass.async_block_till_done()
    assert acc.char_direction.value == 1

    # Set from HomeKit
    call_set_direction = async_mock_service(hass, DOMAIN, "set_direction")
    char_direction_iid = acc.char_direction.to_HAP()[HAP_REPR_IID]

    hk_driver.set_characteristics(_payload(char_direction_iid, 0), "mock_addr")
    await hass.async_block_till_done()
    assert call_set_direction[0]
    assert call_set_direction[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[0].data[ATTR_DIRECTION] == DIRECTION_FORWARD
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == DIRECTION_FORWARD

    hk_driver.set_characteristics(_payload(char_direction_iid, 1), "mock_addr")
    await hass.async_add_executor_job(acc.char_direction.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_set_direction[1]
    assert call_set_direction[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[1].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == DIRECTION_REVERSE
async def test_fan_oscillate(hass, hk_driver, events):
    """Test fan with oscillate."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_SUPPORTED_FEATURES: SUPPORT_OSCILLATE, ATTR_OSCILLATING: False},
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)

    def _payload(iid, value):
        # Minimal HAP set-characteristics request for this accessory.
        return {
            HAP_REPR_CHARS: [
                {HAP_REPR_AID: acc.aid, HAP_REPR_IID: iid, HAP_REPR_VALUE: value}
            ]
        }

    assert acc.char_swing.value == 0

    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_swing.value == 0

    hass.states.async_set(entity_id, STATE_ON, {ATTR_OSCILLATING: True})
    await hass.async_block_till_done()
    assert acc.char_swing.value == 1

    # Set from HomeKit
    call_oscillate = async_mock_service(hass, DOMAIN, "oscillate")
    char_swing_iid = acc.char_swing.to_HAP()[HAP_REPR_IID]

    hk_driver.set_characteristics(_payload(char_swing_iid, 0), "mock_addr")
    await hass.async_add_executor_job(acc.char_swing.client_update_value, 0)
    await hass.async_block_till_done()
    assert call_oscillate[0]
    assert call_oscillate[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[0].data[ATTR_OSCILLATING] is False
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is False

    hk_driver.set_characteristics(_payload(char_swing_iid, 1), "mock_addr")
    await hass.async_add_executor_job(acc.char_swing.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_oscillate[1]
    assert call_oscillate[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[1].data[ATTR_OSCILLATING] is True
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is True
async def test_fan_speed(hass, hk_driver, events):
    """Test fan with speed."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED,
            ATTR_PERCENTAGE: 0,
            ATTR_PERCENTAGE_STEP: 25,
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)

    def _payload(iid, value):
        # Minimal HAP set-characteristics request for this accessory.
        return {
            HAP_REPR_CHARS: [
                {HAP_REPR_AID: acc.aid, HAP_REPR_IID: iid, HAP_REPR_VALUE: value}
            ]
        }

    # Initial value can be anything but 0. If it is 0, it might cause HomeKit
    # to set the speed to 100 when turning on a fan on a freshly booted up
    # server.
    assert acc.char_speed.value != 0
    assert acc.char_speed.properties[PROP_MIN_STEP] == 25

    await acc.run()
    await hass.async_block_till_done()

    hass.states.async_set(entity_id, STATE_ON, {ATTR_PERCENTAGE: 100})
    await hass.async_block_till_done()
    assert acc.char_speed.value == 100

    # Set from HomeKit
    call_set_percentage = async_mock_service(hass, DOMAIN, "set_percentage")
    char_speed_iid = acc.char_speed.to_HAP()[HAP_REPR_IID]
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]

    hk_driver.set_characteristics(_payload(char_speed_iid, 42), "mock_addr")
    await hass.async_add_executor_job(acc.char_speed.client_update_value, 42)
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 1
    assert call_set_percentage[0]
    assert call_set_percentage[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[0].data[ATTR_PERCENTAGE] == 42
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == 42

    # Verify speed is preserved from off to on
    hass.states.async_set(entity_id, STATE_OFF, {ATTR_PERCENTAGE: 42})
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 0

    hk_driver.set_characteristics(_payload(char_active_iid, 1), "mock_addr")
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 1
async def test_fan_set_all_one_shot(hass, hk_driver, events):
    """Test setting speed, oscillation and direction in a single HAP request."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    # Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
    # speed to 100 when turning on a fan on a freshly booted up server.
    assert acc.char_speed.value != 0
    await acc.run()
    await hass.async_block_till_done()
    hass.states.async_set(
        entity_id,
        STATE_OFF,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OFF
    # Set from HomeKit
    call_set_percentage = async_mock_service(hass, DOMAIN, "set_percentage")
    call_oscillate = async_mock_service(hass, DOMAIN, "oscillate")
    call_set_direction = async_mock_service(hass, DOMAIN, "set_direction")
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
    char_direction_iid = acc.char_direction.to_HAP()[HAP_REPR_IID]
    char_swing_iid = acc.char_swing.to_HAP()[HAP_REPR_IID]
    char_speed_iid = acc.char_speed.to_HAP()[HAP_REPR_IID]
    # Fan is off: activate + set speed/swing/direction in one request.
    # Setting a speed must not also call turn_on (asserted below).
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert not call_turn_on
    assert call_set_percentage[0]
    assert call_set_percentage[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[0].data[ATTR_PERCENTAGE] == 42
    assert call_oscillate[0]
    assert call_oscillate[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[0].data[ATTR_OSCILLATING] is True
    assert call_set_direction[0]
    assert call_set_direction[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[0].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    # One logbook event per service call (oscillate, set_direction, set_percentage).
    assert len(events) == 3
    assert events[0].data[ATTR_VALUE] is True
    assert events[1].data[ATTR_VALUE] == DIRECTION_REVERSE
    assert events[2].data[ATTR_VALUE] == 42
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    # Same one-shot request while the fan is already on.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    # Turn on should not be called if its already on
    # and we set a fan speed
    await hass.async_block_till_done()
    assert len(events) == 6
    assert call_set_percentage[1]
    assert call_set_percentage[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[1].data[ATTR_PERCENTAGE] == 42
    assert call_oscillate[1]
    assert call_oscillate[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[1].data[ATTR_OSCILLATING] is True
    assert call_set_direction[1]
    assert call_set_direction[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[1].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    assert events[-3].data[ATTR_VALUE] is True
    assert events[-2].data[ATTR_VALUE] == DIRECTION_REVERSE
    assert events[-1].data[ATTR_VALUE] == 42
    # Deactivating in the same request as speed/swing/direction should only
    # call turn_off — the other services must not fire again.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 0,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(events) == 7
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(call_set_percentage) == 2
    assert len(call_oscillate) == 2
    assert len(call_set_direction) == 2
async def test_fan_restore(hass, hk_driver, events):
    """Test setting up an entity from state in the event registry."""
    hass.state = CoreState.not_running

    registry = await entity_registry.async_get_registry(hass)
    # One bare entity and one with full capability/feature metadata.
    registry.async_get_or_create(
        "fan",
        "generic",
        "1234",
        suggested_object_id="simple",
    )
    registry.async_get_or_create(
        "fan",
        "generic",
        "9012",
        suggested_object_id="all_info_set",
        capabilities={"speed_list": ["off", "low", "medium", "high"]},
        supported_features=SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION,
        device_class="mock-device-class",
    )

    hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
    await hass.async_block_till_done()

    simple = Fan(hass, hk_driver, "Fan", "fan.simple", 2, None)
    assert simple.category == 3
    assert simple.char_active is not None
    assert simple.char_direction is None
    assert simple.char_speed is None
    assert simple.char_swing is None

    full = Fan(hass, hk_driver, "Fan", "fan.all_info_set", 2, None)
    assert full.category == 3
    assert full.char_active is not None
    assert full.char_direction is not None
    assert full.char_speed is not None
    assert full.char_swing is not None
async def test_fan_preset_modes(hass, hk_driver, events):
    """Test fan with preset modes."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_PRESET_MODE,
            ATTR_PRESET_MODE: "auto",
            ATTR_PRESET_MODES: ["auto", "smart"],
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)

    def _payload(iid, value):
        # Minimal HAP set-characteristics request for this accessory.
        return {
            HAP_REPR_CHARS: [
                {HAP_REPR_AID: acc.aid, HAP_REPR_IID: iid, HAP_REPR_VALUE: value}
            ]
        }

    assert acc.preset_mode_chars["auto"].value == 1
    assert acc.preset_mode_chars["smart"].value == 0

    await acc.run()
    await hass.async_block_till_done()

    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_PRESET_MODE,
            ATTR_PRESET_MODE: "smart",
            ATTR_PRESET_MODES: ["auto", "smart"],
        },
    )
    await hass.async_block_till_done()
    assert acc.preset_mode_chars["auto"].value == 0
    assert acc.preset_mode_chars["smart"].value == 1

    # Set from HomeKit
    call_set_preset_mode = async_mock_service(hass, DOMAIN, "set_preset_mode")
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    char_auto_iid = acc.preset_mode_chars["auto"].to_HAP()[HAP_REPR_IID]

    hk_driver.set_characteristics(_payload(char_auto_iid, 1), "mock_addr")
    await hass.async_block_till_done()
    assert call_set_preset_mode[0]
    assert call_set_preset_mode[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_preset_mode[0].data[ATTR_PRESET_MODE] == "auto"
    assert len(events) == 1
    assert events[-1].data["service"] == "set_preset_mode"

    hk_driver.set_characteristics(_payload(char_auto_iid, 0), "mock_addr")
    await hass.async_block_till_done()
    assert call_turn_on[0]
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert events[-1].data["service"] == "turn_on"
    assert len(events) == 2
| |
# Standard library
import inspect
# Third-party
import astropy.units as u
from astropy.utils import isiterable
import numpy as np
# Project
from ..util import atleast_2d
from ..units import UnitSystem, DimensionlessUnitSystem
class PotentialParameter:
    """Describes a single parameter needed by the potential classes.

    Parameters
    ----------
    name : str
        The name of the parameter. For example, "m" for mass.
    physical_type : str (optional)
        The physical type (as defined by `astropy.units`) of the expected
        physical units that this parameter is in. For example, "mass" for a
        mass parameter.
    default : numeric, str, array (optional)
        The default value of the parameter.
    equivalencies : `astropy.units.equivalencies.Equivalency` (optional)
        Any equivalencies required for the parameter.
    """

    def __init__(self, name, physical_type="dimensionless", default=None,
                 equivalencies=None):
        # TODO: could add a "shape" argument?
        # TODO: need better sanitization and validation here
        self.name, self.physical_type = str(name), str(physical_type)
        self.default = default
        self.equivalencies = equivalencies

    def __repr__(self):
        return "<PotentialParameter: {} [{}]>".format(self.name,
                                                      self.physical_type)
class CommonBase:
    """Shared machinery for Potential and Frame classes.

    On subclassing, collects every `PotentialParameter` class attribute into
    ``cls._parameters`` and synthesizes a matching ``__signature__`` so the
    subclass advertises its physical parameters in its call signature. Also
    provides helpers to validate unit systems and to parse, unit-check, and
    decompose parameter values.
    """

    def __init_subclass__(cls, GSL_only=False, **kwargs):
        # Read the default call signature for the init
        sig = inspect.signature(cls.__init__)

        # Collect all potential parameters defined on the class:
        cls._parameters = dict()
        sig_parameters = []

        # Also allow passing parameters in to subclassing:
        subcls_params = kwargs.pop('parameters', {})
        subcls_params.update(cls.__dict__)

        for k, v in subcls_params.items():
            if not isinstance(v, PotentialParameter):
                continue
            cls._parameters[k] = v

            # Parameters without a declared default become required arguments
            # in the synthesized signature.
            if v.default is None:
                default = inspect.Parameter.empty
            else:
                default = v.default

            sig_parameters.append(inspect.Parameter(
                k, inspect.Parameter.POSITIONAL_OR_KEYWORD, default=default))

        # Keep the non-parameter arguments (e.g. units) from the original
        # __init__ signature, dropping self and any *args.
        for k, param in sig.parameters.items():
            if k == 'self' or param.kind == param.VAR_POSITIONAL:
                continue
            sig_parameters.append(param)

        # int(kind) encodes the valid ordering of parameter kinds
        # (positional-or-keyword before keyword-only, etc.).
        sig_parameters = sorted(sig_parameters, key=lambda x: int(x.kind))

        # Define a new init signature based on the potential parameters:
        newsig = sig.replace(parameters=tuple(sig_parameters))
        cls.__signature__ = newsig

        super().__init_subclass__(**kwargs)

        # Marks subclasses that require the library to be built with GSL.
        cls._GSL_only = GSL_only

    def _validate_units(self, units):
        """Return *units* as a `UnitSystem`; ``None`` becomes dimensionless."""
        # make sure the units specified are a UnitSystem instance
        if units is not None and not isinstance(units, UnitSystem):
            units = UnitSystem(*units)

        elif units is None:
            units = DimensionlessUnitSystem()

        return units

    def _parse_parameter_values(self, *args, **kwargs):
        """Map positional and keyword arguments onto the declared parameters.

        Returns a dict of parameter name -> value, filling unspecified
        parameters with their declared defaults. Raises ValueError on too
        many positional arguments or unexpected keyword arguments.
        """
        expected_parameter_keys = list(self._parameters.keys())
        if len(args) > len(expected_parameter_keys):
            raise ValueError(
                "Too many positional arguments passed in to "
                f"{self.__class__.__name__}: Potential and Frame classes only "
                "accept parameters as positional arguments, all other "
                "arguments (e.g., units) must now be passed in as keyword "
                "argument.")

        parameter_values = dict()

        # Get any parameters passed as positional arguments
        i = 0
        if args:
            for i in range(len(args)):
                parameter_values[expected_parameter_keys[i]] = args[i]
            # Advance past the last positionally-consumed key so the keyword
            # pass below starts at the first unfilled parameter.
            i += 1

        # Get parameters passed in as keyword arguments:
        for k in expected_parameter_keys[i:]:
            val = kwargs.pop(k, self._parameters[k].default)
            parameter_values[k] = val

        if len(kwargs):
            raise ValueError(f"{self.__class__} received unexpected keyword "
                             f"argument(s): {list(kwargs.keys())}")

        return parameter_values

    @classmethod
    def _prepare_parameters(cls, parameters, units):
        """Attach or convert units on raw parameter values and decompose them
        into the given unit system.

        Raises ValueError when a quantity's unit is incompatible with the
        declared physical type (allowing any declared equivalencies).
        """
        pars = dict()
        for k, v in parameters.items():
            expected_ptype = cls._parameters[k].physical_type
            expected_unit = units[expected_ptype]
            equiv = cls._parameters[k].equivalencies

            if hasattr(v, 'unit'):
                # Quantity input: validate its unit against the declared
                # physical type (skipped for dimensionless unit systems).
                if (not isinstance(units, DimensionlessUnitSystem) and
                        not v.unit.is_equivalent(expected_unit, equiv)):
                    msg = (f"Parameter {k} has physical type "
                           f"'{v.unit.physical_type}', but we expected a "
                           f"physical type '{expected_ptype}'")
                    if equiv is not None:
                        msg = (msg +
                               f" or something equivalent via the {equiv} "
                               "equivalency.")
                    raise ValueError(msg)

                # NOTE: this can lead to some comparison issues in __eq__, which
                # tests for strong equality between parameter values. Here, the
                # .to() could cause small rounding issues in comparisons
                if v.unit.physical_type != expected_ptype:
                    v = v.to(expected_unit, equiv)

            elif expected_ptype is not None:
                # this is false for empty ptype: treat empty string as u.one
                # (i.e. this goes to the else clause)

                # TODO: remove when fix potentials that ask for scale velocity!
                if expected_ptype == 'speed':
                    v = v * units['length'] / units['time']
                else:
                    v = v * units[expected_ptype]
            else:
                v = v * u.one

            pars[k] = v.decompose(units)

        return pars

    def _remove_units_prepare_shape(self, x):
        """Strip units from *x* (or convert a `PhaseSpacePosition` into an
        array in this object's units) and return a float64 2D array."""
        from gala.dynamics import PhaseSpacePosition

        if hasattr(x, 'unit'):
            x = x.decompose(self.units).value

        elif isinstance(x, PhaseSpacePosition):
            x = x.w(self.units)

        x = atleast_2d(x, insert_axis=1).astype(np.float64)
        return x

    def _get_c_valid_arr(self, x):
        """
        Warning! Interpretation of axes is different for C code.
        """
        orig_shape = x.shape
        # The C layer expects transposed, C-contiguous memory.
        x = np.ascontiguousarray(x.reshape(orig_shape[0], -1).T)
        return orig_shape, x

    def _validate_prepare_time(self, t, pos_c):
        """
        Make sure that t is a 1D array and compatible with the C position array.
        """
        if hasattr(t, 'unit'):
            t = t.decompose(self.units).value

        if not isiterable(t):
            t = np.atleast_1d(t)

        t = np.ascontiguousarray(t.ravel())

        if len(t) > 1:
            if len(t) != pos_c.shape[0]:
                raise ValueError("If passing in an array of times, it must have a shape "
                                 "compatible with the input position(s).")

        return t

    # For comparison operations
    def __eq__(self, other):
        """Equality: same string representation, same unit system, and all
        parameter values equal (array-safe element-wise comparison)."""
        if other is None or not hasattr(other, 'parameters'):
            return False

        # the funkiness in the below is in case there are array parameters:
        par_bool = [
            (k1 == k2) and np.all(self.parameters[k1] == other.parameters[k2])
            for k1, k2 in zip(self.parameters.keys(), other.parameters.keys())]
        return np.all(par_bool) and (str(self) == str(other)) and (self.units == other.units)

    # String representations:
    def __repr__(self):
        pars = []

        keys = self.parameters.keys()

        for k in keys:
            v = self.parameters[k].value
            post = ""

            if hasattr(v, 'unit'):
                post = f" {v.unit}"
                v = v.value

            # Compact numeric formatting: scientific notation for very small
            # or very large magnitudes, fixed-point otherwise.
            # NOTE(review): np.log10 warns / returns nan for negative values —
            # this appears to assume non-negative parameter values; confirm.
            if isinstance(v, float):
                if v == 0:
                    par = f"{v:.0f}"
                elif np.log10(v) < -2 or np.log10(v) > 5:
                    par = f"{v:.2e}"
                else:
                    par = f"{v:.2f}"

            elif isinstance(v, int) and np.log10(v) > 5:
                par = f"{v:.2e}"

            else:
                par = str(v)

            pars.append(f"{k}={par}{post}")

        par_str = ", ".join(pars)

        if isinstance(self.units, DimensionlessUnitSystem):
            return f"<{self.__class__.__name__}: {par_str} (dimensionless)>"
        else:
            core_units_str = ",".join(map(str, self.units._core_units))
            return f"<{self.__class__.__name__}: {par_str} ({core_units_str})>"

    def __str__(self):
        return self.__class__.__name__
| |
""" Cisco_IOS_XR_dnx_port_mapper_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR dnx\-port\-mapper package operational data.
This module contains definitions
for the following management objects\:
oor\: DPA operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Oor(object):
"""
DPA operational data
.. attribute:: nodes
OOR data for available nodes
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes>`
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
    # Container of per-node OOR data; the parent back-reference is
    # required by YDK to derive the object's XML path.
    self.nodes = Oor.Nodes()
    self.nodes.parent = self
class Nodes(object):
"""
OOR data for available nodes
.. attribute:: node
DPA operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node>`
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YList of Node entries (one per node, keyed by node_name).
    self.node = YList()
    self.node.parent = self
    self.node.name = 'node'
class Node(object):
"""
DPA operational data for a particular node
.. attribute:: node_name <key>
Node ID
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: bundle_interface_details
OOR Bundle Interface Detail
**type**\: :py:class:`BundleInterfaceDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.BundleInterfaceDetails>`
.. attribute:: interface_details
OOR Interface Detail
**type**\: :py:class:`InterfaceDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceDetails>`
.. attribute:: interface_npu_resources
OOR information with NPU resources
**type**\: :py:class:`InterfaceNpuResources <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceNpuResources>`
.. attribute:: interface_summary_datas
OOR Per Interface Summary
**type**\: :py:class:`InterfaceSummaryDatas <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceSummaryDatas>`
.. attribute:: oor_summary
OOR Summary
**type**\: :py:class:`OorSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.OorSummary>`
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Key leaf: Rack/Slot/Instance node identifier.
    self.node_name = None
    # Child containers; each needs a parent back-reference for path
    # derivation.
    self.bundle_interface_details = Oor.Nodes.Node.BundleInterfaceDetails()
    self.bundle_interface_details.parent = self
    self.interface_details = Oor.Nodes.Node.InterfaceDetails()
    self.interface_details.parent = self
    self.interface_npu_resources = Oor.Nodes.Node.InterfaceNpuResources()
    self.interface_npu_resources.parent = self
    self.interface_summary_datas = Oor.Nodes.Node.InterfaceSummaryDatas()
    self.interface_summary_datas.parent = self
    self.oor_summary = Oor.Nodes.Node.OorSummary()
    self.oor_summary.parent = self
class InterfaceNpuResources(object):
"""
OOR information with NPU resources
.. attribute:: interface_npu_resource
OOR information with NPU resources for an interface
**type**\: list of :py:class:`InterfaceNpuResource <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource>`
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_npu_resource = YList()
self.interface_npu_resource.parent = self
self.interface_npu_resource.name = 'interface_npu_resource'
class InterfaceNpuResource(object):
"""
OOR information with NPU resources for an
interface
.. attribute:: interface_name <key>
The name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: interface_state
Current OOR state of the interface/bundle
**type**\: str
.. attribute:: max_entries
Max entries in NPU for this HW resource
**type**\: int
**range:** 0..4294967295
.. attribute:: member
Interface/Bundle member HW/NPU resources
**type**\: list of :py:class:`Member <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource.Member>`
.. attribute:: name
HW/NPU resource name
**type**\: str
.. attribute:: number_of_members
Number of bundle members. for non\-bundles this will be 1
**type**\: int
**range:** 0..4294967295
.. attribute:: red_threshold
Red threshold
**type**\: int
**range:** 0..4294967295
.. attribute:: red_threshold_percent
Red threshold percentage
**type**\: int
**range:** 0..4294967295
**units**\: percentage
.. attribute:: time_stamp
Timestamp of last OOR change
**type**\: str
.. attribute:: yellow_threshold
Yellow threshold
**type**\: int
**range:** 0..4294967295
.. attribute:: yellow_threshold_percent
Yellow threshold percentage
**type**\: int
**range:** 0..4294967295
**units**\: percentage
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.interface_state = None
self.max_entries = None
self.member = YList()
self.member.parent = self
self.member.name = 'member'
self.name = None
self.number_of_members = None
self.red_threshold = None
self.red_threshold_percent = None
self.time_stamp = None
self.yellow_threshold = None
self.yellow_threshold_percent = None
class Member(object):
"""
Interface/Bundle member HW/NPU resources
.. attribute:: dpa_table
Logical (DPA) tables information
**type**\: list of :py:class:`DpaTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource.Member.DpaTable>`
.. attribute:: interface_name
Name of the member interface
**type**\: str
.. attribute:: location
Rack/Slot/Instance of the interface
**type**\: str
.. attribute:: npu_id
Npu Id of the interface
**type**\: int
**range:** 0..4294967295
.. attribute:: number_of_dpa_tables
Number of logical tables using this NPU resource
**type**\: int
**range:** 0..4294967295
.. attribute:: total_in_use
Total In\-use entries of NPU resource DB
**type**\: int
**range:** 0..4294967295
.. attribute:: total_in_use_percent
Total In\-use percentage of NPU resource DB
**type**\: int
**range:** 0..4294967295
**units**\: percentage
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dpa_table = YList()
self.dpa_table.parent = self
self.dpa_table.name = 'dpa_table'
self.interface_name = None
self.location = None
self.npu_id = None
self.number_of_dpa_tables = None
self.total_in_use = None
self.total_in_use_percent = None
class DpaTable(object):
"""
Logical (DPA) tables information
.. attribute:: in_use
In\-use entries of NPU resource DB for this logical table
**type**\: int
**range:** 0..4294967295
.. attribute:: in_use_percent
In\-use entries of NPU resource DB for this logical table
**type**\: int
**range:** 0..4294967295
.. attribute:: name
Logical (DPA) table name
**type**\: str
"""
_prefix = 'dnx-port-mapper-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.in_use = None
self.in_use_percent = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:dpa-table'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.in_use is not None:
return True
if self.in_use_percent is not None:
return True
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
return meta._meta_table['Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource.Member.DpaTable']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:member'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.dpa_table is not None:
for child_ref in self.dpa_table:
if child_ref._has_data():
return True
if self.interface_name is not None:
return True
if self.location is not None:
return True
if self.npu_id is not None:
return True
if self.number_of_dpa_tables is not None:
return True
if self.total_in_use is not None:
return True
if self.total_in_use_percent is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
return meta._meta_table['Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource.Member']['meta_info']
# NOTE(review): trailing members of the generated ``InterfaceNpuResource``
# class (header above this chunk).
@property
def _common_path(self):
    # List entry path is keyed by the ``interface-name`` leaf.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.interface_name is None:
        raise YPYModelError('Key property interface_name is None')
    return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-npu-resource[Cisco-IOS-XR-dnx-port-mapper-oper:interface-name = ' + str(self.interface_name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Leafs first, then the ``member`` list (one element with data suffices).
    if self.interface_name is not None:
        return True
    if self.interface_state is not None:
        return True
    if self.max_entries is not None:
        return True
    if self.member is not None:
        for child_ref in self.member:
            if child_ref._has_data():
                return True
    if self.name is not None:
        return True
    if self.number_of_members is not None:
        return True
    if self.red_threshold is not None:
        return True
    if self.red_threshold_percent is not None:
        return True
    if self.time_stamp is not None:
        return True
    if self.yellow_threshold is not None:
        return True
    if self.yellow_threshold_percent is not None:
        return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
    return meta._meta_table['Oor.Nodes.Node.InterfaceNpuResources.InterfaceNpuResource']['meta_info']
# NOTE(review): trailing members of the generated ``InterfaceNpuResources``
# container class (header above this chunk).
@property
def _common_path(self):
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-npu-resources'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Container has data iff any list element has data.
    if self.interface_npu_resource is not None:
        for child_ref in self.interface_npu_resource:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
    return meta._meta_table['Oor.Nodes.Node.InterfaceNpuResources']['meta_info']
class BundleInterfaceDetails(object):
    """
    OOR Bundle Interface Detail

    .. attribute:: bundle_interface_detail
        OOR Data for particular Bundle interface
        **type**\: list of :py:class:`BundleInterfaceDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.BundleInterfaceDetails.BundleInterfaceDetail>`
    """

    _prefix = 'dnx-port-mapper-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list child: one entry per bundle interface
        self.bundle_interface_detail = YList()
        self.bundle_interface_detail.parent = self
        self.bundle_interface_detail.name = 'bundle_interface_detail'

    class BundleInterfaceDetail(object):
        """
        OOR Data for particular Bundle interface

        .. attribute:: interface  <key>
            Interface Name
            **type**\: int
            **range:** \-2147483648..2147483647

        .. attribute:: interface_state
            Current state of the interface
            **type**\: str

        .. attribute:: member
            Member details
            **type**\: list of :py:class:`Member <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.BundleInterfaceDetails.BundleInterfaceDetail.Member>`

        .. attribute:: time_stamp
            Timestamp
            **type**\: str
        """

        _prefix = 'dnx-port-mapper-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.interface = None        # list key
            self.interface_state = None
            self.member = YList()
            self.member.parent = self
            self.member.name = 'member'
            self.time_stamp = None

        class Member(object):
            """
            Member details

            .. attribute:: hardware_resource
                Type of hardware resource
                **type**\: str

            .. attribute:: interface_name
                Name of the interface
                **type**\: str

            .. attribute:: interface_status
                The current state of the interface
                **type**\: str

            .. attribute:: npu_id
                Npuid of the interface
                **type**\: str

            .. attribute:: time_stamp
                Timestamp
                **type**\: str
            """

            _prefix = 'dnx-port-mapper-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.hardware_resource = None
                self.interface_name = None
                self.interface_status = None
                self.npu_id = None
                self.time_stamp = None

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:member'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True as soon as any leaf of this node has been populated.
                if self.hardware_resource is not None:
                    return True
                if self.interface_name is not None:
                    return True
                if self.interface_status is not None:
                    return True
                if self.npu_id is not None:
                    return True
                if self.time_stamp is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
                return meta._meta_table['Oor.Nodes.Node.BundleInterfaceDetails.BundleInterfaceDetail.Member']['meta_info']

        @property
        def _common_path(self):
            # List entry path is keyed by the ``interface`` leaf.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.interface is None:
                raise YPYModelError('Key property interface is None')
            return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:bundle-interface-detail[Cisco-IOS-XR-dnx-port-mapper-oper:interface = ' + str(self.interface) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.interface is not None:
                return True
            if self.interface_state is not None:
                return True
            if self.member is not None:
                for child_ref in self.member:
                    if child_ref._has_data():
                        return True
            if self.time_stamp is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
            return meta._meta_table['Oor.Nodes.Node.BundleInterfaceDetails.BundleInterfaceDetail']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:bundle-interface-details'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.bundle_interface_detail is not None:
            for child_ref in self.bundle_interface_detail:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
        return meta._meta_table['Oor.Nodes.Node.BundleInterfaceDetails']['meta_info']
class InterfaceDetails(object):
    """
    OOR Interface Detail

    .. attribute:: interface_detail
        OOR Data for particular interface
        **type**\: list of :py:class:`InterfaceDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceDetails.InterfaceDetail>`
    """

    _prefix = 'dnx-port-mapper-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list child: one entry per interface
        self.interface_detail = YList()
        self.interface_detail.parent = self
        self.interface_detail.name = 'interface_detail'

    class InterfaceDetail(object):
        """
        OOR Data for particular interface

        .. attribute:: interface  <key>
            Interface Name
            **type**\: int
            **range:** \-2147483648..2147483647

        .. attribute:: hardware_resource
            Type of hardware resource
            **type**\: str

        .. attribute:: interface_name
            Name of the interface
            **type**\: str

        .. attribute:: interface_status
            The current state of the interface
            **type**\: str

        .. attribute:: npu_id
            Npuid of the interface
            **type**\: str

        .. attribute:: time_stamp
            Timestamp
            **type**\: str
        """

        _prefix = 'dnx-port-mapper-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.interface = None        # list key
            self.hardware_resource = None
            self.interface_name = None
            self.interface_status = None
            self.npu_id = None
            self.time_stamp = None

        @property
        def _common_path(self):
            # List entry path is keyed by the ``interface`` leaf.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.interface is None:
                raise YPYModelError('Key property interface is None')
            return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-detail[Cisco-IOS-XR-dnx-port-mapper-oper:interface = ' + str(self.interface) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.interface is not None:
                return True
            if self.hardware_resource is not None:
                return True
            if self.interface_name is not None:
                return True
            if self.interface_status is not None:
                return True
            if self.npu_id is not None:
                return True
            if self.time_stamp is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
            return meta._meta_table['Oor.Nodes.Node.InterfaceDetails.InterfaceDetail']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-details'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.interface_detail is not None:
            for child_ref in self.interface_detail:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
        return meta._meta_table['Oor.Nodes.Node.InterfaceDetails']['meta_info']
class InterfaceSummaryDatas(object):
    """
    OOR Per Interface Summary

    .. attribute:: interface_summary_data
        OOR Data for particular interface
        **type**\: list of :py:class:`InterfaceSummaryData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_port_mapper_oper.Oor.Nodes.Node.InterfaceSummaryDatas.InterfaceSummaryData>`
    """

    _prefix = 'dnx-port-mapper-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list child: one summary entry per interface
        self.interface_summary_data = YList()
        self.interface_summary_data.parent = self
        self.interface_summary_data.name = 'interface_summary_data'

    class InterfaceSummaryData(object):
        """
        OOR Data for particular interface

        .. attribute:: interface  <key>
            Interface Number
            **type**\: int
            **range:** \-2147483648..2147483647

        .. attribute:: hardware_resource
            Type of hardware resource
            **type**\: str

        .. attribute:: interface_name
            Name of the interface
            **type**\: str

        .. attribute:: interface_status
            The current state of the interface
            **type**\: str
        """

        _prefix = 'dnx-port-mapper-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.interface = None        # list key
            self.hardware_resource = None
            self.interface_name = None
            self.interface_status = None

        @property
        def _common_path(self):
            # List entry path is keyed by the ``interface`` leaf.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.interface is None:
                raise YPYModelError('Key property interface is None')
            return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-summary-data[Cisco-IOS-XR-dnx-port-mapper-oper:interface = ' + str(self.interface) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.interface is not None:
                return True
            if self.hardware_resource is not None:
                return True
            if self.interface_name is not None:
                return True
            if self.interface_status is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
            return meta._meta_table['Oor.Nodes.Node.InterfaceSummaryDatas.InterfaceSummaryData']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:interface-summary-datas'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.interface_summary_data is not None:
            for child_ref in self.interface_summary_data:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
        return meta._meta_table['Oor.Nodes.Node.InterfaceSummaryDatas']['meta_info']
class OorSummary(object):
    """
    OOR Summary

    .. attribute:: green
        interfaces in green state
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: red
        interfaces in red state
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: yel_low
        interfaces in yellow state
        **type**\: int
        **range:** 0..4294967295
    """

    _prefix = 'dnx-port-mapper-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.green = None    # count of interfaces in green state
        self.red = None      # count of interfaces in red state
        self.yel_low = None  # count of interfaces in yellow state

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-dnx-port-mapper-oper:oor-summary'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True as soon as any leaf of this node has been populated.
        if self.green is not None:
            return True
        if self.red is not None:
            return True
        if self.yel_low is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
        return meta._meta_table['Oor.Nodes.Node.OorSummary']['meta_info']
# NOTE(review): trailing members of the generated ``Node`` class
# (header above this chunk).
@property
def _common_path(self):
    # Top-of-tree list entry: path is absolute and keyed by ``node-name``.
    if self.node_name is None:
        raise YPYModelError('Key property node_name is None')
    return '/Cisco-IOS-XR-dnx-port-mapper-oper:oor/Cisco-IOS-XR-dnx-port-mapper-oper:nodes/Cisco-IOS-XR-dnx-port-mapper-oper:node[Cisco-IOS-XR-dnx-port-mapper-oper:node-name = ' + str(self.node_name) + ']'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Key leaf first, then each child container's own _has_data().
    if self.node_name is not None:
        return True
    if self.bundle_interface_details is not None and self.bundle_interface_details._has_data():
        return True
    if self.interface_details is not None and self.interface_details._has_data():
        return True
    if self.interface_npu_resources is not None and self.interface_npu_resources._has_data():
        return True
    if self.interface_summary_datas is not None and self.interface_summary_datas._has_data():
        return True
    if self.oor_summary is not None and self.oor_summary._has_data():
        return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
    return meta._meta_table['Oor.Nodes.Node']['meta_info']
# NOTE(review): trailing members of the generated ``Nodes`` container class
# (header above this chunk).
@property
def _common_path(self):
    # Container under the module root: path is absolute and needs no parent.
    return '/Cisco-IOS-XR-dnx-port-mapper-oper:oor/Cisco-IOS-XR-dnx-port-mapper-oper:nodes'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    if self.node is not None:
        for child_ref in self.node:
            if child_ref._has_data():
                return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
    return meta._meta_table['Oor.Nodes']['meta_info']
# NOTE(review): trailing members of the generated top-level ``Oor`` class
# (header above this chunk).
@property
def _common_path(self):
    # Module root node: fixed absolute path.
    return '/Cisco-IOS-XR-dnx-port-mapper-oper:oor'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    if self.nodes is not None and self.nodes._has_data():
        return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_port_mapper_oper as meta
    return meta._meta_table['Oor']['meta_info']
| |
""" basic collect and runtest protocol implementations """
from __future__ import absolute_import, division, print_function
import bdb
import sys
from time import time
import py
from _pytest._code.code import TerminalRepr, ExceptionInfo
#
# pytest plugin hooks
def pytest_addoption(parser):
    """Register the ``--durations`` terminal-reporting option.

    :param parser: pytest's option parser (hook-provided).
    """
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # Bug fix: the original ended this call with a stray trailing comma,
    # turning the statement into a useless one-element tuple expression.
    group.addoption('--durations',
                    action="store", type=int, default=None, metavar="N",
                    help="show N slowest setup/test durations (N=0 for all).")
def pytest_terminal_summary(terminalreporter):
    # Implements --durations: print the N slowest recorded phase durations
    # (N == 0, i.e. a falsy value, means "show them all").
    durations = terminalreporter.config.option.durations
    if durations is None:
        return  # option not given
    tr = terminalreporter
    dlist = []
    # Collect every report that carries a duration attribute.
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, 'duration'):
                dlist.append(rep)
    if not dlist:
        return
    # Slowest first.
    dlist.sort(key=lambda x: x.duration)
    dlist.reverse()
    if not durations:
        tr.write_sep("=", "slowest test durations")
    else:
        tr.write_sep("=", "slowest %s test durations" % durations)
        dlist = dlist[:durations]
    for rep in dlist:
        nodeid = rep.nodeid.replace("::()::", "::")
        tr.write_line("%02.2fs %-8s %s" %
                      (rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
    # Attach the shared setup/teardown state machine to the session.
    session._setupstate = SetupState()


def pytest_sessionfinish(session):
    # Tear down everything that is still set up when the session ends.
    session._setupstate.teardown_all()
class NodeInfo(object):
    """Lightweight record of a test item's ``location`` tuple.

    ``location`` is the ``(fspath, lineno, domaininfo)`` tuple that items
    carry (see :class:`TestReport`). Derives from ``object`` explicitly for
    consistency with the other classes in this module.
    """

    def __init__(self, location):
        self.location = location

    def __repr__(self):
        return "<NodeInfo location=%r>" % (self.location,)
def pytest_runtest_protocol(item, nextitem):
    # Announce the item, then run the full setup/call/teardown protocol.
    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )
    runtestprotocol(item, nextitem=nextitem)
    # Returning True stops other pytest_runtest_protocol hook impls.
    return True
def runtestprotocol(item, log=True, nextitem=None):
    # Run the three phases for *item*, producing one report per phase.
    # ``nextitem`` lets teardown keep fixtures alive that the next item
    # will reuse.
    hasrequest = hasattr(item, "_request")
    if hasrequest and not item._request:
        item._initrequest()
    rep = call_and_report(item, "setup", log)
    reports = [rep]
    if rep.passed:
        # Only run the actual test if setup succeeded.
        if item.config.option.setupshow:
            show_test_item(item)
        if not item.config.option.setuponly:
            reports.append(call_and_report(item, "call", log))
    # Teardown always runs, even when setup failed.
    reports.append(call_and_report(item, "teardown", log,
                                   nextitem=nextitem))
    # after all teardown hooks have been called
    # want funcargs and request info to go away
    if hasrequest:
        item._request = False
        item.funcargs = None
    return reports
def show_test_item(item):
    """Show test function, parameters and the fixtures of the test item."""
    tw = item.config.get_terminal_writer()
    tw.line()
    tw.write(' ' * 8)  # indent under the --setup-show output
    tw.write(item._nodeid)
    used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
    if used_fixtures:
        tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
def pytest_runtest_setup(item):
    # Set up all collectors on the item's chain that are not yet prepared.
    item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
    """Execute the item's ``runtest()``.

    On failure, stash the exception info in ``sys.last_type`` /
    ``sys.last_value`` / ``sys.last_traceback`` so postmortem debugging
    (e.g. ``pdb.pm()``) can pick it up, then re-raise.
    """
    try:
        item.runtest()
    except Exception:
        # Store trace info to allow postmortem debugging.
        # (Renamed locals: the original shadowed the builtin ``type``.)
        exc_type, exc_value, exc_tb = sys.exc_info()
        exc_tb = exc_tb.tb_next  # Skip *this* frame
        sys.last_type = exc_type
        sys.last_value = exc_value
        sys.last_traceback = exc_tb
        del exc_tb  # Get rid of it in this namespace
        raise
def pytest_runtest_teardown(item, nextitem):
    # Tear down only what the next item will not reuse (nextitem-aware).
    item.session._setupstate.teardown_exact(item, nextitem)
def pytest_report_teststatus(report):
    """Map setup/teardown outcomes to a (category, shortletter, word) triple.

    Reports for the "call" phase are left to other hook implementations
    (returns None for them, as the original did implicitly).
    """
    if report.when not in ("setup", "teardown"):
        return None
    if report.failed:
        # category, shortletter, verbose-word
        return "error", "E", "ERROR"
    if report.skipped:
        return "skipped", "s", "SKIPPED"
    return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
    # Run one phase hook, turn the CallInfo into a report, optionally log
    # it, and give interactive exception hooks (e.g. --pdb) a chance.
    call = call_runtest_hook(item, when, **kwds)
    hook = item.ihook
    report = hook.pytest_runtest_makereport(item=item, call=call)
    if log:
        hook.pytest_runtest_logreport(report=report)
    if check_interactive_exception(call, report):
        hook.pytest_exception_interact(node=item, call=call, report=report)
    return report
def check_interactive_exception(call, report):
    """Tell whether *call*'s exception warrants interactive handling.

    xfail'ed failures, skips and debugger-quit are never interactive.
    Returns the (falsy) excinfo itself when no exception occurred, matching
    the original short-circuit expression.
    """
    excinfo = call.excinfo
    if not excinfo:
        return excinfo
    if hasattr(report, "wasxfail"):
        return False
    if excinfo.errisinstance(skip.Exception):
        return False
    return not excinfo.errisinstance(bdb.BdbQuit)
def call_runtest_hook(item, when, **kwds):
    # Resolve the phase hook (pytest_runtest_setup/call/teardown) and run
    # it inside a CallInfo, which captures result/exception and timing.
    hookname = "pytest_runtest_" + when
    ihook = getattr(item.ihook, hookname)
    return CallInfo(lambda: ihook(item=item, **kwds), when=when)
class CallInfo:
    """ Result/Exception info a function invocation. """
    #: None or ExceptionInfo object.
    excinfo = None

    def __init__(self, func, when):
        """Invoke *func* immediately, recording timing and any exception.

        :param func: zero-argument callable to execute.
        :param when: context of invocation: one of "setup", "call",
            "teardown", "memocollect".
        """
        self.when = when
        self.start = time()
        try:
            self.result = func()
        except KeyboardInterrupt:
            # Propagate user interruption, but still record the stop time.
            self.stop = time()
            raise
        except BaseException:
            # Was a bare ``except:``; ``except BaseException:`` is the
            # explicit spelling of the same catch-everything behaviour.
            self.excinfo = ExceptionInfo()
        self.stop = time()

    def __repr__(self):
        if self.excinfo:
            status = "exception: %s" % str(self.excinfo.value)
        else:
            status = "result: %r" % (self.result,)
        return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
    """Return (and cache on *node*) a one-line description of an xdist slave."""
    try:
        return node._slaveinfocache
    except AttributeError:
        info = node.slaveinfo
        version = "%s.%s.%s" % info['version_info'][:3]
        line = "[%s] %s -- Python %s %s" % (
            info['id'], info['sysplatform'], version, info['executable'])
        node._slaveinfocache = line
        return line
class BaseReport(object):
    # Behaviour shared by TestReport and CollectReport.

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def toterminal(self, out):
        # Render this report to a terminal writer; prepends the xdist
        # slave info line when the report came from a slave node.
        if hasattr(self, 'node'):
            out.line(getslaveinfoline(self.node))
        longrepr = self.longrepr
        if longrepr is None:
            return
        if hasattr(longrepr, 'toterminal'):
            longrepr.toterminal(out)
        else:
            try:
                out.line(longrepr)
            except UnicodeEncodeError:
                out.line("<unprintable longrepr>")

    def get_sections(self, prefix):
        # Yield (prefix, content) for captured sections whose name starts
        # with *prefix* (e.g. 'Captured stdout').
        for name, content in self.sections:
            if name.startswith(prefix):
                yield prefix, content

    @property
    def longreprtext(self):
        """
        Read-only property that returns the full string representation
        of ``longrepr``.

        .. versionadded:: 3.0
        """
        tw = py.io.TerminalWriter(stringio=True)
        tw.hasmarkup = False
        self.toterminal(tw)
        exc = tw.stringio.getvalue()
        return exc.strip()

    @property
    def capstdout(self):
        """Return captured text from stdout, if capturing is enabled

        .. versionadded:: 3.0
        """
        return ''.join(content for (prefix, content) in self.get_sections('Captured stdout'))

    @property
    def capstderr(self):
        """Return captured text from stderr, if capturing is enabled

        .. versionadded:: 3.0
        """
        return ''.join(content for (prefix, content) in self.get_sections('Captured stderr'))

    # Outcome shortcuts derived from the ``outcome`` string.
    passed = property(lambda x: x.outcome == "passed")
    failed = property(lambda x: x.outcome == "failed")
    skipped = property(lambda x: x.outcome == "skipped")

    @property
    def fspath(self):
        # The nodeid starts with the file path, '::'-separated from the rest.
        return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
    # Turn one phase's CallInfo into a TestReport, classifying the outcome
    # as passed / skipped / failed and choosing a longrepr accordingly.
    when = call.when
    duration = call.stop-call.start
    keywords = dict([(x,1) for x in item.keywords])
    excinfo = call.excinfo
    sections = []
    if not call.excinfo:
        outcome = "passed"
        longrepr = None
    else:
        if not isinstance(excinfo, ExceptionInfo):
            # NOTE(review): excinfo may already be a rendered representation
            # rather than an ExceptionInfo; pass it through unchanged.
            outcome = "failed"
            longrepr = excinfo
        elif excinfo.errisinstance(skip.Exception):
            outcome = "skipped"
            r = excinfo._getreprcrash()
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else:  # exception in setup or teardown
                longrepr = item._repr_failure_py(excinfo,
                                                 style=item.config.option.tbstyle)
    # Attach captured output sections recorded during this phase.
    for rwhen, key, content in item._report_sections:
        sections.append(("Captured %s %s" %(key, rwhen), content))
    return TestReport(item.nodeid, item.location,
                      keywords, outcome, longrepr, when,
                      sections, duration)
class TestReport(BaseReport):
    """ Basic test report object (also used for setup and teardown calls if
    they fail).
    """
    def __init__(self, nodeid, location, keywords, outcome,
                 longrepr, when, sections=(), duration=0, **extra):
        #: normalized collection node id
        self.nodeid = nodeid

        #: a (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        self.location = location

        #: a name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords = keywords

        #: test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when = when

        #: list of pairs ``(str, str)`` of extra information which needs to
        #: marshallable. Used by pytest to add captured text
        #: from ``stdout`` and ``stderr``, but may be used by other plugins
        #: to add arbitrary information to reports.
        self.sections = list(sections)

        #: time it took to run just the test
        self.duration = duration

        # Any extra keyword attributes (e.g. xdist adds 'node').
        self.__dict__.update(extra)

    def __repr__(self):
        return "<TestReport %r when=%r outcome=%r>" % (
            self.nodeid, self.when, self.outcome)
class TeardownErrorReport(BaseReport):
    # Synthetic report for errors raised during teardown; outcome and
    # phase are fixed by definition.
    outcome = "failed"
    when = "teardown"

    def __init__(self, longrepr, **extra):
        self.longrepr = longrepr
        self.sections = []
        self.__dict__.update(extra)
def pytest_make_collect_report(collector):
    # Run the collector and wrap its result (or failure) in a CollectReport.
    call = CallInfo(
        lambda: list(collector.collect()),
        'collect')
    longrepr = None
    if not call.excinfo:
        outcome = "passed"
    else:
        # nose's skip exceptions are honoured alongside pytest's own.
        from _pytest import nose
        skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
        if call.excinfo.errisinstance(skip_exceptions):
            outcome = "skipped"
            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            errorinfo = collector.repr_failure(call.excinfo)
            if not hasattr(errorinfo, "toterminal"):
                # Plain-string failure info gets a minimal terminal wrapper.
                errorinfo = CollectErrorRepr(errorinfo)
            longrepr = errorinfo
    rep = CollectReport(collector.nodeid, outcome, longrepr,
                        getattr(call, 'result', None))
    rep.call = call  # see collect_one_node
    return rep
class CollectReport(BaseReport):
    # Report produced for one collection step of a collector node.

    def __init__(self, nodeid, outcome, longrepr, result,
                 sections=(), **extra):
        self.nodeid = nodeid
        self.outcome = outcome      # "passed" / "skipped" / "failed"
        self.longrepr = longrepr    # None or a failure representation
        self.result = result or []  # collected child nodes (empty on failure)
        self.sections = list(sections)
        self.__dict__.update(extra)

    @property
    def location(self):
        # Collectors have no line/domain info; only the file path is known.
        return (self.fspath, None, self.fspath)

    def __repr__(self):
        return "<CollectReport %r lenresult=%s outcome=%r>" % (
            self.nodeid, len(self.result), self.outcome)
class CollectErrorRepr(TerminalRepr):
    # Minimal terminal representation wrapping a plain error message string.

    def __init__(self, msg):
        self.longrepr = msg

    def toterminal(self, out):
        out.line(self.longrepr, red=True)
class SetupState(object):
    """ shared state for setting up/tearing down test items or collectors. """

    def __init__(self):
        # Stack of collectors that are currently set up, outermost first.
        self.stack = []
        # colitem -> list of finalizer callables to run at its teardown.
        self._finalizers = {}

    def addfinalizer(self, finalizer, colitem):
        """ attach a finalizer to the given colitem.
        if colitem is None, this will add a finalizer that
        is called at the end of teardown_all().
        """
        assert colitem and not isinstance(colitem, tuple)
        assert py.builtin.callable(finalizer)
        #assert colitem in self.stack # some unit tests don't setup stack :/
        self._finalizers.setdefault(colitem, []).append(finalizer)

    def _pop_and_teardown(self):
        # Tear down the innermost collector on the stack.
        colitem = self.stack.pop()
        self._teardown_with_finalization(colitem)

    def _callfinalizers(self, colitem):
        # Run finalizers LIFO; re-raise the first exception only after all
        # finalizers had a chance to run.
        finalizers = self._finalizers.pop(colitem, None)
        exc = None
        while finalizers:
            fin = finalizers.pop()
            try:
                fin()
            except Exception:
                # XXX Only first exception will be seen by user,
                #     ideally all should be reported.
                if exc is None:
                    exc = sys.exc_info()
        if exc:
            py.builtin._reraise(*exc)

    def _teardown_with_finalization(self, colitem):
        self._callfinalizers(colitem)
        if hasattr(colitem, "teardown"):
            colitem.teardown()
        # Sanity check: any remaining finalizer keys must still be live.
        for colitem in self._finalizers:
            assert colitem is None or colitem in self.stack \
                or isinstance(colitem, tuple)

    def teardown_all(self):
        # Unwind the whole stack, then run any remaining finalizers.
        while self.stack:
            self._pop_and_teardown()
        for key in list(self._finalizers):
            self._teardown_with_finalization(key)
        assert not self._finalizers

    def teardown_exact(self, item, nextitem):
        # Tear down only up to what the next item's chain still needs.
        needed_collectors = nextitem and nextitem.listchain() or []
        self._teardown_towards(needed_collectors)

    def _teardown_towards(self, needed_collectors):
        # Pop until the remaining stack is a prefix of needed_collectors.
        while self.stack:
            if self.stack == needed_collectors[:len(self.stack)]:
                break
            self._pop_and_teardown()

    def prepare(self, colitem):
        """ setup objects along the collector chain to the test-method
        and teardown previously setup objects."""
        needed_collectors = colitem.listchain()
        self._teardown_towards(needed_collectors)

        # check if the last collection node has raised an error
        for col in self.stack:
            if hasattr(col, '_prepare_exc'):
                py.builtin._reraise(*col._prepare_exc)
        # Set up the not-yet-prepared tail of the chain; remember a setup
        # failure on the collector so later prepare() calls re-raise it.
        for col in needed_collectors[len(self.stack):]:
            self.stack.append(col)
            try:
                col.setup()
            except Exception:
                col._prepare_exc = sys.exc_info()
                raise
def collect_one_node(collector):
    # Fire collectstart, build the collect report, and give interactive
    # exception hooks a chance before returning the report.
    ihook = collector.ihook
    ihook.pytest_collectstart(collector=collector)
    rep = ihook.pytest_make_collect_report(collector=collector)
    call = rep.__dict__.pop("call", None)  # stashed by pytest_make_collect_report
    if call and check_interactive_exception(call, rep):
        ihook.pytest_exception_interact(node=collector, call=call, report=rep)
    return rep
# =============================================================
# Test OutcomeExceptions and helpers for creating them.
class OutcomeException(Exception):
    """ OutcomeException and its subclass instances indicate and
    contain info about test and collection outcomes.
    """

    def __init__(self, msg=None, pytrace=True):
        Exception.__init__(self, msg)
        self.msg = msg
        self.pytrace = pytrace

    def __repr__(self):
        # Without a message, fall back to a generic class-name marker.
        if not self.msg:
            return "<%s instance>" %(self.__class__.__name__,)
        val = self.msg
        if isinstance(val, bytes):
            val = py._builtin._totext(val, errors='replace')
        return val

    __str__ = __repr__
class Skipped(OutcomeException):
    # XXX hackish: on 3k we fake to live in the builtins
    # in order to have Skipped exception printing shorter/nicer
    __module__ = 'builtins'

    def __init__(self, msg=None, pytrace=True, allow_module_level=False):
        # allow_module_level: permit raising at module scope (used by
        # importorskip for module-level skips).
        OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
        self.allow_module_level = allow_module_level
class Failed(OutcomeException):
    """ raised from an explicit call to pytest.fail() """
    # Same builtins trick as Skipped, for shorter exception printing.
    __module__ = 'builtins'
class Exit(KeyboardInterrupt):
    """ raised for immediate program exits (no tracebacks/summaries)"""

    def __init__(self, msg="unknown reason"):
        # Initialise the KeyboardInterrupt machinery, then also keep the
        # reason around as a plain attribute.
        KeyboardInterrupt.__init__(self, msg)
        self.msg = msg
# exposed helper methods
def exit(msg):
    """ exit testing process as if KeyboardInterrupt was triggered. """
    __tracebackhide__ = True  # hide this helper frame from tracebacks
    raise Exit(msg)


exit.Exception = Exit  # exception class exposed for ``except exit.Exception``
def skip(msg=""):
    """ skip an executing test with the given message. Note: it's usually
    better to use the pytest.mark.skipif marker to declare a test to be
    skipped under certain conditions like mismatching platforms or
    dependencies. See the pytest_skipping plugin for details.
    """
    __tracebackhide__ = True  # hide this helper frame from tracebacks
    raise Skipped(msg=msg)


skip.Exception = Skipped  # exception class exposed for ``except skip.Exception``
def fail(msg="", pytrace=True):
    """ explicitly fail a currently-executing test with the given message.

    :arg pytrace: if false the msg represents the full failure information
        and no python traceback will be reported.
    """
    __tracebackhide__ = True  # hide this helper frame from tracebacks
    raise Failed(msg=msg, pytrace=pytrace)


fail.Exception = Failed  # exception class exposed for ``except fail.Exception``
def importorskip(modname, minversion=None):
    """ return imported module if it has at least "minversion" as its
    __version__ attribute. If no minversion is specified a skip
    is only triggered if the module can not be imported.

    :param modname: dotted module name to import.
    :param minversion: optional minimum version string, compared with
        ``pkg_resources.parse_version``.
    """
    import warnings
    __tracebackhide__ = True
    compile(modname, '', 'eval')  # to catch syntaxerrors
    should_skip = False
    with warnings.catch_warnings():
        # make sure to ignore ImportWarnings that might happen because
        # of existing directories with the same name we're trying to
        # import but without a __init__.py file
        warnings.simplefilter('ignore')
        try:
            __import__(modname)
        except ImportError:
            # Do not raise chained exception here(#1485)
            should_skip = True
    if should_skip:
        raise Skipped("could not import %r" % (modname,), allow_module_level=True)
    mod = sys.modules[modname]
    if minversion is None:
        return mod
    # A minversion was given: the early return above already guarantees it,
    # so the original's second ``minversion is not None`` test was redundant
    # and has been removed.
    verattr = getattr(mod, '__version__', None)
    try:
        from pkg_resources import parse_version as pv
    except ImportError:
        raise Skipped("we have a required version for %r but can not import "
                      "pkg_resources to parse version strings." % (modname,),
                      allow_module_level=True)
    if verattr is None or pv(verattr) < pv(minversion):
        raise Skipped("module %r has __version__ %r, required is: %r" % (
            modname, verattr, minversion), allow_module_level=True)
    return mod
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest
import requests
from frappe.model.delete_doc import delete_doc
from frappe.utils.data import today, add_to_date
from frappe import _dict
from frappe.limits import update_limits, clear_limit
from frappe.utils import get_url
from frappe.core.doctype.user.user import get_total_users
from frappe.core.doctype.user.user import MaxUsersReachedError, test_password_strength
test_records = frappe.get_test_records('User')
class TestUser(unittest.TestCase):
def tearDown(self):
    # Reset System Settings after each test: disable password strength
    # checking and clear the minimum score so later tests are unaffected.
    frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 0)
    frappe.db.set_value("System Settings", "System Settings", "minimum_password_score", "")
def test_user_type(self):
new_user = frappe.get_doc(dict(doctype='User', email='test-for-type@example.com',
first_name='Tester')).insert()
self.assertEquals(new_user.user_type, 'Website User')
# role with desk access
new_user.add_roles('_Test Role 2')
new_user.save()
self.assertEquals(new_user.user_type, 'System User')
# clear role
new_user.roles = []
new_user.save()
self.assertEquals(new_user.user_type, 'Website User')
# role without desk access
new_user.add_roles('_Test Role 4')
new_user.save()
self.assertEquals(new_user.user_type, 'Website User')
frappe.delete_doc('User', new_user.name)
def test_delete(self):
frappe.get_doc("User", "test@example.com").add_roles("_Test Role 2")
self.assertRaises(frappe.LinkExistsError, delete_doc, "Role", "_Test Role 2")
frappe.db.sql("""delete from `tabHas Role` where role='_Test Role 2'""")
delete_doc("Role","_Test Role 2")
if frappe.db.exists("User", "_test@example.com"):
delete_doc("User", "_test@example.com")
user = frappe.copy_doc(test_records[1])
user.email = "_test@example.com"
user.insert()
frappe.get_doc({"doctype": "ToDo", "description": "_Test"}).insert()
delete_doc("User", "_test@example.com")
self.assertTrue(not frappe.db.sql("""select * from `tabToDo` where owner=%s""",
("_test@example.com",)))
from frappe.core.doctype.role.test_role import test_records as role_records
frappe.copy_doc(role_records[1]).insert()
def test_get_value(self):
self.assertEquals(frappe.db.get_value("User", "test@example.com"), "test@example.com")
self.assertEquals(frappe.db.get_value("User", {"email":"test@example.com"}), "test@example.com")
self.assertEquals(frappe.db.get_value("User", {"email":"test@example.com"}, "email"), "test@example.com")
self.assertEquals(frappe.db.get_value("User", {"email":"test@example.com"}, ["first_name", "email"]),
("_Test", "test@example.com"))
self.assertEquals(frappe.db.get_value("User",
{"email":"test@example.com", "first_name": "_Test"},
["first_name", "email"]),
("_Test", "test@example.com"))
test_user = frappe.db.sql("select * from tabUser where name='test@example.com'",
as_dict=True)[0]
self.assertEquals(frappe.db.get_value("User", {"email":"test@example.com"}, "*", as_dict=True),
test_user)
self.assertEquals(frappe.db.get_value("User", "xxxtest@example.com"), None)
frappe.db.set_value("Website Settings", "Website Settings", "_test", "_test_val")
self.assertEquals(frappe.db.get_value("Website Settings", None, "_test"), "_test_val")
self.assertEquals(frappe.db.get_value("Website Settings", "Website Settings", "_test"), "_test_val")
def test_high_permlevel_validations(self):
user = frappe.get_meta("User")
self.assertTrue("roles" in [d.fieldname for d in user.get_high_permlevel_fields()])
me = frappe.get_doc("User", "testperm@example.com")
me.remove_roles("System Manager")
frappe.set_user("testperm@example.com")
me = frappe.get_doc("User", "testperm@example.com")
self.assertRaises(frappe.PermissionError, me.add_roles, "System Manager")
frappe.set_user("Administrator")
me = frappe.get_doc("User", "testperm@example.com")
me.add_roles("System Manager")
self.assertTrue("System Manager" in [d.role for d in me.get("roles")])
def test_user_limit_for_site(self):
update_limits({'users': get_total_users()})
# reload site config
from frappe import _dict
frappe.local.conf = _dict(frappe.get_site_config())
# Create a new user
user = frappe.new_doc('User')
user.email = 'test_max_users@example.com'
user.first_name = 'Test_max_user'
self.assertRaises(MaxUsersReachedError, user.add_roles, 'System Manager')
if frappe.db.exists('User', 'test_max_users@example.com'):
frappe.delete_doc('User', 'test_max_users@example.com')
# Clear the user limit
clear_limit('users')
def test_user_limit_for_site_with_simultaneous_sessions(self):
clear_limit('users')
# make sure this user counts
user = frappe.get_doc('User', 'test@example.com')
user.add_roles('Website Manager')
user.save()
update_limits({'users': get_total_users()})
user.simultaneous_sessions = user.simultaneous_sessions + 1
self.assertRaises(MaxUsersReachedError, user.save)
# Clear the user limit
clear_limit('users')
# def test_deny_multiple_sessions(self):
# from frappe.installer import update_site_config
# clear_limit('users')
#
# # allow one session
# user = frappe.get_doc('User', 'test@example.com')
# user.simultaneous_sessions = 1
# user.new_password = 'Eastern_43A1W'
# user.save()
#
# def test_request(conn):
# value = conn.get_value('User', 'first_name', {'name': 'test@example.com'})
# self.assertTrue('first_name' in value)
#
# from frappe.frappeclient import FrappeClient
# update_site_config('deny_multiple_sessions', 0)
#
# conn1 = FrappeClient(get_url(), "test@example.com", "Eastern_43A1W", verify=False)
# test_request(conn1)
#
# conn2 = FrappeClient(get_url(), "test@example.com", "Eastern_43A1W", verify=False)
# test_request(conn2)
#
# update_site_config('deny_multiple_sessions', 1)
# conn3 = FrappeClient(get_url(), "test@example.com", "Eastern_43A1W", verify=False)
# test_request(conn3)
#
# # first connection should fail
# test_request(conn1)
def test_site_expiry(self):
user = frappe.get_doc('User', 'test@example.com')
user.enabled = 1
user.new_password = 'Eastern_43A1W'
user.save()
update_limits({'expiry': add_to_date(today(), days=-1), 'support_email': 'support@example.com'})
frappe.local.conf = _dict(frappe.get_site_config())
frappe.db.commit()
res = requests.post(get_url(), params={'cmd': 'login', 'usr':
'test@example.com', 'pwd': 'Eastern_43A1W', 'device': 'desktop'})
# While site is expired status code returned is 417 Failed Expectation
self.assertEqual(res.status_code, 417)
clear_limit("expiry")
frappe.local.conf = _dict(frappe.get_site_config())
def test_delete_user(self):
new_user = frappe.get_doc(dict(doctype='User', email='test-for-delete@example.com',
first_name='Tester Delete User')).insert()
self.assertEquals(new_user.user_type, 'Website User')
# role with desk access
new_user.add_roles('_Test Role 2')
new_user.save()
self.assertEquals(new_user.user_type, 'System User')
comm = frappe.get_doc({
"doctype":"Communication",
"subject": "To check user able to delete even if linked with communication",
"content": "To check user able to delete even if linked with communication",
"sent_or_received": "Sent",
"user": new_user.name
})
comm.insert(ignore_permissions=True)
frappe.delete_doc('User', new_user.name)
self.assertFalse(frappe.db.exists('User', new_user.name))
def test_deactivate_additional_users(self):
update_limits({'users': get_total_users()+1})
if not frappe.db.exists("User", "test_deactivate_additional_users@example.com"):
user = frappe.new_doc('User')
user.email = 'test_deactivate_additional_users@example.com'
user.first_name = 'Test Deactivate Additional Users'
user.add_roles("System Manager")
#update limits
update_limits({"users": get_total_users()-1})
self.assertEqual(frappe.db.get_value("User", "test_deactivate_additional_users@example.com", "enabled"), 0)
if frappe.db.exists("User", "test_deactivate_additional_users@example.com"):
frappe.delete_doc('User', 'test_deactivate_additional_users@example.com')
# Clear the user limit
clear_limit('users')
def test_password_strength(self):
# Test Password without Password Strenth Policy
frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 0)
# password policy is disabled, test_password_strength should be ignored
result = test_password_strength("test_password")
self.assertFalse(result.get("feedback", None))
# Test Password with Password Strenth Policy Set
frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 1)
frappe.db.set_value("System Settings", "System Settings", "minimum_password_score", 2)
# Score 1; should now fail
result = test_password_strength("bee2ve")
self.assertEqual(result['feedback']['password_policy_validation_passed'], False)
# Score 4; should pass
result = test_password_strength("Eastern_43A1W")
self.assertEqual(result['feedback']['password_policy_validation_passed'], True)
| |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.country_v30_rc1 import CountryV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.created_date_v30_rc1 import CreatedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc1 import SourceV30Rc1 # noqa: F401,E501
class AddressV30Rc1(object):
    """Swagger-generated model for an ORCID address record.

    NOTE: auto generated by the swagger code generator program;
    do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    swagger_types = {
        'created_date': 'CreatedDateV30Rc1',
        'last_modified_date': 'LastModifiedDateV30Rc1',
        'source': 'SourceV30Rc1',
        'country': 'CountryV30Rc1',
        'visibility': 'str',
        'path': 'str',
        'put_code': 'int',
        'display_index': 'int'
    }

    attribute_map = {
        'created_date': 'created-date',
        'last_modified_date': 'last-modified-date',
        'source': 'source',
        'country': 'country',
        'visibility': 'visibility',
        'path': 'path',
        'put_code': 'put-code',
        'display_index': 'display-index'
    }

    def __init__(self, created_date=None, last_modified_date=None, source=None, country=None, visibility=None, path=None, put_code=None, display_index=None):  # noqa: E501
        """AddressV30Rc1 - a model defined in Swagger.

        `country` is the only required field: it is always routed through
        its validating setter, which rejects None.  Every other field is
        assigned only when a value was supplied.
        """
        self._created_date = None
        self._last_modified_date = None
        self._source = None
        self._country = None
        self._visibility = None
        self._path = None
        self._put_code = None
        self._display_index = None
        self.discriminator = None

        for field, value in (('created_date', created_date),
                             ('last_modified_date', last_modified_date),
                             ('source', source)):
            if value is not None:
                setattr(self, field, value)
        self.country = country
        for field, value in (('visibility', visibility),
                             ('path', path),
                             ('put_code', put_code),
                             ('display_index', display_index)):
            if value is not None:
                setattr(self, field, value)

    @property
    def created_date(self):
        """CreatedDateV30Rc1: the created-date of this AddressV30Rc1."""
        return self._created_date

    @created_date.setter
    def created_date(self, created_date):
        self._created_date = created_date

    @property
    def last_modified_date(self):
        """LastModifiedDateV30Rc1: the last-modified-date of this AddressV30Rc1."""
        return self._last_modified_date

    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        self._last_modified_date = last_modified_date

    @property
    def source(self):
        """SourceV30Rc1: the source of this AddressV30Rc1."""
        return self._source

    @source.setter
    def source(self, source):
        self._source = source

    @property
    def country(self):
        """CountryV30Rc1: the country of this AddressV30Rc1 (required)."""
        return self._country

    @country.setter
    def country(self, country):
        # Required by the ORCID schema: reject a missing value outright.
        if country is None:
            raise ValueError("Invalid value for `country`, must not be `None`")  # noqa: E501
        self._country = country

    @property
    def visibility(self):
        """str: the visibility of this AddressV30Rc1."""
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        # Closed vocabulary defined by the swagger spec.
        allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
        if visibility not in allowed_values:
            raise ValueError(
                "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
                .format(visibility, allowed_values)
            )
        self._visibility = visibility

    @property
    def path(self):
        """str: the path of this AddressV30Rc1."""
        return self._path

    @path.setter
    def path(self, path):
        self._path = path

    @property
    def put_code(self):
        """int: the put-code of this AddressV30Rc1."""
        return self._put_code

    @put_code.setter
    def put_code(self, put_code):
        self._put_code = put_code

    @property
    def display_index(self):
        """int: the display-index of this AddressV30Rc1."""
        return self._display_index

    @display_index.setter
    def display_index(self, display_index):
        self._display_index = display_index

    def to_dict(self):
        """Returns the model properties as a dict"""
        def unpack(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        result = {name: unpack(getattr(self, name))
                  for name in self.swagger_types}
        if issubclass(AddressV30Rc1, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, AddressV30Rc1) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self.__eq__(other)
| |
#!/usr/bin/python
# ----------------------------------------------------------------------
# Copyright (2010) Aram Davtyan and Garegin Papoian
# Papoian's Group, University of Maryland at Collage Park
# http://papoian.chem.umd.edu/
# Modified by Nick Schafer to read from a file with per-residue
# color information and insert it into the B-factor field
# The format of the file should be as follows: it should have one entry
# for every residue on every line, separated by a space and as many
# lines as there are snapshots in the dump file that is being processed.
# Example for 5 residue, 4 snapshot trajectory:
# 1.0 0.5 0.7 0.3 0.0
# 1.0 0.5 0.7 0.3 0.2
# 0.8 0.5 0.2 0.5 0.1
# 1.0 0.7 0.7 0.3 0.0
# To specify the file with color information, add
# -color colorfile
# to the normal list of arguments.
# Last Update: 03/23/12
# ----------------------------------------------------------------------
import sys
#from Bio.PDB.PDBParser import PDBParser
# LAMMPS atom-type index -> chemical element symbol.
atom_type = {'1' : 'C', '2' : 'N', '3' : 'O', '4' : 'C', '5' : 'H', '6' : 'C'}
# LAMMPS atom-type index -> role of the site in the coarse-grained model.
atom_desc = {'1' : 'C-Alpha', '2' : 'N', '3' : 'O', '4' : 'C-Beta', '5' : 'H-Beta', '6' : 'C-Prime'}
# LAMMPS atom-type index -> PDB atom-name field.
PDB_type = {'1' : 'CA', '2' : 'N', '3' : 'O', '4' : 'CB', '5' : 'HB', '6' : 'C' }
# One-letter -> three-letter amino-acid code table.
d_res = {"C" : "CYS", "I" : "ILE", "S" : "SER", "Q" : "GLN", "K" : "LYS",
	"N" : "ASN", "P" : "PRO", "T" : "THR", "F" : "PHE", "A" : "ALA",
	"H" : "HIS", "G" : "GLY", "D" : "ASP", "L" : "LEU", "R" : "ARG",
	"W" : "TRP", "V" : "VAL", "E" : "GLU", "Y" : "TYR", "M" : "MET"}
class PDB_Atom:
	# Class-level defaults for one PDB ATOM record: serial number, atom
	# name, residue name, residue number, coordinates, element symbol.
	no = 0
	ty = ''
	res = 'UNK'
	res_no = 0
	x = 0.0
	y = 0.0
	z = 0.0
	atm = 'C'
	def __init__(self, no, ty, res, res_no, x, y, z, atm):
		# Plain value container; all column formatting happens in write_.
		self.no = no
		self.ty = ty
		self.res = res
		self.res_no = res_no
		self.x = x
		self.y = y
		self.z = z
		self.atm = atm
	def write_(self, f, colorsnap):
		"""Write this atom as one fixed-column PDB ATOM line to f.

		The (' '+str(v))[-w:] slices right-justify each value into its
		column.  colorsnap selects the snapshot column of the module-level
		colortimeseries table; its value goes into the B-factor field when
		-color was given (addcolorinformation), else 0.00 is written.
		"""
		f.write('ATOM')
		f.write((' '+str(self.no))[-7:])
		f.write(' ')
		f.write((self.ty+' ')[:4])
		f.write(self.res)
		f.write(' ')
		# Chain identifier is hard-coded to 'T'.
		f.write('T')
		f.write((' '+str(self.res_no))[-4:])
		f.write((' '+str(round(self.x,3)))[-12:])
		f.write((' '+str(round(self.y,3)))[-8:])
		f.write((' '+str(round(self.z,3)))[-8:])
		# Occupancy is fixed at 1.00.
		f.write(' 1.00')
		if addcolorinformation:
			# colortimeseries holds strings; convert on output.
			f.write(' %.2f' % float(colortimeseries[self.res_no-1][colorsnap]))
		else:
			f.write(' 0.00')
		f.write((' '+self.atm)[-12:]+' ')
		f.write('\n')
class Atom:
	"""One raw atom read from (or written back to) a LAMMPS dump frame."""

	# Class-level defaults: serial number, element symbol, coordinates and
	# a human-readable role description (e.g. 'C-Alpha').
	No = 0
	ty = ''
	x = 0.0
	y = 0.0
	z = 0.0
	desc = ''

	def __init__(self, No, ty, No_m, x, y, z, desc=''):
		# No_m is the LAMMPS atom-type key used to look up PDB_type.
		self.No = No
		self.ty = ty
		self.No_m = No_m
		self.x = x
		self.y = y
		self.z = z
		self.desc = desc

	def write_(self, f):
		"""Write this atom as one space-separated line to file object f."""
		fields = [str(self.No),
			PDB_type[self.No_m],
			str(round(self.x,8)),
			str(round(self.y,8)),
			str(round(self.z,8)),
			self.desc]
		f.write(' '.join(fields))
		f.write('\n')
def One2ThreeLetters(txt):
	"""Map a one-letter amino-acid string to a list of three-letter
	residue names; unknown letters fall back to ALA."""
	return [d_res.get(letter, "ALA") for letter in txt]
def file_len(fname):
	"""Return the number of lines in the file `fname`.

	Fix: the original opened the file without ever closing it (leaked
	file handle) and read every line into memory; use a context manager
	and count the lines incrementally instead.
	"""
	with open(fname) as handle:
		return sum(1 for _ in handle)
# Require input and output file arguments, plus up to five optional ones.
# NOTE: this file is Python 2 (print statements).
if len(sys.argv)<3 or len(sys.argv)>8:
	print "Too many or too few arguments."
	print "\n" + sys.argv[0] + " Input_file Output_file [snapshot] [-seq sequence_file] [-color color_file]\n"
	exit()
del_list=[]
seq_file = ""
color_file = ""
sequance = []
seq_txt = ""
addcolorinformation = False
bseq = False
# Scan the optional flags; record their argv indices largest-first so the
# pops below do not shift positions that are still to be removed.
for iarg in range(3, len(sys.argv)):
	if sys.argv[iarg]=="-seq":
		bseq = True
		seq_file = sys.argv[iarg+1]
		del_list.insert(0, iarg)
		del_list.insert(0, iarg+1)
	if sys.argv[iarg]=="-color":
		addcolorinformation = True
		color_file = sys.argv[iarg+1]
		del_list.insert(0, iarg)
		del_list.insert(0, iarg+1)
# Remove the consumed flag arguments so positional indexing stays clean.
for idel in del_list:
	sys.argv.pop(idel)
lammps_file = sys.argv[1]
output_file = ""
if len(sys.argv)>2: output_file = sys.argv[2]
psf_file = output_file
# Normalize extensions: the output gets .pdb, the companion topology .psf.
if output_file[-4:]!=".pdb": output_file = output_file + ".pdb"
if psf_file[-4:]==".pdb": psf_file = psf_file[:-3] + "psf"
if psf_file[-4:]!=".psf": psf_file = psf_file + ".psf"
snapshot = -1
# Optional third positional argument: 0-based snapshot index to extract.
if len(sys.argv)>3: snapshot = int(sys.argv[3])
if seq_file!="":
	# Read the one-letter sequence (newlines stripped) and expand it to
	# three-letter residue names used for the PDB residue field.
	fseq = open(seq_file)
	seq_txt = fseq.read().strip().replace("\n","")
	sequance = One2ThreeLetters(seq_txt)
	fseq.close()
if color_file!="":
	numsnap=file_len(color_file)
	print "Building color information into the b-factor field..."
	print "Number of snapshots (inferred from number of lines in color file): " + str(numsnap)
	snap=0
	linecounter=1
	fcolor = open(color_file)
	for line in fcolor:
		splitline=line.split()
		if linecounter==1:
			# The first line fixes the residue count and sizes the table.
			numres=len(splitline)
			print "Number of residues (inferred from first line size of color file): " + str(numres)
			linecounter +=1
			# Build array that is numres x numsnapshots
			# Each line contains all per residue values for a single snapshot
			colortimeseries=[]
			for res in range(numres):
				colortimeseries.append([])
				for allsnaps in range(numsnap):
					colortimeseries[res].append(0.0)
		for res in range(numres):
			# Values stay strings here; PDB_Atom.write_ converts to float.
			colortimeseries[res][snap]=splitline[res]
		snap += 1
	fcolor.close()
# Coefficients combining two consecutive C-alpha positions and the previous
# O position to reconstruct the backbone N (an,bn,cn) and C-prime (ap,bp,cp)
# atoms — presumably the standard AWSEM peptide-plane reconstruction; TODO
# confirm against the model's reference.
an = 0.4831806
bn = 0.7032820
cn = -0.1864262
ap = 0.4436538
bp = 0.2352006
cp = 0.3211455
# Per-frame parser state, reset at each TIMESTEP.
n_atoms = 0
i_atom = 0
item = ''
step = 0
atoms = []	# raw Atom records read from the current dump frame
atoms2 = []	# atoms plus reconstructed N/C-Prime atoms, renumbered
atoms3 = []	# PDB_Atom records derived from atoms2
bonds = []	# [i, j] atom-number pairs for the PSF topology
box = []	# raw BOX BOUNDS lines (kept verbatim)
A = []	# parsed [lo, hi] box bounds per axis
out = open(output_file, 'w')
def convertToPDB():
	"""Convert the all-atom list atoms2 into PDB_Atom records in atoms3.

	Residue numbering starts at 1 and advances each time an N atom is
	seen (buildAllAtoms inserts an N at the start of every residue after
	the first).  Residue names come from the -seq file when given,
	otherwise ALA is used.
	"""
	ires = 1
	for ia in atoms2:
		if ia.desc == 'N': ires = ires + 1
		res_ty="ALA"
		if bseq: res_ty = sequance[ires-1]
		atom = PDB_Atom(ia.No, PDB_type[ia.No_m], res_ty, ires, ia.x, ia.y, ia.z, ia.ty)
		atoms3.append(atom)
def buildAllAtoms():
	"""Rebuild the global atoms2 from atoms, inserting the backbone N and
	C-Prime atoms that are absent from the coarse-grained dump.

	N and C' coordinates are linear combinations (an..cp, module level)
	of the previous C-alpha, the current C-alpha and the previous O.
	A NullVal placeholder reserves the C-Prime slot right after each
	C-alpha and is replaced once the next residue's C-alpha is known
	(or popped again for the chain's final residue).  Atom numbers are
	rewritten to follow the new ordering.
	"""
	index = 0
	last_Ca_index = -1
	last_O_index = -1
	Cp_index = -1
	# Sentinel with No==0 marking an unfilled C-Prime slot.
	NullVal = Atom(0, '', '6', 0.0, 0.0, 0.0, '')
	# atoms2 = []
	for i in range(0, len(atoms)):
		ia = atoms[i]
		index = index + 1
		if ia.desc == 'O': last_O_index = i
		if ia.desc == 'C-Alpha':
			if last_Ca_index != -1:
				Cai = atoms[last_Ca_index]
				Cai1 = ia
				Oi = atoms[last_O_index]
				# Reconstructed amide N of the current residue.
				nx = an*Cai.x + bn*Cai1.x + cn*Oi.x
				ny = an*Cai.y + bn*Cai1.y + cn*Oi.y
				nz = an*Cai.z + bn*Cai1.z + cn*Oi.z
				# Reconstructed carbonyl C' of the previous residue.
				px = ap*Cai.x + bp*Cai1.x + cp*Oi.x
				py = ap*Cai.y + bp*Cai1.y + cp*Oi.y
				pz = ap*Cai.z + bp*Cai1.z + cp*Oi.z
				N = Atom(index, 'N', '2', nx, ny, nz, 'N')
				index = index + 1
				Cp = Atom(int(Cai.No) + 1, 'C', '6', px, py, pz, 'C-Prime')
				# Cp = Atom(index, 'C', '6', px, py, pz, 'C-Prime')
				# index = index + 1
				atoms2.append(N)
				# Fill the placeholder reserved after the previous C-alpha.
				atoms2.pop(Cp_index)
				atoms2.insert(Cp_index, Cp)
				# atoms2.append(Cp)
			last_Ca_index = i
		ia.No = index
		atoms2.append(ia)
		if ia.desc == 'C-Alpha':
			# Reserve the C-Prime slot immediately after this C-alpha.
			atoms2.append(NullVal)
			Cp_index = index
			index = index + 1
	# The last residue has no following C-alpha: drop its unfilled slot
	# and close the numbering gap it leaves behind.
	if atoms2[Cp_index].No==0: atoms2.pop(Cp_index)
	for i in range(Cp_index, len(atoms2)):
		atoms2[i].No = atoms2[i].No - 1
def buildBonds():
	"""Append backbone/side-chain bond pairs for atoms2 to the global
	bonds list.

	Indices are 1-based atom positions.  Each residue's bonds are
	flushed when the next residue's N is reached; the trailing block
	after the loop flushes the final residue (which lacks C'/O entries
	by construction in buildAllAtoms).
	"""
	N_index = -1
	Ca_index = -1
	Cp_index = -1
	O_index = -1
	Cb_index = -1
	Hb_index = -1
	for i in range(0, len(atoms2)):
		ia = atoms2[i]
		if ia.desc == 'N':
			# A new residue starts: emit the completed residue's bonds.
			if N_index!=-1 and Ca_index!=-1:
				bonds.append([N_index, Ca_index])
			if Ca_index!=-1 and Cp_index!=-1:
				bonds.append([Ca_index, Cp_index])
			if Cp_index!=-1 and O_index!=-1:
				bonds.append([Cp_index, O_index])
			if Ca_index!=-1 and Cb_index!=-1:
				bonds.append([Ca_index, Cb_index])
			if Ca_index!=-1 and Hb_index!=-1:
				bonds.append([Ca_index, Hb_index])
			N_index = i+1
			# Peptide bond: previous residue's C' to this residue's N.
			if Cp_index!=-1:
				bonds.append([Cp_index, N_index])
			Ca_index = -1
			Cp_index = -1
			O_index = -1
			Cb_index = -1
			Hb_index = -1
		if ia.desc == 'C-Alpha': Ca_index = i+1
		if ia.desc == 'C-Beta': Cb_index = i+1
		if ia.desc == 'H-Beta': Hb_index = i+1
		if ia.desc == 'C-Prime': Cp_index = i+1
		if ia.desc == 'O': O_index = i+1
	# Flush the final residue.
	if N_index!=-1 and Ca_index!=-1:
		bonds.append([N_index, Ca_index])
	if Ca_index!=-1 and Cb_index!=-1:
		bonds.append([Ca_index, Cb_index])
	if Ca_index!=-1 and Hb_index!=-1:
		bonds.append([Ca_index, Hb_index])
def print_atom_array():
	"""Echo the current frame back out in LAMMPS dump format, using the
	module-level out file and the step/n_atoms/box/atoms2 state."""
	header = ["ITEM: TIMESTEP", str(step),
		"ITEM: NUMBER OF ATOMS", str(n_atoms),
		"ITEM: BOX BOUNDS"]
	header.extend(box)
	out.write("\n".join(header) + "\n")
	out.write("ITEM: ATOMS\n")
	for entry in atoms2:
		entry.write_(out)
def print_pdb(colorsnap):
	"""Write every PDB_Atom in atoms3 to the module-level out file, then
	an END record.  colorsnap is forwarded to PDB_Atom.write_ to pick
	the B-factor column when color information is loaded."""
	for pdb_atom in atoms3:
		pdb_atom.write_(out, colorsnap)
	out.write("END\n")
def print_psf():
	"""Write a PSF-style topology (atom list + bond list) for the current
	frame to psf_file, from the module-level atoms3/atoms2/bonds state.

	NOTE(review): the header literal below says "PDF"; that looks like a
	typo for the PSF magic string, but it is preserved unchanged here.
	"""
	pad = " "
	psfout = open(psf_file,'w')
	psfout.write("PDF\n\n\t2 !NTITLE\n\n")
	psfout.write((pad+str(len(atoms3)))[-8:]+" !NATOM\n")
	for entry in atoms2:
		psfout.write((pad+str(entry.No))[-8:]+" PROT 1")
		psfout.write(" R00")
		psfout.write(" "+entry.ty)
		psfout.write(" 1")
		psfout.write(" 0 1 0\n")
	psfout.write("\n")
	psfout.write((pad+str(len(bonds)))[-8:]+" !NBOND")
	# Four bond pairs per line.
	for i, bond in enumerate(bonds):
		if i%4==0: psfout.write("\n")
		psfout.write((pad+str(bond[0]))[-8:])
		psfout.write((pad+str(bond[1]))[-8:])
	psfout.close()
nFrame = 0
found = False
lfile = open(lammps_file)
# colorsnap starts at -2 because it is incremented at every TIMESTEP and a
# frame is only flushed when the NEXT TIMESTEP is reached, so the first
# completed frame is written with color column 0.
colorsnap=-2
if snapshot<0:
	# No snapshot index given: convert every frame in the dump.
	for l in lfile:
		l = l.strip()
		if l[:5]=="ITEM:":
			item = l[6:]
		else:
			if item == "TIMESTEP":
				colorsnap +=1
				# A new TIMESTEP means the previous frame is complete:
				# flush it, then reset the per-frame state.
				if len(atoms)>0:
					buildAllAtoms()
					convertToPDB()
					n_atoms = len(atoms2)
					print_pdb(colorsnap)
				step = int(l)
				atoms = []
				atoms2 = []
				atoms3 = []
				box = []
				A = []
				nFrame = nFrame + 1
			elif item == "NUMBER OF ATOMS":
				n_atoms = int(l)
			elif item[:10] == "BOX BOUNDS":
				box.append(l)
				l = l.split()
				A.append([float(l[0]), float(l[1])])
			elif item[:5] == "ATOMS":
				# Columns: id type xs ys zs (scaled coordinates);
				# unscale into real coordinates via the box bounds.
				l = l.split()
				i_atom = l[0]
				x = float(l[2])
				y = float(l[3])
				z = float(l[4])
				x = (A[0][1] - A[0][0])*x + A[0][0]
				y = (A[1][1] - A[1][0])*y + A[1][0]
				z = (A[2][1] - A[2][0])*z + A[2][0]
				desc = atom_desc[l[1]]
				atom = Atom(i_atom, atom_type[l[1]], l[1], x, y, z, desc)
				atoms.append(atom)
	# Flush the final frame, then emit the topology once.
	if len(atoms)>0:
		buildAllAtoms()
		convertToPDB()
		n_atoms = len(atoms2)
		print_pdb(colorsnap)
		buildBonds()
		print_psf()
else:
for l in lfile:
l = l.strip()
if l[:5]=="ITEM:":
item = l[6:]
if item == "TIMESTEP":
if found: break
elif nFrame==snapshot: found = True
nFrame = nFrame + 1
elif found:
if item == "TIMESTEP":
step = int(l)
elif item == "NUMBER OF ATOMS":
n_atoms = int(l)
elif item[:10] == "BOX BOUNDS":
box.append(l)
l = l.split()
A.append([float(l[0]), float(l[1])])
elif item[:5] == "ATOMS":
l = l.split()
i_atom = l[0]
x = float(l[2])
y = float(l[3])
z = float(l[4])
x = (A[0][1] - A[0][0])*x + A[0][0]
y = (A[1][1] - A[1][0])*y + A[1][0]
z = (A[2][1] - A[2][0])*z + A[2][0]
desc = atom_desc[l[1]]
atom = Atom(i_atom, atom_type[l[1]], l[1], x, y, z, desc)
atoms.append(atom)
if len(atoms)>0:
buildAllAtoms()
convertToPDB()
n_atoms = len(atoms2)
if numsnap == 1:
print_pdb(0)
else:
print_pdb(snapshot)
buildBonds()
print_psf()
lfile.close()
out.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.