code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
# Copyright (c) 2016 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import requests

from swift.common.middleware.s3api.etree import fromstring

import test.functional as tf
from test.functional.s3api import S3ApiBase
from test.functional.s3api.utils import get_error_code, get_error_msg


def setUpModule():
    tf.setup_package()


def tearDownModule():
    tf.teardown_package()


class TestS3ApiPresignedUrls(S3ApiBase):
    """Functional tests for S3 presigned URLs against a live cluster.

    Requests are issued with plain ``requests`` (no SDK signing); the
    signature is carried in the presigned URL produced by
    ``self.conn.generate_url_and_headers``.
    """

    def test_bucket(self):
        """PUT/GET/DELETE a bucket via presigned URLs, with and without
        objects present, validating the ListBucketResult XML."""
        bucket = 'test-bucket'
        req_objects = ('object', 'object2')
        max_bucket_listing = tf.cluster_info['s3api'].get(
            'max_bucket_listing', 1000)

        # GET Bucket (Without Object)
        status, _junk, _junk = self.conn.make_request('PUT', bucket)
        self.assertEqual(status, 200)

        url, headers = self.conn.generate_url_and_headers('GET', bucket)
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertCommonResponseHeaders(resp.headers)
        self.assertIsNotNone(resp.headers['content-type'])
        self.assertEqual(resp.headers['content-length'],
                         str(len(resp.content)))

        elem = fromstring(resp.content, 'ListBucketResult')
        self.assertEqual(elem.find('Name').text, bucket)
        self.assertIsNone(elem.find('Prefix').text)
        self.assertIsNone(elem.find('Marker').text)
        self.assertEqual(elem.find('MaxKeys').text,
                         str(max_bucket_listing))
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        objects = elem.findall('./Contents')
        self.assertEqual(list(objects), [])

        # GET Bucket (With Object)
        for obj in req_objects:
            status, _junk, _junk = self.conn.make_request('PUT', bucket, obj)
            self.assertEqual(
                status, 200,
                'Got %d response while creating %s' % (status, obj))

        # same presigned GET-bucket URL is reused; listing must now show both
        # objects
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertCommonResponseHeaders(resp.headers)
        self.assertIsNotNone(resp.headers['content-type'])
        self.assertEqual(resp.headers['content-length'],
                         str(len(resp.content)))

        elem = fromstring(resp.content, 'ListBucketResult')
        self.assertEqual(elem.find('Name').text, bucket)
        self.assertIsNone(elem.find('Prefix').text)
        self.assertIsNone(elem.find('Marker').text)
        self.assertEqual(elem.find('MaxKeys').text,
                         str(max_bucket_listing))
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), 2)
        for o in resp_objects:
            self.assertIn(o.find('Key').text, req_objects)
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertEqual(o.find('Size').text, '0')
            # BUG FIX: the original asserted on the *boolean expression*
            # `text is not None`, which is never None, making the check
            # vacuous; assert on the text itself instead.
            self.assertIsNotNone(o.find('StorageClass').text)
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)

        # DELETE Bucket
        for obj in req_objects:
            self.conn.make_request('DELETE', bucket, obj)

        url, headers = self.conn.generate_url_and_headers('DELETE', bucket)
        resp = requests.delete(url, headers=headers)
        self.assertEqual(resp.status_code, 204,
                         'Got %d %s' % (resp.status_code, resp.content))

    def test_expiration_limits(self):
        """Dispatch to the v2 or v4 expiration-limit checks depending on
        the S3_USE_SIGV4 environment switch."""
        if os.environ.get('S3_USE_SIGV4'):
            self._test_expiration_limits_v4()
        else:
            self._test_expiration_limits_v2()

    def _test_expiration_limits_v2(self):
        """SigV2: an absurdly-far-future Expires must be rejected."""
        bucket = 'test-bucket'
        # Expiration date is too far in the future
        url, headers = self.conn.generate_url_and_headers(
            'GET', bucket, expires_in=2 ** 32)
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 403,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(get_error_code(resp.content), 'AccessDenied')
        self.assertIn('Invalid date (should be seconds since epoch)',
                      get_error_msg(resp.content))

    def _test_expiration_limits_v4(self):
        """SigV4: X-Amz-Expires must be a non-negative number below the
        7-day (604800s) AWS limit."""
        bucket = 'test-bucket'
        # Expiration is negative
        url, headers = self.conn.generate_url_and_headers(
            'GET', bucket, expires_in=-1)
        resp = requests.get(url, headers=headers)
        self.assertEqual(resp.status_code, 400,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(get_error_code(resp.content),
                         'AuthorizationQueryParametersError')
        self.assertIn('X-Amz-Expires must be non-negative',
                      get_error_msg(resp.content))

        # Expiration date is too far in the future
        for exp in (7 * 24 * 60 * 60 + 1,
                    2 ** 63 - 1):
            url, headers = self.conn.generate_url_and_headers(
                'GET', bucket, expires_in=exp)
            resp = requests.get(url, headers=headers)
            self.assertEqual(resp.status_code, 400,
                             'Got %d %s' % (resp.status_code, resp.content))
            self.assertEqual(get_error_code(resp.content),
                             'AuthorizationQueryParametersError')
            self.assertIn('X-Amz-Expires must be less than 604800 seconds',
                          get_error_msg(resp.content))

        # Expiration date is *way* too far in the future, or isn't a number
        for exp in (2 ** 63, 'foo'):
            # BUG FIX: the original passed the literal 2 ** 63 here, so the
            # non-numeric 'foo' case was never actually exercised.
            url, headers = self.conn.generate_url_and_headers(
                'GET', bucket, expires_in=exp)
            resp = requests.get(url, headers=headers)
            self.assertEqual(resp.status_code, 400,
                             'Got %d %s' % (resp.status_code, resp.content))
            self.assertEqual(get_error_code(resp.content),
                             'AuthorizationQueryParametersError')
            self.assertEqual('X-Amz-Expires should be a number',
                             get_error_msg(resp.content))

    def test_object(self):
        """Full object lifecycle (HEAD miss, wrong verb, PUT, GET,
        overwrite, DELETE) through presigned URLs."""
        bucket = 'test-bucket'
        obj = 'object'
        status, _junk, _junk = self.conn.make_request('PUT', bucket)
        self.assertEqual(status, 200)

        # HEAD/missing object
        head_url, headers = self.conn.generate_url_and_headers(
            'HEAD', bucket, obj)
        resp = requests.head(head_url, headers=headers)
        self.assertEqual(resp.status_code, 404,
                         'Got %d %s' % (resp.status_code, resp.content))

        # Wrong verb -- a HEAD-signed URL must not validate for GET
        resp = requests.get(head_url)
        self.assertEqual(resp.status_code, 403,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(get_error_code(resp.content),
                         'SignatureDoesNotMatch')

        # PUT empty object
        put_url, headers = self.conn.generate_url_and_headers(
            'PUT', bucket, obj)
        resp = requests.put(put_url, data='', headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        # GET empty object
        get_url, headers = self.conn.generate_url_and_headers(
            'GET', bucket, obj)
        resp = requests.get(get_url, headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(resp.content, '')

        # PUT over object -- presigned PUT URL is reusable while valid
        resp = requests.put(put_url, data='foobar', headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        # GET non-empty object
        resp = requests.get(get_url, headers=headers)
        self.assertEqual(resp.status_code, 200,
                         'Got %d %s' % (resp.status_code, resp.content))
        self.assertEqual(resp.content, 'foobar')

        # DELETE Object
        delete_url, headers = self.conn.generate_url_and_headers(
            'DELETE', bucket, obj)
        resp = requests.delete(delete_url, headers=headers)
        self.assertEqual(resp.status_code, 204,
                         'Got %d %s' % (resp.status_code, resp.content))

        # Final cleanup
        status, _junk, _junk = self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 204)


class TestS3ApiPresignedUrlsSigV4(TestS3ApiPresignedUrls):
    """Re-run the presigned-URL suite with SigV4 signing forced on via the
    environment flag read by the connection helpers."""

    @classmethod
    def setUpClass(cls):
        os.environ['S3_USE_SIGV4'] = "True"

    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']

    def setUp(self):
        super(TestS3ApiPresignedUrlsSigV4, self).setUp()
matthewoliver/swift
test/functional/s3api/test_presigned.py
Python
apache-2.0
9,841
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import GlazedDoorInterzone

log = logging.getLogger(__name__)


class TestGlazedDoorInterzone(unittest.TestCase):
    """Round-trip test for the GlazedDoorInterzone IDF object: populate every
    field, save to a temporary IDF file, re-read it, and verify all values
    survive the save/load cycle unchanged."""

    def setUp(self):
        # Temporary file used for the IDF save/load round trip.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_glazeddoorinterzone(self):
        # Strict validation so any out-of-range assignment raises immediately.
        pyidf.validation_level = ValidationLevel.error

        obj = GlazedDoorInterzone()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # object-list
        var_construction_name = "object-list|Construction Name"
        obj.construction_name = var_construction_name
        # object-list
        var_building_surface_name = "object-list|Building Surface Name"
        obj.building_surface_name = var_building_surface_name
        # object-list
        var_outside_boundary_condition_object = "object-list|Outside Boundary Condition Object"
        obj.outside_boundary_condition_object = var_outside_boundary_condition_object
        # real
        var_multiplier = 1.0
        obj.multiplier = var_multiplier
        # real
        var_starting_x_coordinate = 6.6
        obj.starting_x_coordinate = var_starting_x_coordinate
        # real
        var_starting_z_coordinate = 7.7
        obj.starting_z_coordinate = var_starting_z_coordinate
        # real
        var_length = 8.8
        obj.length = var_length
        # real
        var_height = 9.9
        obj.height = var_height

        # Write the object out; check=False skips whole-file validation since
        # this single object is not a complete, consistent IDF model.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        # Echo the saved file for debugging failed round trips.
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        # Re-read and compare: exact equality for strings, almost-equal for
        # the real-valued fields.
        idf2 = IDF(self.path)
        self.assertEqual(idf2.glazeddoorinterzones[0].name, var_name)
        self.assertEqual(idf2.glazeddoorinterzones[0].construction_name, var_construction_name)
        self.assertEqual(idf2.glazeddoorinterzones[0].building_surface_name, var_building_surface_name)
        self.assertEqual(idf2.glazeddoorinterzones[0].outside_boundary_condition_object, var_outside_boundary_condition_object)
        self.assertAlmostEqual(idf2.glazeddoorinterzones[0].multiplier, var_multiplier)
        self.assertAlmostEqual(idf2.glazeddoorinterzones[0].starting_x_coordinate, var_starting_x_coordinate)
        self.assertAlmostEqual(idf2.glazeddoorinterzones[0].starting_z_coordinate, var_starting_z_coordinate)
        self.assertAlmostEqual(idf2.glazeddoorinterzones[0].length, var_length)
        self.assertAlmostEqual(idf2.glazeddoorinterzones[0].height, var_height)
rbuffat/pyidf
tests/test_glazeddoorinterzone.py
Python
apache-2.0
2,670
import pymysql.cursors from model.group import Group from model.contact import Contact class DbFixture(): def __init__(self, host, name, user, password): self.host = host self.name = name self.user = user self.password = password self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True) def get_group_list(self): list =[] cursor = self.connection.cursor() try: cursor.execute("select group_id, group_name, group_header, group_footer from group_list") for row in cursor: (id, name, header, footer) = row list.append(Group(id=str(id), name=name, header=header, footer=footer)) finally: cursor.close() return list def get_contact_list(self): list =[] cursor = self.connection.cursor() try: cursor.execute("select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00' ") for row in cursor: (id, firstname, lastname) = row list.append(Contact(id=str(id), firstname=firstname, lastname=lastname)) finally: cursor.close() return list def destroy(self): self.connection.close()
zbikowa/python_training
fixture/db.py
Python
apache-2.0
1,332
"""sdb module initialization; sets value for base decorator.""" from .models import sdb_backends from ..core.models import base_decorator mock_sdb = base_decorator(sdb_backends)
spulec/moto
moto/sdb/__init__.py
Python
apache-2.0
179
# -*- coding: utf8 -*- import base64 import hashlib import io import nose import requests import aliyunauth.utils import aliyunauth.consts def test_cal_b64md5(): s_data = b"foo" l_data = b"bar" * aliyunauth.consts.MD5_CHUNK_SIZE # normal data, None nose.tools.eq_(aliyunauth.utils.cal_b64md5(None), None) def b64md5(data): return base64.b64encode(hashlib.md5(data).digest()).decode("utf8") # normal data, small size, bytes nose.tools.eq_(aliyunauth.utils.cal_b64md5(s_data), b64md5(s_data)) # normal data, small size, bytes nose.tools.eq_( aliyunauth.utils.cal_b64md5(s_data.decode("utf8")), b64md5(s_data) ) # io-like, big size, bytes nose.tools.eq_( aliyunauth.utils.cal_b64md5(io.BytesIO(l_data)), b64md5(l_data) ) # io-like, big size, str nose.tools.eq_( aliyunauth.utils.cal_b64md5(io.StringIO(l_data.decode("utf8"))), b64md5(l_data) ) def test_to_bytes(): nose.tools.ok_(isinstance( aliyunauth.utils.to_bytes(u"foo"), requests.compat.bytes )) nose.tools.ok_(isinstance( aliyunauth.utils.to_bytes(b"foo"), requests.compat.bytes )) nose.tools.eq_(aliyunauth.utils.to_bytes(u"福", "gb2312"), b'\xb8\xa3') def test_to_str(): nose.tools.ok_(isinstance( aliyunauth.utils.to_str(u"bar"), requests.compat.str ), "unicode to str failed") nose.tools.ok_(isinstance( aliyunauth.utils.to_str(b"bar"), requests.compat.str ), "bytes to str failed") nose.tools.eq_(aliyunauth.utils.to_str(b"\xb0\xf4", "gb2312"), u"棒") def test_percent_quote(): nose.tools.eq_( aliyunauth.utils.percent_quote(u"福棒 &?/*~=+foo\""), "%E7%A6%8F%E6%A3%92%20%26%3F%2F%2A~%3D%2Bfoo%22" ) def test_percent_encode(): nose.tools.eq_( aliyunauth.utils.percent_encode([("福 棒", "foo+bar"), ("none", None)]), "%E7%A6%8F%20%E6%A3%92=foo%2Bbar" ) nose.tools.eq_( aliyunauth.utils.percent_encode([("foo", "福"), ("bar", "棒")], True), "bar=%E6%A3%92&foo=%E7%A6%8F" )
SkyLothar/requests-aliyun
tests/test-utils.py
Python
apache-2.0
2,134
from . import list
mliberty1/embc
py/embc/collections/__init__.py
Python
apache-2.0
19
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os, socket, sys, time
import data_loader

from datetime import datetime

hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)

import common.core

# Chart layout preset: each entry is one chart -- a list groups several
# series on one chart, a (callable, title) tuple plots a derived metric.
redis_preset = [['memory', 'memory_human', 'memory_lua', 'memory_rss'],
                'mem_frag',
                ['cpu_user', 'cpu_sys', 'cpu_user_children', 'cpu_sys_children'],
                'connections',
                (lambda x: x['keyspace_hits'] / (x['keyspace_hits'] + x['keyspace_misses']) * 100, 'hit ratio'),
                'expired_keys', 'evicted_keys',
                'cmds_processed',
                ['cmd_get', 'cmd_set', 'cmd_mget', 'cmd_mset'],
                ['cmd_del', 'cmd_expire', 'cmd_checkpoint'],
                ['cmd_linsert', 'cmd_lpush', 'cmd_lpop', 'cmd_llen'],
                ['cmd_lindex', 'cmd_lrange'],
                ['cmd_sadd', 'cmd_scard', 'cmd_set', 'cmd_srem'],
                ['cmd_sismember', 'cmd_smembers'],
                ['cmd_zadd', 'cmd_zcard', 'cmd_zrem'],
                ['cmd_zrange', 'cmd_zrank', 'cmd_zscore']]


def redis_view(path, title=''):
    """Load the stat table at `path` rendered with the redis chart preset."""
    return common.core.loader(path, redis_preset, title)


#
# chart list
#

# server -> [instance table names] cache, refreshed at most every 300s
redis_cloud_map = {}
last_ts = 0


def init_plugin():
    """Plugin entry point: warm the server/instance cache at startup."""
    print('#### redis init ########')
    ret = get_chart_list({})
    print(ret)


def get_chart_data(param):
    """Return chart data for the instance named in `param`, or None.

    `param` must carry 'server' and 'instance'; optional 'type' selects the
    chart family (only 'redis_stat' is handled here).
    """
    global redis_cloud_map

    chart_type = 'redis_stat'  # renamed from `type` (shadowed the builtin)
    if 'type' in param:
        chart_type = param['type']

    if 'instance' not in param or 'server' not in param:
        return None

    instance_name = param['instance']
    server_name = param['server']

    # BUG FIX: `results` was previously unbound (NameError) when the type
    # was not 'redis_stat' or no node matched the instance prefix.
    results = None
    if chart_type == 'redis_stat':
        for node in redis_cloud_map[server_name]:
            if node.startswith(instance_name):
                results = common.core.loader(server_name + '/' + node,
                                             redis_preset, title=node)
                break

    return results


def get_chart_list(param):
    """Return (['server', 'instance'], server->instances map), refreshing
    the cached map when it is older than 5 minutes."""
    global redis_cloud_map
    global last_ts

    ts = time.time()
    if ts - last_ts >= 300:
        # rebuild into a temporary dict, then swap, so concurrent readers
        # never observe a half-built map
        redis_cloud_map_tmp = {}
        entity_list = common.core.get_entity_list()
        for entity in entity_list:
            instance_list = common.core.get_table_list_of_entity(entity, 'redis_')
            if len(instance_list) > 0:
                redis_cloud_map_tmp[entity] = instance_list

        redis_cloud_map = redis_cloud_map_tmp
        last_ts = ts

    # NOTE: the original read param['type'] into an unused local here; the
    # dead assignment has been removed -- the return value never used it.
    return (['server', 'instance'], redis_cloud_map)
naver/hubblemon
redis_mon/redis_view.py
Python
apache-2.0
2,771
import numpy as np
import pandas

import scipy, scipy.spatial

import sklearn
import sklearn.svm

import sys
import argparse

import pickle

sys.path.append('codes/')
from utilities_v2 import *

# Label-range configuration: only samples with labels in [ymin, ymax] are
# used, split into a binary problem at ysplit (< ysplit -> -1, >= -> +1).
ymin = 157
ysplit = 160
ymax = 161

# Minimum standardized mean difference for a feature to be kept.
feat_threshold = 0.01

# C=10.0000 Gamma=0.0080  (previously tuned hyperparameters)
optimal_c = 10.00
optimal_gamma = 0.008

#--------------------------------------#
#                 MAIN                 #
#--------------------------------------#
def main():
    """Train an RBF-SVM on a random sample of the selected label range,
    evaluate on the cross-validation split, and write predictions for the
    CV split ('<out>.cv') and the test set ('<out>')."""
    parser = argparse.ArgumentParser()
    parser.add_argument('train', help='Training Data')
    parser.add_argument('labels', help='Training Labels')
    parser.add_argument('test', help='Test Data')
    parser.add_argument('data_cv', help='Data for CrossValidation')
    parser.add_argument('label_cv', help='Labels for CrossValidation')
    parser.add_argument('out', help='Output file name')
    args = parser.parse_args()

    # Training labels: one integer per row, space-separated, no header.
    y = pandas.read_table(args.labels, sep=" ", dtype='int', header=None)
    print(y.head())

    #r = calClassStat(args.train, y[0])
    #print(r)
    # Precomputed per-class feature summaries (produced by an earlier step;
    # assumed indexed by class label plus an 'all' aggregate -- TODO confirm).
    cstat = pickle.load( open( "data/sum_features.dat", "rb" ))
    print(cstat[1][0][1:10])
    print(cstat['all'][1][1:10])

    # Standardized mean difference between the two label groups, per feature
    # (calStandMeanDiff comes from utilities_v2).
    rdiff = calStandMeanDiff(y, cstat, np.arange(ymin,ysplit), np.arange(ysplit, ymax+1))

    ## Good Features:
    goodfeatures = np.where(rdiff > feat_threshold)[0]
    print(goodfeatures)
    sys.stderr.write('Number of Features: %d'%goodfeatures.shape[0])

    #gf_test = np.arange(21,35)
    #Xsub, ysub = readRandomSample(args.train, y[0], \
    #        size=2000, goodfeat=gf_test, acc_miny=15, acc_maxy=20)
    #print(Xsub.shape)
    #print(np.unique(ysub))

    n = 50000
    # Single iteration kept as a loop, presumably for easy repetition/bagging.
    for i in range(1):
        # Random training sample restricted to labels in [ymin, ymax] and to
        # the selected feature columns.
        Xsub, ysub = readRandomSample(args.train, y[0], size=n, \
                goodfeat=goodfeatures, acc_miny=ymin, acc_maxy=ymax)

        assert(np.sum(ysub < ymin) == 0)
        assert(np.sum(ysub > ymax) == 0)
        # Binarize labels around the split point.
        ysub[np.where(ysub < ysplit)[0]] = -1
        ysub[np.where(ysub >= ysplit)[0]] = 1

        # Drop near-constant columns (zero variance would break standardization).
        features_idx = np.where(np.std(Xsub, axis=0)> 0.0001)[0]
        print("Number of Good Features: %d"%features_idx.shape[0])

        # Standardize with the training sample's statistics; the same
        # x_mean/x_std are reused for CV and test below.
        Xsub = Xsub[:,features_idx]
        x_mean = np.mean(Xsub, axis=0)
        x_std = np.std(Xsub, axis=0)
        Xsub = (Xsub - x_mean) / x_std

        sys.stderr.write('Applying SVM classification ... %d'%(i))
        clf = sklearn.svm.SVC(C=optimal_c, kernel='rbf', gamma=optimal_gamma)
        clf.fit(Xsub, ysub)

        # Cross-validation split, filtered to the same label range.
        Xcv = pandas.read_table(args.data_cv, sep=' ', usecols=goodfeatures, dtype='int', header=None)
        ytrue_cv = pandas.read_table(args.label_cv, sep=' ', dtype='int', header=None)[0]

        Xcv = Xcv.iloc[np.where((ytrue_cv >= ymin) & (ytrue_cv <= ymax))[0],:]
        ytrue_cv = ytrue_cv[np.where((ytrue_cv >= ymin) & (ytrue_cv <= ymax))[0]].values

        #ytrue_cv = ytrue_cv[np.where(ytrue_cv <= ymax)[0]]
        #Xcv = Xcv.iloc[np.where(ytrue_cv <= ymax)[0],:]
        print('CrossVal Shape= %d,%d' %Xcv.shape)
        print(np.unique(ytrue_cv))
        ytrue_cv[np.where(ytrue_cv < ysplit)[0]] = -1
        ytrue_cv[np.where(ytrue_cv >= ysplit)[0]] = 1

        print("CrossVal: Neg %s\tPos %d"%(np.sum(ytrue_cv == -1), np.sum(ytrue_cv == 1)))

        # NOTE(review): Xcv is standardized with all good-feature columns,
        # while Xsub was additionally filtered by features_idx -- verify the
        # column sets actually line up for predict().
        Xcv = (Xcv - x_mean) / x_std
        ypred_cv = clf.predict(Xcv)

        prec, recall, f1score = evalPerformance(ytrue_cv, ypred_cv)

        print('CrossVal-Perf: Prec=%.3f Recall=%.3f F1-score=%.3f\n'%(prec, recall, f1score))

        # CV predictions, with the performance summary in the file header.
        np.savetxt('%s.cv'%args.out, ypred_cv, fmt='%d', \
            header=' CrossVal-Perf.: Prec %.3f Recall %.3f F1-score %.3f (n= %d dim= %d )' \
            %(prec, recall, f1score, n, features_idx.shape[0]))

        # Test-set predictions with the same scaler and model.
        Xtest = pandas.read_table(args.test, sep=' ', usecols=goodfeatures, dtype='int', header=None)
        Xtest = (Xtest - x_mean) / x_std
        sys.stderr.write('Test data shape=(%d,%d)'%(Xtest.shape[0], Xtest.shape[1]))

        #ypred = np.zeros(shape=Xtest.shape[0], dtype=int)
        ypred = clf.predict(Xtest)
        np.savetxt(args.out, ypred, fmt='%d', \
            header=' CrossVal-Perf.: Prec %.3f Recall %.3f F1-score %.3f (n= %d dim= %d )' \
            %(prec, recall, f1score, n, features_idx.shape[0]))

if __name__ == '__main__':
    main()
mirjalil/ml-visual-recognition
codes/classify_half5.py
Python
apache-2.0
4,215
import collections g=open("depth_29.txt","w") with open('depth_28.txt') as infile: counts = collections.Counter(l.strip() for l in infile) for line, count in counts.most_common(): g.write(str(line)) #g.write(str(count)) g.write("\n")
join2saurav/Lexical-syntax-semantic-analysis-of-Hindi-text-
test10.py
Python
apache-2.0
256
# coding: utf-8 #------------------------------------------------------------------------- # Copyright (c) Microsoft. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #-------------------------------------------------------------------------- import base64 import time import unittest from datetime import datetime from azure import WindowsAzureError, WindowsAzureBatchOperationError from azure.storage import ( Entity, EntityProperty, StorageServiceProperties, TableService, ) from util import ( AzureTestCase, credentials, getUniqueName, set_service_options, ) #------------------------------------------------------------------------------ MAX_RETRY = 60 #------------------------------------------------------------------------------ class TableServiceTest(AzureTestCase): def setUp(self): self.ts = TableService(credentials.getStorageServicesName(), credentials.getStorageServicesKey()) set_service_options(self.ts) self.table_name = getUniqueName('uttable') self.additional_table_names = [] def tearDown(self): self.cleanup() return super(TableServiceTest, self).tearDown() def cleanup(self): try: self.ts.delete_table(self.table_name) except: pass for name in self.additional_table_names: try: self.ts.delete_table(name) except: pass #--Helpers----------------------------------------------------------------- def _create_table(self, table_name): ''' Creates a table with the specified name. 
''' self.ts.create_table(table_name, True) def _create_table_with_default_entities(self, table_name, entity_count): ''' Creates a table with the specified name and adds entities with the default set of values. PartitionKey is set to 'MyPartition' and RowKey is set to a unique counter value starting at 1 (as a string). ''' entities = [] self._create_table(table_name) for i in range(1, entity_count + 1): entities.append(self.ts.insert_entity( table_name, self._create_default_entity_dict('MyPartition', str(i)))) return entities def _create_default_entity_class(self, partition, row): ''' Creates a class-based entity with fixed values, using all of the supported data types. ''' entity = Entity() entity.PartitionKey = partition entity.RowKey = row entity.age = 39 entity.sex = 'male' entity.married = True entity.deceased = False entity.optional = None entity.ratio = 3.1 entity.large = 9333111000 entity.Birthday = datetime(1973, 10, 4) entity.birthday = datetime(1970, 10, 4) entity.binary = None entity.other = EntityProperty('Edm.Int64', 20) entity.clsid = EntityProperty( 'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity def _create_default_entity_dict(self, partition, row): ''' Creates a dictionary-based entity with fixed values, using all of the supported data types. ''' return {'PartitionKey': partition, 'RowKey': row, 'age': 39, 'sex': 'male', 'married': True, 'deceased': False, 'optional': None, 'ratio': 3.1, 'large': 9333111000, 'Birthday': datetime(1973, 10, 4), 'birthday': datetime(1970, 10, 4), 'other': EntityProperty('Edm.Int64', 20), 'clsid': EntityProperty( 'Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')} def _create_updated_entity_dict(self, partition, row): ''' Creates a dictionary-based entity with fixed values, with a different set of values than the default entity. It adds fields, changes field values, changes field types, and removes fields when compared to the default entity. 
''' return {'PartitionKey': partition, 'RowKey': row, 'age': 'abc', 'sex': 'female', 'sign': 'aquarius', 'birthday': datetime(1991, 10, 4)} def _assert_default_entity(self, entity): ''' Asserts that the entity passed in matches the default entity. ''' self.assertEqual(entity.age, 39) self.assertEqual(entity.sex, 'male') self.assertEqual(entity.married, True) self.assertEqual(entity.deceased, False) self.assertFalse(hasattr(entity, "aquarius")) self.assertEqual(entity.ratio, 3.1) self.assertEqual(entity.large, 9333111000) self.assertEqual(entity.Birthday, datetime(1973, 10, 4)) self.assertEqual(entity.birthday, datetime(1970, 10, 4)) self.assertEqual(entity.other, 20) self.assertIsInstance(entity.clsid, EntityProperty) self.assertEqual(entity.clsid.type, 'Edm.Guid') self.assertEqual(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833') def _assert_updated_entity(self, entity): ''' Asserts that the entity passed in matches the updated entity. ''' self.assertEqual(entity.age, 'abc') self.assertEqual(entity.sex, 'female') self.assertFalse(hasattr(entity, "married")) self.assertFalse(hasattr(entity, "deceased")) self.assertEqual(entity.sign, 'aquarius') self.assertFalse(hasattr(entity, "optional")) self.assertFalse(hasattr(entity, "ratio")) self.assertFalse(hasattr(entity, "large")) self.assertFalse(hasattr(entity, "Birthday")) self.assertEqual(entity.birthday, datetime(1991, 10, 4)) self.assertFalse(hasattr(entity, "other")) self.assertFalse(hasattr(entity, "clsid")) def _assert_merged_entity(self, entity): ''' Asserts that the entity passed in matches the default entity merged with the updated entity. 
''' self.assertEqual(entity.age, 'abc') self.assertEqual(entity.sex, 'female') self.assertEqual(entity.sign, 'aquarius') self.assertEqual(entity.married, True) self.assertEqual(entity.deceased, False) self.assertEqual(entity.sign, 'aquarius') self.assertEqual(entity.ratio, 3.1) self.assertEqual(entity.large, 9333111000) self.assertEqual(entity.Birthday, datetime(1973, 10, 4)) self.assertEqual(entity.birthday, datetime(1991, 10, 4)) self.assertEqual(entity.other, 20) self.assertIsInstance(entity.clsid, EntityProperty) self.assertEqual(entity.clsid.type, 'Edm.Guid') self.assertEqual(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833') #--Test cases for table service ------------------------------------------- def test_get_set_table_service_properties(self): table_properties = self.ts.get_table_service_properties() self.ts.set_table_service_properties(table_properties) tests = [('logging.delete', True), ('logging.delete', False), ('logging.read', True), ('logging.read', False), ('logging.write', True), ('logging.write', False), ] for path, value in tests: # print path cur = table_properties for component in path.split('.')[:-1]: cur = getattr(cur, component) last_attr = path.split('.')[-1] setattr(cur, last_attr, value) self.ts.set_table_service_properties(table_properties) retry_count = 0 while retry_count < MAX_RETRY: table_properties = self.ts.get_table_service_properties() cur = table_properties for component in path.split('.'): cur = getattr(cur, component) if value == cur: break time.sleep(1) retry_count += 1 self.assertEqual(value, cur) def test_table_service_retention_single_set(self): table_properties = self.ts.get_table_service_properties() table_properties.logging.retention_policy.enabled = False table_properties.logging.retention_policy.days = 5 # TODO: Better error, ValueError? 
self.assertRaises(WindowsAzureError, self.ts.set_table_service_properties, table_properties) table_properties = self.ts.get_table_service_properties() table_properties.logging.retention_policy.days = None table_properties.logging.retention_policy.enabled = True # TODO: Better error, ValueError? self.assertRaises(WindowsAzureError, self.ts.set_table_service_properties, table_properties) def test_table_service_set_both(self): table_properties = self.ts.get_table_service_properties() table_properties.logging.retention_policy.enabled = True table_properties.logging.retention_policy.days = 5 self.ts.set_table_service_properties(table_properties) table_properties = self.ts.get_table_service_properties() self.assertEqual( True, table_properties.logging.retention_policy.enabled) self.assertEqual(5, table_properties.logging.retention_policy.days) #--Test cases for tables -------------------------------------------------- def test_create_table(self): # Arrange # Act created = self.ts.create_table(self.table_name) # Assert self.assertTrue(created) def test_create_table_fail_on_exist(self): # Arrange # Act created = self.ts.create_table(self.table_name, True) # Assert self.assertTrue(created) def test_create_table_with_already_existing_table(self): # Arrange # Act created1 = self.ts.create_table(self.table_name) created2 = self.ts.create_table(self.table_name) # Assert self.assertTrue(created1) self.assertFalse(created2) def test_create_table_with_already_existing_table_fail_on_exist(self): # Arrange # Act created = self.ts.create_table(self.table_name) with self.assertRaises(WindowsAzureError): self.ts.create_table(self.table_name, True) # Assert self.assertTrue(created) def test_query_tables(self): # Arrange self._create_table(self.table_name) # Act tables = self.ts.query_tables() for table in tables: pass # Assert tableNames = [x.name for x in tables] self.assertGreaterEqual(len(tableNames), 1) self.assertGreaterEqual(len(tables), 1) self.assertIn(self.table_name, 
tableNames) def test_query_tables_with_table_name(self): # Arrange self._create_table(self.table_name) # Act tables = self.ts.query_tables(self.table_name) for table in tables: pass # Assert self.assertEqual(len(tables), 1) self.assertEqual(tables[0].name, self.table_name) def test_query_tables_with_table_name_no_tables(self): # Arrange # Act with self.assertRaises(WindowsAzureError): self.ts.query_tables(self.table_name) # Assert def test_query_tables_with_top(self): # Arrange self.additional_table_names = [ self.table_name + suffix for suffix in 'abcd'] for name in self.additional_table_names: self.ts.create_table(name) # Act tables = self.ts.query_tables(None, 3) for table in tables: pass # Assert self.assertEqual(len(tables), 3) def test_query_tables_with_top_and_next_table_name(self): # Arrange self.additional_table_names = [ self.table_name + suffix for suffix in 'abcd'] for name in self.additional_table_names: self.ts.create_table(name) # Act tables_set1 = self.ts.query_tables(None, 3) tables_set2 = self.ts.query_tables( None, 3, tables_set1.x_ms_continuation['NextTableName']) # Assert self.assertEqual(len(tables_set1), 3) self.assertGreaterEqual(len(tables_set2), 1) self.assertLessEqual(len(tables_set2), 3) def test_delete_table_with_existing_table(self): # Arrange self._create_table(self.table_name) # Act deleted = self.ts.delete_table(self.table_name) # Assert self.assertTrue(deleted) tables = self.ts.query_tables() self.assertNamedItemNotInContainer(tables, self.table_name) def test_delete_table_with_existing_table_fail_not_exist(self): # Arrange self._create_table(self.table_name) # Act deleted = self.ts.delete_table(self.table_name, True) # Assert self.assertTrue(deleted) tables = self.ts.query_tables() self.assertNamedItemNotInContainer(tables, self.table_name) def test_delete_table_with_non_existing_table(self): # Arrange # Act deleted = self.ts.delete_table(self.table_name) # Assert self.assertFalse(deleted) def 
test_delete_table_with_non_existing_table_fail_not_exist(self): # Arrange # Act with self.assertRaises(WindowsAzureError): self.ts.delete_table(self.table_name, True) # Assert #--Test cases for entities ------------------------------------------ def test_insert_entity_dictionary(self): # Arrange self._create_table(self.table_name) # Act dict = self._create_default_entity_dict('MyPartition', '1') resp = self.ts.insert_entity(self.table_name, dict) # Assert self.assertIsNotNone(resp) def test_insert_entity_class_instance(self): # Arrange self._create_table(self.table_name) # Act entity = self._create_default_entity_class('MyPartition', '1') resp = self.ts.insert_entity(self.table_name, entity) # Assert self.assertIsNotNone(resp) def test_insert_entity_conflict(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act with self.assertRaises(WindowsAzureError): self.ts.insert_entity( self.table_name, self._create_default_entity_dict('MyPartition', '1')) # Assert def test_get_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.get_entity(self.table_name, 'MyPartition', '1') # Assert self.assertEqual(resp.PartitionKey, 'MyPartition') self.assertEqual(resp.RowKey, '1') self._assert_default_entity(resp) def test_get_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act with self.assertRaises(WindowsAzureError): self.ts.get_entity(self.table_name, 'MyPartition', '1') # Assert def test_get_entity_with_select(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.get_entity( self.table_name, 'MyPartition', '1', 'age,sex') # Assert self.assertEqual(resp.age, 39) self.assertEqual(resp.sex, 'male') self.assertFalse(hasattr(resp, "birthday")) self.assertFalse(hasattr(resp, "married")) self.assertFalse(hasattr(resp, "deceased")) def test_query_entities(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) # Act 
resp = self.ts.query_entities(self.table_name) # Assert self.assertEqual(len(resp), 2) for entity in resp: self.assertEqual(entity.PartitionKey, 'MyPartition') self._assert_default_entity(entity) self.assertEqual(resp[0].RowKey, '1') self.assertEqual(resp[1].RowKey, '2') def test_query_entities_with_filter(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) self.ts.insert_entity( self.table_name, self._create_default_entity_dict('MyOtherPartition', '3')) # Act resp = self.ts.query_entities( self.table_name, "PartitionKey eq 'MyPartition'") # Assert self.assertEqual(len(resp), 2) for entity in resp: self.assertEqual(entity.PartitionKey, 'MyPartition') self._assert_default_entity(entity) def test_query_entities_with_select(self): # Arrange self._create_table_with_default_entities(self.table_name, 2) # Act resp = self.ts.query_entities(self.table_name, None, 'age,sex') # Assert self.assertEqual(len(resp), 2) self.assertEqual(resp[0].age, 39) self.assertEqual(resp[0].sex, 'male') self.assertFalse(hasattr(resp[0], "birthday")) self.assertFalse(hasattr(resp[0], "married")) self.assertFalse(hasattr(resp[0], "deceased")) def test_query_entities_with_top(self): # Arrange self._create_table_with_default_entities(self.table_name, 3) # Act resp = self.ts.query_entities(self.table_name, None, None, 2) # Assert self.assertEqual(len(resp), 2) def test_query_entities_with_top_and_next(self): # Arrange self._create_table_with_default_entities(self.table_name, 5) # Act resp1 = self.ts.query_entities(self.table_name, None, None, 2) resp2 = self.ts.query_entities( self.table_name, None, None, 2, resp1.x_ms_continuation['NextPartitionKey'], resp1.x_ms_continuation['NextRowKey']) resp3 = self.ts.query_entities( self.table_name, None, None, 2, resp2.x_ms_continuation['NextPartitionKey'], resp2.x_ms_continuation['NextRowKey']) # Assert self.assertEqual(len(resp1), 2) self.assertEqual(len(resp2), 2) self.assertEqual(len(resp3), 1) self.assertEqual(resp1[0].RowKey, 
'1') self.assertEqual(resp1[1].RowKey, '2') self.assertEqual(resp2[0].RowKey, '3') self.assertEqual(resp2[1].RowKey, '4') self.assertEqual(resp3[0].RowKey, '5') def test_update_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_update_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_update_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert def test_insert_or_merge_entity_with_existing_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_insert_or_merge_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act 
sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_existing_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_replace_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_insert_or_replace_entity_with_non_existing_entity(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.insert_or_replace_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_merge_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity) # Assert def test_merge_entity_with_if_matches(self): # Arrange entities = 
self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') resp = self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) # Assert self.assertIsNotNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_merged_entity(received_entity) def test_merge_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') with self.assertRaises(WindowsAzureError): self.ts.merge_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert def test_delete_entity(self): # Arrange self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.delete_entity(self.table_name, 'MyPartition', '1') # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.ts.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_not_existing(self): # Arrange self._create_table(self.table_name) # Act with self.assertRaises(WindowsAzureError): self.ts.delete_entity(self.table_name, 'MyPartition', '1') # Assert def test_delete_entity_with_if_matches(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act resp = self.ts.delete_entity( self.table_name, 'MyPartition', '1', if_match=entities[0].etag) # Assert self.assertIsNone(resp) with self.assertRaises(WindowsAzureError): self.ts.get_entity(self.table_name, 'MyPartition', '1') def test_delete_entity_with_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act with self.assertRaises(WindowsAzureError): self.ts.delete_entity( self.table_name, 'MyPartition', '1', if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') # Assert #--Test cases for batch 
--------------------------------------------- def test_with_filter_single(self): called = [] def my_filter(request, next): called.append(True) return next(request) tc = self.ts.with_filter(my_filter) tc.create_table(self.table_name) self.assertTrue(called) del called[:] tc.delete_table(self.table_name) self.assertTrue(called) del called[:] def test_with_filter_chained(self): called = [] def filter_a(request, next): called.append('a') return next(request) def filter_b(request, next): called.append('b') return next(request) tc = self.ts.with_filter(filter_a).with_filter(filter_b) tc.create_table(self.table_name) self.assertEqual(called, ['b', 'a']) tc.delete_table(self.table_name) def test_batch_insert(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_entity(self.table_name, entity) self.ts.commit_batch() # Assert result = self.ts.get_entity(self.table_name, '001', 'batch_insert') self.assertIsNotNone(result) def test_batch_update(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_update' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_update') self.assertEqual(3, entity.test3) entity.test2 = 'value1' self.ts.begin_batch() self.ts.update_entity(self.table_name, '001', 'batch_update', entity) self.ts.commit_batch() entity = self.ts.get_entity(self.table_name, '001', 'batch_update') # Assert self.assertEqual('value1', entity.test2) def test_batch_merge(self): 
# Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_merge' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_merge') self.assertEqual(3, entity.test3) entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_merge' entity.test2 = 'value1' self.ts.begin_batch() self.ts.merge_entity(self.table_name, '001', 'batch_merge', entity) self.ts.commit_batch() entity = self.ts.get_entity(self.table_name, '001', 'batch_merge') # Assert self.assertEqual('value1', entity.test2) self.assertEqual(1234567890, entity.test4) def test_batch_update_if_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 1) # Act sent_entity = self._create_updated_entity_dict('MyPartition', '1') self.ts.begin_batch() resp = self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag) self.ts.commit_batch() # Assert self.assertIsNone(resp) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_updated_entity(received_entity) def test_batch_update_if_doesnt_match(self): # Arrange entities = self._create_table_with_default_entities(self.table_name, 2) # Act sent_entity1 = self._create_updated_entity_dict('MyPartition', '1') sent_entity2 = self._create_updated_entity_dict('MyPartition', '2') self.ts.begin_batch() self.ts.update_entity( self.table_name, 'MyPartition', '1', sent_entity1, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"') self.ts.update_entity( self.table_name, 'MyPartition', '2', sent_entity2) try: self.ts.commit_batch() except WindowsAzureBatchOperationError as error: self.assertEqual(error.code, 'UpdateConditionNotSatisfied') 
self.assertTrue(str(error).startswith('0:The update condition specified in the request was not satisfied.')) else: self.fail('WindowsAzureBatchOperationError was expected') # Assert received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '1') self._assert_default_entity(received_entity) received_entity = self.ts.get_entity( self.table_name, 'MyPartition', '2') self._assert_default_entity(received_entity) def test_batch_insert_replace(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert_replace' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_or_replace_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() entity = self.ts.get_entity( self.table_name, '001', 'batch_insert_replace') # Assert self.assertIsNotNone(entity) self.assertEqual('value', entity.test2) self.assertEqual(1234567890, entity.test4) def test_batch_insert_merge(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_insert_merge' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.begin_batch() self.ts.insert_or_merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() entity = self.ts.get_entity( self.table_name, '001', 'batch_insert_merge') # Assert self.assertIsNotNone(entity) self.assertEqual('value', entity.test2) self.assertEqual(1234567890, entity.test4) def test_batch_delete(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '001' entity.RowKey = 'batch_delete' entity.test = 
EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity = self.ts.get_entity(self.table_name, '001', 'batch_delete') #self.assertEqual(3, entity.test3) self.ts.begin_batch() self.ts.delete_entity(self.table_name, '001', 'batch_delete') self.ts.commit_batch() def test_batch_inserts(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = 'batch_inserts' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') self.ts.begin_batch() for i in range(100): entity.RowKey = str(i) self.ts.insert_entity(self.table_name, entity) self.ts.commit_batch() entities = self.ts.query_entities( self.table_name, "PartitionKey eq 'batch_inserts'", '') # Assert self.assertIsNotNone(entities) self.assertEqual(100, len(entities)) def test_batch_all_operations_together(self): # Arrange self._create_table(self.table_name) # Act entity = Entity() entity.PartitionKey = '003' entity.RowKey = 'batch_all_operations_together-1' entity.test = EntityProperty('Edm.Boolean', 'true') entity.test2 = 'value' entity.test3 = 3 entity.test4 = EntityProperty('Edm.Int64', '1234567890') entity.test5 = datetime.utcnow() self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-2' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-3' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-4' self.ts.insert_entity(self.table_name, entity) self.ts.begin_batch() entity.RowKey = 'batch_all_operations_together' self.ts.insert_entity(self.table_name, entity) entity.RowKey = 'batch_all_operations_together-1' self.ts.delete_entity( self.table_name, entity.PartitionKey, entity.RowKey) entity.RowKey = 
'batch_all_operations_together-2' entity.test3 = 10 self.ts.update_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-3' entity.test3 = 100 self.ts.merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-4' entity.test3 = 10 self.ts.insert_or_replace_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) entity.RowKey = 'batch_all_operations_together-5' self.ts.insert_or_merge_entity( self.table_name, entity.PartitionKey, entity.RowKey, entity) self.ts.commit_batch() # Assert entities = self.ts.query_entities( self.table_name, "PartitionKey eq '003'", '') self.assertEqual(5, len(entities)) def test_batch_same_row_operations_fail(self): # Arrange self._create_table(self.table_name) entity = self._create_default_entity_dict('001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_updated_entity_dict( '001', 'batch_negative_1') self.ts.update_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) entity = self._create_default_entity_dict( '001', 'batch_negative_1') self.ts.merge_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) self.ts.cancel_batch() # Assert def test_batch_different_partition_operations_fail(self): # Arrange self._create_table(self.table_name) entity = self._create_default_entity_dict('001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_updated_entity_dict( '001', 'batch_negative_1') self.ts.update_entity( self.table_name, entity['PartitionKey'], entity['RowKey'], entity) entity = self._create_default_entity_dict( '002', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) self.ts.cancel_batch() # Assert def 
test_batch_different_table_operations_fail(self): # Arrange other_table_name = self.table_name + 'other' self.additional_table_names = [other_table_name] self._create_table(self.table_name) self._create_table(other_table_name) # Act with self.assertRaises(WindowsAzureError): self.ts.begin_batch() entity = self._create_default_entity_dict( '001', 'batch_negative_1') self.ts.insert_entity(self.table_name, entity) entity = self._create_default_entity_dict( '001', 'batch_negative_2') self.ts.insert_entity(other_table_name, entity) self.ts.cancel_batch() def test_unicode_property_value(self): ''' regression test for github issue #57''' # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'}) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'}) resp = self.ts.query_entities( self.table_name, "PartitionKey eq 'test'") # Assert self.assertEqual(len(resp), 2) self.assertEqual(resp[0].Description, u'ꀕ') self.assertEqual(resp[1].Description, u'ꀕ') def test_unicode_property_name(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test1', u'啊齄丂狛狜': u'ꀕ'}) self.ts.insert_entity( self.table_name, {'PartitionKey': 'test', 'RowKey': 'test2', u'啊齄丂狛狜': 'hello'}) resp = self.ts.query_entities( self.table_name, "PartitionKey eq 'test'") # Assert self.assertEqual(len(resp), 2) self.assertEqual(resp[0].__dict__[u'啊齄丂狛狜'], u'ꀕ') self.assertEqual(resp[1].__dict__[u'啊齄丂狛狜'], u'hello') def test_unicode_create_table_unicode_name(self): # Arrange self.table_name = self.table_name + u'啊齄丂狛狜' # Act with self.assertRaises(WindowsAzureError): # not supported - table name must be alphanumeric, lowercase self.ts.create_table(self.table_name) # Assert def test_empty_and_spaces_property_value(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 
'PartitionKey': 'test', 'RowKey': 'test1', 'EmptyByte': '', 'EmptyUnicode': u'', 'SpacesOnlyByte': ' ', 'SpacesOnlyUnicode': u' ', 'SpacesBeforeByte': ' Text', 'SpacesBeforeUnicode': u' Text', 'SpacesAfterByte': 'Text ', 'SpacesAfterUnicode': u'Text ', 'SpacesBeforeAndAfterByte': ' Text ', 'SpacesBeforeAndAfterUnicode': u' Text ', }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertEqual(resp.EmptyByte, '') self.assertEqual(resp.EmptyUnicode, u'') self.assertEqual(resp.SpacesOnlyByte, ' ') self.assertEqual(resp.SpacesOnlyUnicode, u' ') self.assertEqual(resp.SpacesBeforeByte, ' Text') self.assertEqual(resp.SpacesBeforeUnicode, u' Text') self.assertEqual(resp.SpacesAfterByte, 'Text ') self.assertEqual(resp.SpacesAfterUnicode, u'Text ') self.assertEqual(resp.SpacesBeforeAndAfterByte, ' Text ') self.assertEqual(resp.SpacesBeforeAndAfterUnicode, u' Text ') def test_none_property_value(self): # Act self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'NoneValue': None, }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertFalse(hasattr(resp, 'NoneValue')) def test_binary_property_value(self): # Act binary_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\t\n' self._create_table(self.table_name) self.ts.insert_entity( self.table_name, { 'PartitionKey': 'test', 'RowKey': 'test1', 'binary': EntityProperty('Edm.Binary', binary_data) }) resp = self.ts.get_entity(self.table_name, 'test', 'test1') # Assert self.assertIsNotNone(resp) self.assertEqual(resp.binary.type, 'Edm.Binary') self.assertEqual(resp.binary.value, binary_data) #------------------------------------------------------------------------------ if __name__ == '__main__': unittest.main()
ljjt/azure-sdk-for-python
tests/test_tableservice.py
Python
apache-2.0
44,444
# Copyright 2017 The Imaging Source Europe GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import time from tcam_capture.CapsDesc import CapsDesc from tcam_capture.TcamScreen import TcamScreen from tcam_capture.FileNameGenerator import FileNameGenerator from tcam_capture.MediaSaver import MediaSaver from tcam_capture.Settings import Settings from tcam_capture.Encoder import MediaType, get_encoder_dict from tcam_capture.TcamCaptureData import TcamCaptureData from tcam_capture.FPSCounter import FPSCounter from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import (QWidget, QHBoxLayout) from PyQt5.QtCore import QObject, pyqtSignal, Qt, QEvent import logging import gi gi.require_version("Gst", "1.0") gi.require_version("Tcam", "0.1") gi.require_version("GstVideo", "1.0") from gi.repository import Tcam, Gst, GLib, GstVideo log = logging.getLogger(__name__) class TcamView(QWidget): image_saved = pyqtSignal(str) video_saved = pyqtSignal(str) new_pixel_under_mouse = pyqtSignal(bool, int, int, QtGui.QColor) current_fps = pyqtSignal(float) format_selected = pyqtSignal(str, str, str) # format, widthxheight, framerate first_image = pyqtSignal() def __init__(self, serial: str, dev_type: str, parent=None): super(TcamView, self).__init__(parent) self.layout = QHBoxLayout() self.container = TcamScreen(self) self.container.new_pixel_under_mouse.connect(self.new_pixel_under_mouse_slot) self.fullscreen_container = None # separate widget for fullscreen usage 
self.is_fullscreen = False self.layout.addWidget(self.container) self.layout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize) self.setLayout(self.layout) self.serial = serial self.dev_type = dev_type self.tcam = None self.pipeline = None self.image = None self.mouse_is_pressed = False self.use_dutils = True self.current_width = 0 self.current_height = 0 self.device_lost_callbacks = [] self.caps_desc = None self.video_format = None self.retry_countdown = 0 self.settings = None self.video_fng = None self.image_fng = None # additional timer to update actual_fps # when no images arrive self.fps_timer = QtCore.QTimer() self.fps_timer.timeout.connect(self.fps_tick) self.fps = None self.file_pattern = "" self.file_location = "/tmp" self.caps = None self.state = None self.videosaver = None self.imagesaver = None self.window_id = self.container.winId() self.displaysink = None def get_caps_desc(self): """ Returns a CapsDesc describing the caps of the currently opened device Returns None if device is not opened """ if not self.caps_desc: tcam = self.get_tcam() if not tcam: return None caps = tcam.get_static_pad("src").query_caps() self.caps_desc = CapsDesc(caps) return self.caps_desc def new_pixel_under_mouse_slot(self, active: bool, mouse_x: int, mouse_y: int, color: QtGui.QColor): self.new_pixel_under_mouse.emit(active, mouse_x, mouse_y, color) def eventFilter(self, obj, event): """""" if event.type == QEvent.KeyPress: if event.key() == Qt.Key_F11: self.toggle_fullscreen() return True return QObject.eventFilter(self, obj, event) def set_settings(self, new_settings: Settings): """ Update settings of all subclasses """ self.settings = new_settings self.use_dutils = self.settings.use_dutils if not self.video_fng: self.video_fng = FileNameGenerator(self.serial, self.settings.video_name) else: self.video_fng.set_settings(self.settings.video_name) self.video_fng.location = self.settings.save_location self.video_fng.file_suffix = 
get_encoder_dict()[self.settings.video_type].file_ending if not self.image_fng: self.image_fng = FileNameGenerator(self.serial, self.settings.image_name) else: self.image_fng.set_settings(self.settings.image_name) self.image_fng.location = self.settings.save_location self.image_fng.file_suffix = get_encoder_dict()[self.settings.image_type].file_ending def toggle_fullscreen(self): if self.is_fullscreen: self.is_fullscreen = False self.showNormal() self.fullscreen_container.hide() # self.fullscreen_container.deleteLater() self.fullscreen_container = None self.displaysink.set_window_handle(self.window_id) else: self.is_fullscreen = True self.fullscreen_container = TcamScreen() self.fullscreen_container.is_fullscreen = True self.fullscreen_container.setAttribute(QtCore.Qt.WA_DeleteOnClose) self.fullscreen_container.showFullScreen() self.fullscreen_container.show() self.container.first_image = True self.displaysink.set_window_handle(self.fullscreen_container.winId()) self.fullscreen_container.setFocusPolicy(QtCore.Qt.StrongFocus) self.fullscreen_container.installEventFilter(self.fullscreen_container) self.fullscreen_container.destroy_widget.connect(self.toggle_fullscreen) # either show info that we are in trigger mode and still waiting for the first image # or show that last image we had. 
This way we always have something to show to the user if self.is_trigger_mode_on() and self.container.first_image: self.fullscreen_container.wait_for_first_image() else: self.fullscreen_container.on_new_pixmap(self.container.pix.pixmap()) def fit_view(self): if self.is_fullscreen: self.fullscreen_container.fit_in_view.emit() else: self.container.fit_in_view.emit() def save_image(self, image_type: str): if not self.imagesaver: self.imagesaver = MediaSaver(self.serial, self.caps, MediaType.image) self.imagesaver.saved.connect(self.image_saved_callback) self.imagesaver.error.connect(self.image_error_callback) self.image_fng.set_settings(self.settings.image_name) fn = self.image_fng.create_file_name("image") self.imagesaver.current_filename = fn self.imagesaver.save_image(get_encoder_dict()[image_type]) def image_saved_callback(self, image_path: str): """ SLOT for imagesaver callback for successfull saving """ self.image_saved.emit(image_path) def image_error_callback(self, error_msg: str): pass def video_saved_callback(self, video_path: str): """ SLOT for videosaver callback for successfull saving """ self.video_saved.emit(video_path) def start_recording_video(self, video_type: str): """ """ if self.videosaver: log.error("A video recording is already ongoing.") return self.videosaver = MediaSaver(self.serial, self.caps, MediaType.video) self.videosaver.set_encoder(video_type) self.videosaver.location = self.file_location self.videosaver.current_filename = self.video_fng.create_file_name() self.videosaver.saved.connect(self.video_saved_callback) self.videosaver.start_recording_video(video_type) def stop_recording_video(self): """ """ if self.videosaver: self.videosaver.stop_recording_video() self.videosaver = None def get_gst_state(self, timeout=5): """ Arguments: timeout=5, optional """ if not self.pipeline: return None return self.pipeline.get_state(timeout).state def play(self, video_format=None): if self.videosaver: self.stop_recording_video() if self.pipeline is 
None: self.create_pipeline() if self.get_gst_state() == Gst.State.PLAYING: log.debug("Setting state to NULL") # Set to NULL to ensure that buffers, # etc are destroyed. # do this by calling stop # so that additional steps like fps.stop() # are taken self.stop() self.pipeline.set_state(Gst.State.READY) if video_format: caps_desc = self.get_caps_desc() if caps_desc.contains(video_format): self.video_format = video_format else: log.error("Given format caps could not be found in caps descriptions. {}".format(video_format)) log.error("Falling back to default behavior.") if self.video_format is not None: log.info("Setting format to {}".format(video_format)) caps = self.pipeline.get_by_name("bin") caps.set_property("device-caps", video_format) if self.state and self.settings.apply_property_cache: log.info("Property state found.") # log.debug("Setting state: ==>{}<==".format(self.state)) self.tcam.set_property("state", str(self.state)) self.state = None else: log.info("No property state to be applied. 
Starting vanilla camera") log.debug("Setting state to PLAYING") self.pipeline.set_state(Gst.State.PLAYING) self.fps_timer.start(1000) # 1 second self.fps = FPSCounter() self.fps.start() self.container.first_image = True if self.is_trigger_mode_on(): self.container.wait_for_first_image() def fps_tick(self): """ Recalculate the current fps and emit current_fps signal """ self.current_fps.emit(self.fps.get_fps()) def new_buffer(self, appsink): """ callback for appsink new-sample signal converts gstbuffer into qpixmap and gives it to the display container """ self.fps.tick() self.fps_tick() if self.container.first_image: self.first_image.emit() self.container.remove_wait_for_fist_image() buf = self.pipeline.get_by_name("sink").emit("pull-sample") caps = buf.get_caps() self.caps = caps if (not (self.videosaver and self.videosaver.accept_buffer) and not (self.imagesaver and self.imagesaver.accept_buffer)): return Gst.FlowReturn.OK b = buf.get_buffer() if self.videosaver and self.videosaver.accept_buffer: self.videosaver.feed_image(b) if self.imagesaver and self.imagesaver.accept_buffer: self.imagesaver.feed_image(b) return Gst.FlowReturn.OK def create_pipeline(self, video_format=None): # we cheat # inject the type into the serial # this ensures that no matter what we # always have the correct backend if self.dev_type: self.serial = "{}-{}".format(self.serial, self.dev_type.lower()) # the queue element before the sink is important. # it allows set_state to work as expected. # the sink is synced with our main thread (the display thread). # changing the state from out main thread will cause a deadlock, # since the remaining buffers can not be displayed because our main thread # is currently in set_state pipeline_str = ("tcambin serial={serial} name=bin use-dutils={dutils} " "! video/x-raw,format=BGRx " "! tee name=tee " "! queue max-size-buffers=2 leaky=downstream " "! video/x-raw,format=BGRx " "! 
appsink name=sink emit-signals=true sync=false drop=true max-buffers=4 " "tee. " "! queue max-size-buffers=2 leaky=downstream " "! videoconvert " "! xvimagesink double-buffer=true sync=false name=displaysink draw-borders=false") self.pipeline = None self.pipeline = Gst.parse_launch(pipeline_str.format(serial=self.serial, type=self.dev_type.lower(), dutils=self.use_dutils)) self.displaysink = self.pipeline.get_by_name("displaysink") sink = self.pipeline.get_by_name("sink") sink.connect("new-sample", self.new_buffer) # Create bus to get events from GStreamer pipeline self.bus = self.pipeline.get_bus() self.bus.add_signal_watch() self.bus.enable_sync_message_emission() self.bus.connect('message::error', self.on_error) self.bus.connect('message::info', self.on_info) self.bus.connect("sync-message::element", self.on_sync_message) self.tcam = self.pipeline.get_by_name("bin") if video_format: self.tcam.set_property("device-caps", video_format) # This ready is required so that get_caps_desc # works and does not return ANY self.pipeline.set_state(Gst.State.READY) log.debug("Created pipeline and set to READY") log.debug("Pipeline is: {}".format(pipeline_str.format(serial=self.serial, type=self.dev_type.lower(), dutils=self.use_dutils))) def on_sync_message(self, bus, message): structure = message.get_structure() if structure is None: return message_name = structure.get_name() if message_name == "prepare-window-handle": # "Note that trying to get the drawingarea XID in your on_sync_message() handler # will cause a segfault because of threading issues." 
# print 'sinkx_overview win_id: %s (%s)' % (self.gstWindowId, self.video_container.winId()) assert self.window_id message.src.set_window_handle(self.window_id) def pause(self): log.info("Setting state to PAUSED") if self.pipeline: self.pipeline.set_state(Gst.State.PAUSED) else: log.error("Pipeline object does not exist.") self.fps_timer.stop() if self.fps: self.fps.stop() def stop(self): """ Stop playback """ log.info("Setting state to NULL") self.fps_timer.stop() if self.fps: self.fps.stop() self.pipeline.set_state(Gst.State.NULL) def on_info(self, bus, msg): """ Callback for gst bus info messages """ info, dbg = msg.parse_info() log.info(dbg) if msg.src.get_name() == "bin": if dbg.startswith("Working with src caps:"): log.info("{}".format(dbg.split(": ")[1])) self.caps = dbg.split(": ")[1] self.fire_format_selected(dbg.split(": ")[1]) else: log.error("Info from bin: {}".format(dbg)) else: log.error("ERROR:", msg.src.get_name()) if dbg: log.debug("Debug info:", dbg) def fire_format_selected(self, caps: str): """ Emit SIGNAL that the pipeline has selected src caps and inform listeners what the caps are """ if caps is None or caps == "NULL": log.error("Bin returned faulty source caps. Not firiing format_selected") return c = Gst.Caps.from_string(caps) if c.is_empty(): log.error("Received empty caps. Aborting fire_format_selected") return structure = c.get_structure(0) self.image_fng.set_caps(c) self.video_fng.set_caps(c) if structure.get_name() == "image/jpeg": fmt = "jpeg" else: fmt = structure.get_value("format") resolution = "{}x{}".format(structure.get_value("width"), structure.get_value("height")) # compatability problems # Older python bindings do not know the type Gst.Fraction. # Thus we have to work around this problem... results = re.search("framerate=\(fraction\)\d+/\d+", caps) if results: fps = results.group() fps = fps.replace("framerate=(fraction)", "") else: fps = None log.error("Unable to determine framerate settings. 
This will affect usability.") self.format_selected.emit(fmt, resolution, str(fps)) def on_error(self, bus, msg): """ Callback for gst bus messages Receives errors and chooses appropriate actions """ err, dbg = msg.parse_error() if "tcamsrc-" in msg.src.get_name(): if err: if "Device lost (" in err.message: m = re.search('Device lost \((.*)\)', err.message) log.error("Received device lost message for {}".format(m.group(1))) self.fire_device_lost() else: log.error("Error from source: {}".format(err.message)) self.retry_countdown -= 1 if self.retry_countdown <= 0: log.error("Repeatedly retried to start stream. No Success. Giving up.") return log.info("Trying restart of stream") self.stop() self.play(self.video_format) else: log.error("ERROR: {} : {}".format(msg.src.get_name(), err.message)) if dbg: log.debug("Debug info: {}".format(dbg)) def get_tcam(self): return self.tcam def register_device_lost(self, callback): self.device_lost_callbacks.append(callback) def fire_device_lost(self): """ Notify all callback that our device is gone """ for cb in self.device_lost_callbacks: cb() def is_trigger_mode_on(self): if not self.tcam: return False names = self.tcam.get_tcam_property_names() if "Trigger Mode" not in names: return False try: (result, value, minval, maxval, defval, step, valuetype, flags, category, group) = self.tcam.get_tcam_property("Trigger Mode") except TypeError: log.warning("get_tcam_property failed for '{}'".format("Trigger Mode")) return False if valuetype == "boolean": if value: return True return False elif valuetype == "enum": if value == "On": return True return True def trigger_image(self): """ Checks if trigger mode is active and try to trigger an image """ if self.is_trigger_mode_on(): self.tcam.set_tcam_property("Software Trigger", True) def start_roi_capture(self, finished_signal): """ Start capturing a ROI and emit finished_signal once the capture is finished """ self.container.start_roi_capture(finished_signal) def add_roi(self, roi_widget): 
""" Add the given roi_widget for permanent display. Call remove_roi to undo. """ self.container.add_roi(roi_widget) def remove_roi(self, roi_widget): """ Remove roi_widget from display """ self.container.remove_roi(roi_widget) def get_state(self): """ Retrieve a json description of the current property settings Returns: str or None """ if not self.tcam: return None return self.tcam.get_property("state") def load_state(self, state: str): """ Arguments: state: str containing json descibing the property values """ self.state = state @staticmethod def has_dutils(): """ Check to see if the gstreamer module gsttcamdutils is available. """ factory = Gst.ElementFactory.find("tcamdutils") if factory: return True return False
TheImagingSource/tiscamera
tools/tcam-capture/tcam_capture/TcamView.py
Python
apache-2.0
21,385
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
.. module:: structures
   :platform: Unix
   :synopsis: Classes which describe the main data types for passing between
   plugins

.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>

"""

import numpy as np
import h5py
import logging

from mpi4py import MPI

from savu.core.utils import logmethod

# HDF5 attribute name used to tag groups with their NeXus class.
NX_CLASS = 'NX_class'

# Core Direction Keywords: keys into Data.core_directions identifying which
# axes of the backing array form a single frame of each kind.
CD_PROJECTION = 'core_dir_projection'
CD_SINOGRAM = 'core_dir_sinogram'
CD_ROTATION_AXIS = 'core_dir_rotation_axis'
CD_PATTERN = 'core_dir_pattern'


class SliceAvailableWrapper(object):
    """
    This class takes 2 datasets, one available boolean ndarray, and 1 data
    ndarray.  Its purpose is to provide slices from the data array only if data
    has been put there, and to allow a convenient way to put slices into the
    data array, and set the available array to True
    """
    def __init__(self, avail, data):
        """
        :param avail: The available boolean ndArray
        :type avail: boolean ndArray
        :param data: The data ndArray
        :type data: any ndArray
        """
        self.avail = avail
        self.data = data

    def __getitem__(self, item):
        # Only hand out a slice once every element of it has been marked
        # available; otherwise the caller gets None and must retry.
        if self.avail[item].all():
            return self.data[item]
        else:
            return None

    def __setitem__(self, item, value):
        self.data[item] = value
        # Mark the written region as available for readers.
        self.avail[item] = True

    def __getattr__(self, name):
        """
        Delegate everything else to the data class
        """
        # NOTE: only called for attributes not found on the wrapper itself,
        # so 'avail' and 'data' are never forwarded.
        value = self.data.__getattribute__(name)
        return value


class SliceAlwaysAvailableWrapper(SliceAvailableWrapper):
    """
    This class takes 1 data ndarray. Its purpose is to provide slices from the
    data array in the same way as the SliceAvailableWrapper but assuming the
    data is always available (for example in the case of the input file)
    """
    def __init__(self, data):
        """
        :param data: The data ndArray
        :type data: any ndArray
        """
        # No availability mask is needed; pass None for avail.
        super(SliceAlwaysAvailableWrapper, self).__init__(None, data)

    @logmethod
    def __getitem__(self, item):
        return self.data[item]

    @logmethod
    def __setitem__(self, item, value):
        self.data[item] = value


class PassThrough(object):
    """
    Interface Class describing when the input data of a plugin is also the
    output
    """
    def __init__(self):
        super(PassThrough, self).__init__()


class Data(object):
    """
    Baseclass for all data
    """

    def __init__(self):
        super(Data, self).__init__()
        # Open h5py.File backing this data, or None when not file-backed.
        self.backing_file = None
        # The (possibly wrapped) main data array.
        self.data = None
        # Group path of this data inside the backing file.
        self.base_path = None
        # Maps core-direction keywords (CD_*) to the axes forming one frame.
        self.core_directions = {}

    @logmethod
    def complete(self):
        """
        Closes the backing file and completes work
        """
        if self.backing_file is not None:
            logging.debug("Completing file %s %s", self.base_path,
                          self.backing_file.filename)
            self.backing_file.close()
            self.backing_file = None

    def external_link(self):
        # Build an HDF5 external link pointing at this data's group so other
        # files can reference it without copying.
        return h5py.ExternalLink(self.backing_file.filename, self.base_path)

    def get_slice_list(self, frame_type):
        # Returns a list of slice tuples, one per frame of the given type,
        # or None if frame_type has no registered core directions.
        if frame_type in self.core_directions.keys():
            it = np.nditer(self.data, flags=['multi_index'])
            dirs_to_remove = list(self.core_directions[frame_type])
            # Remove the in-frame axes from the iterator; removing from the
            # highest axis down keeps the remaining axis numbers valid.
            dirs_to_remove.sort(reverse=True)
            for direction in dirs_to_remove:
                it.remove_axis(direction)
            # NOTE: relies on Python 2 range() returning a list -
            # list.insert() is called on it below.
            mapping_list = range(len(it.multi_index))
            dirs_to_remove.sort()
            for direction in dirs_to_remove:
                # -1 marks a core axis: it indexes the trailing slice(None)
                # appended to tup below, i.e. "take the whole axis".
                mapping_list.insert(direction, -1)
            mapping_array = np.array(mapping_list)
            slice_list = []
            while not it.finished:
                tup = it.multi_index + (slice(None),)
                slice_list.append(tuple(np.array(tup)[mapping_array]))
                it.iternext()
            return slice_list
        return None

    def get_data_shape(self):
        """
        Simply returns the shape of the main data array
        """
        return self.data.shape


class RawTimeseriesData(Data):
    """
    Descriptor for raw timeseries data
    """

    def __init__(self):
        super(RawTimeseriesData, self).__init__()
        self.image_key = None
        self.rotation_angle = None
        self.control = None
        self.center_of_rotation = None

    @logmethod
    def populate_from_nx_tomo(self, path):
        """
        Populate the RawTimeseriesData from an NXTomo defined NeXus file

        :param path: The full path of the NeXus file to load.
        :type path: str
        """
        self.backing_file = h5py.File(path, 'r')
        logging.debug("Creating file '%s' '%s'", 'tomo_entry',
                      self.backing_file.filename)

        data = self.backing_file['entry1/tomo_entry/instrument/detector/data']
        self.data = SliceAlwaysAvailableWrapper(data)

        image_key = self.backing_file[
            'entry1/tomo_entry/instrument/detector/image_key']
        self.image_key = SliceAlwaysAvailableWrapper(image_key)

        rotation_angle = \
            self.backing_file['entry1/tomo_entry/sample/rotation_angle']
        self.rotation_angle = SliceAlwaysAvailableWrapper(rotation_angle)

        control = self.backing_file['entry1/tomo_entry/control/data']
        self.control = SliceAlwaysAvailableWrapper(control)

        self.core_directions[CD_PROJECTION] = (1, 2)
        self.core_directions[CD_SINOGRAM] = (0, 2)
        self.core_directions[CD_ROTATION_AXIS] = (0, )

    @logmethod
    def create_backing_h5(self, path, group_name, data, mpi=False,
                          new_shape=None):
        """
        Create a h5 backend for this RawTimeseriesData

        :param path: The full path of the NeXus file to use as a backend
        :type path: str
        :param data: The structure from which this can be created
        :type data: savu.structure.RawTimeseriesData
        :param mpi: if an MPI process, provide MPI package here, default None
        :type mpi: package
        """
        self.backing_file = None
        if mpi:
            # Parallel HDF5: all ranks must open the file collectively.
            self.backing_file = h5py.File(path, 'w', driver='mpio',
                                          comm=MPI.COMM_WORLD)
        else:
            self.backing_file = h5py.File(path, 'w')

        if self.backing_file is None:
            raise IOError("Failed to open the hdf5 file")
        # NOTE(review): this logs self.base_path before it is assigned on the
        # next line, so the first %s is always None here - confirm intended.
        logging.debug("Creating file '%s' '%s'", self.base_path,
                      self.backing_file.filename)
        self.base_path = group_name

        if not isinstance(data, RawTimeseriesData):
            raise ValueError("data is not a RawTimeseriesData")

        self.core_directions[CD_PROJECTION] = (1, 2)
        self.core_directions[CD_SINOGRAM] = (0, 2)
        # NOTE(review): stored as a bare int here, unlike the (0, ) tuple used
        # in populate_from_nx_tomo; cor_shape below indexes a shape tuple with
        # this value, so "fixing" it to a tuple would break - confirm intended.
        self.core_directions[CD_ROTATION_AXIS] = 0

        data_shape = new_shape
        if data_shape is None:
            data_shape = data.data.shape
        data_type = np.double
        image_key_shape = data.image_key.shape
        image_key_type = data.image_key.dtype
        rotation_angle_shape = data.rotation_angle.shape
        rotation_angle_type = data.rotation_angle.dtype
        control_shape = data.control.shape
        control_type = data.control.dtype
        cor_shape = (data.data.shape[self.core_directions[CD_ROTATION_AXIS]],)
        cor_type = np.double

        group = self.backing_file.create_group(group_name)
        group.attrs[NX_CLASS] = 'NXdata'
        data_value = group.create_dataset('data', data_shape, data_type)
        data_value.attrs['signal'] = 1
        # Parallel boolean mask tracking which elements have been written.
        data_avail = group.create_dataset('data_avail', data_shape, np.bool_)
        self.data = SliceAvailableWrapper(data_avail, data_value)

        # Create and prepopulate the following, as they are likely to be
        # Unchanged during the processing
        image_key = \
            group.create_dataset('image_key', image_key_shape, image_key_type)
        image_key_avail = \
            group.create_dataset('image_key_avail', image_key_shape, np.bool_)
        self.image_key = \
            SliceAvailableWrapper(image_key_avail, image_key)
        self.image_key[:] = data.image_key[:]

        rotation_angle = \
            group.create_dataset('rotation_angle', rotation_angle_shape,
                                 rotation_angle_type)
        rotation_angle_avail = \
            group.create_dataset('rotation_angle_avail', rotation_angle_shape,
                                 np.bool_)
        self.rotation_angle = \
            SliceAvailableWrapper(rotation_angle_avail, rotation_angle)
        self.rotation_angle[:] = data.rotation_angle[:]

        control = \
            group.create_dataset('control', control_shape, control_type)
        control_avail = \
            group.create_dataset('control_avail', control_shape, np.bool_)
        self.control = \
            SliceAvailableWrapper(control_avail, control)
        self.control[:] = data.control[:]

        cor = \
            group.create_dataset('center_of_rotation', cor_shape, cor_type)
        cor_avail = \
            group.create_dataset('center_of_rotation_avail', cor_shape,
                                 np.bool_)
        self.center_of_rotation = \
            SliceAvailableWrapper(cor_avail, cor)

    def get_number_of_projections(self):
        """
        Gets the real number of projections excluding calibration data

        :returns: integer number of data frames
        """
        # image_key == 0 marks real projection frames (NXtomo convention:
        # nonzero values are flat/dark calibration frames).
        return (self.image_key.data[:] == 0).sum()

    def get_projection_shape(self):
        """
        Gets the shape of a projection

        :returns: a tuple of the shape of a single projection
        """
        return self.data.data.shape[1:3]

    def get_clusterd_frame_list(self):
        """
        Gets a list of index arrays grouped by sequential image_key

        NOTE: the typo in this method name ('clusterd') is kept for
        compatibility with existing callers.

        :returns: a list of integer index arrays
        """
        # Split frame indices wherever the image_key value changes, yielding
        # runs of consecutive frames with identical keys.
        diff = np.abs(np.diff(self.image_key))
        pos = np.where(diff > 0)[0] + 1
        return np.split(np.arange(self.image_key.shape[0]), pos)


class ProjectionData(Data):
    """
    Descriptor for corrected projection data
    """

    def __init__(self):
        super(ProjectionData, self).__init__()
        self.rotation_angle = None
        self.center_of_rotation = None

    @logmethod
    def create_backing_h5(self, path, group_name, data, mpi=False,
                          new_shape=None):
        """
        Create a h5 backend for this ProjectionData

        :param path: The full path of the NeXus file to use as a backend
        :type path: str
        :param data: The structure from which this can be created
        :type data: savu.structure
        :param mpi: if an MPI process, provide MPI package here, default None
        :type mpi: package
        """
        self.backing_file = None
        if mpi:
            # Parallel HDF5: all ranks must open the file collectively.
            self.backing_file = h5py.File(path, 'w', driver='mpio',
                                          comm=MPI.COMM_WORLD)
        else:
            self.backing_file = h5py.File(path, 'w')

        if self.backing_file is None:
            raise IOError("Failed to open the hdf5 file")
        self.base_path = group_name
        logging.debug("Creating file '%s' '%s'", self.base_path,
                      self.backing_file.filename)

        data_shape = new_shape
        data_type = None
        rotation_angle_shape = None
        rotation_angle_type = None
        cor_shape = None
        cor_type = np.double

        # Shapes depend on whether the source is raw timeseries data (where
        # calibration frames must be excluded) or already-corrected data.
        if data.__class__ == RawTimeseriesData:
            if data_shape is None:
                data_shape = (data.get_number_of_projections(),) +\
                    data.get_projection_shape()
            data_type = np.double
            rotation_angle_shape = (data.get_number_of_projections(),)
            rotation_angle_type = data.rotation_angle.dtype
            cor_shape = (data.data.shape[1],)

        elif data.__class__ == ProjectionData:
            if data_shape is None:
                data_shape = data.data.shape
            data_type = np.double
            rotation_angle_shape = data.rotation_angle.shape
            rotation_angle_type = data.rotation_angle.dtype
            cor_shape = (data.data.shape[1],)

        group = self.backing_file.create_group(group_name)
        group.attrs[NX_CLASS] = 'NXdata'
        data_value = group.create_dataset('data', data_shape, data_type)
        data_value.attrs['signal'] = 1
        data_avail = group.create_dataset('data_avail', data_shape, np.bool_)
        self.data = SliceAvailableWrapper(data_avail, data_value)

        rotation_angle = \
            group.create_dataset('rotation_angle', rotation_angle_shape,
                                 rotation_angle_type)
        rotation_angle_avail = \
            group.create_dataset('rotation_angle_avail', rotation_angle_shape,
                                 np.bool_)
        self.rotation_angle = \
            SliceAvailableWrapper(rotation_angle_avail, rotation_angle)

        cor = \
            group.create_dataset('center_of_rotation', cor_shape, cor_type)
        cor_avail = \
            group.create_dataset('center_of_rotation_avail', cor_shape,
                                 np.bool_)
        self.center_of_rotation = \
            SliceAvailableWrapper(cor_avail, cor)

        # Inherit the frame-axis mapping from the source data.
        self.core_directions = data.core_directions

    @logmethod
    def populate_from_h5(self, path):
        """
        Populate the contents of this object from a file

        :param path: The full path of the h5 file to load.
        :type path: str
        """
        self.backing_file = h5py.File(path, 'r')
        logging.debug("Creating file '%s' '%s'", 'TimeseriesFieldCorrections',
                      self.backing_file.filename)

        data = self.backing_file['TimeseriesFieldCorrections/data']
        self.data = SliceAlwaysAvailableWrapper(data)

        rotation_angle = \
            self.backing_file['TimeseriesFieldCorrections/rotation_angle']
        self.rotation_angle = SliceAlwaysAvailableWrapper(rotation_angle)

        self.core_directions[CD_PROJECTION] = (1, 2)
        self.core_directions[CD_SINOGRAM] = (0, 2)
        self.core_directions[CD_ROTATION_AXIS] = (0,)

    def get_number_of_sinograms(self):
        """
        Gets the real number of sinograms

        :returns: integer number of sinogram frames
        """
        return self.data.shape[1]

    def get_number_of_projections(self):
        """
        Gets the real number of projections

        :returns: integer number of projection frames
        """
        return self.data.shape[0]


class VolumeData(Data):
    """
    Descriptor for volume data
    """

    def __init__(self):
        super(VolumeData, self).__init__()

    @logmethod
    def create_backing_h5(self, path, group_name, data_shape, data_type,
                          mpi=False):
        """
        Create a h5 backend for this VolumeData

        :param path: The full path of the NeXus file to use as a backend
        :type path: str
        :param data_shape: The shape of the data block
        :type data_shape: tuple
        :param data_type: The type of the data block
        :type data_type: np.dtype
        :param mpi: if an MPI process, provide MPI package here, default None
        :type mpi: package
        """
        self.backing_file = None
        if mpi:
            # Parallel HDF5: all ranks must open the file collectively.
            self.backing_file = h5py.File(path, 'w', driver='mpio',
                                          comm=MPI.COMM_WORLD)
        else:
            self.backing_file = h5py.File(path, 'w')

        if self.backing_file is None:
            raise IOError("Failed to open the hdf5 file")
        self.base_path = group_name
        logging.debug("Creating file '%s' '%s'", self.base_path,
                      self.backing_file.filename)

        group = self.backing_file.create_group(group_name)
        group.attrs[NX_CLASS] = 'NXdata'
        data_value = group.create_dataset('data', data_shape, data_type)
        data_value.attrs['signal'] = 1
        data_avail = group.create_dataset('data_avail', data_shape, np.bool_)
        self.data = SliceAvailableWrapper(data_avail, data_value)

    def get_volume_shape(self):
        """
        Gets the shape of the volume data

        :returns: tuple shape of the volume array
        """
        return self.data.shape
swtp1v07/Savu
savu/data/structures.py
Python
apache-2.0
17,375
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os from abc import abstractmethod from collections import OrderedDict from zipfile import ZIP_DEFLATED, ZIP_STORED from pants.util.contextutil import open_tar, open_zip from pants.util.dirutil import safe_walk from pants.util.meta import AbstractClass from pants.util.strutil import ensure_text """Support for wholesale archive creation and extraction in a uniform API across archive types.""" class Archiver(AbstractClass): @classmethod def extract(cls, path, outdir): """Extracts an archive's contents to the specified outdir.""" raise NotImplementedError() @abstractmethod def create(self, basedir, outdir, name, prefix=None): """Creates an archive of all files found under basedir to a file at outdir of the given name. If prefix is specified, it should be prepended to all archive paths. 
""" class TarArchiver(Archiver): """An archiver that stores files in a tar file with optional compression.""" @classmethod def extract(cls, path, outdir): with open_tar(path, errorlevel=1) as tar: tar.extractall(outdir) def __init__(self, mode, extension): super(TarArchiver, self).__init__() self.mode = mode self.extension = extension def create(self, basedir, outdir, name, prefix=None): basedir = ensure_text(basedir) tarpath = os.path.join(outdir, '{}.{}'.format(ensure_text(name), self.extension)) with open_tar(tarpath, self.mode, dereference=True, errorlevel=1) as tar: tar.add(basedir, arcname=prefix or '.') return tarpath class ZipArchiver(Archiver): """An archiver that stores files in a zip file with optional compression.""" @classmethod def extract(cls, path, outdir, filter_func=None): """Extract from a zip file, with an optional filter :param string path: path to the zipfile to extract from :param string outdir: directory to extract files into :param function filter_func: optional filter with the filename as the parameter. Returns True if the file should be extracted. """ with open_zip(path) as archive_file: for name in archive_file.namelist(): # While we're at it, we also perform this safety test. if name.startswith(b'/') or name.startswith(b'..'): raise ValueError('Zip file contains unsafe path: {}'.format(name)) # Ignore directories. extract() will create parent dirs as needed. # OS X's python 2.6.1 has a bug in zipfile that makes it unzip directories as regular files. # This method should work on for python 2.6-3.x. # TODO(Eric Ayers) Pants no longer builds with python 2.6. Can this be removed? 
if not name.endswith(b'/'): if (not filter_func or filter_func(name)): archive_file.extract(name, outdir) def __init__(self, compression, extension): super(ZipArchiver, self).__init__() self.compression = compression self.extension = extension def create(self, basedir, outdir, name, prefix=None): zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension)) with open_zip(zippath, 'w', compression=self.compression) as zip: # For symlinks, we want to archive the actual content of linked files but # under the relpath derived from symlink. for root, _, files in safe_walk(basedir, followlinks=True): root = ensure_text(root) for file in files: file = ensure_text(file) full_path = os.path.join(root, file) relpath = os.path.relpath(full_path, basedir) if prefix: relpath = os.path.join(ensure_text(prefix), relpath) zip.write(full_path, relpath) return zippath TAR = TarArchiver('w:', 'tar') TGZ = TarArchiver('w:gz', 'tar.gz') TBZ2 = TarArchiver('w:bz2', 'tar.bz2') ZIP = ZipArchiver(ZIP_DEFLATED, 'zip') JAR = ZipArchiver(ZIP_STORED, 'jar') _ARCHIVER_BY_TYPE = OrderedDict(tar=TAR, tgz=TGZ, tbz2=TBZ2, zip=ZIP, jar=JAR) TYPE_NAMES = frozenset(_ARCHIVER_BY_TYPE.keys()) def archiver(typename): """Returns Archivers in common configurations. The typename must correspond to one of the following: 'tar' Returns a tar archiver that applies no compression and emits .tar files. 'tgz' Returns a tar archiver that applies gzip compression and emits .tar.gz files. 'tbz2' Returns a tar archiver that applies bzip2 compression and emits .tar.bz2 files. 'zip' Returns a zip archiver that applies standard compression and emits .zip files. 'jar' Returns a jar archiver that applies no compression and emits .jar files. Note this is provided as a light way of zipping input files into a jar, without the need to prepare Manifest etc. For more advanced usages, please refer to :class: `pants.backend.jvm.subsystems.jar_tool.JarTool` or :class: `pants.backend.jvm.tasks.jar_task.JarTask`. 
""" archiver = _ARCHIVER_BY_TYPE.get(typename) if not archiver: raise ValueError('No archiver registered for {!r}'.format(typename)) return archiver def archiver_for_path(path_name): """Returns an Archiver for the given path name. :param string path_name: The path name of the archive - need not exist. :raises: :class:`ValueError` If the path name does not uniquely identify a supported archive type. """ if path_name.endswith('.tar.gz'): return TGZ elif path_name.endswith('.tar.bz2'): return TBZ2 else: _, ext = os.path.splitext(path_name) if ext: ext = ext[1:] # Trim leading '.'. if not ext: raise ValueError('Could not determine archive type of path {}'.format(path_name)) return archiver(ext)
dturner-tw/pants
src/python/pants/fs/archive.py
Python
apache-2.0
5,886
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-08 15:45 from __future__ import unicode_literals from django.db import migrations import share.robot class Migration(migrations.Migration): dependencies = [ ('share', '0001_initial'), ('djcelery', '0001_initial'), ] operations = [ migrations.RunPython( code=share.robot.RobotUserMigration('org.biorxiv'), ), migrations.RunPython( code=share.robot.RobotOauthTokenMigration('org.biorxiv'), ), migrations.RunPython( code=share.robot.RobotScheduleMigration('org.biorxiv'), ), ]
zamattiac/SHARE
providers/org/biorxiv/migrations/0001_initial.py
Python
apache-2.0
658
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import uuid import fixtures import mock import oslo_config.fixture from oslo_db.sqlalchemy import migration from oslo_log import log from six.moves import configparser from six.moves import range from testtools import matchers from keystone.auth import controllers from keystone.cmd import cli from keystone.cmd.doctor import caching from keystone.cmd.doctor import credential from keystone.cmd.doctor import database as doc_database from keystone.cmd.doctor import debug from keystone.cmd.doctor import federation from keystone.cmd.doctor import ldap from keystone.cmd.doctor import security_compliance from keystone.cmd.doctor import tokens from keystone.cmd.doctor import tokens_fernet from keystone.common import dependency from keystone.common.sql import upgrades import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.mapping_backends import mapping as identity_mapping from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb CONF = keystone.conf.CONF class CliTestCase(unit.SQLDriverOverrides, unit.TestCase): def config_files(self): config_files = super(CliTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def test_token_flush(self): 
self.useFixture(database.Database()) self.load_backends() cli.TokenFlush.main() class CliNoConfigTestCase(unit.BaseTestCase): def setUp(self): self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) self.useFixture(fixtures.MockPatch( 'oslo_config.cfg.find_config_files', return_value=[])) super(CliNoConfigTestCase, self).setUp() # NOTE(crinkle): the command call doesn't have to actually work, # that's what the other unit tests are for. So just mock it out. class FakeConfCommand(object): def __init__(self): self.cmd_class = mock.Mock() self.useFixture(fixtures.MockPatchObject( CONF, 'command', FakeConfCommand())) self.logging = self.useFixture(fixtures.FakeLogger(level=log.WARN)) def test_cli(self): expected_msg = 'Config file not found, using default configs.' cli.main(argv=['keystone-manage', 'db_sync']) self.assertThat(self.logging.output, matchers.Contains(expected_msg)) class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super(CliBootStrapTestCase, self).setUp() def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super(CliBootStrapTestCase, self).config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def config(self, config_files): CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex], project='keystone', default_config_files=config_files) def test_bootstrap(self): bootstrap = cli.BootStrap() self._do_test_bootstrap(bootstrap) def _do_test_bootstrap(self, bootstrap): bootstrap.do_bootstrap() project = bootstrap.resource_manager.get_project_by_name( bootstrap.project_name, 'default') user = bootstrap.identity_manager.get_user_by_name( bootstrap.username, 'default') role = bootstrap.role_manager.get_role(bootstrap.role_id) role_list = ( bootstrap.assignment_manager.get_roles_for_user_and_project( user['id'], project['id'])) 
        # NOTE(review): tail of _do_test_bootstrap (the `def` is earlier in
        # the file) — verifies the bootstrap role assignment, that the admin
        # user can authenticate, and that region/service/endpoints exist.
        self.assertIs(1, len(role_list))
        self.assertEqual(role_list[0], role['id'])
        # NOTE(morganfainberg): Pass an empty context, it isn't used by
        # `authenticate` method.
        bootstrap.identity_manager.authenticate(
            self.make_request(),
            user['id'],
            bootstrap.password)

        if bootstrap.region_id:
            region = bootstrap.catalog_manager.get_region(bootstrap.region_id)
            self.assertEqual(self.region_id, region['id'])

        if bootstrap.service_id:
            svc = bootstrap.catalog_manager.get_service(bootstrap.service_id)
            self.assertEqual(self.service_name, svc['name'])

            # Bootstrap must have created all three endpoint interfaces.
            self.assertEqual(set(['admin', 'public', 'internal']),
                             set(bootstrap.endpoints))

            urls = {'public': self.public_url,
                    'internal': self.internal_url,
                    'admin': self.admin_url}

            # Each endpoint must point at the expected URL, region, service
            # and interface.
            for interface, url in urls.items():
                endpoint_id = bootstrap.endpoints[interface]
                endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id)
                self.assertEqual(self.region_id, endpoint['region_id'])
                self.assertEqual(url, endpoint['url'])
                self.assertEqual(svc['id'], endpoint['service_id'])
                self.assertEqual(interface, endpoint['interface'])

    def test_bootstrap_is_idempotent_when_password_does_not_change(self):
        """Re-running bootstrap with unchanged config must not break tokens."""
        # NOTE(morganfainberg): Ensure we can run bootstrap with the same
        # configuration multiple times without erroring.
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)
        v3_token_controller = controllers.Auth()
        v3_password_data = {
            'identity': {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": bootstrap.username,
                        "password": bootstrap.password,
                        "domain": {
                            "id": CONF.identity.default_domain_id
                        }
                    }
                }
            }
        }
        auth_response = v3_token_controller.authenticate_for_token(
            self.make_request(), v3_password_data)
        token = auth_response.headers['X-Subject-Token']
        # Second run with identical configuration.
        self._do_test_bootstrap(bootstrap)
        # build validation request
        request = self.make_request(is_admin=True)
        request.context_dict['subject_token_id'] = token
        # Make sure the token we authenticate for is still valid.
        v3_token_controller.validate_token(request)

    def test_bootstrap_is_not_idempotent_when_password_does_change(self):
        """Changing the bootstrap password must invalidate existing tokens."""
        # NOTE(lbragstad): Ensure bootstrap isn't idempotent when run with
        # different arguments or configuration values.
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)
        v3_token_controller = controllers.Auth()
        v3_password_data = {
            'identity': {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": bootstrap.username,
                        "password": bootstrap.password,
                        "domain": {
                            "id": CONF.identity.default_domain_id
                        }
                    }
                }
            }
        }
        auth_response = v3_token_controller.authenticate_for_token(
            self.make_request(), v3_password_data)
        token = auth_response.headers['X-Subject-Token']
        # Second bootstrap run picks up a brand-new password from the
        # environment.
        os.environ['OS_BOOTSTRAP_PASSWORD'] = uuid.uuid4().hex
        self._do_test_bootstrap(bootstrap)
        # build validation request
        request = self.make_request(is_admin=True)
        request.context_dict['subject_token_id'] = token
        # Since the user account was recovered with a different password, we
        # shouldn't be able to validate this token. Bootstrap should have
        # persisted a revocation event because the user's password was updated.
        # Since this token was obtained using the original password, it should
        # now be invalid.
        self.assertRaises(
            exception.TokenNotFound,
            v3_token_controller.validate_token,
            request
        )

    def test_bootstrap_recovers_user(self):
        """A second bootstrap run re-enables a disabled admin account."""
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)

        # Completely lock the user out.
        user_id = bootstrap.identity_manager.get_user_by_name(
            bootstrap.username, 'default')['id']
        bootstrap.identity_manager.update_user(
            user_id,
            {'enabled': False, 'password': uuid.uuid4().hex})

        # The second bootstrap run will recover the account.
        self._do_test_bootstrap(bootstrap)

        # Sanity check that the original password works again.
        bootstrap.identity_manager.authenticate(
            self.make_request(),
            user_id,
            bootstrap.password)

    def test_bootstrap_creates_default_role(self):
        """Bootstrap creates the member role named by configuration."""
        bootstrap = cli.BootStrap()
        try:
            role = bootstrap.role_manager.get_role(CONF.member_role_id)
            self.fail('Member Role is created and should not be.')
        except exception.RoleNotFound:
            # Expected: the role does not exist before bootstrap runs.
            pass

        self._do_test_bootstrap(bootstrap)
        role = bootstrap.role_manager.get_role(CONF.member_role_id)
        self.assertEqual(role['name'], CONF.member_role_name)
        self.assertEqual(role['id'], CONF.member_role_id)


class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase):
    """Re-run the bootstrap tests with values taken from OS_BOOTSTRAP_* env.

    Unlike the parent class, no CLI arguments carry the bootstrap values;
    they are injected through environment variables instead.
    """

    def config(self, config_files):
        # No --bootstrap-* CLI args here: values come from the environment.
        CONF(args=['bootstrap'], project='keystone',
             default_config_files=config_files)

    def setUp(self):
        super(CliBootStrapTestCaseWithEnvironment, self).setUp()
        # Randomized values so collisions between runs are impossible.
        self.password = uuid.uuid4().hex
        self.username = uuid.uuid4().hex
        self.project_name = uuid.uuid4().hex
        self.role_name = uuid.uuid4().hex
        self.service_name = uuid.uuid4().hex
        self.public_url = uuid.uuid4().hex
        self.internal_url = uuid.uuid4().hex
        self.admin_url = uuid.uuid4().hex
        self.region_id = uuid.uuid4().hex
        self.default_domain = {
            'id': CONF.identity.default_domain_id,
            'name': 'Default',
        }
        # EnvironmentVariable fixtures restore the previous values on
        # cleanup, so the environment is not polluted across tests.
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PASSWORD',
                                         newvalue=self.password))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_USERNAME',
                                         newvalue=self.username))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PROJECT_NAME',
                                         newvalue=self.project_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_ROLE_NAME',
                                         newvalue=self.role_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_SERVICE_NAME',
                                         newvalue=self.service_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PUBLIC_URL',
                                         newvalue=self.public_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_INTERNAL_URL',
                                         newvalue=self.internal_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_ADMIN_URL',
                                         newvalue=self.admin_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_REGION_ID',
                                         newvalue=self.region_id))

    def test_assignment_created_with_user_exists(self):
        # test assignment can be created if user already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        user_ref = unit.new_user_ref(self.default_domain['id'],
                                     name=self.username,
                                     password=self.password)
        bootstrap.identity_manager.create_user(user_ref)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_project_exists(self):
        # test assignment can be created if project already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        project_ref = unit.new_project_ref(self.default_domain['id'],
                                           name=self.project_name)
        bootstrap.resource_manager.create_project(project_ref['id'],
                                                  project_ref)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_role_exists(self):
        # test assignment can be created if role already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        role = unit.new_role_ref(name=self.role_name)
        bootstrap.role_manager.create_role(role['id'], role)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_region_exists(self):
        # test assignment can be created if region already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        region = unit.new_region_ref(id=self.region_id)
        bootstrap.catalog_manager.create_region(region)
        self._do_test_bootstrap(bootstrap)

    def test_endpoints_created_with_service_exists(self):
        # test assignment can be created if service already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        service = unit.new_service_ref(name=self.service_name)
        bootstrap.catalog_manager.create_service(service['id'], service)
        self._do_test_bootstrap(bootstrap)

    def test_endpoints_created_with_endpoint_exists(self):
        # test assignment can be created if endpoint already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        service = unit.new_service_ref(name=self.service_name)
        bootstrap.catalog_manager.create_service(service['id'], service)
        region = unit.new_region_ref(id=self.region_id)
        bootstrap.catalog_manager.create_region(region)
        # Pre-create only the public endpoint; bootstrap must cope with it
        # already existing.
        endpoint = unit.new_endpoint_ref(interface='public',
                                         service_id=service['id'],
                                         url=self.public_url,
                                         region_id=self.region_id)
        bootstrap.catalog_manager.create_endpoint(endpoint['id'], endpoint)
        self._do_test_bootstrap(bootstrap)


class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase):
    """Exercise `keystone-manage domain_config_upload --all`.

    Uses the multi-LDAP domain config files shipped with the test suite
    and a SQL backend.
    """

    def setUp(self):
        self.useFixture(database.Database())
        super(CliDomainConfigAllTestCase, self).setUp()
        self.load_backends()
        # Point the identity domain config directory at the canned
        # multi-LDAP configuration used by these tests.
        self.config_fixture.config(
            group='identity',
            domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap')
        self.domain_count = 3
        self.setup_initial_domains()
        # Capture INFO-level log output for assertions in subclasses.
        self.logging = self.useFixture(
            fixtures.FakeLogger(level=logging.INFO))

    def config_files(self):
        self.config_fixture.register_cli_opt(cli.command_opt)
        config_files = super(CliDomainConfigAllTestCase, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
        return config_files

    def cleanup_domains(self):
        # Disable then delete every test domain; reset the bookkeeping dict.
        for domain in self.domains:
            if domain == 'domain_default':
                # Not allowed to delete the default domain, but should at least
                # delete any domain-specific config for it.
                self.domain_config_api.delete_config(
                    CONF.identity.default_domain_id)
                continue
            this_domain = self.domains[domain]
            this_domain['enabled'] = False
            self.resource_api.update_domain(this_domain['id'], this_domain)
            self.resource_api.delete_domain(this_domain['id'])
        self.domains = {}

    def config(self, config_files):
        CONF(args=['domain_config_upload', '--all'], project='keystone',
             default_config_files=config_files)

    def setup_initial_domains(self):
        """Create domain1..domainN plus the default domain for the tests."""

        def create_domain(domain):
            return self.resource_api.create_domain(domain['id'], domain)

        self.domains = {}
        self.addCleanup(self.cleanup_domains)
        for x in range(1, self.domain_count):
            domain = 'domain%s' % x
            self.domains[domain] = create_domain(
                {'id': uuid.uuid4().hex, 'name': domain})
        self.default_domain = unit.new_domain_ref(
            description=u'The default domain',
            id=CONF.identity.default_domain_id,
            name=u'Default')
        self.domains['domain_default'] = create_domain(self.default_domain)

    def test_config_upload(self):
        # The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
        default_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap'}
        }
        domain1_config = {
            'ldap': {'url': 'fake://memory1',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap',
                         'list_limit': '101'}
        }
        domain2_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=myroot,cn=com',
                     'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
                     'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
            'identity': {'driver': 'ldap'}
        }

        # Clear backend dependencies, since cli loads these manually
        dependency.reset()
        cli.DomainConfigUpload.main()

        # With --all, every domain's config file must have been uploaded.
        res = self.domain_config_api.get_config_with_sensitive_info(
            CONF.identity.default_domain_id)
        self.assertEqual(default_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain1']['id'])
        self.assertEqual(domain1_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain2']['id'])
        self.assertEqual(domain2_config, res)


class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
    """domain_config_upload limited to one domain via --domain-name."""

    def config(self, config_files):
        CONF(args=['domain_config_upload', '--domain-name', 'Default'],
             project='keystone', default_config_files=config_files)

    def test_config_upload(self):
        # The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
        default_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap'}
        }

        # Clear backend dependencies, since cli loads these manually
        dependency.reset()
        cli.DomainConfigUpload.main()

        # Only the Default domain was named, so only it gets a config; the
        # other domains must remain empty.
        res = self.domain_config_api.get_config_with_sensitive_info(
            CONF.identity.default_domain_id)
        self.assertEqual(default_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain1']['id'])
        self.assertEqual({}, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain2']['id'])
        self.assertEqual({}, res)

    def test_no_overwrite_config(self):
        # Create a config for the default domain
        default_config = {
            'ldap': {'url': uuid.uuid4().hex},
            'identity': {'driver': 'ldap'}
        }
        self.domain_config_api.create_config(
            CONF.identity.default_domain_id, default_config)

        # Now try and upload the settings in the configuration file for the
        # default domain
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            file_name = ('keystone.%s.conf' % self.default_domain['name'])
            error_msg = _(
                'Domain: %(domain)s already has a configuration defined - '
                'ignoring file: %(file)s.') % {
                    'domain': self.default_domain['name'],
                    'file': os.path.join(CONF.identity.domain_config_dir,
                                         file_name)}
            mock_print.assert_has_calls([mock.call(error_msg)])

        res = self.domain_config_api.get_config(
            CONF.identity.default_domain_id)
        # The initial config should not have been overwritten
        self.assertEqual(default_config, res)


class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase):
    """domain_config_upload with neither --all nor --domain-name must fail."""

    def config(self, config_files):
        CONF(args=['domain_config_upload'], project='keystone',
             default_config_files=config_files)

    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            # The command must explain that one selection option is required.
            mock_print.assert_has_calls(
                [mock.call(
                    _('At least one option must be provided, use either '
                      '--all or --domain-name'))])


class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase):
    """--all combined with --domain-name is rejected."""

    def config(self, config_files):
        CONF(args=['domain_config_upload', '--all', '--domain-name',
                   'Default'],
             project='keystone', default_config_files=config_files)

    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            mock_print.assert_has_calls(
                [mock.call(_('The --all option cannot be used with '
                             'the --domain-name option'))])


class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
    """An unknown --domain-name produces a warning and an error exit."""

    def config(self, config_files):
        self.invalid_domain_name = uuid.uuid4().hex
        CONF(args=['domain_config_upload', '--domain-name',
                   self.invalid_domain_name],
             project='keystone', default_config_files=config_files)

    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            file_name = 'keystone.%s.conf' % self.invalid_domain_name
            error_msg = (_(
                'Invalid domain name: %(domain)s found in config file name: '
                '%(file)s - ignoring this file.') % {
                    'domain': self.invalid_domain_name,
                    'file': os.path.join(CONF.identity.domain_config_dir,
                                         file_name)})
            mock_print.assert_has_calls([mock.call(error_msg)])


class TestDomainConfigFinder(unit.BaseTestCase):
    """Tests for cli._domain_config_finder's file-name filtering."""

    def setUp(self):
        super(TestDomainConfigFinder, self).setUp()
        self.logging = self.useFixture(fixtures.LoggerFixture())

    @mock.patch('os.walk')
    def test_finder_ignores_files(self, mock_walk):
        # Only 'keystone.<domain>.conf' files should be picked up; plain
        # 'keystone.conf' and unrelated files are skipped with a log line.
        mock_walk.return_value = [
            ['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']],
        ]

        domain_configs = list(cli._domain_config_finder('.'))

        expected_domain_configs = [('./keystone.domain0.conf', 'domain0')]
        self.assertThat(domain_configs,
                        matchers.Equals(expected_domain_configs))

        expected_msg_template = ('Ignoring file (%s) while scanning '
                                 'domain config directory')
        self.assertThat(
            self.logging.output,
            matchers.Contains(expected_msg_template % 'file.txt'))
        self.assertThat(
            self.logging.output,
            matchers.Contains(expected_msg_template % 'keystone.conf'))


class CliDBSyncTestCase(unit.BaseTestCase):
    """Verify `keystone-manage db_sync` dispatches to the right upgrade op."""

    class FakeConfCommand(object):
        # Mimics CONF.command: flag values are read from the parent test case.
        def __init__(self, parent):
            self.extension = False
            self.check = parent.command_check
            self.expand = parent.command_expand
            self.migrate = parent.command_migrate
            self.contract = parent.command_contract
            self.version = None

    def setUp(self):
        super(CliDBSyncTestCase, self).setUp()
        self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
        self.config_fixture.register_cli_opt(cli.command_opt)
        # Replace the real upgrade entry points with mocks so we can assert
        # which one DbSync invoked.
        upgrades.offline_sync_database_to_version = mock.Mock()
        upgrades.expand_schema = mock.Mock()
        upgrades.migrate_data = mock.Mock()
        upgrades.contract_schema = mock.Mock()
        self.command_check = False
        self.command_expand = False
        self.command_migrate = False
        self.command_contract = False

    def _assert_correct_call(self, mocked_function):
        # Exactly one upgrade function — the expected one — must be called.
        for func in [upgrades.offline_sync_database_to_version,
                     upgrades.expand_schema,
                     upgrades.migrate_data,
                     upgrades.contract_schema]:
            if func == mocked_function:
                self.assertTrue(func.called)
            else:
                self.assertFalse(func.called)

    def test_db_sync(self):
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(
            upgrades.offline_sync_database_to_version)

    def test_db_sync_expand(self):
        self.command_expand = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.expand_schema)

    def test_db_sync_migrate(self):
        self.command_migrate = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.migrate_data)

    def test_db_sync_contract(self):
        self.command_contract = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.contract_schema)

    @mock.patch('keystone.cmd.cli.upgrades.get_db_version')
    def test_db_sync_check_when_database_is_empty(self,
                                                  mocked_get_db_version):
        # An uninitialized database raises DbMigrationError; the checker
        # should log a hint and exit with status 2.
        e = migration.exception.DbMigrationError("Invalid version")
        mocked_get_db_version.side_effect = e

        checker = cli.DbSync()
        log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
        status = checker.check_db_sync_status()

        self.assertIn("not currently under version control", log_info.output)
        self.assertEqual(status, 2)


class TestMappingPopulate(unit.SQLDriverOverrides, unit.TestCase):
    """Tests for `keystone-manage mapping_populate` with an LDAP backend."""

    def setUp(self):
        sqldb = self.useFixture(database.Database())
        super(TestMappingPopulate, self).setUp()
        self.ldapdb = self.useFixture(ldapdb.LDAPDatabase())
        self.ldapdb.clear()

        self.load_backends()

        sqldb.recreate()
        self.load_fixtures(default_fixtures)

    def config_files(self):
        self.config_fixture.register_cli_opt(cli.command_opt)
        config_files = super(TestMappingPopulate, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
        return config_files

    def config_overrides(self):
        super(TestMappingPopulate, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        # Force real public-ID mappings rather than passthrough local IDs.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

    def config(self, config_files):
        CONF(args=['mapping_populate', '--domain-name', 'Default'],
             project='keystone',
             default_config_files=config_files)

    def test_mapping_populate(self):
        # mapping_populate should create id mappings. Test plan:
        # 0. Purge mappings
        # 1. Fetch user list directly via backend. It will not create any
        #    mappings because it bypasses identity manager
        # 2. Verify that users have no public_id yet
        # 3. Execute mapping_populate. It should create id mappings
        # 4. For the same users verify that they have public_id now
        purge_filter = {}
        self.id_mapping_api.purge_mappings(purge_filter)
        hints = None
        users = self.identity_api.driver.list_users(hints)
        for user in users:
            local_entity = {
                'domain_id': CONF.identity.default_domain_id,
                'local_id': user['id'],
                'entity_type': identity_mapping.EntityType.USER}
            self.assertIsNone(self.id_mapping_api.get_public_id(local_entity))

        dependency.reset()  # backends are loaded again in the command handler
        cli.MappingPopulate.main()

        for user in users:
            local_entity = {
                'domain_id': CONF.identity.default_domain_id,
                'local_id': user['id'],
                'entity_type': identity_mapping.EntityType.USER}
            self.assertIsNotNone(
                self.id_mapping_api.get_public_id(local_entity))

    def test_bad_domain_name(self):
        # An unknown domain name makes the command return False (failure).
        CONF(args=['mapping_populate', '--domain-name', uuid.uuid4().hex],
             project='keystone')
        dependency.reset()  # backends are loaded again in the command handler
        # NOTE: assertEqual is used on purpose. assertFalse passes with None.
        self.assertEqual(False, cli.MappingPopulate.main())


class CliDomainConfigUploadNothing(unit.BaseTestCase):
    """domain_config_upload over an empty config directory uploads nothing."""

    def setUp(self):
        super(CliDomainConfigUploadNothing, self).setUp()
        config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
        config_fixture.register_cli_opt(cli.command_opt)

        # NOTE(dstanek): since this is not testing any database
        # functionality there is no need to go through the motions and
        # setup a test database.
def fake_load_backends(self): self.resource_manager = mock.Mock() self.useFixture(fixtures.MockPatchObject( cli.DomainConfigUploadFiles, 'load_backends', fake_load_backends)) tempdir = self.useFixture(fixtures.TempDir()) config_fixture.config(group='identity', domain_config_dir=tempdir.path) self.logging = self.useFixture( fixtures.FakeLogger(level=logging.DEBUG)) def test_uploading_all_from_an_empty_directory(self): CONF(args=['domain_config_upload', '--all'], project='keystone', default_config_files=[]) cli.DomainConfigUpload.main() expected_msg = ('No domain configs uploaded from %r' % CONF.identity.domain_config_dir) self.assertThat(self.logging.output, matchers.Contains(expected_msg)) class CachingDoctorTests(unit.TestCase): def test_symptom_caching_disabled(self): # Symptom Detected: Caching disabled self.config_fixture.config(group='cache', enabled=False) self.assertTrue(caching.symptom_caching_disabled()) # No Symptom Detected: Caching is enabled self.config_fixture.config(group='cache', enabled=True) self.assertFalse(caching.symptom_caching_disabled()) def test_caching_symptom_caching_enabled_without_a_backend(self): # Success Case: Caching enabled and backend configured self.config_fixture.config(group='cache', enabled=True) self.config_fixture.config(group='cache', backend='dogpile.cache.null') self.assertTrue(caching.symptom_caching_enabled_without_a_backend()) # Failure Case 1: Caching disabled and backend not configured self.config_fixture.config(group='cache', enabled=False) self.config_fixture.config(group='cache', backend='dogpile.cache.null') self.assertFalse(caching.symptom_caching_enabled_without_a_backend()) # Failure Case 2: Caching disabled and backend configured self.config_fixture.config(group='cache', enabled=False) self.config_fixture.config(group='cache', backend='dogpile.cache.memory') self.assertFalse(caching.symptom_caching_enabled_without_a_backend()) # Failure Case 3: Caching enabled and backend configured 
self.config_fixture.config(group='cache', enabled=True) self.config_fixture.config(group='cache', backend='dogpile.cache.memory') self.assertFalse(caching.symptom_caching_enabled_without_a_backend()) class CredentialDoctorTests(unit.TestCase): def test_credential_and_fernet_key_repositories_match(self): # Symptom Detected: Key repository paths are not unique directory = self.useFixture(fixtures.TempDir()).path self.config_fixture.config(group='credential', key_repository=directory) self.config_fixture.config(group='fernet_tokens', key_repository=directory) self.assertTrue(credential.symptom_unique_key_repositories()) def test_credential_and_fernet_key_repositories_are_unique(self): # No Symptom Detected: Key repository paths are unique self.config_fixture.config(group='credential', key_repository='/etc/keystone/cred-repo') self.config_fixture.config(group='fernet_tokens', key_repository='/etc/keystone/fernet-repo') self.assertFalse(credential.symptom_unique_key_repositories()) @mock.patch('keystone.cmd.doctor.credential.utils') def test_usability_of_cred_fernet_key_repo_raised(self, mock_utils): # Symptom Detected: credential fernet key repository is world readable self.config_fixture.config(group='credential', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = False self.assertTrue( credential.symptom_usability_of_credential_fernet_key_repository()) @mock.patch('keystone.cmd.doctor.credential.utils') def test_usability_of_cred_fernet_key_repo_not_raised(self, mock_utils): # No Symptom Detected: Custom driver is used self.config_fixture.config(group='credential', provider='my-driver') mock_utils.FernetUtils().validate_key_repository.return_value = True self.assertFalse( credential.symptom_usability_of_credential_fernet_key_repository()) # No Symptom Detected: key repository is not world readable self.config_fixture.config(group='credential', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = True 
self.assertFalse( credential.symptom_usability_of_credential_fernet_key_repository()) @mock.patch('keystone.cmd.doctor.credential.utils') def test_keys_in_credential_fernet_key_repository_raised(self, mock_utils): # Symptom Detected: Key repo is empty self.config_fixture.config(group='credential', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = False self.assertTrue( credential.symptom_keys_in_credential_fernet_key_repository()) @mock.patch('keystone.cmd.doctor.credential.utils') def test_keys_in_credential_fernet_key_repository_not_raised( self, mock_utils): # No Symptom Detected: Custom driver is used self.config_fixture.config(group='credential', provider='my-driver') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( credential.symptom_keys_in_credential_fernet_key_repository()) # No Symptom Detected: Key repo is not empty, fernet is current driver self.config_fixture.config(group='credential', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( credential.symptom_keys_in_credential_fernet_key_repository()) class DatabaseDoctorTests(unit.TestCase): def test_symptom_is_raised_if_database_connection_is_SQLite(self): # Symptom Detected: Database connection is sqlite self.config_fixture.config( group='database', connection='sqlite:///mydb') self.assertTrue( doc_database.symptom_database_connection_is_not_SQLite()) # No Symptom Detected: Database connection is MySQL self.config_fixture.config( group='database', connection='mysql+mysqlconnector://admin:secret@localhost/mydb') self.assertFalse( doc_database.symptom_database_connection_is_not_SQLite()) class DebugDoctorTests(unit.TestCase): def test_symptom_debug_mode_is_enabled(self): # Symptom Detected: Debug mode is enabled self.config_fixture.config(debug=True) self.assertTrue(debug.symptom_debug_mode_is_enabled()) # No Symptom Detected: Debug mode is disabled self.config_fixture.config(debug=False) 
self.assertFalse(debug.symptom_debug_mode_is_enabled()) class FederationDoctorTests(unit.TestCase): def test_symptom_comma_in_SAML_public_certificate_path(self): # Symptom Detected: There is a comma in path to public cert file self.config_fixture.config(group='saml', certfile='file,cert.pem') self.assertTrue( federation.symptom_comma_in_SAML_public_certificate_path()) # No Symptom Detected: There is no comma in the path self.config_fixture.config(group='saml', certfile='signing_cert.pem') self.assertFalse( federation.symptom_comma_in_SAML_public_certificate_path()) def test_symptom_comma_in_SAML_private_key_file_path(self): # Symptom Detected: There is a comma in path to private key file self.config_fixture.config(group='saml', keyfile='file,key.pem') self.assertTrue( federation.symptom_comma_in_SAML_private_key_file_path()) # No Symptom Detected: There is no comma in the path self.config_fixture.config(group='saml', keyfile='signing_key.pem') self.assertFalse( federation.symptom_comma_in_SAML_private_key_file_path()) class LdapDoctorTests(unit.TestCase): def test_user_enabled_emulation_dn_ignored_raised(self): # Symptom when user_enabled_emulation_dn is being ignored because the # user did not enable the user_enabled_emulation self.config_fixture.config(group='ldap', user_enabled_emulation=False) self.config_fixture.config( group='ldap', user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com') self.assertTrue( ldap.symptom_LDAP_user_enabled_emulation_dn_ignored()) def test_user_enabled_emulation_dn_ignored_not_raised(self): # No symptom when configuration set properly self.config_fixture.config(group='ldap', user_enabled_emulation=True) self.config_fixture.config( group='ldap', user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com') self.assertFalse( ldap.symptom_LDAP_user_enabled_emulation_dn_ignored()) # No symptom when both configurations disabled self.config_fixture.config(group='ldap', user_enabled_emulation=False) 
self.config_fixture.config(group='ldap', user_enabled_emulation_dn=None) self.assertFalse( ldap.symptom_LDAP_user_enabled_emulation_dn_ignored()) def test_user_enabled_emulation_use_group_config_ignored_raised(self): # Symptom when user enabled emulation isn't enabled but group_config is # enabled self.config_fixture.config(group='ldap', user_enabled_emulation=False) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True) self.assertTrue( ldap. symptom_LDAP_user_enabled_emulation_use_group_config_ignored()) def test_user_enabled_emulation_use_group_config_ignored_not_raised(self): # No symptom when configuration deactivated self.config_fixture.config(group='ldap', user_enabled_emulation=False) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=False) self.assertFalse( ldap. symptom_LDAP_user_enabled_emulation_use_group_config_ignored()) # No symptom when configurations set properly self.config_fixture.config(group='ldap', user_enabled_emulation=True) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True) self.assertFalse( ldap. 
symptom_LDAP_user_enabled_emulation_use_group_config_ignored()) def test_group_members_are_ids_disabled_raised(self): # Symptom when objectclass is set to posixGroup but members_are_ids are # not enabled self.config_fixture.config(group='ldap', group_objectclass='posixGroup') self.config_fixture.config(group='ldap', group_members_are_ids=False) self.assertTrue(ldap.symptom_LDAP_group_members_are_ids_disabled()) def test_group_members_are_ids_disabled_not_raised(self): # No symptom when the configurations are set properly self.config_fixture.config(group='ldap', group_objectclass='posixGroup') self.config_fixture.config(group='ldap', group_members_are_ids=True) self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled()) # No symptom when configuration deactivated self.config_fixture.config(group='ldap', group_objectclass='groupOfNames') self.config_fixture.config(group='ldap', group_members_are_ids=False) self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_raised(self, mocked_isdir, mocked_listdir): self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True) self.config_fixture.config( group='identity', domain_configurations_from_database=False) # Symptom if there is no existing directory mocked_isdir.return_value = False self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs()) # Symptom if there is an invalid filename inside the domain directory mocked_isdir.return_value = True mocked_listdir.return_value = ['openstack.domains.conf'] self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_not_raised(self, mocked_isdir, mocked_listdir): # No symptom if both configurations deactivated self.config_fixture.config( group='identity', domain_specific_drivers_enabled=False) 
self.config_fixture.config( group='identity', domain_configurations_from_database=False) self.assertFalse( ldap.symptom_LDAP_file_based_domain_specific_configs()) # No symptom if directory exists with no invalid filenames self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True) self.config_fixture.config( group='identity', domain_configurations_from_database=False) mocked_isdir.return_value = True mocked_listdir.return_value = ['keystone.domains.conf'] self.assertFalse( ldap.symptom_LDAP_file_based_domain_specific_configs()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') @mock.patch('keystone.cmd.doctor.ldap.configparser.ConfigParser') def test_file_based_domain_specific_configs_formatted_correctly_raised( self, mocked_parser, mocked_isdir, mocked_listdir): symptom = ('symptom_LDAP_file_based_domain_specific_configs' '_formatted_correctly') # Symptom Detected: Ldap domain specific configuration files are not # formatted correctly self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True) self.config_fixture.config( group='identity', domain_configurations_from_database=False) mocked_isdir.return_value = True mocked_listdir.return_value = ['keystone.domains.conf'] mock_instance = mock.MagicMock() mock_instance.read.side_effect = configparser.Error('No Section') mocked_parser.return_value = mock_instance self.assertTrue(getattr(ldap, symptom)()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_formatted_correctly_not_raised( self, mocked_isdir, mocked_listdir): symptom = ('symptom_LDAP_file_based_domain_specific_configs' '_formatted_correctly') # No Symptom Detected: Domain_specific drivers is not enabled self.config_fixture.config( group='identity', domain_specific_drivers_enabled=False) self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: Domain configuration from database is enabled self.config_fixture.config( group='identity', 
domain_specific_drivers_enabled=True) self.assertFalse(getattr(ldap, symptom)()) self.config_fixture.config( group='identity', domain_configurations_from_database=True) self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: The directory in domain_config_dir doesn't exist mocked_isdir.return_value = False self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: domain specific drivers are enabled, domain # configurations from database are disabled, directory exists, and no # exceptions found. self.config_fixture.config( group='identity', domain_configurations_from_database=False) mocked_isdir.return_value = True # An empty directory should not raise this symptom self.assertFalse(getattr(ldap, symptom)()) # Test again with a file inside the directory mocked_listdir.return_value = ['keystone.domains.conf'] self.assertFalse(getattr(ldap, symptom)()) class SecurityComplianceDoctorTests(unit.TestCase): def test_minimum_password_age_greater_than_password_expires_days(self): # Symptom Detected: Minimum password age is greater than the password # expires days. Both values are positive integers greater than zero. self.config_fixture.config(group='security_compliance', minimum_password_age=2) self.config_fixture.config(group='security_compliance', password_expires_days=1) self.assertTrue( security_compliance. symptom_minimum_password_age_greater_than_expires_days()) def test_minimum_password_age_equal_to_password_expires_days(self): # Symptom Detected: Minimum password age is equal to the password # expires days. Both values are positive integers greater than zero. self.config_fixture.config(group='security_compliance', minimum_password_age=1) self.config_fixture.config(group='security_compliance', password_expires_days=1) self.assertTrue( security_compliance. 
symptom_minimum_password_age_greater_than_expires_days()) def test_minimum_password_age_less_than_password_expires_days(self): # No Symptom Detected: Minimum password age is less than password # expires days. Both values are positive integers greater than zero. self.config_fixture.config(group='security_compliance', minimum_password_age=1) self.config_fixture.config(group='security_compliance', password_expires_days=2) self.assertFalse( security_compliance. symptom_minimum_password_age_greater_than_expires_days()) def test_minimum_password_age_and_password_expires_days_deactivated(self): # No Symptom Detected: when minimum_password_age's default value is 0 # and password_expires_days' default value is None self.assertFalse( security_compliance. symptom_minimum_password_age_greater_than_expires_days()) def test_invalid_password_regular_expression(self): # Symptom Detected: Regular expression is invalid self.config_fixture.config( group='security_compliance', password_regex='^^(??=.*\d)$') self.assertTrue( security_compliance.symptom_invalid_password_regular_expression()) def test_valid_password_regular_expression(self): # No Symptom Detected: Regular expression is valid self.config_fixture.config( group='security_compliance', password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$') self.assertFalse( security_compliance.symptom_invalid_password_regular_expression()) def test_password_regular_expression_deactivated(self): # No Symptom Detected: Regular expression deactivated to None self.config_fixture.config( group='security_compliance', password_regex=None) self.assertFalse( security_compliance.symptom_invalid_password_regular_expression()) def test_password_regular_expression_description_not_set(self): # Symptom Detected: Regular expression is set but description is not self.config_fixture.config( group='security_compliance', password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$') self.config_fixture.config( group='security_compliance', password_regex_description=None) 
self.assertTrue( security_compliance. symptom_password_regular_expression_description_not_set()) def test_password_regular_expression_description_set(self): # No Symptom Detected: Regular expression and description are set desc = '1 letter, 1 digit, and a minimum length of 7 is required' self.config_fixture.config( group='security_compliance', password_regex='^(?=.*\d)(?=.*[a-zA-Z]).{7,}$') self.config_fixture.config( group='security_compliance', password_regex_description=desc) self.assertFalse( security_compliance. symptom_password_regular_expression_description_not_set()) def test_password_regular_expression_description_deactivated(self): # No Symptom Detected: Regular expression and description are # deactivated to None self.config_fixture.config( group='security_compliance', password_regex=None) self.config_fixture.config( group='security_compliance', password_regex_description=None) self.assertFalse( security_compliance. symptom_password_regular_expression_description_not_set()) class TokensDoctorTests(unit.TestCase): def test_unreasonable_max_token_size_raised(self): # Symptom Detected: the max_token_size for uuid is not 32 self.config_fixture.config(group='token', provider='uuid') self.config_fixture.config(max_token_size=33) self.assertTrue(tokens.symptom_unreasonable_max_token_size()) # Symptom Detected: the max_token_size for fernet is greater than 255 self.config_fixture.config(group='token', provider='fernet') self.config_fixture.config(max_token_size=256) self.assertTrue(tokens.symptom_unreasonable_max_token_size()) def test_unreasonable_max_token_size_not_raised(self): # No Symptom Detected: the max_token_size for uuid is 32 self.config_fixture.config(group='token', provider='uuid') self.config_fixture.config(max_token_size=32) self.assertFalse(tokens.symptom_unreasonable_max_token_size()) # No Symptom Detected: the max_token_size for fernet is 255 or less self.config_fixture.config(group='token', provider='fernet') 
self.config_fixture.config(max_token_size=255) self.assertFalse(tokens.symptom_unreasonable_max_token_size()) class TokenFernetDoctorTests(unit.TestCase): @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_usability_of_Fernet_key_repository_raised(self, mock_utils): # Symptom Detected: Fernet key repo is world readable self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = False self.assertTrue( tokens_fernet.symptom_usability_of_Fernet_key_repository()) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_usability_of_Fernet_key_repository_not_raised(self, mock_utils): # No Symptom Detected: UUID is used instead of fernet self.config_fixture.config(group='token', provider='uuid') mock_utils.FernetUtils().validate_key_repository.return_value = False self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository()) # No Symptom Detected: configs set properly, key repo is not world # readable but is user readable self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository()) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_keys_in_Fernet_key_repository_raised(self, mock_utils): # Symptom Detected: Fernet key repository is empty self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = False self.assertTrue( tokens_fernet.symptom_keys_in_Fernet_key_repository()) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_keys_in_Fernet_key_repository_not_raised(self, mock_utils): # No Symptom Detected: UUID is used instead of fernet self.config_fixture.config(group='token', provider='uuid') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository()) # No Symptom Detected: configs 
set properly, key repo has been # populated with keys self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository())
rajalokan/keystone
keystone/tests/unit/test_cli.py
Python
apache-2.0
57,490
#!/usr/bin/env python3 # Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Summarize groups failed tests together by finding edit distances between their failure strings, and emits JSON for rendering in a browser. """ # pylint: disable=invalid-name,missing-docstring import argparse import functools import hashlib import json import logging import os import re import sys import time import zlib import berghelroach editdist = berghelroach.dist flakeReasonDateRE = re.compile( r'[A-Z][a-z]{2}, \d+ \w+ 2\d{3} [\d.-: ]*([-+]\d+)?|' r'\w{3}\s+\d{1,2} \d+:\d+:\d+(\.\d+)?|(\d{4}-\d\d-\d\d.|.\d{4} )\d\d:\d\d:\d\d(.\d+)?') # Find random noisy strings that should be replaced with renumbered strings, for more similarity. flakeReasonOrdinalRE = re.compile( r'0x[0-9a-fA-F]+' # hex constants r'|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?' # IPs + optional port r'|[0-9a-fA-F]{8}-\S{4}-\S{4}-\S{4}-\S{12}(-\d+)?' # UUIDs + trailing digits r'|[0-9a-f]{12,32}' # hex garbage r'|(?<=minion-group-|default-pool-)[-0-9a-z]{4,}' # node names ) def normalize(s): """ Given a traceback or error message from a text, reduce excess entropy to make clustering easier. This includes: - blanking dates and timestamps - renumbering unique information like - pointer addresses - UUIDs - IP addresses - sorting randomly ordered map[] strings. 
""" # blank out dates s = flakeReasonDateRE.sub('TIME', s) # do alpha conversion-- rename random garbage strings (hex pointer values, node names, etc) # into 'UNIQ1', 'UNIQ2', etc. matches = {} def repl(m): s = m.group(0) if s not in matches: matches[s] = 'UNIQ%d' % (len(matches) + 1) return matches[s] if 'map[' in s: # Go's maps are in a random order. Try to sort them to reduce diffs. s = re.sub(r'map\[([^][]*)\]', lambda m: 'map[%s]' % ' '.join(sorted(m.group(1).split())), s) s = flakeReasonOrdinalRE.sub(repl, s) if len(s) > 10000: # for long strings, remove repeated lines! s = re.sub(r'(?m)^(.*\n)\1+', r'\1', s) if len(s) > 10000: # ridiculously long test output s = s[:5000] + '\n...[truncated]...\n' + s[-5000:] return s def normalize_name(name): """ Given a test name, remove [...]/{...}. Matches code in testgrid and kubernetes/hack/update_owners.py. """ name = re.sub(r'\[.*?\]|{.*?\}', '', name) name = re.sub(r'\s+', ' ', name) return name.strip() def make_ngram_counts(s, ngram_counts={}): """ Convert a string into a histogram of frequencies for different byte combinations. This can be used as a heuristic to estimate edit distance between two strings in constant time. Instead of counting each ngram individually, they are hashed into buckets. This makes the output count size constant. """ # Yes, I'm intentionally memoizing here. # pylint: disable=dangerous-default-value size = 64 if s not in ngram_counts: counts = [0] * size for x in range(len(s)-3): counts[zlib.crc32(s[x:x+4].encode('utf8')) & (size - 1)] += 1 ngram_counts[s] = counts # memoize return ngram_counts[s] def ngram_editdist(a, b): """ Compute a heuristic lower-bound edit distance using ngram counts. 
An insert/deletion/substitution can cause up to 4 ngrams to differ: abcdefg => abcefg (abcd, bcde, cdef, defg) => (abce, bcef, cefg) This will underestimate the edit distance in many cases: - ngrams hashing into the same bucket will get confused - a large-scale transposition will barely disturb ngram frequencies, but will have a very large effect on edit distance. It is useful to avoid more expensive precise computations when they are guaranteed to exceed some limit (being a lower bound), or as a proxy when the exact edit distance computation is too expensive (for long inputs). """ counts_a = make_ngram_counts(a) counts_b = make_ngram_counts(b) return sum(abs(x-y) for x, y in zip(counts_a, counts_b))//4 def make_ngram_counts_digest(s): """ Returns a hashed version of the ngram counts. """ return hashlib.sha1(str(make_ngram_counts(s)).encode()).hexdigest()[:20] def file_memoize(description, name): """ Decorator to save a function's results to a file. """ def inner(func): @functools.wraps(func) def wrapper(*args, **kwargs): if os.path.exists(name): with open(name) as f: data = json.load(f) logging.info('done (cached) %s', description) return data data = func(*args, **kwargs) with open(name, 'w') as f: json.dump(data, f) logging.info('done %s', description) return data wrapper.__wrapped__ = func return wrapper return inner @file_memoize('loading failed tests', 'memo_load_failures.json') def load_failures(builds_file, tests_files): """ Load builds and failed tests files. Group builds by path, group test failures by test name. 
Args: filenames Returns: { build_path: [{ path: build_path, started: 12345, ...} ...], ...}, { test_name: [{build: gs://foo/bar, name: test_name, failure_text: xxx}, ...], ...} """ builds = {} with open(builds_file) as f: for build in json.load(f): if not build['started'] or not build['number']: continue for attr in ('started', 'tests_failed', 'number', 'tests_run'): build[attr] = int(build[attr]) build['elapsed'] = int(float(build['elapsed'])) if 'pr-logs' in build['path']: build['pr'] = build['path'].split('/')[-3] builds[build['path']] = build failed_tests = {} for tests_file in tests_files: with open(tests_file) as f: for line in f: test = json.loads(line) failed_tests.setdefault(test['name'], []).append(test) for tests in failed_tests.values(): tests.sort(key=lambda t: t['build']) return builds, failed_tests def find_match(fnorm, clusters): for ngram_dist, other in sorted((ngram_editdist(fnorm, x), x) for x in clusters): # allow up to 10% differences limit = int((len(fnorm)+len(other))/2.0 * 0.10) if ngram_dist > limit: continue if limit <= 1 and other != fnorm: # no chance continue dist = editdist(fnorm, other, limit) if dist < limit: return other return None def cluster_test(tests): """ Compute failure clusters given a list of failures for one test. Normalize the failure text prior to clustering to avoid needless entropy. Args: [{name: test_name, build: gs://foo/bar, failure_text: xxx}, ...] Returns: {cluster_text_1: [test1, test2, ...]} """ clusters = {} start = time.time() for test in tests: ftext = test['failure_text'] fnorm = normalize(ftext) if fnorm in clusters: clusters[fnorm].append(test) else: other = find_match(fnorm, clusters) if other: clusters[other].append(test) else: clusters[fnorm] = [test] if time.time() > start + 60: logging.info('bailing early, taking too long!') break return clusters @file_memoize('clustering inside each test', 'memo_cluster_local.json') def cluster_local(failed_tests): """ Cluster together the failures for each test. 
Args: {test_1: [{name: test_1, build: gs://foo/bar, failure_text: xxx}, ...], ...} Returns: {test_1: {cluster_text_1: [test1, test2], ... }, test_2: ...} """ clustered = {} num_failures = 0 start = time.time() logging.info("Clustering failures for %d unique tests...", len(failed_tests)) # Look at tests with the most failures first for n, (test_name, tests) in enumerate( sorted(failed_tests.items(), key=lambda x: len(x[1]), reverse=True), 1): num_failures += len(tests) logging.info('%4d/%4d, %d failures, %s', n, len(failed_tests), len(tests), test_name) sys.stdout.flush() clustered[test_name] = cluster_test(tests) elapsed = time.time() - start logging.info('Finished locally clustering %d unique tests (%d failures) in %dm%ds', len(clustered), num_failures, elapsed / 60, elapsed % 60) return clustered @file_memoize('clustering across tests', 'memo_cluster_global.json') def cluster_global(clustered, previous_clustered): """Combine together clustered failures for each test. This is done hierarchically for efficiency-- each test's failures are likely to be similar, reducing the number of clusters that need to be paired up at this stage. Args: {test_name: {cluster_text_1: [test1, test2, ...], ...}, ...} Returns: {cluster_text_1: [{test_name: [test1, test2, ...]}, ...], ...} """ clusters = {} num_failures = 0 logging.info("Combining clustered failures for %d unique tests...", len(clustered)) start = time.time() if previous_clustered: # seed clusters using output from the previous run n = 0 for cluster in previous_clustered: key = cluster['key'] if key != normalize(key): logging.info(key) logging.info(normalize(key)) n += 1 continue clusters[cluster['key']] = {} logging.info('Seeding with %d previous clusters', len(clusters)) if n: logging.warning('!!! %d clusters lost from different normalization! 
!!!', n)
    # Look at tests with the most failures over all clusters first
    for n, (test_name, test_clusters) in enumerate(
            sorted(clustered.items(),
                   key=lambda kv: sum(len(x) for x in kv[1].values()),
                   reverse=True), 1):
        logging.info('%4d/%4d, %d clusters, %s', n, len(clustered), len(test_clusters), test_name)
        # Look at clusters with the most failures first
        for key, tests in sorted(test_clusters.items(), key=lambda x: len(x[1]), reverse=True):
            num_failures += len(tests)
            if key in clusters:
                # Exact key already seen: merge this test's failures in.
                clusters[key].setdefault(test_name, []).extend(tests)
            else:
                # Fuzzy match against existing cluster keys before creating a new one.
                other = find_match(key, clusters)
                if other:
                    clusters[other].setdefault(test_name, []).extend(tests)
                else:
                    clusters[key] = {test_name: list(tests)}

    # If we seeded clusters using the previous run's keys, some of those
    # clusters may have disappeared. Remove the resulting empty entries.
    for k in {k for k, v in clusters.items() if not v}:
        clusters.pop(k)

    elapsed = time.time() - start
    logging.info('Finished clustering %d unique tests (%d failures) into %d clusters in %dm%ds',
                 len(clustered), num_failures, len(clusters), elapsed / 60, elapsed % 60)
    return clusters


def tests_group_by_job(tests, builds):
    """Turn a list of test failures into {job: [buildnumber, ...], ...}"""
    groups = {}
    for test in tests:
        try:
            build = builds[test['build']]
        except KeyError:
            # Failure references a build we have no record for; skip it.
            continue
        if 'number' in build:
            groups.setdefault(build['job'], set()).add(build['number'])
    # Sort jobs by descending failure count, then name; build numbers newest-first.
    return sorted(((key, sorted(value, reverse=True)) for key, value in groups.items()),
                  key=lambda kv: (-len(kv[1]), kv[0]))


# Splits a string into alternating runs of word and non-word characters.
SPAN_RE = re.compile(r'\w+|\W+')


def common_spans(xs):
    """
    Finds something similar to the longest common subsequence of xs, but much
    faster.

    Returns a list of [matchlen_1, mismatchlen_2, matchlen_2, mismatchlen_2, ...],
    representing sequences of the first element of the list that are present in
    all members.
    """
    # Tokens (word / non-word runs) present in every input string.
    common = None
    for x in xs:
        x_split = SPAN_RE.findall(x)
        if common is None:  # first iteration
            common = set(x_split)
        else:
            common.intersection_update(x_split)

    # Walk the first string and emit alternating matched/unmatched run lengths.
    spans = []
    match = True
    span_len = 0
    for x in SPAN_RE.findall(xs[0]):
        if x in common:
            if not match:
                match = True
                spans.append(span_len)
                span_len = 0
            span_len += len(x)
        else:
            if match:
                match = False
                spans.append(span_len)
                span_len = 0
            span_len += len(x)
    if span_len:
        spans.append(span_len)

    return spans


def clusters_to_display(clustered, builds):
    """Transpose and sort the output of cluster_global."""
    # Singleton clusters (only one failure in total) are dropped from display.
    return [{
        "key": key,
        "id": key_id,
        "spans": common_spans([f['failure_text'] for _, fs in clusters for f in fs]),
        "text": clusters[0][1][0]['failure_text'],
        "tests": [{
            "name": test_name,
            "jobs": [{"name": n, "builds": [str(x) for x in b]}
                     for n, b in tests_group_by_job(tests, builds)]
        }
                  for test_name, tests in sorted(clusters, key=lambda nt: (-len(nt[1]), nt[0]))]
    }
            for key, key_id, clusters in clustered if sum(len(x[1]) for x in clusters) > 1]


def builds_to_columns(builds):
    """Convert a list of build dictionaries into a columnar form.

    This compresses much better with gzip."""
    jobs = {}
    cols = {v: [] for v in 'started tests_failed elapsed tests_run result executor pr'.split()}
    out = {'jobs': jobs, 'cols': cols, 'job_paths': {}}
    # NOTE(review): builds lacking 'number' are skipped inside the loop, but the
    # sort key reads b['number'] unconditionally -- presumably every build dict
    # carries the key (possibly None); confirm against the loader.
    for build in sorted(builds.values(), key=lambda b: (b['job'], b['number'])):
        if 'number' not in build:
            continue
        index = len(cols['started'])
        for key, entries in cols.items():
            entries.append(build.get(key))
        job = jobs.setdefault(build['job'], {})
        if not job:
            # First build seen for this job: remember the job's directory path.
            out['job_paths'][build['job']] = build['path'][:build['path'].rindex('/')]
        job[build['number']] = index
    for k, indexes in jobs.items():
        numbers = sorted(indexes)
        base = indexes[numbers[0]]
        count = len(numbers)
        # optimization: if we have a dense sequential mapping of builds=>indexes,
        # store only the first build number, the run length, and the first index number.
        if numbers[-1] == numbers[0] + count - 1 and \
           all(indexes[k] == n + base for n, k in enumerate(numbers)):
            jobs[k] = [numbers[0], count, base]
            for n in numbers:
                assert n <= numbers[0] + len(numbers), \
                    (k, n, jobs[k], len(numbers), numbers)
    return out


def render(builds, clustered):
    """Package clustered failures and build records into the output JSON shape."""
    # Clusters sorted by total failure count (desc), then key.
    clustered_sorted = sorted(
        clustered.items(),
        key=lambda kv: (-sum(len(ts) for ts in kv[1].values()), kv[0]))

    clustered_tuples = [(k,
                         make_ngram_counts_digest(k),
                         sorted(clusters.items(), key=lambda nt: (-len(nt[1]), nt[0])))
                        for k, clusters in clustered_sorted]

    return {'clustered': clusters_to_display(clustered_tuples, builds),
            'builds': builds_to_columns(builds)}


# Extracts the SIG name out of a "[sig-foo]" test-name label.
SIG_LABEL_RE = re.compile(r'\[sig-([^]]*)\]')


def annotate_owners(data, builds, owners):
    """
    Assign ownership to a cluster based on the share of hits in the last day.
    """
    # One named group per SIG, matching any of that SIG's test-name prefixes.
    owner_re = re.compile(r'(?:%s)' % '|'.join(
        '(?P<%s>%s)' % (
            sig.replace('-', '_'),  # regex group names can't have -
            '|'.join(re.escape(p) for p in prefixes)
        )
        for sig, prefixes in owners.items()
    ))
    job_paths = data['builds']['job_paths']
    yesterday = max(data['builds']['cols']['started']) - (60 * 60 * 24)
    for cluster in data['clustered']:
        owner_counts = {}
        for test in cluster['tests']:
            # Prefer an explicit [sig-*] label; otherwise match known prefixes.
            m = SIG_LABEL_RE.search(test['name'])
            if m:
                owner = m.group(1)
            else:
                m = owner_re.match(normalize_name(test['name']))
                if not m or not m.groupdict():
                    continue
                owner = next(k for k, v in m.groupdict().items() if v)
            owner = owner.replace('_', '-')
            # counts = [hits in the last day, older hits]
            counts = owner_counts.setdefault(owner, [0, 0])
            for job in test['jobs']:
                if ':' in job['name']:  # non-standard CI
                    continue
                job_path = job_paths[job['name']]
                for build in job['builds']:
                    if builds['%s/%s' % (job_path, build)]['started'] > yesterday:
                        counts[0] += 1
                    else:
                        counts[1] += 1
        if owner_counts:
            owner = max(owner_counts.items(), key=lambda oc: (oc[1], oc[0]))[0]
            cluster['owner'] = owner
        else:
            cluster['owner'] = 'testing'


def render_slice(data, builds, prefix='', owner=''):
    """Select the subset of clusters (and their builds) for one output slice,
    chosen either by owning SIG or by cluster-id prefix."""
    clustered = []
    builds_out = {}
    jobs = set()
    for cluster in data['clustered']:
        # print [cluster['id'], prefix]
        if owner and cluster.get('owner') == owner:
            clustered.append(cluster)
        elif prefix and cluster['id'].startswith(prefix):
            clustered.append(cluster)
        else:
            continue
        for test in cluster['tests']:
            for job in test['jobs']:
                jobs.add(job['name'])
    # Only keep builds belonging to jobs referenced by the selected clusters.
    for path, build in builds.items():
        if build['job'] in jobs:
            builds_out[path] = build
    return {'clustered': clustered, 'builds': builds_to_columns(builds_out)}


def setup_logging():
    """Initialize logging to screen"""
    # See https://docs.python.org/2/library/logging.html#logrecord-attributes
    # [IWEF]mmdd HH:MM:SS.mmm] msg
    fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s'  # pylint: disable=line-too-long
    datefmt = '%m%d %H:%M:%S'
    logging.basicConfig(
        level=logging.INFO,
        format=fmt,
        datefmt=datefmt,
    )


def parse_args(args):
    """Parse command-line arguments for the triage summarizer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('builds', help='builds.json file from BigQuery')
    parser.add_argument('tests', help='tests.json file from BigQuery', nargs='+')
    parser.add_argument('--previous', help='previous output', type=argparse.FileType('r'))
    parser.add_argument('--owners', help='test owner SIGs', type=argparse.FileType('r'))
    parser.add_argument('--output', default='failure_data.json')
    parser.add_argument('--output_slices',
                        help='Output slices to this path (must include PREFIX in template)')
    return parser.parse_args(args)


def main(args):
    """Load failures, cluster them (seeded by any previous run), render the
    aggregate JSON, and optionally write per-prefix / per-SIG slices."""
    setup_logging()
    builds, failed_tests = load_failures(args.builds, args.tests)
    previous_clustered = None
    if args.previous:
        logging.info('loading previous')
        previous_clustered = json.load(args.previous)['clustered']
    clustered_local = cluster_local(failed_tests)
    clustered = cluster_global(clustered_local, previous_clustered)
    logging.info("Rendering results...")
    start = time.time()
    data = render(builds, clustered)
    if args.owners:
        owners = json.load(args.owners)
        annotate_owners(data, builds, owners)

    with open(args.output, 'w') as f:
        json.dump(data, f, sort_keys=True)

    if args.output_slices:
        assert 'PREFIX' in args.output_slices
        # 256 slices keyed by the first byte (two hex digits) of the cluster id.
        for subset in range(256):
            id_prefix = '%02x' % subset
            with open(args.output_slices.replace('PREFIX', id_prefix), 'w') as f:
                json.dump(render_slice(data, builds, id_prefix), f, sort_keys=True)
        if args.owners:
            owners.setdefault('testing', [])  # for output
            for owner in owners:
                with open(args.output_slices.replace('PREFIX', 'sig-' + owner), 'w') as f:
                    json.dump(render_slice(data, builds, prefix='', owner=owner), f, sort_keys=True)

    elapsed = time.time() - start
    logging.info('Finished rendering results in %dm%ds', elapsed / 60, elapsed % 60)


if __name__ == '__main__':
    main(parse_args(sys.argv[1:]))
krzyzacy/test-infra
triage/summarize.py
Python
apache-2.0
21,103
"""Cascade on Delete Revision ID: 5ca019edf61f Revises: 469f428604aa Create Date: 2019-06-23 05:49:26.061932 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "5ca019edf61f" down_revision = "469f428604aa" branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table("penalty") as batch_op: batch_op.drop_constraint("penalty_ibfk_1", type_="foreignkey") batch_op.drop_constraint("penalty_ibfk_2", type_="foreignkey") op.create_foreign_key( "penalty_ibfk_1", "penalty", "team", ["team_id"], ["id"], ondelete="CASCADE" ) op.create_foreign_key( "penalty_ibfk_2", "penalty", "flag", ["flag_id"], ["id"], ondelete="CASCADE" ) with op.batch_alter_table("snapshot_team") as batch_op: batch_op.drop_constraint("snapshot_team_ibfk_1", type_="foreignkey") op.create_foreign_key( "snapshot_team_ibfk_1", "snapshot_team", "team", ["team_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("snapshot_to_snapshot_team") as batch_op: batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_1", type_="foreignkey") batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_2", type_="foreignkey") op.create_foreign_key( "snapshot_to_snapshot_team_ibfk_1", "snapshot_to_snapshot_team", "snapshot", ["snapshot_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "snapshot_to_snapshot_team_ibfk_2", "snapshot_to_snapshot_team", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("snapshot_team_to_flag") as batch_op: batch_op.drop_constraint("snapshot_team_to_flag_ibfk_1", type_="foreignkey") batch_op.drop_constraint("snapshot_team_to_flag_ibfk_2", type_="foreignkey") op.create_foreign_key( "snapshot_team_to_flag_ibfk_1", "snapshot_team_to_flag", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "snapshot_team_to_flag_ibfk_2", "snapshot_team_to_flag", "flag", ["flag_id"], ["id"], ondelete="CASCADE", ) with 
op.batch_alter_table("snapshot_team_to_game_level") as batch_op: batch_op.drop_constraint( "snapshot_team_to_game_level_ibfk_1", type_="foreignkey" ) batch_op.drop_constraint( "snapshot_team_to_game_level_ibfk_2", type_="foreignkey" ) op.create_foreign_key( "snapshot_team_to_game_level_ibfk_1", "snapshot_team_to_game_level", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "snapshot_team_to_game_level_ibfk_2", "snapshot_team_to_game_level", "game_level", ["gam_level_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_box") as batch_op: batch_op.drop_constraint("team_to_box_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_box_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_box_ibfk_1", "team_to_box", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_box_ibfk_2", "team_to_box", "box", ["box_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_item") as batch_op: batch_op.drop_constraint("team_to_item_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_item_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_item_ibfk_1", "team_to_item", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_item_ibfk_2", "team_to_item", "market_item", ["item_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_source_code") as batch_op: batch_op.drop_constraint("team_to_source_code_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_source_code_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_source_code_ibfk_1", "team_to_source_code", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_source_code_ibfk_2", "team_to_source_code", "source_code", ["source_code_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_hint") as batch_op: batch_op.drop_constraint("team_to_hint_ibfk_1", type_="foreignkey") 
batch_op.drop_constraint("team_to_hint_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_hint_ibfk_1", "team_to_hint", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_hint_ibfk_2", "team_to_hint", "hint", ["hint_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_flag") as batch_op: batch_op.drop_constraint("team_to_flag_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_flag_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_flag_ibfk_1", "team_to_flag", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_flag_ibfk_2", "team_to_flag", "flag", ["flag_id"], ["id"], ondelete="CASCADE", ) with op.batch_alter_table("team_to_game_level") as batch_op: batch_op.drop_constraint("team_to_game_level_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_game_level_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_game_level_ibfk_1", "team_to_game_level", "team", ["team_id"], ["id"], ondelete="CASCADE", ) op.create_foreign_key( "team_to_game_level_ibfk_2", "team_to_game_level", "game_level", ["game_level_id"], ["id"], ondelete="CASCADE", ) def downgrade(): with op.batch_alter_table("penalty") as batch_op: batch_op.drop_constraint("penalty_ibfk_1", type_="foreignkey") batch_op.drop_constraint("penalty_ibfk_2", type_="foreignkey") op.create_foreign_key( "penalty_ibfk_1", "penalty", "team", ["team_id"], ["id"], ondelete="RESTRICT" ) op.create_foreign_key( "penalty_ibfk_2", "penalty", "flag", ["flag_id"], ["id"], ondelete="RESTRICT" ) with op.batch_alter_table("snapshot_team") as batch_op: batch_op.drop_constraint("snapshot_team_ibfk_1", type_="foreignkey") op.create_foreign_key( "snapshot_team_ibfk_1", "snapshot_team", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("snapshot_to_snapshot_team") as batch_op: batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_1", type_="foreignkey") 
batch_op.drop_constraint("snapshot_to_snapshot_team_ibfk_2", type_="foreignkey") op.create_foreign_key( "snapshot_to_snapshot_team_ibfk_1", "snapshot_to_snapshot_team", "snapshot", ["snapshot_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "snapshot_to_snapshot_team_ibfk_2", "snapshot_to_snapshot_team", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("snapshot_team_to_flag") as batch_op: batch_op.drop_constraint("snapshot_team_to_flag_ibfk_1", type_="foreignkey") batch_op.drop_constraint("snapshot_team_to_flag_ibfk_2", type_="foreignkey") op.create_foreign_key( "snapshot_team_to_flag_ibfk_1", "snapshot_team_to_flag", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "snapshot_team_to_flag_ibfk_2", "snapshot_team_to_flag", "flag", ["flag_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("snapshot_team_to_game_level") as batch_op: batch_op.drop_constraint( "snapshot_team_to_game_level_ibfk_1", type_="foreignkey" ) batch_op.drop_constraint( "snapshot_team_to_game_level_ibfk_2", type_="foreignkey" ) op.create_foreign_key( "snapshot_team_to_game_level_ibfk_1", "snapshot_team_to_game_level", "snapshot_team", ["snapshot_team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "snapshot_team_to_game_level_ibfk_2", "snapshot_team_to_game_level", "game_level", ["gam_level_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_box") as batch_op: batch_op.drop_constraint("team_to_box_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_box_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_box_ibfk_1", "team_to_box", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "team_to_box_ibfk_2", "team_to_box", "box", ["box_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_item") as batch_op: batch_op.drop_constraint("team_to_item_ibfk_1", type_="foreignkey") 
batch_op.drop_constraint("team_to_item_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_item_ibfk_1", "team_to_item", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "team_to_item_ibfk_2", "team_to_item", "market_item", ["item_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_source_code") as batch_op: batch_op.drop_constraint("team_to_source_code_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_source_code_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_source_code_ibfk_1", "team_to_source_code", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "team_to_source_code_ibfk_2", "team_to_source_code", "source_code", ["source_code_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_hint") as batch_op: batch_op.drop_constraint("team_to_hint_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_hint_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_hint_ibfk_1", "team_to_hint", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "team_to_hint_ibfk_2", "team_to_hint", "hint", ["hint_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_flag") as batch_op: batch_op.drop_constraint("team_to_flag_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_flag_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_flag_ibfk_1", "team_to_flag", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( "team_to_flag_ibfk_2", "team_to_flag", "flag", ["flag_id"], ["id"], ondelete="RESTRICT", ) with op.batch_alter_table("team_to_game_level") as batch_op: batch_op.drop_constraint("team_to_game_level_ibfk_1", type_="foreignkey") batch_op.drop_constraint("team_to_game_level_ibfk_2", type_="foreignkey") op.create_foreign_key( "team_to_game_level_ibfk_1", "team_to_game_level", "team", ["team_id"], ["id"], ondelete="RESTRICT", ) op.create_foreign_key( 
"team_to_game_level_ibfk_2", "team_to_game_level", "game_level", ["game_level_id"], ["id"], ondelete="RESTRICT", )
moloch--/RootTheBox
alembic/versions/5ca019edf61f_cascade_on_delete.py
Python
apache-2.0
12,722
# Copyright (c) 2017 CorpNewt
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

import discord


def name(member : discord.Member):
    # A helper function to return the member's display name
    # (nickname if set, else username, else None).
    nick = getattr(member, "nick", None)
    if nick:
        return nick
    username = getattr(member, "name", None)
    if username:
        return username
    return None


def memberForID(id, members, me):
    """Return the member whose id equals *id*, checking the bot (me) first.

    Returns None when no member matches."""
    # Check self first.
    if me.id == id:
        return me
    # Check other members.
    for member in members:
        if member.id == id:
            return member
    return None


def memberForName(name, members, me):
    """Case-insensitive lookup of a member by display name, checking the bot
    (me) first; falls back to scraping the digits out of *name* and treating
    them as a member ID. Returns None when nothing matches."""
    # Check self first.
    if me.display_name.lower() == name.lower():
        return me
    # Check rest of members.
    for member in members:
        if member.display_name.lower() == name.lower():
            return member
    # No member yet - try ID
    memID = ''.join(list(filter(str.isdigit, name)))
    newMem = memberForID(memID, members, me)
    if newMem:
        return newMem
    return None


def roleForID(id, server):
    """Return the server role whose id equals *id*, or None."""
    for role in server.roles:
        if role.id == id:
            return role
    return None


def roleForName(name, server):
    """Case-insensitive lookup of a role by name; falls back to scraping the
    digits out of *name* and treating them as a role ID. Returns None when
    nothing matches."""
    for role in server.roles:
        if role.name.lower() == name.lower():
            return role
    # No role yet - try ID
    roleID = ''.join(list(filter(str.isdigit, name)))
    newRole = roleForID(roleID, server)
    if newRole:
        return newRole
    return None


def serverNick(user, server):
    """Return *user*'s display name on *server*, or None if not a member."""
    for member in server.members:
        if member.id == user.id:
            return name(member)
    return None


def checkNameForInt(name, server):
    """Parse "<member name> [int]" into its parts.

    Returns {"Member": member-or-None, "Int": int-or-None}. The trailing
    integer is only reported when the member lookup succeeds (matching the
    original behavior).

    Bug fix: the previous version called memberForName(name, server) and
    memberForID(memID, server) with two arguments, but both helpers take
    (x, members, me) -- every call raised TypeError. We now pass
    server.members / server.me, the same attributes serverNick relies on.
    """
    def _find_member(text):
        # Display-name lookup first, then fall back to the digits in *text*
        # interpreted as a member ID.
        found = memberForName(text, server.members, server.me)
        if found:
            return found
        mem_id = ''.join(list(filter(str.isdigit, text)))
        return memberForID(mem_id, server.members, server.me)

    parts = name.split()
    if len(parts) < 2:
        # Single token - there is no trailing int to strip off.
        return { "Member" : _find_member(name), "Int" : None }
    try:
        the_int = int(parts[-1])
    except ValueError:
        # Last token isn't an int - treat the whole string as a name.
        return { "Member" : _find_member(name), "Int" : None }
    member = _find_member(" ".join(parts[:-1]))
    # Original semantics: when the member can't be resolved, the int is
    # discarded as well.
    return { "Member" : member, "Int" : the_int if member else None }


def checkRoleForInt(name, server):
    """Parse "<role name> [int]" into its parts.

    Returns {"Role": role-or-None, "Int": int-or-None}; same contract as
    checkNameForInt but for roles."""
    def _find_role(text):
        found = roleForName(text, server)
        if found:
            return found
        role_id = ''.join(list(filter(str.isdigit, text)))
        return roleForID(role_id, server)

    parts = name.split()
    if len(parts) < 2:
        return { "Role" : _find_role(name), "Int" : None }
    try:
        the_int = int(parts[-1])
    except ValueError:
        return { "Role" : _find_role(name), "Int" : None }
    role = _find_role(" ".join(parts[:-1]))
    return { "Role" : role, "Int" : the_int if role else None }
StarbotDiscord/Starbot
libs/displayname.py
Python
apache-2.0
6,488
# Copyright (c) <2016> <GUANGHAN NING>. All Rights Reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#    http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''
Script File: ROLO_evaluation.py

Description:
	ROLO is short for Recurrent YOLO, aimed at simultaneous object detection and tracking

Paper: http://arxiv.org/abs/1607.05781
Author: Guanghan Ning
Webpage: http://guanghan.info/
'''

# NOTE(review): this file is Python 2 (print statement below) and depends on
# matlab.engine, cv2, and the local ROLO_utils module plus hard-coded /u03 paths.
import numpy
print numpy.__path__
import cv2
import os
import numpy as np
import sys
import ROLO_utils as utils

import matplotlib.pyplot as plot
import pickle
import scipy.io

import re
import h5py
import matlab.engine

''' -----------------------------Deal with benchmark results: matlab format-------------------------- '''


# Map a benchmark tracker's numeric id to its name.
# NOTE(review): an id outside 0..10 leaves `method` unassigned and raises
# UnboundLocalError at the return -- callers are assumed to stay in range.
def choose_benchmark_method(id):
    if id == 0:
        method = 'STRUCK'
    elif id == 1:
        method = 'CXT'
    elif id == 2:
        method = 'TLD'
    elif id == 3:
        method = 'OAB'
    elif id == 4:
        method = 'CSK'
    elif id == 5:
        method = 'RS'
    elif id == 6:
        method = 'LSK'
    elif id == 7:
        method = 'VTD'
    elif id == 8:
        method = 'VTS'
    elif id == 9:
        method = 'CNN-SVM'
    elif id == 10:
        method = 'Staple'
    return method


# Build the "<sequence>_<method>.mat" result-file name for a tracker/sequence pair.
def choose_mat_file(method_id, sequence_id):
    [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)
    method_name = choose_benchmark_method(method_id)
    mat_file = sequence_name + '_' + method_name + '.mat'
    return mat_file


# Load one tracker run from a benchmark .mat file.
# Exactly one of TRE/SRE/OPE is expected to be True; `id` selects which run.
# Returns [locations_array, first_frame_number, last_frame_number].
def load_mat_results(mat_file, TRE, SRE, OPE, id):
    if TRE is True:
        fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_TRE'
    elif SRE is True:
        fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_SRE'
    elif OPE is True:
        # NOTE(review): OPE deliberately(?) reads the TRE folder with id forced
        # to 0 -- presumably OPE equals the first TRE segment; confirm.
        fold = '/u03/Guanghan/dev/ROLO-dev/experiments/benchmark_results/pami15_TRE'
        id = 0
    mat_path = os.path.join(fold, mat_file)
    CNN_SVM = False
    if CNN_SVM is True:
        # CNN-SVM results need MATLAB to decode; disabled by the flag above.
        eng = matlab.engine.start_matlab()
        content = eng.load(mat_path,nargout=1)
        mat_results= content['results'][0]['res']#[0]
        numbers= [0, content['results'][0]['len']]
        eng.exit()
    else:
        mat = scipy.io.loadmat(mat_path)
        mat_results = mat['results'][0][id][0][0][5]
        mat_range_str = mat['results'][0][id][0][0][2]
        numbers= re.findall(r'\d+', str(mat_range_str))
    return [mat_results, int(numbers[0]), int(numbers[1])]


# Placeholder: planned pipeline for converting benchmark .mat output into the
# same numpy format as YOLO/ROLO results.
def load_benchmark_results():
    # 1. read mat file, output numpy file to: e.g., /u03/Guanghan/dev/ROLO-dev/benchmark/DATA/Car1/STRUCK/
    # 2. convert to same format as yolo and rolo
    # 3. evaluate AUC and avg_IOU score, for drawing the success plot
    # 4. Compare with ROLO and YOLO's OPE (3 parts: TRE ,SRE, SRER)
    return


# Average IOU over sequences 0..29 for one benchmark tracker (OPE protocol);
# writes per-sequence scores to output/IOU/avgIOU_<method>.txt.
def evaluate_benchmark_avg_IOU(method_id):
    # calculate AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    avg_score= 0
    method_name= choose_benchmark_method(method_id)
    file_name= 'output/IOU/avgIOU_' + method_name + '.txt'
    f= open(file_name, 'w')
    for sequence_id in range(evaluate_st, evaluate_ed + 1):
        [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

        # Load benchmark detection loc
        mat_file = choose_mat_file(method_id, sequence_id)
        [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, False, True, 0)

        # Load ground truth detection loc
        gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
        lines = utils.load_dataset_gt(gt_file_path)

        #
        total= 0
        total_score= 0
        for id in range(0, ed_frame_num):
            location= locations[id]
            gt_location = utils.find_gt_location(lines, id)
            score = utils.iou(location, gt_location)
            total_score += score
            total += 1.0
        total_score /= total
        [dummy, dummy, sequence_name, dummy, dummy]= utils.choose_video_sequence(sequence_id)
        print(method_name, ',' ,sequence_name, ": avg_IOU = ", total_score)
        f.write(method_name + ', ' + sequence_name + ": avg_IOU = " + str("{:.3f}".format(total_score)) + '\n')
        avg_score += total_score
    f.close()
    avg_score /= num_evaluate
    print('average score over all sequences:', avg_score)


# Success-rate curve (AUC) over overlap thresholds 0.00..0.95 for one tracker,
# OPE protocol; pickles the curve to output/AUC_score_<method>.pickle.
def evaluate_benchmark_AUC_OPE(method_id):
    # calculate AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    AUC_score= []
    for thresh_int in range(0, 100, 5):
        # +0.0001 nudges the threshold so exact-equality overlaps don't count.
        thresh = thresh_int / 100.0 + 0.0001
        print("thresh= ", thresh)
        avg_score= 0
        for sequence_id in range(evaluate_st, evaluate_ed + 1):
            [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

            # Load benchmark detection loc
            mat_file = choose_mat_file(method_id, sequence_id)
            [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, False, True, 0)
            #print(locations)

            # Load ground truth detection loc
            gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
            lines = utils.load_dataset_gt(gt_file_path)

            #
            total= 0
            total_score= 0
            for id in range(0, ed_frame_num):
                location= locations[id]
                gt_location = utils.find_gt_location(lines, id)
                score = utils.cal_benchmark_score(location, gt_location, thresh)
                total_score += score
                total += 1.0
            total_score /= total
            avg_score += total_score
        AUC_score.append(avg_score/num_evaluate)
        print("(thresh, AUC_score) = ", thresh, ' ', avg_score/num_evaluate)

    method_name= choose_benchmark_method(method_id)
    file_name= 'output/AUC_score_' + method_name + '.pickle'
    with open(file_name, 'w') as f:
        pickle.dump(AUC_score, f)


# Success-rate curve under TRE (20 temporal restarts per sequence); pickles
# the curve to output/TRE_score_<method>.pickle.
def evaluate_benchmark_AUC_TRE(method_id):
    # calculate TRE of AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    TRE_num = 20
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    AUC_score= []
    for thresh_int in range(0, 100, 5):
        thresh = thresh_int / 100.0 + 0.0001
        print("thresh= ", thresh)
        avg_score= 0
        for sequence_id in range(evaluate_st, evaluate_ed + 1):
            [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

            # Load ground truth detection loc
            gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
            lines = utils.load_dataset_gt(gt_file_path)

            # Load benchmark detection loc
            mat_file = choose_mat_file(method_id, sequence_id)
            total_score_over_TREs= 0
            for locations_id in range(0, TRE_num):
                [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id)
                ct_frames= 0
                total_score_over_frames= 0
                for id in range(st_frame_num-1, ed_frame_num):
                    # Tracker output is indexed from its own start frame; GT by
                    # absolute frame number.
                    id_offset= id - st_frame_num + 1
                    location= locations[id_offset]  # id_offset, not id
                    gt_location = utils.find_gt_location(lines, id)  #id, not id_offset
                    score = utils.cal_benchmark_score(location, gt_location, thresh)
                    total_score_over_frames += score
                    ct_frames += 1.0
                total_score_over_frames /= ct_frames
                total_score_over_TREs += total_score_over_frames
            total_score_over_TREs /= (TRE_num * 1.0)
            avg_score += total_score_over_TREs
        AUC_score.append(avg_score/num_evaluate)
        print("(thresh, AUC_score) = ", thresh, ' ', avg_score/num_evaluate)

    method_name= choose_benchmark_method(method_id)
    file_name= 'output/TRE_score_' + method_name + '.pickle'
    with open(file_name, 'w') as f:
        pickle.dump(AUC_score, f)


# Average IOU under the TRE protocol; writes a single aggregate line to
# output/IOU/TRE_avgIOU_<method>.txt and returns the score.
def evaluate_benchmark_avg_IOU_TRE(method_id):
    # calculate TRE of AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    TRE_num = 20
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    score_over_sequences= 0
    method_name= choose_benchmark_method(method_id)
    file_name= 'output/IOU/TRE_avgIOU_' + method_name + '.txt'
    f= open(file_name, 'w')
    for sequence_id in range(evaluate_st, evaluate_ed + 1):
        [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

        # Load ground truth detection loc
        gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
        lines = utils.load_dataset_gt(gt_file_path)

        # Load benchmark detection loc
        mat_file = choose_mat_file(method_id, sequence_id)
        score_over_TREs= 0
        for locations_id in range(0, TRE_num):
            [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id)
            ct_frames= 0
            score_over_frames= 0
            for id in range(st_frame_num-1, ed_frame_num):
                id_offset= id - st_frame_num + 1
                location= locations[id_offset]  # id_offset, not id
                gt_location = utils.find_gt_location(lines, id)  #id, not id_offset
                score = utils.iou(location, gt_location)
                score_over_frames += score
                ct_frames += 1.0
            score_over_frames /= ct_frames
            score_over_TREs += score_over_frames
        score_over_TREs /= (TRE_num * 1.0)
        score_over_sequences += score_over_TREs
    avg_IOU_TRE_score= score_over_sequences/num_evaluate
    print("avg_IOU_TRE_score = ", avg_IOU_TRE_score)
    # NOTE(review): this single write happens after the loop, so sequence_name
    # is the leaked value from the final iteration -- presumably only the
    # aggregate score matters; confirm intent.
    f.write(method_name + ', ' + sequence_name + ": TRE_avg_IOU = " + str("{:.3f}".format(avg_IOU_TRE_score)) + '\n')
    f.close()
    return avg_IOU_TRE_score


# Success-rate curve under SRE (12 spatially-shifted initializations); pickles
# the curve to output/SRE_score_<method>.pickle.
def evaluate_benchmark_AUC_SRE(method_id):
    # calculate TRE of AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    SRE_num = 12
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    AUC_score= []
    for thresh_int in range(0, 100, 5):
        # NOTE(review): "+ + 0.0001" is a double unary plus -- harmless but
        # almost certainly a typo for "+ 0.0001"; kept verbatim.
        thresh = thresh_int / 100.0 + + 0.0001
        print("thresh= ", thresh)
        avg_score_over_sequences = 0
        for sequence_id in range(evaluate_st, evaluate_ed + 1):
            [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

            # Load ground truth detection loc
            gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
            lines = utils.load_dataset_gt(gt_file_path)

            # Load benchmark detection loc
            mat_file = choose_mat_file(method_id, sequence_id)
            total= 0
            avg_score= 0
            for locations_id in range(0, SRE_num):
                [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, True, False, locations_id)
                total += 1.0
                ct = 0
                total_score= 0
                for id in range(st_frame_num-1, ed_frame_num):
                    id_offset= id - st_frame_num + 1
                    location= locations[id_offset]  # id_offset, not id
                    gt_location = utils.find_gt_location(lines, id)  #id, not id_offset
                    score = utils.cal_benchmark_score(location, gt_location, thresh)
                    total_score += score
                    ct += 1.0
                total_score /= ct
                avg_score += total_score
            avg_score /= total
            avg_score_over_sequences += avg_score
        AUC_score.append(avg_score_over_sequences/num_evaluate)
        print("(thresh, AUC_score) = ", thresh, ' ', avg_score_over_sequences/num_evaluate)

    method_name= choose_benchmark_method(method_id)
    file_name= 'output/SRE_score_' + method_name + '.pickle'
    with open(file_name, 'w') as f:
        pickle.dump(AUC_score, f)


# Average IOU under the SRE protocol; writes a single aggregate line to
# output/IOU/SRE_avgIOU_<method>.txt and returns the score.
def evaluate_benchmark_avg_IOU_SRE(method_id):
    # calculate TRE of AUC(Average Under Curve) of benchmark algorithms
    ''' PARAMETERS '''
    evaluate_st = 0
    evaluate_ed = 29
    SRE_num = 12
    num_evaluate= evaluate_ed - evaluate_st + 1.0

    method_name= choose_benchmark_method(method_id)
    file_name= 'output/IOU/SRE_avgIOU_' + method_name + '.txt'
    f= open(file_name, 'w')
    avg_score= 0
    for sequence_id in range(evaluate_st, evaluate_ed + 1):
        [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id)

        # Load ground truth detection loc
        gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt')
        lines = utils.load_dataset_gt(gt_file_path)

        # Load benchmark detection loc
        mat_file = choose_mat_file(method_id, sequence_id)
        total= 0
        total_score= 0
        for locations_id in range(0, SRE_num):
            [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, False, True, False, locations_id)
            for id in range(st_frame_num-1, ed_frame_num):
                id_offset= id - st_frame_num + 1
                location= locations[id_offset]  # id_offset, not id
                gt_location = utils.find_gt_location(lines, id)  #id, not id_offset
                score = utils.iou(location, gt_location)
                total_score += score
                total += 1.0
        total_score /= total
        avg_score += total_score
    avg_IOU_SRE_score= avg_score/num_evaluate
    print("avg_IOU_score_SRE: ", avg_IOU_SRE_score)
    # NOTE(review): as in the TRE variant, this write is after the loop and
    # uses the leaked final sequence_name -- confirm intent.
    f.write(method_name + ', ' + sequence_name + ": SRE_avg_IOU = " + str("{:.3f}".format(avg_IOU_SRE_score)) + '\n')
    f.close()
    return avg_IOU_SRE_score


''' -----------------------------Deal with ROLO results: python format-----------------------------'''


# Draw the OPE success plot comparing ROLO/YOLO against the benchmark trackers.
# Reads the pickled AUC curves produced by the evaluate_* functions above.
def draw_AUC_OPE():
    num_methods = 9 + 1
    with open('output/AUC_score.pickle') as f:
        [yolo_AUC_score, rolo_AUC_score] = pickle.load(f)
    # Append a terminal 0 so the curve closes at overlap threshold 1.0.
    yolo_AUC_score.append(0)
    rolo_AUC_score.append(0)
    yolo_AUC_score = np.asarray(yolo_AUC_score)
    rolo_AUC_score = np.asarray(rolo_AUC_score)

    with open('output/AUC_kalman_score.pickle') as f:
        [yolo_kalman_AUC_score] = pickle.load(f)
    yolo_kalman_AUC_score.append(0)
    yolo_kalman_AUC_score = np.asarray(yolo_kalman_AUC_score)

    benchmark_AUC_score = []
    for method_id in range(0, num_methods):
        method_name= choose_benchmark_method(method_id)
        file_name= 'output/AUC_score_' + method_name + '.pickle'
        with open(file_name) as f:
            AUC_score = pickle.load(f)
        AUC_score.append(0)
        AUC_score = np.asarray(AUC_score)
        benchmark_AUC_score.append(AUC_score)

    x = [i/100.0 for i in range(0, 105, 5)]
    print(len(x))
    print(len(yolo_AUC_score))
    print(x)
    print(yolo_AUC_score)
    print(rolo_AUC_score)
    fig= plot.figure()
    ax = fig.gca()
    ax.set_xticks(np.arange(0, 1.1, 0.1))
    ax.set_yticks(np.arange(0, 100, 10))
    plot.title("Success Plot of OPE")
    #plot.title("Success Plot of OPE30: AUC(Average Under Curve)")
    plot.xlabel("overlap threshold")
    plot.ylabel("success rate")
    '''
    plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO", linestyle='-', marker= "s", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, yolo_AUC_score*100, color = 'g', label = "YOLO", linestyle='--', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[0]*100, color = 'r', label = "STRUCK", linestyle='-', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[1]*100, color = 'r', label = "CXT", linestyle='--', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[2]*100, color = 'b', label = "TLD", linestyle='-', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[3]*100, color = 'b', label = "OAB", linestyle='--', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[4]*100, color = 'c', label = "CSK", linestyle='-', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[5]*100, color = 'c', label = "RS", linestyle='--', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[6]*100, color = 'm', label = "LSK", linestyle='-', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[7]*100, color = 'm', label = "VTD", linestyle='--', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    plot.plot(x, benchmark_AUC_score[8]*100, color = 'y', label = "VTS", linestyle='-', marker= "o", markersize= 5, linewidth= 1, markevery= 1)
    '''
    'test all 30'
    # #plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO [0.564]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)  #exp all frames
    # plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO [0.458]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)  #exp 1/3 frames
    # #plot.plot(x, benchmark_AUC_score[9]*100, color = 'y', label = "CNN-SVM[0.520]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # #plot.plot(x, yolo_AUC_score*100, color = 'g', label = "YOLO [0.440]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[0]*100, color = 'r', label = "STRUCK [0.410]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[3]*100, color = 'b', label = "OAB [0.366]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[6]*100, color = 'm', label = "LSK [0.356]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[2]*100, color = 'b', label = "TLD [0.343]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    #
    # plot.plot(x, yolo_kalman_AUC_score*100, color = 'k', label = "YOLO+SORT [0.341]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    #
    # plot.plot(x, benchmark_AUC_score[1]*100, color = 'r', label = "CXT [0.333]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[5]*100, color = 'c', label = "RS [0.325]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[8]*100, color = 'y', label = "VTS [0.320]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[7]*100, color = 'm', label = "VTD [0.315]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    # plot.plot(x, benchmark_AUC_score[4]*100, color = 'c', label = "CSK [0.311]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)

    '''test last 8'''
    plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO [0.476]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    #plot.plot(x, yolo_AUC_score*100, color = 'g', label = "YOLO [0.459]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x, benchmark_AUC_score[6]*100, color = 'm', label = "LSK [0.454]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x, benchmark_AUC_score[8]*100, color = 'y', label = "VTS [0.444]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x, benchmark_AUC_score[7]*100, color = 'm', label = "VTD [0.433]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x, benchmark_AUC_score[1]*100, color = 'r', label = "CXT [0.433]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x, benchmark_AUC_score[0]*100, color = 'r', label = "STRUCK [0.428]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1)
    plot.plot(x,
yolo_kalman_AUC_score*100, color = 'k', label = "YOLO+SORT [0.406]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[4]*100, color = 'c', label = "CSK [0.406]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[5]*100, color = 'c', label = "RS [0.392]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[3]*100, color = 'b', label = "OAB [0.366]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[2]*100, color = 'b', label = "TLD [0.318]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) #plot.plot(x, benchmark_AUC_score[9]*100, color = 'y', label = "VTS", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.axis([0, 1, 0, 100]) plot.grid() plot.legend(loc = 1, prop={'size':10}) plot.show() def draw_AUC_TRE(): with open('output/AUC_score_TRE.pickle') as f: [yolo_AUC_score, rolo_AUC_score] = pickle.load(f) yolo_AUC_score.append(0) rolo_AUC_score.append(0) yolo_AUC_score = np.asarray(yolo_AUC_score) rolo_AUC_score = np.asarray(rolo_AUC_score) with open('output/AUC_kalman_score_TRE.pickle') as f: [yolo_kalman_AUC_score] = pickle.load(f) yolo_kalman_AUC_score.append(0) yolo_kalman_AUC_score = np.asarray(yolo_kalman_AUC_score) benchmark_AUC_score = [] for method_id in range(0, 9): method_name= choose_benchmark_method(method_id) file_name= 'output/TRE_score_' + method_name + '.pickle' with open(file_name) as f: AUC_score = pickle.load(f) AUC_score.append(0) AUC_score = np.asarray(AUC_score) benchmark_AUC_score.append(AUC_score) x = [i/100.0 for i in range(0, 105, 5)] print(len(x)) print(len(yolo_AUC_score)) print(x) print(yolo_AUC_score) print(rolo_AUC_score) fig= plot.figure() ax = fig.gca() ax.set_xticks(np.arange(0, 1.1, 0.1)) ax.set_yticks(np.arange(0, 100, 10)) plot.title("Success Plot of TRE") plot.xlabel("overlap threshold") plot.ylabel("success rate") '''test all 30''' 
plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO [0.562]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[0]*100, color = 'r', label = "STRUCK [0.548]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[3]*100, color = 'b', label = "OAB [0.462]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[4]*100, color = 'c', label = "CSK [0.459]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[1]*100, color = 'r', label = "CXT [0.432]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) #plot.plot(x, yolo_AUC_score*100, color = 'g', label = "YOLO [0.429]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[6]*100, color = 'm', label = "LSK [0.427]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[5]*100, color = 'c', label = "RS [0.425]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[2]*100, color = 'b', label = "TLD [0.414]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[7]*100, color = 'm', label = "VTD [0.414]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[8]*100, color = 'y', label = "VTS [0.397]", linestyle= '-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, yolo_kalman_AUC_score*100, color = 'k', label = "YOLO+SORT [0.322]", linestyle= '--', markersize= 5, linewidth= 2, markevery= 1) plot.axis([0, 1, 0, 100]) plot.grid() plot.legend(loc = 1, prop={'size':10}) plot.show() def draw_AUC_SRE(): with open('output/AUC_score.pickle') as f: [yolo_AUC_score, rolo_AUC_score] = pickle.load(f) yolo_AUC_score.append(0) rolo_AUC_score.append(0) yolo_AUC_score = np.asarray(yolo_AUC_score) rolo_AUC_score = np.asarray(rolo_AUC_score) with 
open('output/AUC_kalman_score.pickle') as f: [yolo_kalman_AUC_score] = pickle.load(f) yolo_kalman_AUC_score.append(0) yolo_kalman_AUC_score = np.asarray(yolo_kalman_AUC_score) benchmark_AUC_score = [] for method_id in range(0, 9): method_name= choose_benchmark_method(method_id) file_name= 'output/SRE_score_' + method_name + '.pickle' with open(file_name) as f: AUC_score = pickle.load(f) AUC_score.append(0) AUC_score = np.asarray(AUC_score) benchmark_AUC_score.append(AUC_score) x = [i/100.0 for i in range(0, 105, 5)] print(len(x)) print(len(yolo_AUC_score)) print(x) print(yolo_AUC_score) print(rolo_AUC_score) fig= plot.figure() ax = fig.gca() ax.set_xticks(np.arange(0, 1.1, 0.1)) ax.set_yticks(np.arange(0, 100, 10)) plot.title("Success Plot of SRE") plot.xlabel("overlap threshold") plot.ylabel("success rate") plot.plot(x, rolo_AUC_score*100, color = 'g', label = "ROLO [0.564]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) #plot.plot(x, yolo_AUC_score*100, color = 'g', label = "YOLO [0.440]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[0]*100, color = 'r', label = "STRUCK [0.391]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, yolo_kalman_AUC_score*100, color = 'k', label = "YOLO+SORT [0.341]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[3]*100, color = 'b', label = "OAB [0.341]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[2]*100, color = 'b', label = "TLD [0.331]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[5]*100, color = 'c', label = "RS [0.320]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[6]*100, color = 'm', label = "LSK [0.302]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[1]*100, color = 'r', label = "CXT [0.295]", linestyle='--', 
markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[4]*100, color = 'c', label = "CSK [0.295]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[7]*100, color = 'm', label = "VTD [0.286]", linestyle='--', markersize= 5, linewidth= 2, markevery= 1) plot.plot(x, benchmark_AUC_score[8]*100, color = 'y', label = "VTS [0.284]", linestyle='-', markersize= 5, linewidth= 2, markevery= 1) plot.axis([0, 1, 0, 100]) plot.grid() plot.legend(loc = 1, prop={'size':10}) plot.show() def draw_step_IOU_curve(): #x = [i for i in range(3, 11, 3)] x= np.asarray([1, 3, 6, 9]) avg_IOU = np.asarray([0.359, 0.434, 0.458, 0.427]) fig= plot.figure() ax = fig.gca() ax.set_xticks(np.arange(1, 11, 1)) ax.set_yticks(np.arange(0.35, 0.47, 0.02)) plot.title("The average accuracy over the numbers of steps") plot.xlabel("step") plot.ylabel("Accuracy [IoU]") plot.plot(x, avg_IOU, color = 'g', linestyle='-', marker= "s", markersize= 10, linewidth= 2, markevery= 1) plot.axis([1, 10, 0.35, 0.47]) plot.grid() plot.legend(loc = 1, prop={'size':10}) plot.show() def draw_step_fps_curve(): avg_fps = np.asarray([271, 110, 61, 42]) x= np.asarray([1, 3, 6, 9]) #x = [i for i in range(3, 11, 3)] print x fig= plot.figure() ax = fig.gca() ax.set_xticks(np.arange(1, 11, 1)) ax.set_yticks(np.arange(0, 275, 30)) plot.title("Fps of the tracking module over the numbers of steps") plot.xlabel("step") plot.ylabel("Frames Per Second (fps)") plot.plot(x, avg_fps, color = 'r', linestyle='-', marker= "^", markersize= 10, linewidth= 2, markevery= 1) plot.axis([1, 10, 20, 275]) plot.grid() plot.legend(loc = 1, prop={'size':10}) plot.show() def evaluate_AUC_TRE(): # calculate AUC(Average Under Curve) TRE ''' PARAMETERS ''' num_steps= 3 TRE_num = 20 evaluate_st = 0 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 yolo_AUC_score= [] rolo_AUC_score= [] for thresh_int in range(0, 100, 5): thresh = thresh_int / 100.0 + 0.0001 #print("thresh= ", thresh) 
rolo_avg_score= 0 yolo_avg_score= 0 for sequence_id in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_out/') rolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'rolo_out_test/') paths_imgs = utils.load_folder( img_fold_path) paths_rolo= utils.load_folder( rolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object rolo_total_score_over_TREs= 0 yolo_total_score_over_TREs= 0 # Load benchmark detection loc mat_file = choose_mat_file(0, sequence_id) for locations_id in range(0, TRE_num): [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id) print(st_frame_num) ct_frames= 0 rolo_total_score_over_frames= 0 yolo_total_score_over_frames= 0 for i in range(st_frame_num-1, len(paths_rolo)- num_steps): id= i + 1 test_id= id + num_steps yolo_location= utils.find_yolo_location(yolo_out_path, test_id) yolo_location= utils.locations_normal(wid, ht, yolo_location) rolo_location= utils.find_rolo_location( rolo_out_path, test_id) rolo_location = utils.locations_normal( wid, ht, rolo_location) gt_location = utils.find_gt_location( lines, test_id - 1) rolo_score = utils.cal_rolo_score(rolo_location, gt_location, thresh) rolo_total_score_over_frames += rolo_score yolo_score = utils.cal_yolo_score(yolo_location, gt_location, thresh) yolo_total_score_over_frames += yolo_score ct_frames += 1.0 rolo_total_score_over_frames /= ct_frames yolo_total_score_over_frames /= ct_frames rolo_total_score_over_TREs += rolo_total_score_over_frames yolo_total_score_over_TREs += yolo_total_score_over_frames rolo_total_score_over_TREs /= (TRE_num * 1.0) yolo_total_score_over_TREs /= (TRE_num * 1.0) 
rolo_avg_score += rolo_total_score_over_TREs yolo_avg_score += yolo_total_score_over_TREs print('Sequence ID: ', sequence_id) print("yolo_avg_score = ", yolo_total_score_over_TREs) print("rolo_avg_score = ", rolo_total_score_over_TREs) yolo_AUC_score.append(yolo_avg_score/num_evaluate) rolo_AUC_score.append(rolo_avg_score/num_evaluate) print("(thresh, yolo_AUC_score) = ", thresh, ' ', yolo_avg_score/num_evaluate) print("(thresh, rolo_AUC_score) = ", thresh, ' ', rolo_avg_score/num_evaluate) with open('output/AUC_score_TRE.pickle', 'w') as f: pickle.dump([yolo_AUC_score, rolo_AUC_score], f) def evaluate_kalman_AUC_TRE(): # calculate AUC(Average Under Curve) TRE ''' PARAMETERS ''' num_steps= 3 TRE_num = 20 evaluate_st = 0 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 yolo_AUC_score= [] rolo_AUC_score= [] for thresh_int in range(0, 100, 5): thresh = thresh_int / 100.0 + 0.0001 #print("thresh= ", thresh) yolo_avg_score= 0 for sequence_id in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_output_kalman_txt/') paths_imgs = utils.load_folder( img_fold_path) paths_yolo= utils.load_folder( yolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object yolo_total_score_over_TREs= 0 # Load benchmark detection loc mat_file = choose_mat_file(0, sequence_id) for locations_id in range(0, TRE_num): [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id) #print(st_frame_num) ct_frames= 0 yolo_total_score_over_frames= 0 for i in range(st_frame_num-1, len(paths_yolo)- num_steps): id= i + 1 test_id= id + num_steps yolo_location= utils.find_yolo_kalman_location(yolo_out_path, test_id) 
gt_location = utils.find_gt_location( lines, test_id - 1) yolo_score = utils.cal_yolo_kalman_score(yolo_location, gt_location, thresh) yolo_total_score_over_frames += yolo_score ct_frames += 1.0 if ct_frames!= 0: yolo_total_score_over_frames /= ct_frames yolo_total_score_over_TREs += yolo_total_score_over_frames yolo_total_score_over_TREs /= (TRE_num * 1.0) yolo_avg_score += yolo_total_score_over_TREs print('Sequence ID: ', sequence_id) print("yolo_avg_score = ", yolo_total_score_over_TREs) yolo_AUC_score.append(yolo_avg_score/num_evaluate) print("(thresh, yolo_AUC_score) = ", thresh, ' ', yolo_avg_score/num_evaluate) with open('output/AUC_kalman_score_TRE.pickle', 'w') as f: pickle.dump([yolo_AUC_score], f) def evaluate_AUC(): # calculate AUC(Average Under Curve) ''' PARAMETERS ''' num_steps= 3 evaluate_st = 0 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 yolo_AUC_score= [] rolo_AUC_score= [] for thresh_int in range(0, 100, 5): thresh = thresh_int / 100.0 + 0.0001 print("thresh= ", thresh) rolo_avg_score= 0 yolo_avg_score= 0 for test in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(test) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_out/') rolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'rolo_out_test/') print(rolo_out_path) paths_imgs = utils.load_folder( img_fold_path) paths_rolo= utils.load_folder( rolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object total= 0 rolo_total_score= 0 yolo_total_score= 0 for i in range(len(paths_rolo)- num_steps): id= i + 1 test_id= id + num_steps #path = paths_imgs[test_id] #img = utils.file_to_img(None, path) #if(img is None): break yolo_location= utils.find_yolo_location(yolo_out_path, test_id) yolo_location= 
utils.locations_normal(wid, ht, yolo_location) rolo_location= utils.find_rolo_location( rolo_out_path, test_id) rolo_location = utils.locations_normal( wid, ht, rolo_location) gt_location = utils.find_gt_location( lines, test_id - 1) rolo_score = utils.cal_rolo_score(rolo_location, gt_location, thresh) #print('rolo_score', rolo_score) rolo_total_score += rolo_score #print('rolo_total_score', rolo_total_score) yolo_score = utils.cal_yolo_score(yolo_location, gt_location, thresh) yolo_total_score += yolo_score total += 1.0 rolo_total_score /= total yolo_total_score /= total rolo_avg_score += rolo_total_score yolo_avg_score += yolo_total_score print('Sequence ID: ', test) print("yolo_avg_score = ", yolo_total_score) print("rolo_avg_score = ", rolo_total_score) yolo_AUC_score.append(yolo_avg_score/num_evaluate) rolo_AUC_score.append(rolo_avg_score/num_evaluate) print("(thresh, yolo_AUC_score) = ", thresh, ' ', yolo_avg_score/num_evaluate) print("(thresh, rolo_AUC_score) = ", thresh, ' ', rolo_avg_score/num_evaluate) with open('output/AUC_score.pickle', 'w') as f: pickle.dump([yolo_AUC_score, rolo_AUC_score], f) #draw_AUC() def evaluate_kalman_AUC(): # calculate AUC(Average Under Curve) ''' PARAMETERS ''' num_steps= 3 evaluate_st = 20 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 yolo_AUC_score= [] for thresh_int in range(0, 100, 5): thresh = thresh_int / 100.0 + 0.0001 print("thresh= ", thresh) yolo_avg_score= 0 for test in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(test) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_output_kalman_txt/') print(yolo_out_path) paths_rolo= utils.load_folder( yolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object total= 0 
yolo_total_score= 0 for i in range(len(paths_rolo)- num_steps): id= i + 1 test_id= id + num_steps #path = paths_imgs[test_id] #img = utils.file_to_img(None, path) #if(img is None): break yolo_location= utils.find_yolo_kalman_location(yolo_out_path, test_id) #yolo_location= utils.locations_normal(wid, ht, yolo_location) gt_location = utils.find_gt_location( lines, test_id - 1) yolo_score = utils.cal_yolo_kalman_score(yolo_location, gt_location, thresh) yolo_total_score += yolo_score total += 1.0 yolo_total_score /= total yolo_avg_score += yolo_total_score print('Sequence ID: ', test) print("yolo_avg_score = ", yolo_total_score) yolo_AUC_score.append(yolo_avg_score/num_evaluate) print("(thresh, yolo_kalman_AUC_score) = ", thresh, ' ', yolo_avg_score/num_evaluate) with open('output/AUC_kalman_score.pickle', 'w') as f: pickle.dump([yolo_AUC_score], f) #draw_AUC() def evaluate_avg_IOU(): # calculate AOS(Average Overlap Score) for each sequence ''' PARAMETERS ''' num_steps= 3 output_video = False display_video = False evaluate_st = 0 evaluate_ed = 29 yolo_ious = [] rolo_ious = [] for test in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(test) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_out/') rolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'rolo_out_test/') print(rolo_out_path) paths_imgs = utils.load_folder( img_fold_path) paths_rolo= utils.load_folder( rolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object fourcc= cv2.cv.CV_FOURCC(*'DIVX') video_name = sequence_name + '_test.avi' video_path = os.path.join('output/videos/', video_name) if output_video is True: video = cv2.VideoWriter(video_path, fourcc, 20, (wid, ht)) total= 0 rolo_avgloss= 0 yolo_avgloss= 0 for i in 
range(len(paths_rolo)- num_steps-1): id= i + 1 test_id= id + num_steps #* num_steps + 1 path = paths_imgs[test_id] img = utils.file_to_img( path) if(img is None): break yolo_location= utils.find_yolo_location( yolo_out_path, test_id) yolo_location= utils.locations_normal(wid, ht, yolo_location) #print(yolo_location) rolo_location= utils.find_rolo_location(rolo_out_path, test_id) rolo_location = utils.locations_normal(wid, ht, rolo_location) #print(rolo_location) gt_location = utils.find_gt_location(lines, test_id - 1) #print('gt: ' + str(test_id)) #print(gt_location) if display_video is True: frame = utils.debug_3_locations(img, gt_location, yolo_location, rolo_location) if output_video is True: video.write(frame) #cv2.imshow('frame',frame) #cv2.waitKey(100) rolo_loss = utils.cal_rolo_IOU(rolo_location, gt_location) rolo_avgloss += rolo_loss yolo_loss= utils.cal_yolo_IOU(yolo_location, gt_location) yolo_avgloss += yolo_loss total += 1 rolo_avgloss /= total yolo_avgloss /= total print('Sequence ID: ', test) print("yolo_avg_iou = ", yolo_avgloss) print("rolo_avg_iou = ", rolo_avgloss) yolo_ious.append(yolo_avgloss) rolo_ious.append(rolo_avgloss) if output_video is True: video.release() #cv2.destroyAllWindows() print('yolo_ious: ', yolo_ious) print('rolo_ious: ', rolo_ious) log_file = open("output/testing-log-final.txt", "a") log_file.write('YOLO_avg_IOU: ') for item in range(len(yolo_ious)): log_file.write(str("{:.3f}".format(yolo_ious[item])) + ' ') log_file.write('\nROLO_avg_IOU: ') for item in range(len(rolo_ious)): log_file.write(str("{:.3f}".format(rolo_ious[item])) + ' ') log_file.write('\n\n') yolo_avg_iou = np.mean(yolo_ious) rolo_avg_iou = np.mean(rolo_ious) log_file.write('YOLO_total_avg_IOU: ') log_file.write(str("{:.3f}".format(yolo_avg_iou))+ ' ') log_file.write('ROLO_total_avg_IOU: ') log_file.write(str("{:.3f}".format(rolo_avg_iou)) + ' ') def evaluate_avg_IOU_kalman(): # calculate AOS(Average Overlap Score) for each sequence ''' PARAMETERS ''' 
num_steps= 3 output_video = False display_video = False evaluate_st = 20 evaluate_ed = 29 yolo_ious = [] for test in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(test) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_kalman_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_output_kalman_txt/') paths_imgs = utils.load_folder( img_fold_path) paths_yolo= utils.load_folder( yolo_kalman_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object fourcc= cv2.cv.CV_FOURCC(*'DIVX') video_name = sequence_name + '_test.avi' video_path = os.path.join('output/videos_kalman/', video_name) if output_video is True: video = cv2.VideoWriter(video_path, fourcc, 20, (wid, ht)) total= 0 yolo_avgloss= 0 for i in range(len(paths_yolo)- num_steps-1): id= i + 1 test_id= id + num_steps #* num_steps + 1 path = paths_imgs[test_id] img = utils.file_to_img( path) if(img is None): break yolo_location= utils.find_yolo_kalman_location( yolo_kalman_path, test_id) #yolo_location= utils.locations_normal(wid, ht, yolo_location) #print(yolo_location) gt_location = utils.find_gt_location(lines, test_id - 1) #print('gt: ' + str(test_id)) #print(gt_location) if display_video is True: frame = utils.debug_kalman_locations(img, gt_location, yolo_location) cv2.imshow('frame',frame) cv2.waitKey(100) if output_video is True: video.write(frame) yolo_loss = utils.iou(yolo_location, gt_location) #yolo_loss= utils.cal_yolo_IOU(yolo_location, gt_location) yolo_avgloss += yolo_loss total += 1 yolo_avgloss /= total print('Sequence ID: ', test) print("yolo_avg_iou = ", yolo_avgloss) yolo_ious.append(yolo_avgloss) if output_video is True: video.release() #cv2.destroyAllWindows() print('yolo_ious: ', yolo_ious) log_file = open("output/yolo_kalman_log.txt", "a") log_file.write('YOLO_avg_IOU: ') for 
item in range(len(yolo_ious)): log_file.write(str("{:.3f}".format(yolo_ious[item])) + ' ') log_file.write('\n\n') yolo_avg_iou = np.mean(yolo_ious) log_file.write('YOLO_total_avg_IOU: ') log_file.write(str("{:.3f}".format(yolo_avg_iou))+ ' ') def evaluate_avg_IOU_TRE(): # calculate AUC(Average Under Curve) TRE ''' PARAMETERS ''' num_steps= 3 TRE_num = 20 evaluate_st = 0 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 rolo_avg_score_over_sequences= 0 yolo_avg_score_over_sequences= 0 for sequence_id in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_out/') rolo_out_path= os.path.join('benchmark/DATA', sequence_name, 'rolo_out_test/') paths_imgs = utils.load_folder( img_fold_path) paths_rolo= utils.load_folder( rolo_out_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter object rolo_total_score_over_TREs= 0 yolo_total_score_over_TREs= 0 # Load benchmark detection loc mat_file = choose_mat_file(0, sequence_id) for locations_id in range(0, TRE_num): #print(locations_id) [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id) #print(ed_frame_num) ct_frames = 0 rolo_score_over_interval= 0 yolo_score_over_interval= 0 for i in range(st_frame_num-1, len(paths_rolo)- num_steps): id= i + 1 test_id= id + num_steps yolo_location= utils.find_yolo_location(yolo_out_path, test_id) yolo_location= utils.locations_normal(wid, ht, yolo_location) rolo_location= utils.find_rolo_location( rolo_out_path, test_id) rolo_location = utils.locations_normal( wid, ht, rolo_location) gt_location = utils.find_gt_location( lines, test_id - 1) rolo_score = utils.cal_rolo_IOU(rolo_location, gt_location) 
rolo_score_over_interval += rolo_score yolo_score = utils.cal_yolo_IOU(yolo_location, gt_location) yolo_score_over_interval += yolo_score ct_frames += 1.0 rolo_score_over_interval /= ct_frames yolo_score_over_interval /= ct_frames rolo_total_score_over_TREs += rolo_score_over_interval yolo_total_score_over_TREs += yolo_score_over_interval rolo_total_score_over_TREs /= (TRE_num * 1.0) yolo_total_score_over_TREs /= (TRE_num * 1.0) print('Sequence ID: ', sequence_id) print("yolo_avg_score = ", rolo_total_score_over_TREs) print("rolo_avg_score = ", yolo_total_score_over_TREs) rolo_avg_score_over_sequences += rolo_total_score_over_TREs yolo_avg_score_over_sequences += yolo_total_score_over_TREs yolo_avg_IOU_TRE = yolo_avg_score_over_sequences/num_evaluate rolo_avg_IOU_TRE = rolo_avg_score_over_sequences/num_evaluate print("(yolo_avg_IOU_TRE) = ", yolo_avg_IOU_TRE) print("(rolo_avg_IOU_TRE) = ", rolo_avg_IOU_TRE) log_file = open("output/IOU/avg_IOU_TRE.txt", "a") log_file.write('yolo_avg_IOU_TRE: ') log_file.write(str("{:.3f}".format(yolo_avg_IOU_TRE)) + ' ') log_file.write('\n rolo_avg_IOU_TRE: ') log_file.write(str("{:.3f}".format(rolo_avg_IOU_TRE)) + ' ') log_file.write('\n\n') def evaluate_avg_IOU_kalman_TRE(): # calculate AUC(Average Under Curve) TRE ''' PARAMETERS ''' num_steps= 3 TRE_num = 20 evaluate_st = 0 evaluate_ed = 29 num_evaluate= evaluate_ed - evaluate_st + 1 yolo_avg_score_over_sequences= 0 for sequence_id in range(evaluate_st, evaluate_ed + 1): [wid, ht, sequence_name, dummy_1, dummy_2] = utils.choose_video_sequence(sequence_id) img_fold_path = os.path.join('benchmark/DATA', sequence_name, 'img/') gt_file_path= os.path.join('benchmark/DATA', sequence_name, 'groundtruth_rect.txt') yolo_kalman_path= os.path.join('benchmark/DATA', sequence_name, 'yolo_output_kalman_txt/') paths_imgs = utils.load_folder( img_fold_path) paths_yolo= utils.load_folder( yolo_kalman_path) lines = utils.load_dataset_gt( gt_file_path) # Define the codec and create VideoWriter 
object yolo_total_score_over_TREs= 0 # Load benchmark detection loc mat_file = choose_mat_file(0, sequence_id) for locations_id in range(0, TRE_num): #print(locations_id) [locations, st_frame_num, ed_frame_num] = load_mat_results(mat_file, True, False, False, locations_id) #print(ed_frame_num) ct_frames = 0 yolo_score_over_interval= 0 for i in range(st_frame_num-1, len(paths_yolo)- num_steps): id= i + 1 test_id= id + num_steps #print(test_id) yolo_location= utils.find_yolo_kalman_location(yolo_kalman_path, test_id) gt_location = utils.find_gt_location( lines, test_id - 1) #yolo_score = utils.cal_yolo_kalman_IOU(yolo_location, gt_location) yolo_score = utils.iou(yolo_location, gt_location) #print(yolo_score) yolo_score_over_interval += yolo_score ct_frames += 1.0 if ct_frames!= 0: yolo_score_over_interval /= ct_frames yolo_total_score_over_TREs += yolo_score_over_interval yolo_total_score_over_TREs /= (TRE_num * 1.0) print('Sequence ID: ', sequence_id) print("yolo_avg_score = ", yolo_total_score_over_TREs) yolo_avg_score_over_sequences += yolo_total_score_over_TREs yolo_avg_IOU_TRE = yolo_avg_score_over_sequences/num_evaluate print("(yolo_avg_IOU_TRE) = ", yolo_avg_IOU_TRE) log_file = open("output/IOU/avg_kalman_IOU_TRE.txt", "a") log_file.write('yolo_kalman_avg_IOU_TRE: ') log_file.write(str("{:.3f}".format(yolo_avg_IOU_TRE)) + ' ') log_file.write('\n\n') '''----------------------------------------main-----------------------------------------------------''' def main(argv): #evaluate_avg_IOU() #evaluate_avg_IOU_TRE() #evaluate_avg_IOU_kalman() #evaluate_avg_IOU_kalman_TRE() #evaluate_AUC() #AUC_OPE and AUC_SRE is the same for ROLO and YOLO #evaluate_AUC_TRE() #evaluate_kalman_AUC() #evaluate_kalman_AUC_TRE() #for method_id in range(9, 10): # evaluate_benchmark_avg_IOU(method_id) #for method_id in range(0, 9): # evaluate_benchmark_avg_IOU_TRE(method_id) #for method_id in range(0, 9): # evaluate_benchmark_avg_IOU_SRE(method_id) #for method_id in range(9, 10): # 
evaluate_benchmark_AUC_OPE(method_id) #for method_id in range(0, 9): # evaluate_benchmark_AUC_TRE(method_id) #for method_id in range(0, 9): # evaluate_benchmark_AUC_SRE(method_id) draw_AUC_OPE() #draw_AUC_TRE() #draw_AUC_SRE() #draw_step_IOU_curve() #draw_step_fps_curve() if __name__=='__main__': main(sys.argv)
Guanghan/ROLO
ROLO_evaluation.py
Python
apache-2.0
54,621
""" Utilities for all views Ben Adida (12-30-2008) """ from django.conf import settings from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import loader import helios_auth from helios_auth.security import get_user ## ## BASICS ## SUCCESS = HttpResponse("SUCCESS") ## ## template abstraction ## def prepare_vars(request, values): vars_with_user = values.copy() if request: vars_with_user['user'] = get_user(request) vars_with_user['csrf_token'] = request.session['csrf_token'] vars_with_user['SECURE_URL_HOST'] = settings.SECURE_URL_HOST vars_with_user['STATIC'] = '/static/auth' vars_with_user['MEDIA_URL'] = '/static/auth/' vars_with_user['TEMPLATE_BASE'] = helios_auth.TEMPLATE_BASE vars_with_user['TEMPLATE_BASENONAV'] = helios_auth.TEMPLATE_BASENONAV vars_with_user['settings'] = settings return vars_with_user def render_template(request, template_name, values=None): vars_with_user = prepare_vars(request, values or {}) return render_to_response('helios_auth/templates/%s.html' % template_name, vars_with_user) def render_template_raw(request, template_name, values=None): t = loader.get_template(template_name + '.html') values = values or {} vars_with_user = prepare_vars(request, values) return t.render(context=vars_with_user, request=request) def render_json(json_txt): return HttpResponse(json_txt)
shirlei/helios-server
helios_auth/view_utils.py
Python
apache-2.0
1,428
import argparse

import mlflow
from ax.service.ax_client import AxClient
from iris import IrisClassification
from iris_data_module import IrisDataModule
import pytorch_lightning as pl


def train_evaluate(params, max_epochs=100):
    """Train IrisClassification with *params*, log via mlflow autolog.

    :param params: Model hyperparameters passed to IrisClassification. Type:dict
    :param max_epochs: Max epochs used for training the model. Type:int
    :return: the "test_acc" callback metric (a tensor), or None if absent.
    """
    model = IrisClassification(**params)
    dm = IrisDataModule()
    dm.setup(stage="fit")
    trainer = pl.Trainer(max_epochs=max_epochs)
    mlflow.pytorch.autolog()
    trainer.fit(model, dm)
    trainer.test(datamodule=dm)
    test_accuracy = trainer.callback_metrics.get("test_acc")
    return test_accuracy


def model_training_hyperparameter_tuning(max_epochs, total_trials, params):
    """
    This function takes input params max_epochs, total_trials, params
    and creates a nested run in Mlflow. The parameters, metrics, model and summary are dumped into their
    respective mlflow-run ids. The best parameters are dumped along with the baseline model.

    :param max_epochs: Max epochs used for training the model. Type:int
    :param total_trials: Number of ax-client experimental trials. Type:int
    :param params: Model parameters. Type:dict
    """
    with mlflow.start_run(run_name="Parent Run"):
        # baseline run with the caller-supplied parameters
        train_evaluate(params=params, max_epochs=max_epochs)

        ax_client = AxClient()
        ax_client.create_experiment(
            parameters=[
                {"name": "lr", "type": "range", "bounds": [1e-3, 0.15], "log_scale": True},
                {"name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3]},
                {"name": "momentum", "type": "range", "bounds": [0.7, 1.0]},
            ],
            objective_name="test_accuracy",
        )

        for i in range(total_trials):
            # each Ax trial is recorded as a nested mlflow run
            with mlflow.start_run(nested=True, run_name="Trial " + str(i)) as child_run:
                parameters, trial_index = ax_client.get_next_trial()
                test_accuracy = train_evaluate(params=parameters, max_epochs=max_epochs)

                # completion of trial
                ax_client.complete_trial(trial_index=trial_index, raw_data=test_accuracy.item())

        best_parameters, metrics = ax_client.get_best_parameters()
        for param_name, value in best_parameters.items():
            mlflow.log_param("optimum_" + param_name, value)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parent_parser=parser)
    parser.add_argument(
        "--total_trials",
        default=3,
        # fix: help text previously read "umber of trials ..."
        help="Number of trials to be run for the optimization experiment",
    )
    args = parser.parse_args()

    if "max_epochs" in args:
        max_epochs = args.max_epochs
    else:
        max_epochs = 100

    params = {"lr": 0.1, "momentum": 0.9, "weight_decay": 0}
    model_training_hyperparameter_tuning(
        max_epochs=int(max_epochs), total_trials=int(args.total_trials), params=params
    )
mlflow/mlflow
examples/pytorch/AxHyperOptimizationPTL/ax_hpo_iris.py
Python
apache-2.0
2,854
""" This code was generated by Codezu. Changes to this file may cause incorrect behavior and will be lost if the code is regenerated. """ from mozurestsdk.mozuclient import default as default_client from mozurestsdk.mozuurl import MozuUrl; from mozurestsdk.urllocation import UrlLocation from mozurestsdk.apicontext import ApiContext; class Shipping(object): def __init__(self, apiContext: ApiContext = None, mozuClient = None): self.client = mozuClient or default_client(); if (apiContext is not None): self.client.withApiContext(apiContext); else: self.client.withApiContext(ApiContext()); def getRates(self,rateRequest, includeRawResponse = False, responseFields = None): """ Retrieves the shipping rates applicable for the site. Args: | rateRequest(rateRequest) - Properties required to request a shipping rate calculation. | includeRawResponse (bool) - Set this parameter to to retrieve the full raw JSON response from a shipping carrier (instead of just the shipping rate). | responseFields (string) - Use this field to include those fields which are not included by default. Returns: | RatesResponse Raises: | ApiException """ url = MozuUrl("/api/commerce/catalog/storefront/shipping/request-rates?responseFields={responseFields}", "POST", UrlLocation.TenantPod, False); url.formatUrl("includeRawResponse", includeRawResponse); url.formatUrl("responseFields", responseFields); self.client.withResourceUrl(url).withBody(rateRequest).execute(); return self.client.result();
Mozu/mozu-python-sdk
mozurestsdk/commerce/catalog/storefront/shipping.py
Python
apache-2.0
1,607
# -*- coding: utf-8 -*- # Generated by Django 1.10.4 on 2017-04-08 23:26 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0006_author_has_github_task'), ('app', '0006_auto_20170405_1957'), ] operations = [ ]
TeamAADGT/CMPUT404-project-socialdistribution
social/app/migrations/0007_merge_20170408_2326.py
Python
apache-2.0
334
import unittest

# fix: Job is referenced as broccoli.job.Job below, but only broccoli.parser
# was imported; importing the submodule explicitly avoids relying on the
# package's __init__ importing it as a side effect.
import broccoli.job
import broccoli.parser


class BroccoliTest(unittest.TestCase):

    def test_job_creation(self):
        """Parse input.json and verify the resulting Job/Task structure."""
        config = broccoli.parser.parse('input.json')
        job = broccoli.job.Job(config)

        # job-level attributes come straight from the parsed config
        self.assertEqual(job.name, 'Job1')
        self.assertEqual(job.description, 'Job1 Description')

        # the fixture declares exactly two tasks
        self.assertIsNotNone(job.get_tasks())
        self.assertEqual(len(job.get_tasks()), 2)

        # first task and its nested structure
        task = job.get_tasks().get(0)
        self.assertEqual(task.name, 'Task1')
        self.assertEqual(task.description, 'Task1 Description')
        self.assertIsNotNone(task.get_sub_tasks())
        self.assertIsNotNone(task.get_children())


if __name__ == '__main__':
    unittest.main()
mcmartins/broccoli
broccoli/test/parse/test.py
Python
apache-2.0
711
# coding=utf8 from cStringIO import StringIO import time import random from twisted.internet import reactor, task from twisted.internet.defer import ( inlineCallbacks, returnValue, Deferred, succeed, ) from twisted.internet.endpoints import TCP4ClientEndpoint from twisted.internet.protocol import Protocol from twisted.names.client import Resolver from twisted.web import client from twisted.web.http_headers import Headers from twisted.web.client import ( HTTPPageGetter, HTTPClientFactory, _makeGetterFactory, ) from observer.utils import wait from observer.lib import log class HTTPRESTGetter(HTTPPageGetter): def handleStatus_304(self): pass class HTTPRESTClientFactory(HTTPClientFactory): protocol = HTTPRESTGetter def getPage(url, contextFactory=None, *args, **kwargs): return _makeGetterFactory( url, HTTPRESTClientFactory, contextFactory=contextFactory, *args, **kwargs).deferred class HTTPBodyReceiver(Protocol): def __init__(self, deferred): self.deferred = deferred self.body = StringIO() self.reason = None def dataReceived(self, data): self.body.write(data) def connectionLost(self, reason): body = self.body.getvalue() self.reason = reason self.deferred.callback(body) def _cbRequest(response): finished = Deferred() response.deliverBody(HTTPBodyReceiver(finished)) return finished def request(agent, url, headers=None, body=None): log.debug('begin request ' + url) print agent, agent.request if body is None: d = agent.request('GET', str(url), headers) else: d = agent.request('POST', str(url), headers, client.FileBodyProducer(StringIO(body))) d.addCallback(_cbRequest) return d class AdditionalHeaderAgent(object): """ An L{Agent} wrapper to add default headers. @param headers: A list or tuple of (name, value) objects. The name is which of the HTTP header to set the value for. and the value is which to set for the named header. 
""" def __init__(self, agent, headers): self._header_list = headers self._agent = agent def request(self, method, uri, headers=None, bodyProducer=None): if headers is None: headers = Headers() else: headers = headers.copy() for name, value in self._header_list: headers.addRawHeader(name, value) return self._agent.request(method, uri, headers, bodyProducer) class InfiniteLoginError(Exception): def __init__(self, message): Exception.__init__(self, message) self.message = message def __str__(self): return self[0] class NoAgentError(Exception): def __init__(self, message): Exception.__init__(self, message) self.message = message def __str__(self): return self[0] class LoginResponse(object): def __init__(self, response, body, reason): self.original = response self.data = body self.reason = reason def __getattr__(self, name): return getattr(self.original, name) def deliverBody(self, protocol): protocol.dataReceived(self.data) protocol.connectionLost(self.reason) class LoginAgent(object): def __init__(self, agent, retryLimit=1): self._agent = agent self.loggedin = False self.retryLimit = retryLimit def login(self): raise NotImplementedError("Must Implement the login method.") def testLogin(self, content): raise NotImplementedError("Must Implement the test login method.") @inlineCallbacks def request(self, method, uri, headers=None, bodyProducer=None): retryCount = 0 while True: if not self.loggedin: yield self.login() retryCount += 1 response = yield self._agent.request(method, uri, headers, bodyProducer) finished = Deferred() p = HTTPBodyReceiver(finished) response.deliverBody(p) body = yield finished reason = p.reason body = yield self.testLogin(body) if self.loggedin: returnValue(LoginResponse(response, body, reason)) return if bodyProducer is not None: returnValue(None) return if retryCount >= self.retryLimit: raise InfiniteLoginError("Maximum retry limit reached") class TimedAgentPool(object): #FIXME here the float value should be replaced by variables def __init__( 
self, minTimeInterval=10.0, maxTimeInterval=15.0, loginInterval=60.0, ): self.lastLogin = 0.0 self.agents = [] self.idleAgents = [] self.defers = [] def initAgent(self, agent): self.agents.append(agent) self.idleAgents.append(agent) agent.nextAccess = 0 agent.pool = self def addAgent(self, agent): t = random.uniform(self.minTimeInterval, self.maxTimeInterval) agent.nextAccess = time.time() + t if self.defers: d = self.defers[0] del self.defers[0] task.deferLater(reactor, t, d.callback, agent) else: self.idleAgents.append(agent) @inlineCallbacks def getAgent(self): if not self.agents: raise NoAgentError('This pool has no agent yet.') if not self.idleAgents: d = Deferred() self.defers.append(d) agent = yield d else: agent = self.idleAgents[0] del self.idleAgents[0] now = time.time() if now > agent.nextAccess: returnValue(agent) else: yield wait(agent.nextAccess - now) returnValue(agent) def removeAgent(self, agent): self.agents.remove(agent) class DNSCachingResolver(Resolver): """ subclass Resolver to add dns caching mechanism """ clear_interval = 1 * 60 * 60 def __init__(self, *args, **kwargs): Resolver.__init__(self, *args, **kwargs) self.clear_cache() def clear_cache(self): self.cached_url = {} reactor.callLater(self.clear_interval, self.clear_cache) def update_cache(self, result, name): self.cached_url[name] = result return result def getHostByName(self, name, timeout=None, effort=10): """ @see: twisted.names.client.getHostByName """ # XXX - respect timeout if name in self.cached_url: return succeed(self.cached_url[name]) else: return self.lookupAllRecords( name, timeout, ).addCallback( self._cbRecords, name, effort, ).addCallback(self.update_cache, name)
seraphlnWu/creditor
observer/utils/http.py
Python
apache-2.0
7,090
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Nicira Inc.
# All Rights Reserved
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import logging

from quantumclient.quantum import v2_0 as quantumv20


class ListQoSQueue(quantumv20.ListCommand):
    """List queues that belong to a given tenant."""

    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.ListQoSQueue')
    _formatters = {}
    list_columns = ['id', 'name', 'min', 'max', 'qos_marking', 'dscp',
                    'default']


class ShowQoSQueue(quantumv20.ShowCommand):
    """Show information of a given queue."""

    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.ShowQoSQueue')
    allow_names = True


class CreateQoSQueue(quantumv20.CreateCommand):
    """Create a queue."""

    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.CreateQoSQueue')

    def add_known_arguments(self, parser):
        # Register queue-specific CLI options on the shared parser.
        # fix: removed stray trailing commas after the add_argument() calls —
        # they turned each statement into a pointless one-element tuple.
        parser.add_argument(
            'name', metavar='NAME',
            help='Name of queue')
        parser.add_argument(
            '--min',
            help='min-rate')
        parser.add_argument(
            '--max',
            help='max-rate')
        parser.add_argument(
            '--qos-marking',
            help='qos marking untrusted/trusted')
        parser.add_argument(
            '--default',
            default=False,
            help=('If true all ports created with be the size of this queue'
                  ' if queue is not specified'))
        parser.add_argument(
            '--dscp',
            help='Differentiated Services Code Point')

    def args2body(self, parsed_args):
        """Build the request body, including only the options that were set."""
        params = {'name': parsed_args.name,
                  'default': parsed_args.default}
        if parsed_args.min:
            params['min'] = parsed_args.min
        if parsed_args.max:
            params['max'] = parsed_args.max
        if parsed_args.qos_marking:
            params['qos_marking'] = parsed_args.qos_marking
        if parsed_args.dscp:
            params['dscp'] = parsed_args.dscp
        if parsed_args.tenant_id:
            params['tenant_id'] = parsed_args.tenant_id
        return {'qos_queue': params}


class DeleteQoSQueue(quantumv20.DeleteCommand):
    """Delete a given queue."""

    log = logging.getLogger(__name__ + '.DeleteQoSQueue')
    resource = 'qos_queue'
    allow_names = True
wallnerryan/quantum_migrate
quantumclient/quantum/v2_0/nvp_qos_queue.py
Python
apache-2.0
2,899
"""Build Gravitee.io distribution bundles for a given release.

Downloads every component listed in the release manifest (release.json),
assembles gateway / management / UI bundles with their plugins, and zips
everything into per-bundle archives plus one full archive.
"""

import os
import re
import shutil
import zipfile
import requests
import json
from shutil import copy2
from urllib.request import urlretrieve, urlopen

# Input parameters
version_param = os.environ.get('RELEASE_VERSION')
is_latest_param = True if version_param == "master" else False

# build constants
m2repo_path = '/m2repo'
tmp_path = './tmp/%s' % version_param
policies_path = "%s/policies" % tmp_path
resources_path = "%s/resources" % tmp_path
fetchers_path = "%s/fetchers" % tmp_path
services_path = "%s/services" % tmp_path
reporters_path = "%s/reporters" % tmp_path
repositories_path = "%s/repositories" % tmp_path
connectors_path = "%s/connectors" % tmp_path

snapshotPattern = re.compile('.*-SNAPSHOT')


def clean():
    """Reset the per-version tmp tree and recreate the plugin directories."""
    if os.path.exists(tmp_path):
        shutil.rmtree(tmp_path)
    os.makedirs(tmp_path, exist_ok=True)
    os.makedirs(policies_path, exist_ok=True)
    os.makedirs(fetchers_path, exist_ok=True)
    os.makedirs(resources_path, exist_ok=True)
    os.makedirs(services_path, exist_ok=True)
    os.makedirs(reporters_path, exist_ok=True)
    os.makedirs(repositories_path, exist_ok=True)
    os.makedirs(connectors_path, exist_ok=True)


def get_policies(release_json):
    """Return policy components from the manifest (plus implicit ones)."""
    components = release_json['components']
    search_pattern = re.compile('gravitee-policy-.*')
    policies = []
    for component in components:
        if search_pattern.match(component['name']) and 'gravitee-policy-api' != component['name']:
            policies.append(component)
            # ratelimit ships quota (and, from 1.10.0 on, spikearrest) in
            # the same repo — add them with the same version.
            if "gravitee-policy-ratelimit" == component['name']:
                policies.append({"name": "gravitee-policy-quota", "version": component['version']})
                if int(component['version'].replace(".", "").replace("-SNAPSHOT", "")) >= 1100:
                    policies.append({"name": "gravitee-policy-spikearrest", "version": component['version']})
    return policies


def get_resources(release_json):
    """Return the fixed list of resource components from the manifest."""
    components_name = [
        "gravitee-resource-cache",
        "gravitee-resource-oauth2-provider-generic",
        "gravitee-resource-oauth2-provider-am"
    ]
    resources = []
    for component_name in components_name:
        resources.append(get_component_by_name(release_json, component_name))
    return resources


def get_fetchers(release_json):
    """Return every fetcher component except the API definition itself."""
    components = release_json['components']
    search_pattern = re.compile('gravitee-fetcher-.*')
    fetchers = []
    for component in components:
        if search_pattern.match(component['name']) and 'gravitee-fetcher-api' != component['name']:
            fetchers.append(component)
    return fetchers


def get_reporters(release_json):
    """Return the fixed list of reporter components from the manifest."""
    components_name = [
        "gravitee-reporter-file",
        "gravitee-reporter-tcp",
        "gravitee-elasticsearch"
    ]
    reporters = []
    for component_name in components_name:
        reporters.append(get_component_by_name(release_json, component_name))
    return reporters


def get_repositories(release_json):
    """Return the fixed list of repository components from the manifest."""
    components_name = [
        "gravitee-repository-mongodb",
        "gravitee-repository-jdbc",
        "gravitee-elasticsearch",
        "gravitee-repository-gateway-bridge-http"
    ]
    repositories = []
    for component_name in components_name:
        repositories.append(get_component_by_name(release_json, component_name))
    return repositories


def get_services(release_json):
    """Return service components; ratelimit doubles as a gateway service."""
    components_name = [
        "gravitee-service-discovery-consul"
    ]
    components = release_json['components']
    search_pattern = re.compile('gravitee-policy-ratelimit')
    services = []
    for component in components:
        if search_pattern.match(component['name']):
            # the ratelimit repo also publishes the gateway rate-limit service
            service = component.copy()
            service['name'] = 'gravitee-gateway-services-ratelimit'
            services.append(service)
            break
    for component_name in components_name:
        services.append(get_component_by_name(release_json, component_name))
    return services


def get_connectors(release_json):
    """Return every cockpit websocket connector component."""
    components = release_json['components']
    search_pattern = re.compile('gravitee-.*-connectors-ws')
    connectors = []
    for component in components:
        if search_pattern.match(component['name']):
            connectors.append(component)
    return connectors


def get_component_by_name(release_json, component_name):
    """Return the first manifest component whose name matches *component_name*."""
    components = release_json['components']
    search_pattern = re.compile(component_name)
    for component in components:
        if search_pattern.match(component['name']):
            return component


def get_download_url(group_id, artifact_id, version, t):
    """Resolve an artifact to a local m2 path or a Sonatype redirect URL."""
    m2path = "%s/%s/%s/%s/%s-%s.%s" % (m2repo_path, group_id.replace(".", "/"),
                                       artifact_id, version, artifact_id, version, t)
    if os.path.exists(m2path):
        return m2path
    else:
        sonatypeUrl = "https://oss.sonatype.org/service/local/artifact/maven/redirect?r=%s&g=%s&a=%s&v=%s&e=%s" % (
            ("snapshots" if snapshotPattern.match(version) else "releases"),
            group_id.replace(".", "/"), artifact_id, version, t)
        # fix: close the HTTP response instead of leaking the connection.
        with urlopen(sonatypeUrl) as f:
            return f.geturl()


def get_suffix_path_by_name(name):
    """Map a component name to its plugin sub-directory under tmp_path."""
    if name.find("policy") == -1:
        suffix = name[name.find('-') + 1:name.find('-', name.find('-') + 1)]
        if suffix == "gateway":
            return "services"
        if suffix == "repository":
            return "repositories"
        if suffix == "cockpit":
            return "connectors"
        return suffix + "s"
    else:
        return "policies"


def download(name, filename_path, url):
    """Fetch *url* (HTTP download or local m2 copy); return the local path."""
    print('\nDowloading %s\n%s' % (name, url))
    if url.startswith("http"):
        # HTTP: derive the destination from the component's plugin folder
        # and the remote file name (ignores the caller-supplied path).
        filename_path = tmp_path + "/" + get_suffix_path_by_name(name) + url[url.rfind('/'):]
        urlretrieve(url, filename_path)
    else:
        copy2(url, filename_path)
    print('\nDowloaded in %s' % filename_path)
    return filename_path


def unzip(files):
    """Extract each zip into the dist dir; return the sorted extraction dirs."""
    unzip_dirs = []
    dist_dir = get_dist_dir_name()
    for file in files:
        with zipfile.ZipFile(file) as zip_file:
            zip_file.extractall("%s/%s" % (tmp_path, dist_dir))
            unzip_dir = "%s/%s/%s" % (tmp_path, dist_dir, sorted(zip_file.namelist())[0])
            unzip_dirs.append(unzip_dir)
            preserve_permissions(unzip_dir)
    return sorted(unzip_dirs)


def preserve_permissions(d):
    """Re-apply the executable bit on bin/gravitee scripts (lost by zip)."""
    search_bin_pattern = re.compile(".*/bin$")
    search_gravitee_pattern = re.compile("gravitee(\.bat)?")
    perm = 0o0755
    for dirname, subdirs, files in os.walk(d):
        if search_bin_pattern.match(dirname):
            for file in files:
                if search_gravitee_pattern.match(file):
                    file_path = "%s/%s" % (dirname, file)
                    print(" set permission %o to %s" % (perm, file_path))
                    os.chmod(file_path, perm)


def copy_files_into(src_dir, dest_dir, exclude_pattern=None):
    """Copy the regular files of *src_dir* into *dest_dir*, minus exclusions."""
    if exclude_pattern is None:
        exclude_pattern = []
    filenames = [os.path.join(src_dir, fn) for fn in next(os.walk(src_dir))[2]]
    print(" copy")
    print(" %s" % filenames)
    print(" into")
    print(" %s" % dest_dir)
    for file in filenames:
        to_exclude = False
        for pattern in exclude_pattern:
            search_pattern = re.compile(pattern)
            if search_pattern.match(file):
                to_exclude = True
                break
        if to_exclude:
            print("[INFO] %s is excluded from files." % file)
            continue
        copy2(file, dest_dir)


def download_policies(policies):
    """Download every policy plugin zip (policy-core is built-in, skipped)."""
    paths = []
    for policy in policies:
        if policy['name'] != "gravitee-policy-core":
            url = get_download_url("io.gravitee.policy", policy['name'], policy['version'], "zip")
            paths.append(
                download(policy['name'], '%s/%s-%s.zip' % (policies_path, policy['name'], policy['version']), url))
    return paths


def download_management_api(mgmt_api, default_version):
    """Download the (pre-3.x) management API standalone distribution."""
    v = default_version if 'version' not in mgmt_api else mgmt_api['version']
    url = get_download_url("io.gravitee.management.standalone",
                           "gravitee-management-api-standalone-distribution-zip", v, "zip")
    return download(mgmt_api['name'], '%s/%s-%s.zip' % (tmp_path, mgmt_api['name'], v), url)


def download_managementV3_api(mgmt_api, default_version):
    """Download the 3.x rest-api standalone distribution."""
    v = default_version if 'version' not in mgmt_api else mgmt_api['version']
    url = get_download_url("io.gravitee.rest.api.standalone.distribution",
                           "gravitee-rest-api-standalone-distribution-zip", v, "zip")
    return download(mgmt_api['name'], '%s/%s-%s.zip' % (tmp_path, mgmt_api['name'], v), url)


def download_gateway(gateway, default_version):
    """Download the gateway standalone distribution."""
    v = default_version if 'version' not in gateway else gateway['version']
    url = get_download_url("io.gravitee.gateway.standalone",
                           "gravitee-gateway-standalone-distribution-zip", v, "zip")
    return download(gateway['name'], '%s/%s-%s.zip' % (tmp_path, gateway['name'], v), url)


def download_fetchers(fetchers):
    """Download every fetcher plugin zip."""
    paths = []
    for fetcher in fetchers:
        url = get_download_url("io.gravitee.fetcher", fetcher['name'], fetcher['version'], "zip")
        paths.append(
            download(fetcher['name'], '%s/%s-%s.zip' % (fetchers_path, fetcher['name'], fetcher['version']), url))
    return paths


def download_resources(resources):
    """Download every resource plugin zip."""
    paths = []
    for resource in resources:
        url = get_download_url("io.gravitee.resource", resource['name'], resource['version'], "zip")
        paths.append(
            download(resource['name'], '%s/%s-%s.zip' % (resources_path, resource['name'], resource['version']), url))
    return paths


def download_services(services):
    """Download every service plugin zip (ratelimit lives in io.gravitee.policy)."""
    paths = []
    for service in services:
        # for release < 1.22
        if service is not None:
            if service['name'] == "gravitee-gateway-services-ratelimit":
                url = get_download_url("io.gravitee.policy", service['name'], service['version'], "zip")
            else:
                url = get_download_url("io.gravitee.discovery", service['name'], service['version'], "zip")
            paths.append(
                download(service['name'], '%s/%s-%s.zip' % (services_path, service['name'], service['version']), url))
    return paths


def download_connectors(connectors):
    """Download every cockpit connector zip."""
    paths = []
    for connector in connectors:
        url = get_download_url("io.gravitee.cockpit", connector['name'], connector['version'], "zip")
        # NOTE(review): the fallback path uses resources_path; for HTTP URLs
        # download() re-derives the real connectors/ destination itself.
        paths.append(
            download(connector['name'], '%s/%s-%s.zip' % (resources_path, connector['name'], connector['version']),
                     url))
    return paths


def download_ui(ui, default_version):
    """Download the management web UI zip."""
    v = default_version if 'version' not in ui else ui['version']
    url = get_download_url("io.gravitee.management", ui['name'], v, "zip")
    return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)


def download_portal_ui(ui, default_version):
    """Download the portal web UI zip (3.x only)."""
    v = default_version if 'version' not in ui else ui['version']
    url = get_download_url("io.gravitee.portal", ui['name'], v, "zip")
    return download(ui['name'], '%s/%s-%s.zip' % (tmp_path, ui['name'], v), url)


def download_reporters(reporters):
    """Download every reporter zip (gravitee-elasticsearch is renamed)."""
    paths = []
    for reporter in reporters:
        name = "gravitee-reporter-elasticsearch" if "gravitee-elasticsearch" == reporter['name'] else reporter['name']
        url = get_download_url("io.gravitee.reporter", name, reporter['version'], "zip")
        paths.append(
            download(name, '%s/%s-%s.zip' % (reporters_path, name, reporter['version']), url))
    return paths


def download_repositories(repositories):
    """Download every repository zip; the http bridge yields client+server."""
    paths = []
    for repository in repositories:
        if repository['name'] != "gravitee-repository-gateway-bridge-http":
            name = "gravitee-repository-elasticsearch" if "gravitee-elasticsearch" == repository['name'] else repository['name']
            url = get_download_url("io.gravitee.repository", name, repository['version'], "zip")
            paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
        else:
            for name in ["gravitee-repository-gateway-bridge-http-client",
                         "gravitee-repository-gateway-bridge-http-server"]:
                url = get_download_url("io.gravitee.gateway", name, repository['version'], "zip")
                paths.append(download(name, '%s/%s-%s.zip' % (repositories_path, name, repository['version']), url))
    return paths


def prepare_gateway_bundle(gateway):
    """Unzip the gateway distribution and install its plugin set."""
    print("==================================")
    print("Prepare %s" % gateway)
    bundle_path = unzip([gateway])[0]
    print(" bundle_path: %s" % bundle_path)
    copy_files_into(policies_path, bundle_path + "plugins")
    copy_files_into(resources_path, bundle_path + "plugins")
    copy_files_into(repositories_path, bundle_path + "plugins", [".*gravitee-repository-elasticsearch.*"])
    copy_files_into(reporters_path, bundle_path + "plugins")
    copy_files_into(services_path, bundle_path + "plugins")
    copy_files_into(connectors_path, bundle_path + "plugins")
    os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)


def prepare_ui_bundle(ui):
    """Unzip a UI distribution (no plugins to install)."""
    print("==================================")
    print("Prepare %s" % ui)
    bundle_path = unzip([ui])[0]
    print(" bundle_path: %s" % bundle_path)


def prepare_mgmt_bundle(mgmt):
    """Unzip the management distribution and install its plugin set."""
    print("==================================")
    print("Prepare %s" % mgmt)
    bundle_path = unzip([mgmt])[0]
    print(" bundle_path: %s" % bundle_path)
    copy_files_into(policies_path, bundle_path + "plugins")
    copy_files_into(resources_path, bundle_path + "plugins")
    copy_files_into(fetchers_path, bundle_path + "plugins")
    copy_files_into(repositories_path, bundle_path + "plugins",
                    [".*gravitee-repository-ehcache.*",
                     ".*gravitee-repository-gateway-bridge-http-client.*",
                     ".*gravitee-repository-gateway-bridge-http-server.*"])
    copy_files_into(services_path, bundle_path + "plugins", [".*gravitee-gateway-services-ratelimit.*"])
    copy_files_into(connectors_path, bundle_path + "plugins")
    os.makedirs(bundle_path + "plugins/ext/repository-jdbc", exist_ok=True)


def prepare_policies(version):
    """Gather all policy and service zips into a dedicated dist folder."""
    print("==================================")
    print("Prepare Policies")
    dist_dir = get_dist_dir_name()
    policies_dist_path = "%s/%s/gravitee-policies-%s" % (tmp_path, dist_dir, version)
    os.makedirs(policies_dist_path, exist_ok=True)
    copy_files_into(policies_path, policies_dist_path)
    copy_files_into(services_path, policies_dist_path)


def package(version, release_json):
    """Zip each bundle dir and the full distribution; return archive paths."""
    print("==================================")
    print("Packaging")
    packages = []
    exclude_from_full_zip_list = [re.compile(".*graviteeio-policies.*")]
    dist_dir = get_dist_dir_name()
    full_zip_name = "graviteeio-full-%s" % version
    # how to create a symbolic link ?
    #if jdbc:
    #    full_zip_name = "graviteeio-full-jdbc-%s" % version
    full_zip_path = "%s/%s/%s.zip" % (tmp_path, dist_dir, full_zip_name)
    dirs = [os.path.join("%s/%s/" % (tmp_path, dist_dir), fn)
            for fn in next(os.walk("%s/%s/" % (tmp_path, dist_dir)))[1]]

    # add release.json
    jsonfile_name = "release.json"
    jsonfile_absname = os.path.join("%s/%s/%s" % (tmp_path, dist_dir, jsonfile_name))
    # fix: use a context manager so the file is closed even on write errors.
    with open(jsonfile_absname, "w") as jsonfile:
        jsonfile.write("%s" % json.dumps(release_json, indent=4))

    with zipfile.ZipFile(full_zip_path, "w", zipfile.ZIP_DEFLATED) as full_zip:
        print("Create %s" % full_zip_path)
        packages.append(full_zip_path)
        full_zip.write(jsonfile_absname, jsonfile_name)
        for d in dirs:
            with zipfile.ZipFile("%s.zip" % d, "w", zipfile.ZIP_DEFLATED) as bundle_zip:
                print("Create %s.zip" % d)
                packages.append("%s.zip" % d)
                dir_abs_path = os.path.abspath(d)
                dir_name = os.path.split(dir_abs_path)[1]
                for dirname, subdirs, files in os.walk(dir_abs_path):
                    exclude_from_full_zip = False
                    for pattern in exclude_from_full_zip_list:
                        if pattern.match(d):
                            exclude_from_full_zip = True
                            break
                    for filename in files:
                        absname = os.path.abspath(os.path.join(dirname, filename))
                        arcname = absname[len(dir_abs_path) - len(dir_name):]
                        bundle_zip.write(absname, arcname)
                        if exclude_from_full_zip is False:
                            full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
                    # keep empty directories in the archives as well
                    if len(files) == 0:
                        absname = os.path.abspath(dirname)
                        arcname = absname[len(dir_abs_path) - len(dir_name):]
                        bundle_zip.write(absname, arcname)
                        if exclude_from_full_zip is False:
                            full_zip.write(absname, "%s/%s" % (full_zip_name, arcname))
    return packages


def rename(string):
    """Map internal artifact names to their public graviteeio names."""
    return string.replace("gravitee", "graviteeio") \
        .replace("management-standalone", "management-api") \
        .replace("management-webui", "management-ui") \
        .replace("portal-webui", "portal-ui") \
        .replace("standalone-", "")


def clean_dir_names():
    """Rename every extracted bundle directory to its public name."""
    print("==================================")
    print("Clean directory names")
    dirs = [os.path.join("%s/%s/" % (tmp_path, get_dist_dir_name()), fn)
            for fn in next(os.walk("%s/%s/" % (tmp_path, get_dist_dir_name())))[1]]
    for d in dirs:
        os.rename(d, rename(d))


def response_pretty_print(r):
    """Dump an HTTP response for debugging and raise on error status."""
    print("###########################################################")
    print("STATUS %s" % r.status_code)
    print("HEADERS \n%s" % r.headers)
    print("RESPONSE \n%s" % r.text)
    print("###########################################################\n\n")
    r.raise_for_status()


def get_dist_dir_name():
    """Name of the directory (under tmp_path) receiving the distributions."""
    dist_dir = "dist"
    return dist_dir


def main():
    """Fetch the release manifest and build every bundle for it."""
    if is_latest_param:
        release_json_url = "https://raw.githubusercontent.com/gravitee-io/release/master/release.json"
    else:
        release_json_url = "https://raw.githubusercontent.com/gravitee-io/release/%s/release.json" % version_param
    print(release_json_url)
    release_json = requests.get(release_json_url)
    print(release_json)
    release_json = release_json.json()
    version = release_json['version']
    print("Create bundles for Gravitee.io v%s" % version)

    clean()

    v3 = int(version[0]) > 1
    if v3:
        portal_ui = download_portal_ui(get_component_by_name(release_json, "gravitee-portal-webui"), version)
        mgmt_api = download_managementV3_api(get_component_by_name(release_json, "gravitee-management-rest-api"),
                                             version)
    else:
        mgmt_api = download_management_api(get_component_by_name(release_json, "gravitee-management-rest-api"),
                                           version)
    ui = download_ui(get_component_by_name(release_json, "gravitee-management-webui"), version)
    gateway = download_gateway(get_component_by_name(release_json, "gravitee-gateway"), version)
    download_policies(get_policies(release_json))
    download_resources(get_resources(release_json))
    download_fetchers(get_fetchers(release_json))
    download_services(get_services(release_json))
    download_reporters(get_reporters(release_json))
    download_repositories(get_repositories(release_json))
    if int(version.replace(".", "").replace("-SNAPSHOT", "")) > 354:
        download_connectors(get_connectors(release_json))

    if v3:
        prepare_ui_bundle(portal_ui)
    prepare_gateway_bundle(gateway)
    prepare_ui_bundle(ui)
    prepare_mgmt_bundle(mgmt_api)
    prepare_policies(version)
    clean_dir_names()
    package(version, release_json)


# NOTE(review): this runs on import as well; consider guarding with
# `if __name__ == "__main__":` — kept as-is to preserve the module-level
# side effect callers of this script rely on.
main()
gravitee-io/jenkins-scripts
src/main/python/package_bundles.py
Python
apache-2.0
19,869
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Copyright 2010-2012 Asidev s.r.l.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from collections import namedtuple
import os
import shutil
import stat
import tempfile
import unittest
from aybu.manager.activity_log import ActivityLog
from aybu.manager.activity_log.fs import (mkdir,
                                          create,
                                          copy,
                                          mv,
                                          rm,
                                          rmdir,
                                          rmtree)
from aybu.manager.activity_log.exc import TransactionError
from aybu.manager.activity_log.template import render


class ActivityLogTests(unittest.TestCase):
    # Exercises ActivityLog's transactional filesystem operations:
    # each test verifies that commit applies and rollback undoes the
    # queued fs/template actions. All work happens inside a per-test
    # temporary directory.

    def setUp(self):
        # fresh scratch directory per test
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_create(self):
        # create() takes effect immediately and is undone by rollback.
        al = ActivityLog()
        # test rollback
        file_ = os.path.join(self.tempdir, 'test.txt')
        al.add(create, file_)
        self.assertTrue(os.path.exists(file_))
        al.rollback()
        self.assertFalse(os.path.exists(file_))

        # test successfull create
        al.add(create, file_)
        al.commit()
        self.assertTrue(os.path.exists(file_))

        # test unsuccessfull create
        # creating an already-existing file raises, leaving the file intact
        with self.assertRaises(OSError):
            al.add(create, file_)
        self.assertTrue(os.path.exists(file_))

    def test_transaction_status(self):
        # commit/rollback outside an open transaction must fail, and
        # double-commit is rejected.
        al = ActivityLog(autobegin=False)
        with self.assertRaises(TransactionError):
            al.commit()

        with self.assertRaises(TransactionError):
            al.rollback()

        al.begin()
        al.commit()
        with self.assertRaises(TransactionError):
            al.commit()

    def test_transaction(self):
        # A multi-step transaction rolls back (in reverse) and commits whole.
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        join = os.path.join

        def dostuff():
            al.add(mkdir, dir_)
            al.add(create, join(dir_, 'testfile.txt'), content="Test")
            al.add(copy, join(dir_, 'testfile.txt'), join(dir_, 'test2.txt'))

        dostuff()
        al.rollback()
        self.assertFalse(os.path.exists(join(dir_, 'test2.txt')))
        self.assertFalse(os.path.exists(join(dir_, 'testfile.txt')))
        self.assertFalse(os.path.exists(dir_))

        dostuff()
        al.commit()
        self.assertTrue(os.path.exists(dir_))
        self.assertTrue(os.path.exists(join(dir_, 'testfile.txt')))
        self.assertTrue(os.path.exists(join(dir_, 'test2.txt')))

    def test_failed_rollback(self):
        # Making the parent read-only prevents removal of the inner dir,
        # so rollback itself raises and leaves both directories behind.
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        inner_dir = os.path.join(dir_, 'inner')
        al.add(mkdir, dir_)
        al.add(mkdir, inner_dir)
        os.chmod(dir_, stat.S_IRUSR|stat.S_IXUSR)
        with self.assertRaises(OSError):
            al.rollback()
        self.assertTrue(os.path.exists(dir_))
        self.assertTrue(os.path.exists(inner_dir))
        # restore permissions so tearDown can clean up
        os.chmod(dir_, stat.S_IRWXU | stat.S_IRWXG)

    def test_error_on_exists(self):
        # With error_on_exists=False, mkdir on an existing dir is a no-op
        # and rollback must not remove the pre-existing directory.
        al = ActivityLog()
        dir_ = os.path.join(self.tempdir, 'test')
        al.add(mkdir, dir_)
        al.commit()
        al.add(mkdir, dir_, error_on_exists=False)
        al.rollback()
        self.assertTrue(os.path.exists(dir_))

    def test_render(self):
        # render() writes a template immediately (or lazily when deferred=True)
        # and removes the rendered file on rollback.
        al = ActivityLog()
        # minimal stand-in for a manager Instance object; only the attributes
        # read by the template are provided
        instance = namedtuple('Instance', ['paths', 'environment'])(
            paths=namedtuple('Paths', ['pyramid_config', 'alembic_config'])(
                pyramid_config='MYDUMMYCONFIG',
                alembic_config='MYDUMMYCONFIG'
            ),
            environment= namedtuple('Environment',
                                    ['settings', 'smtp_config',
                                     'uwsgi_config', 'os_config'])(
                smtp_config=None,
                uwsgi_config=None,
                os_config=None,
                settings=None
            )
        )
        template_name = 'main.py.mako'
        target = os.path.join(self.tempdir, 'main.py')
        al.add(render, template_name, target, instance=instance)
        self.assertTrue(os.path.exists(target))
        with open(target) as f:
            self.assertIn('MYDUMMYCONFIG', f.read())
        al.rollback()
        self.assertFalse(os.path.exists(target))

        # deferred rendering only materializes the file at commit time
        al.add(render, template_name, target, deferred=True,
               instance=instance)
        self.assertFalse(os.path.exists(target))
        al.commit()
        self.assertTrue(os.path.exists(target))

    def test_delete(self):
        # rm / rmdir / rmtree take effect immediately and are restored by
        # rollback; rm on a missing file raises unless error_on_not_exists
        # is disabled.
        al = ActivityLog()
        testfile = os.path.join(self.tempdir, 'test.txt')
        with self.assertRaises(OSError):
            al.add(rm, testfile)

        al.add(rm, testfile, error_on_not_exists=False)
        al.commit()

        with open(testfile, "w") as f:
            f.write("###")

        al.add(rm, testfile)
        self.assertFalse(os.path.exists(testfile))
        al.rollback()
        self.assertTrue(os.path.exists(testfile))

        al.add(rm, testfile)
        self.assertFalse(os.path.exists(testfile))
        al.commit()
        self.assertFalse(os.path.exists(testfile))

        testdir = os.path.join(self.tempdir, 'test')
        al.add(mkdir, testdir)
        al.commit()

        # test rmdir
        al.add(rmdir, testdir)
        self.assertFalse(os.path.exists(testdir))
        al.rollback()
        self.assertTrue(os.path.exists(testdir))
        al.add(rmdir, testdir)
        al.commit()
        self.assertFalse(os.path.exists(testdir))

        # test rmtree
        al.add(mkdir, testdir)
        inner = os.path.join(testdir, 'inner')
        al.add(mkdir, inner)
        al.commit()

        al.add(rmtree, testdir)
        self.assertFalse(os.path.exists(testdir))
        al.rollback()
        self.assertTrue(os.path.exists(testdir))
        al.add(rmtree, testdir)
        al.commit()
        self.assertFalse(os.path.exists(testdir))

    def test_mv(self):
        # mv refuses an existing destination, swaps back on rollback,
        # and sticks on commit.
        al = ActivityLog()
        source = os.path.join(self.tempdir, "source")
        destination = os.path.join(self.tempdir, "destination")
        os.mkdir(source)
        os.mkdir(destination)

        with self.assertRaises(OSError):
            al.add(mv, source, destination)

        shutil.rmtree(destination)
        al.add(mv, source, destination)
        self.assertFalse(os.path.exists(source))
        self.assertTrue(os.path.exists(destination))
        al.rollback()
        self.assertTrue(os.path.exists(source))
        self.assertFalse(os.path.exists(destination))

        al.add(mv, source, destination)
        al.commit()
        self.assertFalse(os.path.exists(source))
        self.assertTrue(os.path.exists(destination))
asidev/aybu-manager
tests/test_activity_log.py
Python
apache-2.0
7,363
"""Validate requirements."""
from __future__ import annotations

from collections import deque
import json
import operator
import os
import re
import subprocess
import sys

from awesomeversion import AwesomeVersion, AwesomeVersionStrategy
from stdlib_list import stdlib_list
from tqdm import tqdm

from homeassistant.const import REQUIRED_PYTHON_VER
import homeassistant.util.package as pkg_util
from script.gen_requirements_all import COMMENT_REQUIREMENTS, normalize_package_name

from .model import Config, Integration

# Package names (normalized: lower-case, "-" separators) that are known to be
# commented out in requirements_all and must be skipped during validation.
IGNORE_PACKAGES = {
    commented.lower().replace("_", "-") for commented in COMMENT_REQUIREMENTS
}
# Splits "pkg[extra]==1.2.3" into (name, comparator, version).
PACKAGE_REGEX = re.compile(
    r"^(?:--.+\s)?([-_\.\w\d\[\]]+)(==|>=|<=|~=|!=|<|>|===)*(.*)$"
)
# Splits a pip requirement line into (install args, requirement).
PIP_REGEX = re.compile(r"^(--.+\s)?([-_\.\w\d]+.*(?:==|>=|<=|~=|!=|<|>|===)?.*$)")
SUPPORTED_PYTHON_TUPLES = [
    REQUIRED_PYTHON_VER[:2],
    tuple(map(operator.add, REQUIRED_PYTHON_VER, (0, 1, 0)))[:2],
]
SUPPORTED_PYTHON_VERSIONS = [
    ".".join(map(str, version_tuple)) for version_tuple in SUPPORTED_PYTHON_TUPLES
]
# Standard-library module names per supported Python version; a requirement
# shadowing one of these is an error.
STD_LIBS = {version: set(stdlib_list(version)) for version in SUPPORTED_PYTHON_VERSIONS}

# Lazily populated output of `pipdeptree --json`; see ensure_cache().
PIPDEPTREE_CACHE = None

IGNORE_VIOLATIONS = {
    # Still has standard library requirements.
    "acmeda",
    "blink",
    "ezviz",
    "hdmi_cec",
    "juicenet",
    "lupusec",
    "rainbird",
    "slide",
    "suez_water",
}


def validate(integrations: dict[str, Integration], config: Config):
    """Handle requirements for integrations.

    With ``config.requirements`` unset only the requirement *format* is
    checked; otherwise each integration's requirements are installed and
    validated against the dependency tree.
    """
    # Check if we are doing format-only validation.
    if not config.requirements:
        for integration in integrations.values():
            validate_requirements_format(integration)
        return

    ensure_cache()

    # check for incompatible requirements

    disable_tqdm = config.specific_integrations or os.environ.get("CI", False)

    for integration in tqdm(integrations.values(), disable=disable_tqdm):
        if not integration.manifest:
            continue

        validate_requirements(integration)


def validate_requirements_format(integration: Integration) -> bool:
    """Validate requirements format.

    Records errors on the integration and returns True when the
    requirements list is well formed.
    """
    start_errors = len(integration.errors)

    for req in integration.requirements:
        if " " in req:
            integration.add_error(
                "requirements",
                f'Requirement "{req}" contains a space',
            )
            continue

        pkg, sep, version = PACKAGE_REGEX.match(req).groups()

        # Core integrations must pin exact versions for reproducible builds.
        if integration.core and sep != "==":
            integration.add_error(
                "requirements",
                f'Requirement {req} need to be pinned "<pkg name>==<version>".',
            )
            continue

        if (
            version
            and AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN
        ):
            integration.add_error(
                "requirements",
                f"Unable to parse package version ({version}) for {pkg}.",
            )
            continue

    return len(integration.errors) == start_errors


def validate_requirements(integration: Integration):
    """Validate requirements."""
    if not validate_requirements_format(integration):
        return

    # Some integrations have not been fixed yet so are allowed to have violations.
    if integration.domain in IGNORE_VIOLATIONS:
        return

    integration_requirements = set()
    integration_packages = set()
    for req in integration.requirements:
        package = normalize_package_name(req)
        if not package:
            integration.add_error(
                "requirements",
                f"Failed to normalize package name from requirement {req}",
            )
            return
        # BUG FIX: the original wrote
        #   if (package == ign for ign in IGNORE_PACKAGES):
        # which builds a generator object — always truthy — so *every*
        # requirement was skipped and nothing was ever validated. A plain
        # membership test is what was intended.
        if package in IGNORE_PACKAGES:
            continue
        integration_requirements.add(req)
        integration_packages.add(package)

    if integration.disabled:
        return

    install_ok = install_requirements(integration, integration_requirements)

    if not install_ok:
        return

    all_integration_requirements = get_requirements(integration, integration_packages)

    if integration_requirements and not all_integration_requirements:
        integration.add_error(
            "requirements",
            f"Failed to resolve requirements {integration_requirements}",
        )
        return

    # Check for requirements incompatible with standard library.
    for version, std_libs in STD_LIBS.items():
        for req in all_integration_requirements:
            if req in std_libs:
                integration.add_error(
                    "requirements",
                    f"Package {req} is not compatible with Python {version} standard library",
                )


def ensure_cache():
    """Ensure we have a cache of pipdeptree.

    {
        "flake8-docstring": {
            "key": "flake8-docstrings",
            "package_name": "flake8-docstrings",
            "installed_version": "1.5.0"
            "dependencies": {"flake8"}
        }
    }
    """
    global PIPDEPTREE_CACHE

    if PIPDEPTREE_CACHE is not None:
        return

    cache = {}

    for item in json.loads(
        subprocess.run(
            ["pipdeptree", "-w", "silence", "--json"],
            check=True,
            capture_output=True,
            text=True,
        ).stdout
    ):
        cache[item["package"]["key"]] = {
            **item["package"],
            "dependencies": {dep["key"] for dep in item["dependencies"]},
        }

    PIPDEPTREE_CACHE = cache


def get_requirements(integration: Integration, packages: set[str]) -> set[str]:
    """Return all (recursively) requirements for an integration."""
    ensure_cache()

    all_requirements = set()

    # Breadth-first walk over the dependency graph.
    to_check = deque(packages)

    while to_check:
        package = to_check.popleft()

        if package in all_requirements:
            continue

        all_requirements.add(package)

        item = PIPDEPTREE_CACHE.get(package)

        if item is None:
            # Only warn if direct dependencies could not be resolved
            if package in packages:
                integration.add_error(
                    "requirements", f"Failed to resolve requirements for {package}"
                )
            continue

        to_check.extend(item["dependencies"])

    return all_requirements


def install_requirements(integration: Integration, requirements: set[str]) -> bool:
    """Install integration requirements.

    Return True if successful.
    """
    global PIPDEPTREE_CACHE

    ensure_cache()

    for req in requirements:
        match = PIP_REGEX.search(req)

        if not match:
            integration.add_error(
                "requirements",
                f"Failed to parse requirement {req} before installation",
            )
            continue

        install_args = match.group(1)
        requirement_arg = match.group(2)

        is_installed = False

        normalized = normalize_package_name(requirement_arg)

        # For pinned requirements we can answer "is it installed?" straight
        # from the pipdeptree cache without invoking pip.
        if normalized and "==" in requirement_arg:
            ver = requirement_arg.split("==")[-1]
            item = PIPDEPTREE_CACHE.get(normalized)
            is_installed = item and item["installed_version"] == ver

        if not is_installed:
            try:
                is_installed = pkg_util.is_installed(req)
            except ValueError:
                is_installed = False

        if is_installed:
            continue

        args = [sys.executable, "-m", "pip", "install", "--quiet"]
        if install_args:
            args.append(install_args)
        args.append(requirement_arg)
        try:
            result = subprocess.run(args, check=True, capture_output=True, text=True)
        except subprocess.SubprocessError:
            integration.add_error(
                "requirements",
                f"Requirement {req} failed to install",
            )
        else:
            # Clear the pipdeptree cache if something got installed
            if "Successfully installed" in result.stdout:
                PIPDEPTREE_CACHE = None

    if integration.errors:
        return False

    return True
aronsky/home-assistant
script/hassfest/requirements.py
Python
apache-2.0
8,128
"""
Django settings for cui project.

Generated by 'django-admin startproject' using Django 1.10.2.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%g1*8#gej6qsrz@*psc1t=#nh)ym#$)i=rio)eqk8im3)iyi7-'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Websocket support (django-channels).
    'channels',
    # Project apps.
    'home.apps.HomeConfig',
    'data.apps.DataConfig',
    # Bootstrap 3 template tags.
    'bootstrap3',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'cui.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cui.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'es-ar'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'

# Auth redirect targets: send users to the data app after login and back
# to the login page after logout.
LOGIN_REDIRECT_URL = "/data/"
LOGIN_URL = "/home/login/"
LOGOUT_REDIRECT_URL = "/home/login/"
ignacionf/cui
cui/settings.py
Python
apache-2.0
3,265
from sevenbridges.meta.resource import Resource
from sevenbridges.meta.fields import StringField


class Breakdown(Resource):
    """
    Breakdown resource contains price breakdown by storage and computation.
    """
    # Read-only cost components reported by the API.
    storage = StringField(read_only=True)
    computation = StringField(read_only=True)
    data_transfer = StringField(read_only=True)

    def __str__(self):
        # Assemble the always-present fields first; data_transfer is only
        # included when the API reported it.
        fields = [
            f'storage={self.storage}',
            f'computation={self.computation}',
        ]
        if self.data_transfer:
            fields.append(f'data_transfer={self.data_transfer}')
        return '<Breakdown: ' + ', '.join(fields) + '>'
sbg/sevenbridges-python
sevenbridges/models/compound/price_breakdown.py
Python
apache-2.0
733
from nltk.tree import Tree
from np_shallow_neural_classifier import ShallowNeuralClassifier
from np_rnn_classifier import RNNClassifier
from np_autoencoder import Autoencoder
from np_tree_nn import TreeNN
import numpy as np
import pytest
import utils

__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"


# Deterministic parameters so the numeric gradient comparison is stable.
utils.fix_random_seeds()


class GradientCheckError(Exception):
    """Raised if a gradient check fails."""


@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_shallow_neural_classifier_gradients(hidden_activation, d_hidden_activation):
    """Backprop gradients of the shallow classifier match numeric estimates."""
    model = ShallowNeuralClassifier(
        max_iter=10,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    # A tiny dataset so that we can run `fit` and set all the model
    # parameters:
    X = utils.randmatrix(5, 2)
    y = np.random.choice((0, 1), 5)
    model.fit(X, y)
    # Use the first example for the check:
    ex = X[0]
    label = model._onehot_encode([y[0]])[0]
    # Forward and backward to get the gradients:
    hidden, pred = model.forward_propagation(ex)
    d_W_hy, d_b_hy, d_W_xh, d_b_xh = model.backward_propagation(
        hidden, pred, ex, label)
    # Model parameters to check:
    param_pairs = (
        ('W_hy', d_W_hy),
        ('b_hy', d_b_hy),
        ('W_xh', d_W_xh),
        ('b_xh', d_b_xh)
    )
    gradient_check(param_pairs, model, ex, label)


@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_rnn_classifier(hidden_activation, d_hidden_activation):
    """Backprop gradients of the RNN classifier match numeric estimates."""
    # A tiny dataset so that we can run `fit` and set all the model
    # parameters:
    vocab = ['a', 'b', '$UNK']
    data = [
        [list('ab'), 'good'],
        [list('aab'), 'good'],
        [list('abb'), 'good']]
    model = RNNClassifier(
        vocab,
        max_iter=10,
        hidden_dim=2,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    X, y = zip(*data)
    model.fit(X, y)
    # Use the first example for the check:
    ex = X[0]
    label = model._onehot_encode([y[0]])[0]
    # Forward and backward to get the gradients:
    hidden, pred = model.forward_propagation(ex)
    d_W_hy, d_b, d_W_hh, d_W_xh = model.backward_propagation(
        hidden, pred, ex, label)
    # Model parameters to check:
    param_pairs = (
        ('W_xh', d_W_xh),
        ('W_hh', d_W_hh),
        ('W_hy', d_W_hy),
        ('b', d_b)
    )
    gradient_check(param_pairs, model, ex, label)


@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_autoencoder(hidden_activation, d_hidden_activation):
    """Backprop gradients of the autoencoder match numeric estimates."""
    model = Autoencoder(
        max_iter=10,
        hidden_dim=2,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    # A tiny dataset so that we can run `fit` and set all the model
    # parameters:
    X = utils.randmatrix(5, 5)
    model.fit(X)
    # Use the first example for the check (an autoencoder reconstructs its
    # own input, so the example is also the label):
    ex = X[0]
    label = X[0]
    # Forward and backward to get the gradients:
    hidden, pred = model.forward_propagation(ex)
    d_W_hy, d_b_hy, d_W_xh, d_b_xh = model.backward_propagation(
        hidden, pred, ex, label)
    # Model parameters to check:
    param_pairs = (
        ('W_hy', d_W_hy),
        ('b_hy', d_b_hy),
        ('W_xh', d_W_xh),
        ('b_xh', d_b_xh)
    )
    gradient_check(param_pairs, model, ex, label)


@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_tree_nn(hidden_activation, d_hidden_activation):
    """Backprop gradients of the tree-structured NN match numeric estimates."""
    # A tiny dataset so that we can run `fit` and set all the model
    # parameters:
    vocab = ["1", "+", "2"]
    X = [
        "(even (odd 1) (neutral (neutral +) (odd 1)))",
        "(odd (odd 1) (neutral (neutral +) (even 2)))"]
    X = [Tree.fromstring(ex) for ex in X]
    y = [tree.label() for tree in X]
    model = TreeNN(
        vocab,
        max_iter=10,
        hidden_dim=5,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    model.fit(X, y)
    # Use the first example for the check:
    ex = X[0]
    label = model._onehot_encode([ex.label()])[0]
    # Forward and backward to get the gradients:
    hidden, pred = model.forward_propagation(ex)
    d_W_hy, d_b_y, d_W, d_b = model.backward_propagation(
        hidden, pred, ex, label)
    # Model parameters to check:
    param_pairs = (
        ('W_hy', d_W_hy),
        ('b_y', d_b_y),
        ('W', d_W),
        ('b', d_b)
    )
    gradient_check(param_pairs, model, ex, label)


def gradient_check(param_pairs, model, ex, label, epsilon=0.0001, threshold=0.001):
    """
    Numerical gradient check following the method described here:

    http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization

    Parameters
    ----------
    param_pairs : list of str, np.aray pairs
        In each pair, the first is the name of the parameter to check,
        and the second is its purported derivatives. We use the name as
        the first pair so that we can raise an informative error message
        in the case of a failure.

    model : trained model instance
        This should have attributes for all of the parameters named in
        `param_pairs`, and it must have methods `forward_propagation`,
        and `get_error`.

    ex : an example that `model` can process

    label : a label vector that `model` can learn from directly

    epsilon : float
        The small constant by which the parameter values are changed.

    threshold : float
        Tolerance for raising an error.

    Raises
    ------
    GradientCheckError

    """
    for param_name, d_params in param_pairs:
        params = getattr(model, param_name)
        # This iterator will allow is to cycle over all the values for
        # arrays of any dimension:
        iterator = np.nditer(params, flags=['multi_index'], op_flags=['readwrite'])
        while not iterator.finished:
            idx = iterator.multi_index
            actual = params[idx]
            # Central-difference estimate: perturb the parameter by +/-
            # epsilon, re-run the forward pass, and compare errors.
            params[idx] = actual + epsilon
            _, pred = model.forward_propagation(ex)
            grad_pos = model.get_error(pred, label)
            params[idx] = actual - epsilon
            _, pred = model.forward_propagation(ex)
            grad_neg = model.get_error(pred, label)
            grad_est = (grad_pos - grad_neg) / (epsilon * 2.0)
            # Restore the parameter before checking the next index.
            params[idx] = actual
            grad_bp = d_params[idx]
            # Relative error to control for differences in proportion
            # across parameter values:
            err = np.abs(grad_bp - grad_est) / (np.abs(grad_bp) + np.abs(grad_est))
            if err >= threshold:
                raise GradientCheckError(
                    "Gradient check error for {} at {}: error is {}".format(
                        param_name, idx, err))
            iterator.iternext()
cgpotts/cs224u
test/test_np_model_gradients.py
Python
apache-2.0
7,151
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the Policy class for mixed precision training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import six from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module from tensorflow.python.keras.utils import generic_utils from tensorflow.python.platform import tf_logging from tensorflow.python.training.experimental import mixed_precision_global_state from tensorflow.python.util.tf_export import keras_export # Default value of certain arguments, indicating the default behavior for # that argument should be used. USE_DEFAULT = 'USE_DEFAULT' @keras_export('keras.mixed_precision.experimental.Policy') class Policy(object): """A dtype policy for a Keras layer. A dtype policy determines dtype-related aspects of a layer, such as its computation and variable dtypes. Each layer has a policy. Policies can be passed to the `dtype` argument of layer constructors, or a global policy can be set with `tf.keras.mixed_precision.experimental.set_policy`. 
A layer will default to the global policy if no policy is passed to it's constructor. For many models, each layer's policy will have the same compute dtype and variable dtype, which will typically be float32. In this case, we refer to the singular dtype as the layer's dtype, which can be queried by the property `tf.keras.layers.Layer.dtype`. When mixed precision training is used, most layers will instead have a float16 or bfloat16 compute dtype and a float32 variable dtype, and so the layer does not have a single dtype. See [this link](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for more information on mixed precision training. When the variable dtype does not match the compute dtype, variables will be automatically casted to the compute dtype to avoid type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute dtype. Certain policies also have a `tf.mixed_precision.experimental.LossScale` instance, which is used by `tf.keras.Model`s to performance loss scaling. Loss scaling is a technique used with mixed precision to avoid numerical underflow in float16 gradients. Loss scaling is only done by Models in `Model.fit`, `Model.train_on_batch`, and similar methods. Layers which are not Models ignore the loss scale. Policies are constructed by passing a string to the constructor, e.g. `tf.keras.mixed_precision.experimental.Policy('float32')`. The string determines the compute and variable dtypes. It can be one of the following: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. No loss scaling is done by default. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. These policies are used for mixed precision training. With 'mixed_float16', a dynamic loss scale is used by default. 'mixed_bfloat16' does no loss scaling by default, as loss scaling is unnecessary with bfloat16. 
### How to use mixed precision in a Keras model To use mixed precision in a Keras model, the `'mixed_float16'` or `'mixed_bfloat16'` policy can be used. `tf.keras.mixed_precision.experimental.set_policy` can be used to set the default policy for layers if no policy is passed to them. For example: ```python tf.keras.mixed_precision.experimental.set_policy('mixed_float16') model = tf.keras.models.Sequential([ tf.keras.layers.Input((100,)), # Dense layers use global policy of 'mixed_float16', which does # computations in float16 while keeping variables in float32. tf.keras.layers.Dense(10), tf.keras.layers.Dense(10), # Softmax should be done in float32 for numeric stability. We pass # dtype='float32' to use float32 instead of the global policy. tf.keras.layers.Activation('softmax', dtype='float32') ]) model.compile(...) model.fit(...) # Train `model` ``` Alternatively, the policy can be passed to individual layers instead of setting the global policy with `set_policy`: ```python policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') model = tf.keras.models.Sequential([ tf.keras.layers.Input((100,)), tf.keras.layers.Dense(10, dtype=policy), tf.keras.layers.Dense(10, dtype=policy), # Softmax should be done in float32 for numeric stability. tf.keras.layers.Activation('softmax', dtype='float32') ]) model.compile(...) model.fit(...) # Train `model` ``` Note the `'mixed_float16'` policy will apply loss scaling by default in `Model.fit`, `Model.train_on_batch`, and other training methods. If no such method is used (e.g., a custom training loop is used) and `'mixed_float16'` is used, the loss scale must be manually applied. See `tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For `'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be manually applied. ### How to use float64 in a Keras model Using float64 is similar to mixed precision. 
Either the global policy can be set to float64, or `dtype='float64'` can be passed to individual layers. For example, to set the global policy: ```python tf.keras.mixed_precision.experimental.set_policy('float64') model = tf.keras.models.Sequential([ tf.keras.layers.Input((100,)), # All layers use global policy of 'float64', which does computations and # creates variables in float64. tf.keras.layers.Dense(10), tf.keras.layers.Dense(10), tf.keras.layers.Activation('softmax') ]) model.compile(...) model.fit(...) # Train `model` ``` ### How a layer uses its policy's compute dtype A layer will cast its inputs to its compute dtype in TensorFlow 2. For example: ```python x = tf.ones((4, 4, 4, 4), dtype='float64') # `layer`'s policy defaults to float32. layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) # `layer` casts it's inputs to its compute dtype, which is float32, and does # computations in float32. y = layer(x) print(y.dtype) # float32 ``` Currently, only tensors in the first argument to the layer's `call` method are casted. For example: ```python class MyLayer(tf.keras.layers.Layer): # Bug! `b` will not be casted. def call(self, a, b): return a + 1., b + 1. a = tf.constant(1., dtype="float32") b = tf.constant(1., dtype="float32") layer = MyLayer(dtype="float64") x, y = layer(a, b) print(x.dtype) # float64 print(y.dtype) # float32. Not casted since `b` was not passed to first input ``` It is recommended to accept tensors only in the first argument. This way, all tensors are casted to the layer's compute dtype. `MyLayer` should therefore be written as: ```python class MyLayer(tf.keras.layers.Layer): # Now, all tensor inputs will be casted. def call(self, inputs): a, b = inputs return a + 1., b + 1. a = tf.constant(1., dtype="float32") b = tf.constant(1., dtype="float32") layer = MyLayer(dtype="float64") x, y = layer((a, b)) print(x.dtype) # float64 print(y.dtype) # float64. 
``` Other arguments are not automatically casted for technical reasons, but this may change in a future minor release. A layer subclass can prevent its inputs from being autocasted by passing `autocast=False` to the layer constructor. For example: ```python class NonAutoCastingLayer(tf.keras.layers.Layer): def __init__(self, **kwargs): kwargs['autocast'] = False super(NonAutoCastingLayer, self).__init__(**kwargs) def call(self, inp): return inp x = tf.ones((4, 4, 4, 4), dtype='float32') layer = NonAutoCastingLayer(dtype='float64') y = layer(x) # MyLayer will not cast inputs to it's compute dtype of float32 print(y.dtype) # float32 ``` ### The deprecated "infer" policy In addition to a dtype or "<dtype>_with_float32_vars", a policy can also be "infer". This Policy is deprecated, and it is not recommended. When a layer has an infer policy, it will infer the computation and variable dtype from the first input the first time the layer is called. Once the layer is called for the first time, the layer's policy will change to the dtype of the first input. Similarly to "infer", there is a deprecated "infer_with_float32_vars" policy that infers the compute dtype, but not the variable dtype. Once a layer with an "infer_with_float32_vars" policy is called for the first time, the layer's policy will change to "<dtype>_with_float32_vars", where <dtype> is the dtype of the first input. These policies force variables in float32. Warning: Policies ending in "_with_float32_vars" will be removed in TensorFlow 2.1. Please use "mixed_float16" or "mixed_bfloat16" instead. In TensorFlow 1, only the "infer" and "infer_with_float32_vars" policies are available. """ # TODO(reedwm): Replace link in above docstring with a version that is more # TensorFlow-specific, and that also mentions bfloat16. # If True, warn when a policy is created whose name ends in # "_with_float32_vars". 
We always want to warn when a user creates such a # policy, but when the TensorFlow creates a policy, it suppresses the warning # by setting this to False when creating the policy. _warn_about_float32_vars = True def __init__(self, name, loss_scale=USE_DEFAULT): """Constructs the policy. The `name` argument determines the compute and variable dtype, and has no additional effect on the Policy. The compute and variable dtypes can only be specified through `name`, and cannot be specified directly. Args: name: A string. Can be one of the following values: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. With 'mixed_float16', a dynamic loss scale is used. These policies are used for mixed precision training. * 'infer' (deprecated): Infer the compute and variable dtype from the input dtype. loss_scale: A `tf.mixed_precision.experimental.LossScale`, or a value convertible to one such as "dynamic". Defaults to using no loss scaling unless `name` is "mixed_float16", in which case this defaults to "dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and it is only used during `Model.fit`, `Model.train_on_batch`, and other similar methods. """ if isinstance(name, dtypes.DType): raise TypeError("'name' must be a string, not a DType. " "Instead, pass DType.name. Got: %s" % (name.name,)) elif not isinstance(name, six.string_types): raise TypeError("'name' must be a string, but got: %s" % (name,)) if name == 'infer_float32_vars': # For backwards compatibility. TODO(reedwm): Remove this. name = 'infer_with_float32_vars' if name == 'float32_with_float32_vars': # Doesn't affect correctness, but causes "float32" instead of # "float32_with_float32_vars" to be printed in __repr__. 
name = 'float32' self._name = name self._compute_dtype, self._variable_dtype = self._parse_name(name) if name.endswith('_with_float32_vars') and self._warn_about_float32_vars: warning = ("WARNING: The '%s' policy is deprecated and will be removed " "in TensorFlow 2.1." % name) if name == 'infer_with_float32_vars': warning += (" Please use the 'mixed_float16' or 'mixed_bfloat16' " "policy instead.") elif name == 'float16_with_float32_vars': warning += " Please use the 'mixed_float16' policy instead." elif name == 'bfloat16_with_float32_vars': warning += " Please use the 'mixed_bfloat16' policy instead." tf_logging.warn(warning) if loss_scale == USE_DEFAULT: loss_scale = 'dynamic' if name == 'mixed_float16' else None self._using_default_loss_scale = True else: self._using_default_loss_scale = False if loss_scale and self._compute_dtype not in (None, 'float16'): tf_logging.warn('Creating a Policy with a loss scale is only useful for ' 'float16 policies. You passed loss_scale=%r for policy ' '%s. Consider not passing any loss_scale instead.' % (loss_scale, name)) self._loss_scale = keras_loss_scale_module.get(loss_scale) def _parse_name(self, name): """Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair. """ if name == 'mixed_float16': return 'float16', 'float32' elif name == 'mixed_bfloat16': return 'bfloat16', 'float32' if name.endswith('_with_float32_vars'): base_name = name[:-len('_with_float32_vars')] float32_vars = True else: base_name = name float32_vars = False if base_name == 'infer': base_dtype = None else: try: base_dtype = dtypes.as_dtype(base_name).name except TypeError: error = ("Cannot convert value %s to a mixed precision Policy. " "Valid policies include include 'mixed_float16', " "'mixed_bfloat16', and the name of any dtype such as " "'float32'." 
% (name,)) if float32_vars: error += (' The value %s ends with _with_float32_vars, but %s cannot ' 'be converted to a DType' % (name, base_name)) # six.raise_from supresses the original TypeError from being raised six.raise_from(ValueError(error), None) if float32_vars: return base_dtype, 'float32' else: return base_dtype, base_dtype @property def variable_dtype(self): """The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicit chooses a different dtype. If this is different than `Policy.compute_dtype` and both are non-None, Layers will cast variables to the compute dtype to avoid type errors. Returns: The variable dtype of this policy, or None if the variable dtype should be inferred from the inputs. """ return self._variable_dtype @property def compute_dtype(self): """The compute dtype of this policy. This is the dtype layers will do their computations in. Note that even if the compute dtype is float16 or bfloat16, hardware devices may not do individual adds, multiplies, and other fundamental operations in [b]float16, but instead may do some of them in float32 for numeric stability. The compute dtype is the dtype of the inputs and outputs of the TensorFlow ops that the layer executes. Internally, many TensorFlow ops will do certain internal calculations in float32, or some other device-internal intermediate format with higher precision than [b]float16, to increase numeric stability. For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a float16 compute dtype, will pass float16 inputs to tf.matmul. But, tf.matmul will do use float32 intermediate math. The performance benefit of float16 is still apparent, due to increased memory bandwidth and the fact modern GPUs have specialized hardware for computing matmuls on float16 while still keeping intermediate computations in float32. Returns: The variable dtype of this policy, or None if the variable dtype should be inferred from the inputs. 
""" return self._compute_dtype @property def should_cast_variables(self): """Returns True if variables should be casted. This is true if the variable dtype is not the same as the compute dtype. Returns: True, if variables should be casted. """ return self.variable_dtype != self.compute_dtype @property def loss_scale(self): """Returns the loss scale of this Policy. Returns: A `tf.mixed_precision.experimental.LossScale`, or None. """ return self._loss_scale @property def name(self): """Returns the name of this policy.""" return self._name def __repr__(self): return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale) def get_config(self): config = { 'name': self.name } if not self._using_default_loss_scale: # We only include the loss scale if the default loss scale is not used. # This allows us to change the loss scale config format without breaking # users who use the default loss scale. config['loss_scale'] = keras_loss_scale_module.serialize(self.loss_scale) return config @classmethod def from_config(cls, config, custom_objects=None): if 'loss_scale' in config and isinstance(config['loss_scale'], dict): config = config.copy() config['loss_scale'] = keras_loss_scale_module.deserialize( config['loss_scale'], custom_objects=custom_objects) return cls(**config) def with_input_dtype(policy, dtype): """Copies "infer" `policy`, adding `dtype` to it. Policy must be "infer" or "infer_float32_vars" (i.e., has no compute dtype). Returns a new policy with compute dtype `dtype`. The returned policy's variable dtype is also `dtype` if `policy` is "infer", and is `float32` if `policy` is "infer_with_float32_vars". Args: policy: An "infer" or "infer_float32_vars" policy dtype: The dtype of an input to a layer. Returns: A new policy copied from `policy`, but with compute dtype and maybe variable_dtype set to `dtype`. 
""" assert not policy.compute_dtype dtype = dtypes.as_dtype(dtype).name if policy.variable_dtype is None: return Policy(dtype) else: # Policies without a compute dtype are either "infer" or # "infer_with_float32_vars", so the variable_dtype must be float32 here. assert policy.variable_dtype == 'float32' try: Policy._warn_about_float32_vars = False # pylint: disable=protected-access return Policy(dtype + '_with_float32_vars') finally: Policy._warn_about_float32_vars = True # pylint: disable=protected-access # The current global policy in effect. If None, it means the current value of # floatx should be used as the policy if the V2 dtype behavior is enabled, # or "infer" otherwise. # TODO(reedwm): Make this thread local? _global_policy = None @keras_export('keras.mixed_precision.experimental.global_policy') def global_policy(): """Returns the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no policy has been set with `keras.mixed_precision.experimental.set_policy`, this will return a policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults to float32), or an "infer" policy in TensorFlow 1. See `keras.mixed_precision.experimental.Policy` for more information. Returns: The global Policy. """ if _global_policy is None: if base_layer_utils.v2_dtype_behavior_enabled(): return Policy(backend.floatx()) else: return Policy('infer') return _global_policy def policy_defaults_to_floatx(): """Returns True if `global_policy()` will use the current value of floatx.""" return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled() def _check_if_mixed_precision_graph_rewrite_is_enabled(): # TODO(reedwm): Update this comment once the Keras API is complete. 
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled: raise ValueError( 'The mixed precision policy cannot be set, because the mixed ' 'precision graph rewrite has already been enabled.\n' 'At most, one of the following functions can be called:\n\n' ' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() ' '(You called this first)\n' ' 2. tf.keras.mixed_precision.experimental.set_policy() (You called ' 'this second)\n\n' 'You called both functions, which is an error, because both functions ' 'enable you to use mixed precision. The first function enables mixed ' 'precision in the graph with a graph rewrite. However it is currently ' 'not very customizable, and does not support eager. The second ' 'function is for Keras layers, but is not yet fully complete.') @keras_export('keras.mixed_precision.experimental.set_policy') def set_policy(policy): """Sets the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no global policy is set, layers will instead default to a Policy constructed from `tf.keras.backend.floatx()` in TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy. See `keras.mixed_precision.experimental.Policy` for more information. Args: policy: A Policy, or a string that will be converted to a Policy.. """ global _global_policy _check_if_mixed_precision_graph_rewrite_is_enabled() if policy is not None and not isinstance(policy, Policy): policy = Policy(policy) if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and policy.compute_dtype): raise ValueError( 'The global policy can only be set to a non-infer policy in TensorFlow ' '2') _global_policy = policy mixed_precision_global_state.using_default_mixed_precision_policy = ( _global_policy is None) # TODO(reedwm): Make this thread local @contextlib.contextmanager def policy_scope(policy): """A context manager that sets the global Policy under it. 
Args: policy: A Policy, or a string that will be converted to a Policy.. Yields: Nothing. """ old_policy = _global_policy try: set_policy(policy) yield finally: set_policy(old_policy) def _is_convertible_to_dtype(dtype): try: dtypes.as_dtype(dtype) return True except TypeError: return False def _policy_equivalent_to_dtype(policy): """Returns True if the Policy is equivalent to a single dtype. A policy is equivalent to a single dtype if the policy's compute and variable dtypes are the same and the policy does not cause the layer/model to have additional behavior, such as loss scaling. The "infer" policy is considered equivalent to a single dtype. Args: policy: A Policy. Returns: True, if the policy is equivalent to a single dtype. """ # We use type() instead of isinstance because a sublcass of Policy is never # equivalent to a dtype. return (type(policy) == Policy and # pylint: disable=unidiomatic-typecheck list(policy.get_config().keys()) == ['name'] and (policy.name == 'infer' or _is_convertible_to_dtype(policy.name))) def serialize(policy): if _policy_equivalent_to_dtype(policy): # We return either None or the policy name for compatibility with older # versions of Keras. If the policy name is returned, it is a dtype string # such as 'float32'. return None if policy.name == 'infer' else policy.name return generic_utils.serialize_keras_object(policy) def deserialize(config, custom_objects=None): if isinstance(config, str) and _is_convertible_to_dtype(config): return Policy(config) if config is None: return Policy('infer') module_objects = {'Policy': Policy} return generic_utils.deserialize_keras_object( config, module_objects=module_objects, custom_objects=custom_objects, printable_module_name='dtype policy')
DavidNorman/tensorflow
tensorflow/python/keras/mixed_precision/experimental/policy.py
Python
apache-2.0
24,791
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for observation_preprocessor."""

import math

from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
from learner import test_data
from learner.brains import observation_preprocessor
from learner.brains import tfa_specs
import numpy as np
import tensorflow as tf

# pylint: disable=g-bad-import-order
import common.generate_protos  # pylint: disable=unused-import
import brain_pb2
import observation_pb2


def get_hparams(feelers_version):
  """Returns preprocessor hyperparameters shared by the tests.

  Args:
    feelers_version: 'v1' or 'v2', selecting the feeler preprocessing
      implementation under test. The v2-only channel/kernel settings are
      ignored by v1.
  """
  return dict(
      always_compute_egocentric=True,
      feelers_version=feelers_version,
      feelers_v2_output_channels=3,
      feelers_v2_kernel_size=5)


class ObservationPreprocessorTest(parameterized.TestCase):

  @parameterized.parameters([
      ('v1', 'default'),
      ('v2', 'default'),
      ('v2', 'wrap')])
  def test_preproc(self, feelers_version, yaw_range):
    """Checks the full preprocessed feature vector for a single observation.

    With yaw_range == 'wrap', the feeler yaw angles are rewritten to span
    [0, pi] so that wrap-around behavior is exercised.
    """
    spec_proto = test_data.brain_spec()
    if yaw_range == 'wrap':
      # Force yaw_angles to loop around 360 degrees.
      for field in spec_proto.observation_spec.player.entity_fields:
        if field.HasField('feeler'):
          for i, yaw in enumerate(
              np.linspace(0, math.pi, len(field.feeler.yaw_angles))):
            field.feeler.yaw_angles[i] = yaw

    spec = tfa_specs.BrainSpec(spec_proto)

    obs_preproc = observation_preprocessor.ObservationPreprocessor(
        spec, get_hparams(feelers_version))

    data = test_data.observation_data(50, [0.0, 0.0, 0.0])

    # Set camera position / rotation.
    data.camera.position.x = data.player.position.x
    data.camera.position.y = data.player.position.y
    data.camera.position.z = data.player.position.z
    data.camera.rotation.x = 0
    data.camera.rotation.y = 0
    data.camera.rotation.z = 1  # 180 degrees rotated around z
    data.camera.rotation.w = 0

    tfa_val = spec.observation_spec.tfa_value(data)

    # Apply preprocessing layers to tf_val
    preprocessed, _ = obs_preproc(tfa_val)  # Ignore state component.
    preprocessed = preprocessed.numpy().tolist()

    def _dist(d):
      """Preprocess distances to match observation preprocessor."""
      return np.log(d + 1)

    # Expected flattened feature vector: one-hot category, scaled scalars
    # ([0, 100] mapped to [-1, 1]), raw feelers, then egocentric
    # (XZ-angle, YZ-angle, log-distance) triples — camera-relative first,
    # player-relative second.
    want = [
        0.0,  # global_entities/0/drink - one-hot, category 0
        0.0,  # global_entities/0/drink - one-hot, category 1
        1.0,  # global_entities/0/drink - one-hot, category 2
        2 * (66 / 100) - 1,  # global_entities/0/evilness
        1,    # player/feeler
        1.1,  # player/feeler
        2,    # player/feeler
        2.1,  # player/feeler
        3,    # player/feeler
        3.1,  # player/feeler
        2 * (50 / 100) - 1,  # player/health
        0.0,  # XZ-angle camera to entity1
        0.0,  # YZ-angle camera to entity1
        _dist(1.0),  # distance camera to entity1
        0.0,  # XZ-angle camera to entity2
        -math.pi/2,  # YZ-angle camera to entity2
        _dist(1.0),  # distance camera to entity2
        math.pi/2,  # XZ-angle camera to entity3
        0.0,  # YZ-angle camera to entity3
        _dist(2.0),  # distance camera to entity3
        0.0,  # XZ-angle player to entity1
        0.0,  # YZ-angle player to entity1
        _dist(1.0),  # distance player to entity1
        0.0,  # XZ-angle player to entity2
        math.pi/2,  # YZ-angle player to entity2
        _dist(1.0),  # distance player to entity2
        -math.pi/2,  # XZ-angle player to entity3
        0.0,  # YZ-angle player to entity3
        _dist(2.0)  # distance player to entity3
    ]

    # We're rounding aggressively because batch norm adds noise.
    self.assertSequenceAlmostEqual(preprocessed, want, delta=0.05)

  @parameterized.parameters(['v1', 'v2'])
  def test_preproc_batch(self, feelers_version):
    """Checks that a batched nested observation preprocesses batch-wise."""
    spec = tfa_specs.BrainSpec(test_data.brain_spec())

    obs_preproc = observation_preprocessor.ObservationPreprocessor(
        spec, get_hparams(feelers_version))

    tfa_val = spec.observation_spec.tfa_value(
        test_data.observation_data(50, [0.0, 0.0, 0.0]))

    # Create batch of nested observations of size 5
    tfa_val = tf.nest.map_structure(
        lambda x: tf.stack([x, x, x, x, x]), tfa_val)

    # Apply preprocessing layers to tf_val
    preprocessed, _ = obs_preproc(tfa_val)  # Ignore state component.

    # 29 is the number of flattened features for this spec (it matches the
    # length of the `want` list in test_preproc).
    self.assertEqual(preprocessed.shape, (5, 29))

  @parameterized.parameters(['v1', 'v2'])
  def test_preproc_missing_player(self, feelers_version):
    """Checks preprocessing when the player entity is absent from the spec."""
    proto_obs_spec = test_data.observation_spec()
    proto_obs_spec.ClearField('player')  # Delete player pos from spec.

    proto_act_spec = test_data.action_spec()  # Delete player references.

    # Remove joystick actions since they reference the player and camera.
    # The last 3 actions in the example model are joystick actions, so we
    # remove them from the list.
    del proto_act_spec.actions[-3:]

    brain_spec = test_data.brain_spec()
    brain_spec.observation_spec.CopyFrom(proto_obs_spec)
    brain_spec.action_spec.CopyFrom(proto_act_spec)

    spec = tfa_specs.BrainSpec(brain_spec)

    obs_preproc = observation_preprocessor.ObservationPreprocessor(
        spec, get_hparams(feelers_version))

    proto_data = test_data.observation_data(50, [0.0, 0.0, 0.0])
    proto_data.ClearField('player')  # Delete player from data.

    tfa_val = spec.observation_spec.tfa_value(proto_data)

    # Apply preprocessing layers to tf_val
    preprocessed, _ = obs_preproc(tfa_val)  # Ignore state component.
    preprocessed = preprocessed.numpy().tolist()

    # Only the global-entity features remain once the player is removed.
    want = [
        0.0,  # global_entities/0/drink - one-hot, category 0
        0.0,  # global_entities/0/drink - one-hot, category 1
        1.0,  # global_entities/0/drink - one-hot, category 2
        2 * (66.0 / 100.0) - 1,  # global_entities/0/evilness
    ]
    self.assertSequenceAlmostEqual(preprocessed, want, delta=0.05)

  @parameterized.product(dir_mode=['angle', 'unit_circle'],
                         dist_mode=['linear', 'log_plus_one'],
                         num_batch_dims=[0, 1, 2])
  def test_egocentric_modes(self, dir_mode, dist_mode, num_batch_dims):
    """Checks all egocentric direction/distance encodings and batch ranks.

    Places one entity at (1, -1, 1) relative to the player at the origin and
    verifies the direction encoding (angles vs. unit-circle components) and
    distance encoding (linear vs. log(d + 1)), with 0-2 batch dimensions.
    """
    brain_spec = brain_pb2.BrainSpec()
    text_format.Parse(
        """
        observation_spec {
          player {
            position {}
            rotation {}
          }
          global_entities {
            position {}
            rotation {}
          }
        }
        action_spec {
          actions {
            name: "joy_pitch_yaw"
            joystick {
              axes_mode: DELTA_PITCH_YAW
              controlled_entity: "player"
            }
          }
        }
        """, brain_spec)
    hparams = {
        'egocentric_direction_mode': dir_mode,
        'egocentric_distance_mode': dist_mode,
    }
    spec = tfa_specs.BrainSpec(brain_spec)

    obs_preproc = observation_preprocessor.ObservationPreprocessor(
        spec, hparams)

    observation_data = observation_pb2.ObservationData()
    text_format.Parse(
        """
        player {
          position {}
          rotation {}
        }
        global_entities {
          position {
            x: 1
            y: -1
            z: 1
          }
          rotation {}
        }
        """, observation_data)

    tfa_val = spec.observation_spec.tfa_value(observation_data)

    # Stack into a batch of the requested size.
    for _ in range(num_batch_dims):
      tfa_val = tf.nest.map_structure(
          lambda x: tf.stack([x, x]), tfa_val)

    preprocessed, _ = obs_preproc(tfa_val)  # Ignore state component.
    preprocessed = preprocessed.numpy()

    # Unpack result of first batch.
    for _ in range(num_batch_dims):
      preprocessed = preprocessed[0]

    if dir_mode == 'angle':
      want = [-math.pi/4, math.pi/4]  # -45, 45 degree in radians.
      self.assertSequenceAlmostEqual(preprocessed[:len(want)], want,
                                     delta=0.05)
      preprocessed = preprocessed[len(want):]
    else:
      assert dir_mode == 'unit_circle'
      v = 1 / math.sqrt(2)  # X and Y component of 45 degree 2D unit vec.
      want = [v, v, -v, v]
      self.assertSequenceAlmostEqual(preprocessed[:len(want)], want,
                                     delta=0.05)
      preprocessed = preprocessed[len(want):]

    if dist_mode == 'linear':
      want = [math.sqrt(3)]  # diagonal of the unit cube.
      self.assertSequenceAlmostEqual(want, preprocessed, delta=0.05)
    else:
      assert dist_mode == 'log_plus_one'
      want = [math.log(math.sqrt(3) + 1)]
      self.assertSequenceAlmostEqual(want, preprocessed, delta=0.05)


if __name__ == '__main__':
  absltest.main()
google-research/falken
service/learner/brains/observation_preprocessor_test.py
Python
apache-2.0
9,414
"""An implementation of Matching Layer."""
import typing

import tensorflow as tf
from keras.engine import Layer


class MatchingLayer(Layer):
    """
    Layer that computes a matching matrix between samples in two tensors.

    :param normalize: Whether to L2-normalize samples along the
        dot product axis before taking the dot product.
        If set to True, then the output of the dot product
        is the cosine proximity between the two samples.
    :param matching_type: the similarity function for matching
    :param kwargs: Standard layer keyword arguments.

    Examples:
        >>> import matchzoo as mz
        >>> layer = mz.layers.MatchingLayer(matching_type='dot',
        ...                                 normalize=True)
        >>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
        >>> layer.build([[num_batch, left_len, num_dim],
        ...              [num_batch, right_len, num_dim]])

    """

    def __init__(self, normalize: bool = False,
                 matching_type: str = 'dot', **kwargs):
        """:class:`MatchingLayer` constructor."""
        super().__init__(**kwargs)
        self._normalize = normalize
        self._validate_matching_type(matching_type)
        self._matching_type = matching_type
        # Input shapes are recorded in `build` and reused by `call` to tile
        # the two inputs to a common rank-4 shape.
        self._shape1 = None
        self._shape2 = None

    @classmethod
    def _validate_matching_type(cls, matching_type: str = 'dot'):
        """Raise :exc:`ValueError` if `matching_type` is not supported."""
        valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
        if matching_type not in valid_matching_type:
            raise ValueError(f"{matching_type} is not a valid matching type, "
                             f"{valid_matching_type} expected.")

    def build(self, input_shape: list):
        """
        Build the layer.

        :param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
        """
        # Used purely for shape validation.
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on a list of 2 inputs.')
        self._shape1 = input_shape[0]
        self._shape2 = input_shape[1]
        # Batch size (axis 0) and embedding dim (axis 2) must agree between
        # the two inputs; only the sequence lengths (axis 1) may differ.
        # NOTE: fixed missing space between the two error-message fragments.
        for idx in 0, 2:
            if self._shape1[idx] != self._shape2[idx]:
                raise ValueError(
                    'Incompatible dimensions: '
                    f'{self._shape1[idx]} != {self._shape2[idx]}. '
                    f'Layer shapes: {self._shape1}, {self._shape2}.'
                )

    def call(self, inputs: list, **kwargs) -> typing.Any:
        """
        The computation logic of MatchingLayer.

        :param inputs: two input tensors.
        """
        x1 = inputs[0]
        x2 = inputs[1]
        if self._matching_type == 'dot':
            if self._normalize:
                # Normalizing along the embedding axis turns the pairwise dot
                # product into cosine similarity.
                x1 = tf.math.l2_normalize(x1, axis=2)
                x2 = tf.math.l2_normalize(x2, axis=2)
            return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
        else:
            if self._matching_type == 'mul':
                def func(x, y):
                    return x * y
            elif self._matching_type == 'plus':
                def func(x, y):
                    return x + y
            elif self._matching_type == 'minus':
                def func(x, y):
                    return x - y
            elif self._matching_type == 'concat':
                def func(x, y):
                    return tf.concat([x, y], axis=3)
            else:
                # Defensive: unreachable via the public constructor because
                # `_validate_matching_type` already rejected other values.
                # NOTE: fixed "Mut be" typo and missing spaces in the message.
                raise ValueError(f"Invalid matching type. "
                                 f"{self._matching_type} received. "
                                 f"Must be in `dot`, `mul`, `plus`, "
                                 f"`minus` and `concat`.")
            # Tile both inputs to (batch, left_len, right_len, dim) before
            # applying the element-wise matching function.
            x1_exp = tf.stack([x1] * self._shape2[1], 2)
            x2_exp = tf.stack([x2] * self._shape1[1], 1)
            return func(x1_exp, x2_exp)

    def compute_output_shape(self, input_shape: list) -> tuple:
        """
        Calculate the layer output shape.

        :param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
        """
        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on a list of 2 inputs.')
        shape1 = list(input_shape[0])
        shape2 = list(input_shape[1])
        if len(shape1) != 3 or len(shape2) != 3:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on 2 inputs with 3 dimensions.')
        if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on 2 inputs with same 0,2 dimensions.')

        if self._matching_type in ['mul', 'plus', 'minus']:
            return shape1[0], shape1[1], shape2[1], shape1[2]
        elif self._matching_type == 'dot':
            return shape1[0], shape1[1], shape2[1], 1
        elif self._matching_type == 'concat':
            return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
        else:
            # NOTE: fixed missing spaces/comma in the message.
            raise ValueError(f"Invalid `matching_type`. "
                             f"{self._matching_type} received. "
                             f"Must be in `mul`, `plus`, `minus`, "
                             f"`dot` and `concat`.")

    def get_config(self) -> dict:
        """Get the config dict of MatchingLayer."""
        config = {
            'normalize': self._normalize,
            'matching_type': self._matching_type,
        }
        base_config = super(MatchingLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
faneshion/MatchZoo
matchzoo/layers/matching_layer.py
Python
apache-2.0
5,753
#!/usr/bin/env python # Copyright (C) 2012 Zulip, Inc. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import absolute_import from typing import IO, Any, Text, Union, Set, Tuple from types import FrameType import sys from six.moves import map from six.moves import range try: import simplejson except ImportError: import json as simplejson # type: ignore import re import time import subprocess import optparse import os import datetime import textwrap import time import signal import logging import hashlib import tempfile import select DEFAULT_SITE = "https://api.zulip.com" class States(object): Startup, ZulipToZephyr, ZephyrToZulip, ChildSending = list(range(4)) CURRENT_STATE = States.Startup logger = None # type: logging.Logger def to_zulip_username(zephyr_username): # type: (str) -> str if "@" in zephyr_username: (user, realm) = zephyr_username.split("@") else: (user, realm) = (zephyr_username, "ATHENA.MIT.EDU") if realm.upper() == "ATHENA.MIT.EDU": # Hack to make ctl's fake username setup work :) if user.lower() == 'golem': user = 'ctl' return user.lower() + "@mit.edu" return user.lower() + "|" + realm.upper() + "@mit.edu" def to_zephyr_username(zulip_username): # type: (str) -> str (user, realm) = zulip_username.split("@") if "|" not in user: # Hack to make ctl's fake username setup work :) if user.lower() == 'ctl': user = 'golem' return user.lower() + "@ATHENA.MIT.EDU" match_user = re.match(r'([a-zA-Z0-9_]+)\|(.+)', user) if not match_user: raise Exception("Could not parse Zephyr realm for cross-realm user %s" % (zulip_username,)) return match_user.group(1).lower() + "@" + match_user.group(2).upper() # Checks whether the pair of adjacent lines would have been # linewrapped together, had they been intended to be parts of the same # paragraph. 
Our check is whether if you move the first word on the # 2nd line onto the first line, the resulting line is either (1) # significantly shorter than the following line (which, if they were # in the same paragraph, should have been wrapped in a way consistent # with how the previous line was wrapped) or (2) shorter than 60 # characters (our assumed minimum linewrapping threshold for Zephyr) # or (3) the first word of the next line is longer than this entire # line. def different_paragraph(line, next_line): # type: (str, str) -> bool words = next_line.split() return (len(line + " " + words[0]) < len(next_line) * 0.8 or len(line + " " + words[0]) < 50 or len(line) < len(words[0])) # Linewrapping algorithm based on: # http://gcbenison.wordpress.com/2011/07/03/a-program-to-intelligently-remove-carriage-returns-so-you-can-paste-text-without-having-it-look-awful/ #ignorelongline def unwrap_lines(body): # type: (str) -> str lines = body.split("\n") result = "" previous_line = lines[0] for line in lines[1:]: line = line.rstrip() if (re.match(r'^\W', line, flags=re.UNICODE) and re.match(r'^\W', previous_line, flags=re.UNICODE)): result += previous_line + "\n" elif (line == "" or previous_line == "" or re.match(r'^\W', line, flags=re.UNICODE) or different_paragraph(previous_line, line)): # Use 2 newlines to separate sections so that we # trigger proper Markdown processing on things like # bulleted lists result += previous_line + "\n\n" else: result += previous_line + " " previous_line = line result += previous_line return result def send_zulip(zeph): # type: (Dict[str, str]) -> Dict[str, str] message = {} if options.forward_class_messages: message["forged"] = "yes" message['type'] = zeph['type'] message['time'] = zeph['time'] message['sender'] = to_zulip_username(zeph['sender']) if "subject" in zeph: # Truncate the subject to the current limit in Zulip. No # need to do this for stream names, since we're only # subscribed to valid stream names. 
message["subject"] = zeph["subject"][:60] if zeph['type'] == 'stream': # Forward messages sent to -c foo -i bar to stream bar subject "instance" if zeph["stream"] == "message": message['to'] = zeph['subject'].lower() message['subject'] = "instance %s" % (zeph['subject'],) elif zeph["stream"] == "tabbott-test5": message['to'] = zeph['subject'].lower() message['subject'] = "test instance %s" % (zeph['subject'],) else: message["to"] = zeph["stream"] else: message["to"] = zeph["recipient"] message['content'] = unwrap_lines(zeph['content']) if options.test_mode and options.site == DEFAULT_SITE: logger.debug("Message is: %s" % (str(message),)) return {'result': "success"} return zulip_client.send_message(message) def send_error_zulip(error_msg): # type: (str) -> None message = {"type": "private", "sender": zulip_account_email, "to": zulip_account_email, "content": error_msg, } zulip_client.send_message(message) current_zephyr_subs = set() def zephyr_bulk_subscribe(subs): # type: (List[Tuple[str, str, str]]) -> None try: zephyr._z.subAll(subs) except IOError: # Since we haven't added the subscription to # current_zephyr_subs yet, we can just return (so that we'll # continue processing normal messages) and we'll end up # retrying the next time the bot checks its subscriptions are # up to date. logger.exception("Error subscribing to streams (will retry automatically):") logger.warning("Streams were: %s" % ([cls for cls, instance, recipient in subs],)) return try: actual_zephyr_subs = [cls for (cls, _, _) in zephyr._z.getSubscriptions()] except IOError: logger.exception("Error getting current Zephyr subscriptions") # Don't add anything to current_zephyr_subs so that we'll # retry the next time we check for streams to subscribe to # (within 15 seconds). 
return for (cls, instance, recipient) in subs: if cls not in actual_zephyr_subs: logger.error("Zephyr failed to subscribe us to %s; will retry" % (cls,)) try: # We'll retry automatically when we next check for # streams to subscribe to (within 15 seconds), but # it's worth doing 1 retry immediately to avoid # missing 15 seconds of messages on the affected # classes zephyr._z.sub(cls, instance, recipient) except IOError: pass else: current_zephyr_subs.add(cls) def update_subscriptions(): # type: () -> None try: f = open(options.stream_file_path, "r") public_streams = simplejson.loads(f.read()) f.close() except Exception: logger.exception("Error reading public streams:") return classes_to_subscribe = set() for stream in public_streams: zephyr_class = stream.encode("utf-8") if (options.shard is not None and not hashlib.sha1(zephyr_class).hexdigest().startswith(options.shard)): # This stream is being handled by a different zephyr_mirror job. continue if zephyr_class in current_zephyr_subs: continue classes_to_subscribe.add((zephyr_class, "*", "*")) if len(classes_to_subscribe) > 0: zephyr_bulk_subscribe(list(classes_to_subscribe)) def maybe_kill_child(): # type: () -> None try: if child_pid is not None: os.kill(child_pid, signal.SIGTERM) except OSError: # We don't care if the child process no longer exists, so just log the error logger.exception("") def maybe_restart_mirroring_script(): # type: () -> None if os.stat(os.path.join(options.root_path, "stamps", "restart_stamp")).st_mtime > start_time or \ ((options.user == "tabbott" or options.user == "tabbott/extra") and os.stat(os.path.join(options.root_path, "stamps", "tabbott_stamp")).st_mtime > start_time): logger.warning("") logger.warning("zephyr mirroring script has been updated; restarting...") maybe_kill_child() try: zephyr._z.cancelSubs() except IOError: # We don't care whether we failed to cancel subs properly, but we should log it logger.exception("") while True: try: os.execvp(os.path.join(options.root_path, 
"user_root", "zephyr_mirror_backend.py"), sys.argv) except Exception: logger.exception("Error restarting mirroring script; trying again... Traceback:") time.sleep(1) def process_loop(log): # type: (IO) -> None restart_check_count = 0 last_check_time = time.time() while True: select.select([zephyr._z.getFD()], [], [], 15) try: # Fetch notices from the queue until its empty while True: notice = zephyr.receive(block=False) if notice is None: break try: process_notice(notice, log) except Exception: logger.exception("Error relaying zephyr:") time.sleep(2) except Exception: logger.exception("Error checking for new zephyrs:") time.sleep(1) continue if time.time() - last_check_time > 15: last_check_time = time.time() try: maybe_restart_mirroring_script() if restart_check_count > 0: logger.info("Stopped getting errors checking whether restart is required.") restart_check_count = 0 except Exception: if restart_check_count < 5: logger.exception("Error checking whether restart is required:") restart_check_count += 1 if options.forward_class_messages: try: update_subscriptions() except Exception: logger.exception("Error updating subscriptions from Zulip:") def parse_zephyr_body(zephyr_data): # type: (str) -> Tuple[str, str] try: (zsig, body) = zephyr_data.split("\x00", 1) except ValueError: (zsig, body) = ("", zephyr_data) return (zsig, body) def parse_crypt_table(zephyr_class, instance): # type: (Text, str) -> str try: crypt_table = open(os.path.join(os.environ["HOME"], ".crypt-table")) except IOError: return None for line in crypt_table.readlines(): if line.strip() == "": # Ignore blank lines continue match = re.match("^crypt-(?P<class>[^:]+):\s+((?P<algorithm>(AES|DES)):\s+)?(?P<keypath>\S+)$", line) if match is None: # Malformed crypt_table line logger.debug("Invalid crypt_table line!") continue groups = match.groupdict() if groups['class'].lower() == zephyr_class and 'keypath' in groups and \ groups.get("algorithm") == "AES": return groups["keypath"] return None def 
decrypt_zephyr(zephyr_class, instance, body): # type: (Text, str, str) -> str keypath = parse_crypt_table(zephyr_class, instance) if keypath is None: # We can't decrypt it, so we just return the original body return body # Enable handling SIGCHLD briefly while we call into # subprocess to avoid http://bugs.python.org/issue9127 signal.signal(signal.SIGCHLD, signal.SIG_DFL) # decrypt the message! p = subprocess.Popen(["gpg", "--decrypt", "--no-options", "--no-default-keyring", "--keyring=/dev/null", "--secret-keyring=/dev/null", "--batch", "--quiet", "--no-use-agent", "--passphrase-file", keypath], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) decrypted, _ = p.communicate(input=body) # Restore our ignoring signals signal.signal(signal.SIGCHLD, signal.SIG_IGN) return decrypted def process_notice(notice, log): # type: (zulip, IO) -> None (zsig, body) = parse_zephyr_body(notice.message) is_personal = False is_huddle = False if notice.opcode == "PING": # skip PING messages return zephyr_class = notice.cls.lower() if zephyr_class == options.nagios_class: # Mark that we got the message and proceed with open(options.nagios_path, "w") as f: f.write("0\n") return if notice.recipient != "": is_personal = True # Drop messages not to the listed subscriptions if is_personal and not options.forward_personals: return if (zephyr_class not in current_zephyr_subs) and not is_personal: logger.debug("Skipping ... %s/%s/%s" % (zephyr_class, notice.instance, is_personal)) return if notice.format.startswith("Zephyr error: See") or notice.format.endswith("@(@color(blue))"): logger.debug("Skipping message we got from Zulip!") return if (zephyr_class == "mail" and notice.instance.lower() == "inbox" and is_personal and not options.forward_mail_zephyrs): # Only forward mail zephyrs if forwarding them is enabled. 
return if is_personal: if body.startswith("CC:"): is_huddle = True # Map "CC: user1 user2" => "user1@mit.edu, user2@mit.edu" huddle_recipients = [to_zulip_username(x.strip()) for x in body.split("\n")[0][4:].split()] if notice.sender not in huddle_recipients: huddle_recipients.append(to_zulip_username(notice.sender)) body = body.split("\n", 1)[1] if options.forward_class_messages and notice.opcode.lower() == "crypt": body = decrypt_zephyr(zephyr_class, notice.instance.lower(), body) zeph = {'time': str(notice.time), 'sender': notice.sender, 'zsig': zsig, # logged here but not used by app 'content': body} if is_huddle: zeph['type'] = 'private' zeph['recipient'] = huddle_recipients elif is_personal: zeph['type'] = 'private' zeph['recipient'] = to_zulip_username(notice.recipient) else: zeph['type'] = 'stream' zeph['stream'] = zephyr_class if notice.instance.strip() != "": zeph['subject'] = notice.instance else: zeph["subject"] = '(instance "%s")' % (notice.instance,) # Add instances in for instanced personals if is_personal: if notice.cls.lower() != "message" and notice.instance.lower != "personal": heading = "[-c %s -i %s]\n" % (notice.cls, notice.instance) elif notice.cls.lower() != "message": heading = "[-c %s]\n" % (notice.cls,) elif notice.instance.lower() != "personal": heading = "[-i %s]\n" % (notice.instance,) else: heading = "" zeph["content"] = heading + zeph["content"] zeph = decode_unicode_byte_strings(zeph) logger.info("Received a message on %s/%s from %s..." % (zephyr_class, notice.instance, notice.sender)) if log is not None: log.write(simplejson.dumps(zeph) + '\n') log.flush() if os.fork() == 0: global CURRENT_STATE CURRENT_STATE = States.ChildSending # Actually send the message in a child process, to avoid blocking. 
try: res = send_zulip(zeph) if res.get("result") != "success": logger.error("Error relaying zephyr:\n%s\n%s" % (zeph, res)) except Exception: logger.exception("Error relaying zephyr:") finally: os._exit(0) def decode_unicode_byte_strings(zeph): # type: (Dict[str, Any]) -> Dict[str, str] # 'Any' can be of any type of text that is converted to str. for field in zeph.keys(): if isinstance(zeph[field], str): try: decoded = zeph[field].decode("utf-8") except Exception: decoded = zeph[field].decode("iso-8859-1") zeph[field] = decoded return zeph def quit_failed_initialization(message): # type: (str) -> str logger.error(message) maybe_kill_child() sys.exit(1) def zephyr_init_autoretry(): # type: () -> None backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: # zephyr.init() tries to clear old subscriptions, and thus # sometimes gets a SERVNAK from the server zephyr.init() backoff.succeed() return except IOError: logger.exception("Error initializing Zephyr library (retrying). Traceback:") backoff.fail() quit_failed_initialization("Could not initialize Zephyr library, quitting!") def zephyr_load_session_autoretry(session_path): # type: (str) -> None backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: session = open(session_path, "r").read() zephyr._z.initialize() zephyr._z.load_session(session) zephyr.__inited = True return except IOError: logger.exception("Error loading saved Zephyr session (retrying). Traceback:") backoff.fail() quit_failed_initialization("Could not load saved Zephyr session, quitting!") def zephyr_subscribe_autoretry(sub): # type: (Tuple[str, str, str]) -> None backoff = zulip.RandomExponentialBackoff() while backoff.keep_going(): try: zephyr.Subscriptions().add(sub) backoff.succeed() return except IOError: # Probably a SERVNAK from the zephyr server, but log the # traceback just in case it's something else logger.exception("Error subscribing to personals (retrying). 
Traceback:") backoff.fail() quit_failed_initialization("Could not subscribe to personals, quitting!") def zephyr_to_zulip(options): # type: (Any) -> None if options.use_sessions and os.path.exists(options.session_path): logger.info("Loading old session") zephyr_load_session_autoretry(options.session_path) else: zephyr_init_autoretry() if options.forward_class_messages: update_subscriptions() if options.forward_personals: # Subscribe to personals; we really can't operate without # those subscriptions, so just retry until it works. zephyr_subscribe_autoretry(("message", "*", "%me%")) zephyr_subscribe_autoretry(("mail", "inbox", "%me%")) if options.nagios_class: zephyr_subscribe_autoretry((options.nagios_class, "*", "*")) if options.use_sessions: open(options.session_path, "w").write(zephyr._z.dump_session()) if options.logs_to_resend is not None: with open(options.logs_to_resend, 'r') as log: for ln in log: try: zeph = simplejson.loads(ln) # New messages added to the log shouldn't have any # elements of type str (they should already all be # unicode), but older messages in the log are # still of type str, so convert them before we # send the message zeph = decode_unicode_byte_strings(zeph) # Handle importing older zephyrs in the logs # where it isn't called a "stream" yet if "class" in zeph: zeph["stream"] = zeph["class"] if "instance" in zeph: zeph["subject"] = zeph["instance"] logger.info("sending saved message to %s from %s..." 
% (zeph.get('stream', zeph.get('recipient')), zeph['sender'])) send_zulip(zeph) except Exception: logger.exception("Could not send saved zephyr:") time.sleep(2) logger.info("Successfully initialized; Starting receive loop.") if options.resend_log_path is not None: with open(options.resend_log_path, 'a') as log: process_loop(log) else: process_loop(None) def send_zephyr(zwrite_args, content): # type: (list, str) -> Tuple[int, str] p = subprocess.Popen(zwrite_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate(input=content.encode("utf-8")) if p.returncode: logger.error("zwrite command '%s' failed with return code %d:" % ( " ".join(zwrite_args), p.returncode,)) if stdout: logger.info("stdout: " + stdout) elif stderr: logger.warning("zwrite command '%s' printed the following warning:" % ( " ".join(zwrite_args),)) if stderr: logger.warning("stderr: " + stderr) return (p.returncode, stderr) def send_authed_zephyr(zwrite_args, content): # type: (list[str], str) -> Tuple[int, str] return send_zephyr(zwrite_args, content) def send_unauthed_zephyr(zwrite_args, content): # type: (list[str], str) -> Tuple[int, str] return send_zephyr(zwrite_args + ["-d"], content) def zcrypt_encrypt_content(zephyr_class, instance, content): # type: (str, str, str) -> str keypath = parse_crypt_table(zephyr_class, instance) if keypath is None: return None # encrypt the message! p = subprocess.Popen(["gpg", "--symmetric", "--no-options", "--no-default-keyring", "--keyring=/dev/null", "--secret-keyring=/dev/null", "--batch", "--quiet", "--no-use-agent", "--armor", "--cipher-algo", "AES", "--passphrase-file", keypath], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) encrypted, _ = p.communicate(input=content) return encrypted def forward_to_zephyr(message): # type: (Dict[str, Any]) -> None # 'Any' can be of any type of text support_heading = "Hi there! This is an automated message from Zulip." 
support_closing = """If you have any questions, please be in touch through the \ Feedback button or at support@zulipchat.com.""" wrapper = textwrap.TextWrapper(break_long_words=False, break_on_hyphens=False) wrapped_content = "\n".join("\n".join(wrapper.wrap(line)) for line in message["content"].replace("@", "@@").split("\n")) zwrite_args = ["zwrite", "-n", "-s", message["sender_full_name"], "-F", "Zephyr error: See http://zephyr.1ts.org/wiki/df", "-x", "UTF-8"] # Hack to make ctl's fake username setup work :) if message['type'] == "stream" and zulip_account_email == "ctl@mit.edu": zwrite_args.extend(["-S", "ctl"]) if message['type'] == "stream": zephyr_class = message["display_recipient"] instance = message["subject"] match_whitespace_instance = re.match(r'^\(instance "(\s*)"\)$', instance) if match_whitespace_instance: # Forward messages sent to '(instance "WHITESPACE")' back to the # appropriate WHITESPACE instance for bidirectional mirroring instance = match_whitespace_instance.group(1) elif (instance == "instance %s" % (zephyr_class,) or instance == "test instance %s" % (zephyr_class,)): # Forward messages to e.g. -c -i white-magic back from the # place we forward them to if instance.startswith("test"): instance = zephyr_class zephyr_class = "tabbott-test5" else: instance = zephyr_class zephyr_class = "message" zwrite_args.extend(["-c", zephyr_class, "-i", instance]) logger.info("Forwarding message to class %s, instance %s" % (zephyr_class, instance)) elif message['type'] == "private": if len(message['display_recipient']) == 1: recipient = to_zephyr_username(message["display_recipient"][0]["email"]) recipients = [recipient] elif len(message['display_recipient']) == 2: recipient = "" for r in message["display_recipient"]: if r["email"].lower() != zulip_account_email.lower(): recipient = to_zephyr_username(r["email"]) break recipients = [recipient] else: zwrite_args.extend(["-C"]) # We drop the @ATHENA.MIT.EDU here because otherwise the # "CC: user1 user2 ..." 
output will be unnecessarily verbose. recipients = [to_zephyr_username(user["email"]).replace("@ATHENA.MIT.EDU", "") for user in message["display_recipient"]] logger.info("Forwarding message to %s" % (recipients,)) zwrite_args.extend(recipients) if message.get("invite_only_stream"): result = zcrypt_encrypt_content(zephyr_class, instance, wrapped_content) if result is None: send_error_zulip("""%s Your Zulip-Zephyr mirror bot was unable to forward that last message \ from Zulip to Zephyr because you were sending to a zcrypted Zephyr \ class and your mirroring bot does not have access to the relevant \ key (perhaps because your AFS tokens expired). That means that while \ Zulip users (like you) received it, Zephyr users did not. %s""" % (support_heading, support_closing)) return # Proceed with sending a zcrypted message wrapped_content = result zwrite_args.extend(["-O", "crypt"]) if options.test_mode: logger.debug("Would have forwarded: %s\n%s" % (zwrite_args, wrapped_content.encode("utf-8"))) return (code, stderr) = send_authed_zephyr(zwrite_args, wrapped_content) if code == 0 and stderr == "": return elif code == 0: send_error_zulip("""%s Your last message was successfully mirrored to zephyr, but zwrite \ returned the following warning: %s %s""" % (support_heading, stderr, support_closing)) return elif code != 0 and (stderr.startswith("zwrite: Ticket expired while sending notice to ") or stderr.startswith("zwrite: No credentials cache found while sending notice to ")): # Retry sending the message unauthenticated; if that works, # just notify the user that they need to renew their tickets (code, stderr) = send_unauthed_zephyr(zwrite_args, wrapped_content) if code == 0: if options.ignore_expired_tickets: return send_error_zulip("""%s Your last message was forwarded from Zulip to Zephyr unauthenticated, \ because your Kerberos tickets have expired. 
It was sent successfully, \ but please renew your Kerberos tickets in the screen session where you \ are running the Zulip-Zephyr mirroring bot, so we can send \ authenticated Zephyr messages for you again. %s""" % (support_heading, support_closing)) return # zwrite failed and it wasn't because of expired tickets: This is # probably because the recipient isn't subscribed to personals, # but regardless, we should just notify the user. send_error_zulip("""%s Your Zulip-Zephyr mirror bot was unable to forward that last message \ from Zulip to Zephyr. That means that while Zulip users (like you) \ received it, Zephyr users did not. The error message from zwrite was: %s %s""" % (support_heading, stderr, support_closing)) return def maybe_forward_to_zephyr(message): # type: (Dict[str, Any]) -> None # The key string can be used to direct any type of text. if (message["sender_email"] == zulip_account_email): if not ((message["type"] == "stream") or (message["type"] == "private" and False not in [u["email"].lower().endswith("mit.edu") for u in message["display_recipient"]])): # Don't try forward private messages with non-MIT users # to MIT Zephyr. 
return timestamp_now = int(time.time()) if float(message["timestamp"]) < timestamp_now - 15: logger.warning("Skipping out of order message: %s < %s" % (message["timestamp"], timestamp_now)) return try: forward_to_zephyr(message) except Exception: # Don't let an exception forwarding one message crash the # whole process logger.exception("Error forwarding message:") def zulip_to_zephyr(options): # type: (int) -> None # Sync messages from zulip to zephyr logger.info("Starting syncing messages.") while True: try: zulip_client.call_on_each_message(maybe_forward_to_zephyr) except Exception: logger.exception("Error syncing messages:") time.sleep(1) def subscribed_to_mail_messages(): # type: () -> bool # In case we have lost our AFS tokens and those won't be able to # parse the Zephyr subs file, first try reading in result of this # query from the environment so we can avoid the filesystem read. stored_result = os.environ.get("HUMBUG_FORWARD_MAIL_ZEPHYRS") if stored_result is not None: return stored_result == "True" for (cls, instance, recipient) in parse_zephyr_subs(verbose=False): if (cls.lower() == "mail" and instance.lower() == "inbox"): os.environ["HUMBUG_FORWARD_MAIL_ZEPHYRS"] = "True" return True os.environ["HUMBUG_FORWARD_MAIL_ZEPHYRS"] = "False" return False def add_zulip_subscriptions(verbose): # type: (bool) -> None zephyr_subscriptions = set() skipped = set() for (cls, instance, recipient) in parse_zephyr_subs(verbose=verbose): if cls.lower() == "message": if recipient != "*": # We already have a (message, *, you) subscription, so # these are redundant continue # We don't support subscribing to (message, *) if instance == "*": if recipient == "*": skipped.add((cls, instance, recipient, "subscribing to all of class message is not supported.")) continue # If you're on -i white-magic on zephyr, get on stream white-magic on zulip # instead of subscribing to stream "message" on zulip zephyr_subscriptions.add(instance) continue elif cls.lower() == "mail" and 
instance.lower() == "inbox": # We forward mail zephyrs, so no need to log a warning. continue elif len(cls) > 60: skipped.add((cls, instance, recipient, "Class longer than 60 characters")) continue elif instance != "*": skipped.add((cls, instance, recipient, "Unsupported non-* instance")) continue elif recipient != "*": skipped.add((cls, instance, recipient, "Unsupported non-* recipient.")) continue zephyr_subscriptions.add(cls) if len(zephyr_subscriptions) != 0: res = zulip_client.add_subscriptions(list({"name": stream} for stream in zephyr_subscriptions), authorization_errors_fatal=False) if res.get("result") != "success": logger.error("Error subscribing to streams:\n%s" % (res["msg"],)) return already = res.get("already_subscribed") new = res.get("subscribed") unauthorized = res.get("unauthorized") if verbose: if already is not None and len(already) > 0: logger.info("\nAlready subscribed to: %s" % (", ".join(list(already.values())[0]),)) if new is not None and len(new) > 0: logger.info("\nSuccessfully subscribed to: %s" % (", ".join(list(new.values())[0]),)) if unauthorized is not None and len(unauthorized) > 0: logger.info("\n" + "\n".join(textwrap.wrap("""\ The following streams you have NOT been subscribed to, because they have been configured in Zulip as invitation-only streams. This was done at the request of users of these Zephyr classes, usually because traffic to those streams is sent within the Zephyr world encrypted via zcrypt (in Zulip, we achieve the same privacy goals through invitation-only streams). If you wish to read these streams in Zulip, you need to contact the people who are on these streams and already use Zulip. 
They can subscribe you to them via the "streams" page in the Zulip web interface: """)) + "\n\n %s" % (", ".join(unauthorized),)) if len(skipped) > 0: if verbose: logger.info("\n" + "\n".join(textwrap.wrap("""\ You have some lines in ~/.zephyr.subs that could not be synced to your Zulip subscriptions because they do not use "*" as both the instance and recipient and not one of the special cases (e.g. personals and mail zephyrs) that Zulip has a mechanism for forwarding. Zulip does not allow subscribing to only some subjects on a Zulip stream, so this tool has not created a corresponding Zulip subscription to these lines in ~/.zephyr.subs: """)) + "\n") for (cls, instance, recipient, reason) in skipped: if verbose: if reason != "": logger.info(" [%s,%s,%s] (%s)" % (cls, instance, recipient, reason)) else: logger.info(" [%s,%s,%s]" % (cls, instance, recipient)) if len(skipped) > 0: if verbose: logger.info("\n" + "\n".join(textwrap.wrap("""\ If you wish to be subscribed to any Zulip streams related to these .zephyrs.subs lines, please do so via the Zulip web interface. 
""")) + "\n") def valid_stream_name(name): # type: (str) -> bool return name != "" def parse_zephyr_subs(verbose=False): # type: (bool) -> Union[List, Tuple, Set[Tuple[str, str, str]]] zephyr_subscriptions = set() subs_file = os.path.join(os.environ["HOME"], ".zephyr.subs") if not os.path.exists(subs_file): if verbose: logger.error("Couldn't find ~/.zephyr.subs!") return [] for line in open(subs_file, "r").readlines(): line = line.strip() if len(line) == 0: continue try: (cls, instance, recipient) = line.split(",") cls = cls.replace("%me%", options.user) instance = instance.replace("%me%", options.user) recipient = recipient.replace("%me%", options.user) if not valid_stream_name(cls): if verbose: logger.error("Skipping subscription to unsupported class name: [%s]" % (line,)) continue except Exception: if verbose: logger.error("Couldn't parse ~/.zephyr.subs line: [%s]" % (line,)) continue zephyr_subscriptions.add((cls.strip(), instance.strip(), recipient.strip())) return zephyr_subscriptions def open_logger(): # type: () -> logging.Logger if options.log_path is not None: log_file = options.log_path elif options.forward_class_messages: if options.test_mode: log_file = "/var/log/zulip/test-mirror-log" else: log_file = "/var/log/zulip/mirror-log" else: f = tempfile.NamedTemporaryFile(prefix="zulip-log.%s." % (options.user,), delete=False) log_file = f.name # Close the file descriptor, since the logging system will # reopen it anyway. 
f.close() logger = logging.getLogger(__name__) log_format = "%(asctime)s <initial>: %(message)s" formatter = logging.Formatter(log_format) logging.basicConfig(format=log_format) logger.setLevel(logging.DEBUG) file_handler = logging.FileHandler(log_file) file_handler.setFormatter(formatter) logger.addHandler(file_handler) return logger def configure_logger(logger, direction_name): # type: (logging.Logger, str) -> None if direction_name is None: log_format = "%(message)s" else: log_format = "%(asctime)s [" + direction_name + "] %(message)s" formatter = logging.Formatter(log_format) # Replace the formatters for the file and stdout loggers for handler in logger.handlers: handler.setFormatter(formatter) root_logger = logging.getLogger() for handler in root_logger.handlers: handler.setFormatter(formatter) def parse_args(): # type: () -> Tuple parser = optparse.OptionParser() parser.add_option('--forward-class-messages', default=False, help=optparse.SUPPRESS_HELP, action='store_true') parser.add_option('--shard', help=optparse.SUPPRESS_HELP) parser.add_option('--noshard', default=False, help=optparse.SUPPRESS_HELP, action='store_true') parser.add_option('--resend-log', dest='logs_to_resend', help=optparse.SUPPRESS_HELP) parser.add_option('--enable-resend-log', dest='resend_log_path', help=optparse.SUPPRESS_HELP) parser.add_option('--log-path', dest='log_path', help=optparse.SUPPRESS_HELP) parser.add_option('--stream-file-path', dest='stream_file_path', default="/home/zulip/public_streams", help=optparse.SUPPRESS_HELP) parser.add_option('--no-forward-personals', dest='forward_personals', help=optparse.SUPPRESS_HELP, default=True, action='store_false') parser.add_option('--forward-mail-zephyrs', dest='forward_mail_zephyrs', help=optparse.SUPPRESS_HELP, default=False, action='store_true') parser.add_option('--no-forward-from-zulip', default=True, dest='forward_from_zulip', help=optparse.SUPPRESS_HELP, action='store_false') parser.add_option('--verbose', default=False, 
help=optparse.SUPPRESS_HELP, action='store_true') parser.add_option('--sync-subscriptions', default=False, action='store_true') parser.add_option('--ignore-expired-tickets', default=False, action='store_true') parser.add_option('--site', default=DEFAULT_SITE, help=optparse.SUPPRESS_HELP) parser.add_option('--on-startup-command', default=None, help=optparse.SUPPRESS_HELP) parser.add_option('--user', default=os.environ["USER"], help=optparse.SUPPRESS_HELP) parser.add_option('--root-path', default="/afs/athena.mit.edu/user/t/a/tabbott/for_friends", help=optparse.SUPPRESS_HELP) parser.add_option('--session-path', default=None, help=optparse.SUPPRESS_HELP) parser.add_option('--nagios-class', default=None, help=optparse.SUPPRESS_HELP) parser.add_option('--nagios-path', default=None, help=optparse.SUPPRESS_HELP) parser.add_option('--use-sessions', default=False, action='store_true', help=optparse.SUPPRESS_HELP) parser.add_option('--test-mode', default=False, help=optparse.SUPPRESS_HELP, action='store_true') parser.add_option('--api-key-file', default=os.path.join(os.environ["HOME"], "Private", ".humbug-api-key")) return parser.parse_args() def die_gracefully(signal, frame): # type: (int, FrameType) -> None if CURRENT_STATE == States.ZulipToZephyr or CURRENT_STATE == States.ChildSending: # this is a child process, so we want os._exit (no clean-up necessary) os._exit(1) if CURRENT_STATE == States.ZephyrToZulip and not options.use_sessions: try: # zephyr=>zulip processes may have added subs, so run cancelSubs zephyr._z.cancelSubs() except IOError: # We don't care whether we failed to cancel subs properly, but we should log it logger.exception("") sys.exit(1) if __name__ == "__main__": # Set the SIGCHLD handler back to SIG_DFL to prevent these errors # when importing the "requests" module after being restarted using # the restart_stamp functionality: # # close failed in file object destructor: # IOError: [Errno 10] No child processes signal.signal(signal.SIGCHLD, 
signal.SIG_DFL) signal.signal(signal.SIGINT, die_gracefully) # The properties available on 'options' are dynamically # determined, so we have to treat it as an Any for type # annotations. (options, args) = parse_args() # type: Any, List[str] logger = open_logger() configure_logger(logger, "parent") # The 'api' directory needs to go first, so that 'import zulip' won't pick # up some other directory named 'humbug'. pyzephyr_lib_path = "python-zephyr/build/lib.linux-%s-%s/" % (os.uname()[4], sys.version[0:3]) sys.path[:0] = [os.path.join(options.root_path, 'api'), options.root_path, os.path.join(options.root_path, "python-zephyr"), os.path.join(options.root_path, pyzephyr_lib_path)] # In case this is an automated restart of the mirroring script, # and we have lost AFS tokens, first try reading the API key from # the environment so that we can skip doing a filesystem read. if os.environ.get("HUMBUG_API_KEY") is not None: api_key = os.environ.get("HUMBUG_API_KEY") else: if not os.path.exists(options.api_key_file): logger.error("\n" + "\n".join(textwrap.wrap("""\ Could not find API key file. 
You need to either place your api key file at %s, or specify the --api-key-file option.""" % (options.api_key_file,)))) sys.exit(1) api_key = open(options.api_key_file).read().strip() # Store the API key in the environment so that our children # don't need to read it in os.environ["HUMBUG_API_KEY"] = api_key if options.nagios_path is None and options.nagios_class is not None: logger.error("\n" + "nagios_path is required with nagios_class\n") sys.exit(1) zulip_account_email = options.user + "@mit.edu" import zulip zulip_client = zulip.Client( email=zulip_account_email, api_key=api_key, verbose=True, client="zephyr_mirror", site=options.site) start_time = time.time() if options.sync_subscriptions: configure_logger(logger, None) # make the output cleaner logger.info("Syncing your ~/.zephyr.subs to your Zulip Subscriptions!") add_zulip_subscriptions(True) sys.exit(0) # Kill all zephyr_mirror processes other than this one and its parent. if not options.test_mode: pgrep_query = "python.*zephyr_mirror" if options.shard is not None: # sharded class mirror pgrep_query = "%s.*--shard=%s" % (pgrep_query, options.shard) elif options.user is not None: # Personals mirror on behalf of another user. pgrep_query = "%s.*--user=%s" % (pgrep_query, options.user) proc = subprocess.Popen(['pgrep', '-U', os.environ["USER"], "-f", pgrep_query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _err_unused = proc.communicate() for pid in map(int, out.split()): if pid == os.getpid() or pid == os.getppid(): continue # Another copy of zephyr_mirror.py! Kill it. 
logger.info("Killing duplicate zephyr_mirror process %s" % (pid,)) try: os.kill(pid, signal.SIGINT) except OSError: # We don't care if the target process no longer exists, so just log the error logger.exception("") if options.shard is not None and set(options.shard) != set("a"): # The shard that is all "a"s is the one that handles personals # forwarding and zulip => zephyr forwarding options.forward_personals = False options.forward_from_zulip = False if options.forward_mail_zephyrs is None: options.forward_mail_zephyrs = subscribed_to_mail_messages() if options.session_path is None: options.session_path = "/var/tmp/%s" % (options.user,) if options.forward_from_zulip: child_pid = os.fork() # type: int if child_pid == 0: CURRENT_STATE = States.ZulipToZephyr # Run the zulip => zephyr mirror in the child configure_logger(logger, "zulip=>zephyr") zulip_to_zephyr(options) sys.exit(0) else: child_pid = None CURRENT_STATE = States.ZephyrToZulip import zephyr logger_name = "zephyr=>zulip" if options.shard is not None: logger_name += "(%s)" % (options.shard,) configure_logger(logger, logger_name) # Have the kernel reap children for when we fork off processes to send Zulips signal.signal(signal.SIGCHLD, signal.SIG_IGN) zephyr_to_zulip(options)
samatdav/zulip
bots/zephyr_mirror_backend.py
Python
apache-2.0
48,314
"""Offer time listening automation rules."""
from datetime import datetime
import logging

import voluptuous as vol

from homeassistant.const import CONF_AT, CONF_PLATFORM
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
    async_track_point_in_time,
    async_track_state_change,
    async_track_time_change,
)
import homeassistant.util.dt as dt_util

# mypy: allow-untyped-defs, no-check-untyped-defs

_LOGGER = logging.getLogger(__name__)

# A single `at:` entry is either a literal time (HH:MM / HH:MM:SS) or the
# entity ID of an input_datetime whose state supplies the trigger moment.
_TIME_TRIGGER_SCHEMA = vol.Any(
    cv.time,
    vol.All(str, cv.entity_domain("input_datetime")),
    msg="Expected HH:MM, HH:MM:SS or Entity ID from domain 'input_datetime'",
)

TRIGGER_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): "time",
        vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
    }
)


async def async_attach_trigger(hass, config, action, automation_info):
    """Listen for state changes based on configuration.

    Sets up one listener per `at:` entry: a recurring time-change listener
    for literal times, and a state-driven listener for input_datetime
    entities (re-armed whenever the entity's state changes).

    Returns a callback that removes every listener this call registered.
    """
    # entities maps input_datetime entity_id -> its current unsub callback
    # (or None); removes collects every unsub callback for final teardown.
    entities = {}
    removes = []

    @callback
    def time_automation_listener(now):
        """Listen for time changes and calls action."""
        hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}})

    @callback
    def update_entity_trigger(entity_id, old_state=None, new_state=None):
        # If a listener was already set up for entity, remove it.
        remove = entities.get(entity_id)
        if remove:
            remove()
            removes.remove(remove)
            remove = None

        # Check state of entity. If valid, set up a listener.
        if new_state:
            # NOTE(review): attribute access is unguarded — assumes every
            # input_datetime state exposes has_date/has_time (and the
            # corresponding fields); a KeyError here would propagate.
            has_date = new_state.attributes["has_date"]
            if has_date:
                year = new_state.attributes["year"]
                month = new_state.attributes["month"]
                day = new_state.attributes["day"]
            has_time = new_state.attributes["has_time"]
            if has_time:
                hour = new_state.attributes["hour"]
                minute = new_state.attributes["minute"]
                second = new_state.attributes["second"]
            else:
                # If no time then use midnight.
                hour = minute = second = 0

            if has_date:
                # If input_datetime has date, then track point in time.
                # Localized to the instance's configured time zone so the
                # comparison with dt_util.now() is apples-to-apples.
                trigger_dt = dt_util.DEFAULT_TIME_ZONE.localize(
                    datetime(year, month, day, hour, minute, second)
                )
                # Only set up listener if time is now or in the future.
                if trigger_dt >= dt_util.now():
                    remove = async_track_point_in_time(
                        hass, time_automation_listener, trigger_dt
                    )
            elif has_time:
                # Else if it has time, then track time change.
                remove = async_track_time_change(
                    hass,
                    time_automation_listener,
                    hour=hour,
                    minute=minute,
                    second=second,
                )

        # Was a listener set up?
        if remove:
            removes.append(remove)

        # Recorded even when None, so the entity stays tracked and can be
        # re-armed on its next state change.
        entities[entity_id] = remove

    for at_time in config[CONF_AT]:
        if isinstance(at_time, str):
            # input_datetime entity
            update_entity_trigger(at_time, new_state=hass.states.get(at_time))
        else:
            # datetime.time
            removes.append(
                async_track_time_change(
                    hass,
                    time_automation_listener,
                    hour=at_time.hour,
                    minute=at_time.minute,
                    second=at_time.second,
                )
            )

    # Track state changes of any entities.
    removes.append(
        async_track_state_change(hass, list(entities), update_entity_trigger)
    )

    @callback
    def remove_track_time_changes():
        """Remove tracked time changes."""
        for remove in removes:
            remove()

    return remove_track_time_changes
titilambert/home-assistant
homeassistant/components/homeassistant/triggers/time.py
Python
apache-2.0
4,049
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ .. _toaccess: .. program:: toaccess ``toaccess`` ============ This module provides a set of functions meant to provide ease-of-use functionality for interacting with the Traffic Ops API. It provides scripts named :file:`to{method}` where `method` is the name of an HTTP method (in lowercase). Collectively they are referred to as :program:`toaccess` Implemented methods thus far are: - delete - head - get - options - patch - post - put Arguments and Flags ------------------- .. option:: PATH This is the request path. By default, whatever is passed is considered to be relative to :file:`/api/{api-version}/` where ``api-version`` is :option:`--api-version`. This behavior can be disabled by using :option:`--raw-path`. .. option:: DATA An optional positional argument that is a data payload to pass to the Traffic Ops server in the request body. If this is the absolute or relative path to a file, the contents of the file will instead be read and used as the request payload. .. option:: -h, --help Print usage information and exit .. option:: -a API_VERSION, --api-version API_VERSION Specifies the version of the Traffic Ops API that will be used for the request. Has no effect if :option:`--raw-path` is used. (Default: 2.0) .. option:: -f, --full Output the full HTTP exchange including request method line, request headers, request body (if any), response status line, and response headers (as well as the response body, if any). 
This is equivalent to using :option:`--request-headers`, :option:`--request-payload`, and :option:`--response-headers` at the same time, and those options will have no effect if given. (Default: false) .. option:: -k, --insecure Do not verify SSL certificates - typically useful for making requests to development or testing servers as they frequently have self-signed certificates. (Default: false) .. option:: -p, --pretty Pretty-print any payloads that are output as formatted JSON. Has no effect on plaintext payloads. Uses tab characters for indentation. (Default: false) .. option:: -r, --raw-path Request exactly :option:`PATH`; do not preface the request path with :file:`/api/{api_version}`. This effectively means that :option:`--api-version` will have no effect. (Default: false) .. option:: -v, --version Print version information and exit. .. option:: --request-headers Output the request method line and any and all request headers. (Default: false) .. option:: --request-payload Output the request body if any was sent. Will attempt to pretty-print the body as JSON if :option:`--pretty` is used. (Default: false) .. option:: --response-headers Output the response status line and any and all response headers. (Default: false) .. option:: --to-url URL The :abbr:`FQDN (Fully Qualified Domain Name)` and optionally the port and scheme of the Traffic Ops server. This will override :envvar:`TO_URL`. The format is the same as for :envvar:`TO_URL`. (Default: uses the value of :envvar:`TO_URL`) .. option:: --to-password PASSWORD The password to use when authenticating to Traffic Ops. Overrides :envvar:`TO_PASSWORD`. (Default: uses the value of :envvar:`TO_PASSWORD`) .. option:: --to-user USERNAME The username to use when connecting to Traffic Ops. Overrides :envvar:`TO_USER`. 
(Default: uses the value of :envvar:`TO_USER`) Environment Variables --------------------- If defined, :program:`toaccess` scripts will use the :envvar:`TO_URL`, :envvar:`TO_USER`, and :envvar`TO_PASSWORD` environment variables to define their connection to and authentication with the Traffic Ops server. Typically, setting these is easier than using the long options :option:`--to-url`, :option:`--to-user`, and :option:`--to-password` on every invocation. Exit Codes ---------- The exit code of a :program:`toaccess` script can sometimes be used by the caller to determine what the result of calling the script was without needing to parse the output. The exit codes used are: 0 The command executed successfully, and the result is on STDOUT. 1 Typically this exit code means that an error was encountered when parsing positional command line arguments. However, this is also the exit code used by most Python interpreters to signal an unhandled exception. 2 Signifies a runtime error that caused the request to fail - this is **not** generally indicative of an HTTP client or server error, but rather an underlying issue connecting to or authenticating with Traffic Ops. This is distinct from an exit code of ``32`` in that the *format* of the arguments was correct, but there was some problem with the *value*. For example, passing ``https://test:`` to :option:`--to-url` will cause an exit code of ``2``, not ``32``. 4 An HTTP client error occurred. The HTTP stack will be printed to stdout as indicated by other options - meaning by default it will only print the response payload if one was given, but will respect options like e.g. :option:`--request-payload` as well as :option:`-p`/:option:`--pretty`. 5 An HTTP server error occurred. The HTTP stack will be printed to stdout as indicated by other options - meaning by default it will only print the response payload if one was given, but will respect options like e.g. 
:option:`--request-payload` as well as :option:`-p`/:option:`--pretty`. 32 This is the error code emitted by Python's :mod:`argparse` module when the passed arguments could not be parsed successfully. .. note:: The way exit codes ``4`` and ``5`` are implemented is by returning the status code of the HTTP request divided by 100 whenever it is at least 400. This means that if the Traffic Ops server ever started returning e.g. 700 status codes, the exit code of the script would be 7. Module Reference ================ """ import json import logging import os import sys from urllib.parse import urlparse from trafficops.restapi import LoginError, OperationError, InvalidJSONError from trafficops.tosession import TOSession from trafficops.__version__ import __version__ from requests.exceptions import RequestException l = logging.getLogger() l.disabled = True logging.basicConfig(level=logging.CRITICAL+1) def output(r, pretty, request_header, response_header, request_payload, indent = '\t'): """ Prints the passed response object in a format consistent with the other parameters. 
:param r: The :mod:`requests` response object being printed :param pretty: If :const:`True`, attempt to pretty-print payloads as JSON :param request_header: If :const:`True`, print request line and request headers :param response_header: If :const:`True`, print response line and response headers :param request_payload: If :const:`True`, print the request payload :param indent: An optional number of spaces for pretty-printing indentation (default is the tab character) """ if request_header: print(r.request.method, r.request.path_url, "HTTP/1.1") for h,v in r.request.headers.items(): print("%s:" % h, v) print() if request_payload and r.request.body: try: result = r.request.body if not pretty else json.dumps(json.loads(r.request.body)) except ValueError: result = r.request.body print(result, end="\n\n") if response_header: print("HTTP/1.1", r.status_code, end="") print(" "+r.reason if r.reason else "") for h,v in r.headers.items(): print("%s:" % h, v) print() try: result = r.text if not pretty else json.dumps(r.json(), indent=indent) except ValueError: result = r.text print(result) def parse_arguments(program): """ A common-use function that parses the command line arguments. :param program: The name of the program being run - used for usage informational output :returns: The Traffic Ops HTTP session object, the requested path, any data to be sent, an output format specification, whether or not the path is raw, and whether or not output should be prettified """ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(prog=program, formatter_class=ArgumentDefaultsHelpFormatter, description="A helper program for interfacing with the Traffic Ops API", epilog=("Typically, one will want to connect and authenticate by defining " "the 'TO_URL', 'TO_USER' and 'TO_PASSWORD' environment variables " "rather than (respectively) the '--to-url', '--to-user' and " "'--to-password' command-line flags. 
Those flags are only " "required when said environment variables are not defined.\n" "%(prog)s will exit with a success provided a response was " "received and the status code of said response was less than 400. " "The exit code will be 1 if command line arguments cannot be " "parsed or authentication with the Traffic Ops server fails. " "In the event of some unknown error occurring when waiting for a " "response, the exit code will be 2. If the server responds with " "a status code indicating a client or server error, that status " "code will be used as the exit code.")) parser.add_argument("--to-url", type=str, help=("The fully qualified domain name of the Traffic Ops server. Overrides " "'$TO_URL'. The format for both the environment variable and the flag " "is '[scheme]hostname[:port]'. That is, ports should be specified here, " "and they need not start with 'http://' or 'https://'. HTTPS is the " "assumed protocol unless the scheme _is_ provided and is 'http://'.")) parser.add_argument("--to-user", type=str, help="The username to use when connecting to Traffic Ops. Overrides '$TO_USER") parser.add_argument("--to-password", type=str, help="The password to use when authenticating to Traffic Ops. Overrides '$TO_PASSWORD'") parser.add_argument("-k", "--insecure", action="store_true", help="Do not verify SSL certificates") parser.add_argument("-f", "--full", action="store_true", help=("Also output HTTP request/response lines and headers, and request payload. 
" "This is equivalent to using '--request-headers', '--response-headers' " "and '--request-payload' at the same time.")) parser.add_argument("--request-headers", action="store_true", help="Output request method line and headers") parser.add_argument("--response-headers", action="store_true", help="Output response status line and headers") parser.add_argument("--request-payload", action="store_true", help="Output request payload (will try to pretty-print if '--pretty' is given)") parser.add_argument("-r", "--raw-path", action="store_true", help="Request exactly PATH; it won't be prefaced with '/api/{{api-version}}/") parser.add_argument("-a", "--api-version", type=float, default=2.0, help="Specify the API version to request against") parser.add_argument("-p", "--pretty", action="store_true", help=("Pretty-print payloads as JSON. " "Note that this will make Content-Type headers \"wrong\", in general")) parser.add_argument("-v", "--version", action="version", help="Print version information and exit", version="%(prog)s v"+__version__) parser.add_argument("PATH", help="The path to the resource being requested - omit '/api/2.x'") parser.add_argument("DATA", help=("An optional data string to pass with the request. If this is a " "filename, the contents of the file will be sent instead."), nargs='?') args = parser.parse_args() try: to_host = args.to_url if args.to_url else os.environ["TO_URL"] except KeyError as e: raise KeyError("Traffic Ops hostname not set! 
Set the TO_URL environment variable or use "\ "'--to-url'.") from e original_to_host = to_host to_host = urlparse(to_host, scheme="https") useSSL = to_host.scheme.lower() == "https" to_port = to_host.port if to_port is None: if useSSL: to_port = 443 else: to_port = 80 to_host = to_host.hostname if not to_host: raise KeyError(f"Invalid URL/host for Traffic Ops: '{original_to_host}'") s = TOSession(to_host, host_port=to_port, ssl=useSSL, api_version=f"{args.api_version:.1f}", verify_cert=not args.insecure) data = args.DATA if data and os.path.isfile(data): with open(data) as f: data = f.read() if isinstance(data, str): data = data.encode() try: to_user = args.to_user if args.to_user else os.environ["TO_USER"] except KeyError as e: raise KeyError("Traffic Ops user not set! Set the TO_USER environment variable or use "\ "'--to-user'.") from e try: to_passwd = args.to_password if args.to_password else os.environ["TO_PASSWORD"] except KeyError as e: raise KeyError("Traffic Ops password not set! Set the TO_PASSWORD environment variable or "\ "use '--to-password'") from e # TOSession objects return LoginError when certs are invalid, OperationError when # login actually fails try: s.login(to_user, to_passwd) except LoginError as e: raise PermissionError( "certificate verification failed, the system may have a self-signed certificate - try using -k/--insecure" ) from e except (OperationError, InvalidJSONError) as e: raise PermissionError(e) from e except RequestException as e: raise ConnectionError("Traffic Ops host not found: Name or service not known") from e return (s, args.PATH, data, ( args.request_headers or args.full, args.response_headers or args.full, args.request_payload or args.full ), args.raw_path, args.pretty) def request(method): """ All of the scripts wind up calling this function to handle their common functionality. 
:param method: The name of the request method to use (case-insensitive) :returns: The program's exit code """ try: s, path, data, full, raw, pretty = parse_arguments("to%s" % method) except (PermissionError, KeyError, ConnectionError) as e: print(e, file=sys.stderr) return 1 if raw: path = '/'.join((s.to_url.rstrip('/'), path.lstrip('/'))) else: path = '/'.join((s.base_url.rstrip('/'), path.lstrip('/'))) try: if data is not None: r = s._session.request(method, path, data=data) else: r = s._session.request(method, path) except (RequestException, ValueError) as e: print("Error occurred: ", e, file=sys.stderr) return 2 output(r, pretty, *full) return 0 if r.status_code < 400 else r.status_code // 100 def get(): """ Entry point for :program:`toget` :returns: The program's exit code """ return request("get") def put(): """ Entry point for :program:`toput` :returns: The program's exit code """ return request("put") def post(): """ Entry point for :program:`topost` :returns: The program's exit code """ return request("post") def delete(): """ Entry point for :program:`todelete` :returns: The program's exit code """ return request("delete") def options(): """ Entry point for :program:`tooptions` :returns: The program's exit code """ return request("options") def head(): """ Entry point for :program:`tohead` :returns: The program's exit code """ return request("head") def patch(): """ Entry point for :program:`topatch` :returns: The program's exit code """ return request("patch")
hbeatty/incubator-trafficcontrol
traffic_control/clients/python/to_access/__init__.py
Python
apache-2.0
16,838
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportAgent
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-dialogflowcx


# [START dialogflow_v3_generated_Agents_ExportAgent_async]
from google.cloud import dialogflowcx_v3


async def sample_export_agent():
    """Export a Dialogflow CX agent and print the long-running operation result.

    Demonstrates the async client: both the RPC call and the operation's
    ``result()`` are coroutines and must be awaited.
    """
    # Create a client
    client = dialogflowcx_v3.AgentsAsyncClient()

    # Initialize request argument(s)
    request = dialogflowcx_v3.ExportAgentRequest(
        name="name_value",
    )

    # Make the request. On the async client, export_agent is a coroutine;
    # without `await` the variable would hold a coroutine object (which has
    # no .result attribute), so the original call had to be awaited.
    operation = await client.export_agent(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END dialogflow_v3_generated_Agents_ExportAgent_async]
googleapis/python-dialogflow-cx
samples/generated_samples/dialogflow_v3_generated_agents_export_agent_async.py
Python
apache-2.0
1,539
"""
Django settings for findtorun project.

Generated by 'django-admin startproject' using Django 1.11.2.

For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — it should be loaded
# from the environment (e.g. os.environ['SECRET_KEY']) before any production
# deployment. Left unchanged here because rotating it invalidates existing
# sessions/signatures.
SECRET_KEY = '3@i55e)e-m8af#@st3n98!$64fe-3ti-6o=j5g*k%3n6ri9yx!'

# SECURITY WARNING: don't run with debug turned on in production!
# (The original file assigned DEBUG = True twice in a row; the redundant
# duplicate assignment has been removed.)
DEBUG = True

# Console logging for the find2run app; level is shared via log_level.
log_level = 'DEBUG'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'formatters': {
        'simple': {
            'format': '%(filename)s %(lineno)d %(asctime)s %(levelname)s %(message)s'
        }
    },
    'loggers': {
        'find2run': {
            'handlers': ['console'],
            'level': log_level,
        },
    },
}

ALLOWED_HOSTS = [
    'ec2-54-193-111-20.us-west-1.compute.amazonaws.com',
    'localhost',
    'api.findtorun.fun'
]


# Application definition

INSTALLED_APPS = [
    'find2run.apps.Find2RunConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'findtorun.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'findtorun.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): the database password is hardcoded in source control — it
# should be read from the environment or a secrets store. Left unchanged
# here because it is live deployment configuration.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'findtorun',
        'USER': 'findtorun_user',
        'PASSWORD': 'p@$$w0rd111!!!',
        'HOST': 'localhost',
        'PORT': '5432',
        'TEST': {
            'NAME': 'findtorun_test'
        },
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
maheshp212/find-to-run
findtorun/findtorun/settings.py
Python
apache-2.0
3,920
#!/usr/bin/python # -*- coding: UTF-8 -*- ''' Created on Jan 17, 2017 @author: hegxiten ''' import sys import geo.haversine as haversine from imposm.parser import OSMParser import geo.haversine as haversine import numpy import time from scipy import spatial import csv import codecs import math default_encoding='utf-8' if sys.getdefaultencoding()!=default_encoding: reload(sys) sys.setdefaultencoding(default_encoding) PI=3.14159265358 def process_network(FOLDER,FILE,CONCURRENCYVAL,GLOBALROUNDINGDIGITS): stations={} '''stations[station_node_osmid]=[name, lat, lon]''' refnodes_index_dict={} '''refnodes_index_dict[nodeid]=listindex_of_nodeid''' refnodes=[] '''refnodes=[nodeid1,nodeid2,nodeid3...]''' refnodes_coord_list=[] '''refnodes_coord_list[coord1,coord2,coord3...]''' node_fromto_dict={} '''node_fromto_dict[fromnode]=[(fromnode,tonode1),(fromnode,tonode2),(fromnode,tonode3)...]''' distance_mapper={} '''distance_mapper[(fromnode,tonode)]=distance''' attribute_mapper={} '''attribute_mapper[(fromnode,tonode)]=attribute_dictionary''' midpoints_coord=[] '''miderpoint_map[(fromnode,tonode)]=(midercoord)''' midsegment=[] approxCoord_map={} '''approxCoord_map[coord]=nodeid(veryfirstone)''' refnode_mapper={} '''refnode_mapper[nodeid2]=nodeid1(previous_nodeid1 with the same coordinate as nodeid2 after digit rounding)''' edgeDict={} '''edgeDict[(vertex tuple)]=(edgereflist,edgelength)''' disconnected_stations=[] connected_stations=[] def loadstations(): '''Load stations from csv format output''' startt=time.time() with codecs.open(FOLDER+FILE+'_stations.csv', 'rb') as csvfile: '''Example row: >>1234(osmid),$Illinois Terminal$($name$),40.11545(latitude),-88.24111(longitude)<<''' spamreader = csv.reader(csvfile, delimiter=',', quotechar='$') for row in spamreader: stations[int(row[0])]=[row[1],float(row[2]),float(row[3])] stopt=time.time() print("Loading stations. 
Time:("+str(stopt-startt)+")") def loadcoordinates(): '''Load coordinates of reference-nodes from csv format output''' startt=time.time() with codecs.open(FOLDER+FILE+'_waysegment_nodecoords.csv', 'rb',encoding='utf-8') as csvfile: '''Example row: >>123(osmid),40.11545(latitude),-88.24111(longitude)<<''' spamreader = csv.reader(csvfile, delimiter=',', quotechar='$') for row in spamreader: c1,c2=float(row[1]),float(row[2]) '''c1--lat, c2--lon''' #c1,c2=round(float(row[1]),ROUNDINGDIGITS),round(float(row[2]),ROUNDINGDIGITS) if (c1,c2) not in approxCoord_map: approxCoord_map[(c1,c2)]=int(row[0]) '''row[0]--coordid''' refnodes_index_dict[int(row[0])]=len(refnodes_coord_list) refnodes.append(int(row[0])) refnodes_coord_list.append((c1,c2)) refnode_mapper[int(row[0])]=int(row[0]) else: refnode_mapper[int(row[0])]=approxCoord_map[(c1,c2)] stopt=time.time() print("Loading refnode coordinates. Time:("+str(stopt-startt)+")") def loadwaysegments(): '''Load way segments from csv format output''' startt=time.time() with codecs.open(FOLDER+FILE+'_waysegments.csv', 'rb',encoding='utf-8') as csvfile: '''Example row: >>1234567(osmid1),7654321(osmid2),1435(gauge),350(maxspeed in kph),yes(highspeed or not),N/A(service),main(usage)<<''' spamreader = csv.reader(csvfile, delimiter=',', quotechar='$') header=spamreader.next() attr_list=header[2:] attr_list.append('distance') for row in spamreader: if refnode_mapper.get(int(row[0])) is None: print ("none") else: mfrom=refnode_mapper[int(row[0])] mto=refnode_mapper[int(row[1])] if mfrom not in node_fromto_dict: node_fromto_dict[mfrom]=[] if mto not in node_fromto_dict: node_fromto_dict[mto]=[] distance=haversine.hav_distance(refnodes_coord_list[refnodes_index_dict[mfrom]][0],refnodes_coord_list[refnodes_index_dict[mfrom]][1], refnodes_coord_list[refnodes_index_dict[mto]][0],refnodes_coord_list[refnodes_index_dict[mto]][1]) attr_dict={} for i in attr_list: if i=='distance': attr_dict[i]=str(distance) else: attr_dict[i]=row[header.index(i)] 
attribute_mapper[(mfrom,mto)]=attr_dict attribute_mapper[(mto,mfrom)]=attr_dict if (mfrom,mto) not in node_fromto_dict[mfrom] and mfrom!=mto: node_fromto_dict[mfrom].append((mfrom,mto)) if (mto,mfrom) not in node_fromto_dict[mto] and mfrom!=mto: node_fromto_dict[mto].append((mto,mfrom)) '''station's connectivity judging by suffix''' for s in stations: if s not in node_fromto_dict: disconnected_stations.append(s) stations[s].append('disconnected') else: connected_stations.append(s) stations[s].append('connected') stopt=time.time() print("Loading way segments ("+str(stopt-startt)+")") def output_nodes_csv(): target = codecs.open(FOLDER+FILE+"_nodes.csv", 'w',encoding='utf-8') for x in node_fromto_dict: if x in stations: if len(node_fromto_dict[x])!=0: target.write(str(x)+",$"+stations[x][0].decode('utf-8')+"$,"+str(stations[x][1])+","+str(stations[x][2])+"\n") else: target.write(str(x)+",$$,"+str(refnodes_coord_list[refnodes_index_dict[x]][0])+","+str(refnodes_coord_list[refnodes_index_dict[x]][1])+"\n") target.close() '''Example row: >>1234(osmid),$Illinois Terminal$($name$),40.11545(latitude),-88.24111(longitude)<<''' def output_links_csv(): target = codecs.open(FOLDER+FILE+"_links.csv", 'w',encoding='utf-8') headerkeys=attribute_mapper.values()[0].keys() header='vertex_1,vertex_2' for k in headerkeys: header=header+','+k target.write(header+'\n') for x in node_fromto_dict: for (a,b) in node_fromto_dict[x]: if a in node_fromto_dict and b in node_fromto_dict: row_to_write=str(a)+","+str(b) for attr in headerkeys: row_to_write=row_to_write+','+attribute_mapper[(a,b)].get(attr,"N/A") target.write(row_to_write+"\n") target.close() '''Example row: >>1234(osmid_vertex1),5678(osmid_vertex2),0.1534285(haversine_distance)<<''' loadstations() loadcoordinates() loadwaysegments() output_nodes_csv() output_links_csv() return node_fromto_dict if __name__ == '__main__': print ("===you're in test mode of network_process.py===") FILE='beijing_china_latest.osm.pbf' 
FOLDER='/home/hegxiten/workspace/data/'+FILE+'/' CONCURRENCYVAL=4 GLOBALROUNDINGDIGITS=5 node_fromto_dict=process_network(FOLDER, FILE, CONCURRENCYVAL, GLOBALROUNDINGDIGITS) print ("===test mode of network_process.py terminated===")
hegxiten/Worldwide-Railway-Network
WorldRailNetwork_WRN_workspace/src/network_process.py
Python
apache-2.0
7,862
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Templates for tangent expressions. The first argument to the tangent must be the return value of the primal. Use `d[x]` to denote the derivative of a variable `x`. If the primal returns a tuple, the first argument to the tangent is a tuple, and the adjoint is supposed to define `d[y]` as a tuple. Templates do not support use of `**kwargs`. If a keyword argument isn't present in the tangent compound statements, it means that Tangent doesn't support it, and an error will be raised if it appears in user code. Tangents have access to the inputs and outputs of the primal. They are expected to contain expressions for the derivative with respect to the output. They don't have access to any intermediate variables from the primal. 
""" from __future__ import absolute_import import math import gast import numpy import tangent from tangent import grads tangents = {} tangent_ = grads.create_register(tangents) # # AST tangents # @tangent_(gast.Assign) def tassign(temp, tangent, target, value): temp = value tangent target = temp @tangent_(gast.Num) def tnum(z, x): d[z] = tangent.init_grad(x) @tangent_(gast.Name) def tname(z, x): d[z] = d[x] @tangent_(gast.Attribute) def tattr(z, x): d[z] = tangent.init_grad(x) @tangent_(gast.Subscript) def tsubscript(z, x): d[z] = d[x] # For a reference for primitive tangents, see: # https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers # or # https://en.wikipedia.org/wiki/Differentiation_rules # Note that we don't use "dual numbers", that's a data structure that's useful # for doing run-time forward-mode automatic differentiation. We're doing # compile-time autodiff, and we can keep track of the directional derivatives # in individual variables, with no need to store them alongside the # original values. 
@tangent_(gast.Add) def tadd(z, x, y): d[z] = d[x] + d[y] @tangent_(gast.Mult) def tmult(z, x, y): d[z] = d[x] * y + x * d[y] @tangent_(gast.Sub) def tsub(z, x, y): d[z] = d[x] - d[y] @tangent_(gast.Div) def tdiv(z, x, y): d[z] = (d[x] * y - x * d[y]) / (y * y) @tangent_(gast.Pow) def tpow(z, x, y): d[z] = y * (x ** (y - 1.0)) * d[x] @tangent_(gast.USub) def tusub(z, x): d[z] = -d[x] # # Collection tangents # @tangent_(tuple) def ttangent(z, x): d[z] = tuple(d[x]) @tangent_(list) def tlist(z, x): d[z] = list(d[x]) # # NumPy tangents # @tangent_(numpy.cos) def tcos(z, x): d[z] = -d[x] * numpy.sin(x) @tangent_(numpy.sin) def tsin(z, x): d[z] = d[x] * numpy.cos(x) @tangent_(numpy.tan) def ttan(z, x): cx = numpy.cos(x) d[z] = d[x] / (cx * cx) @tangent_(numpy.cosh) def tcosh(z, x): d[z] = d[x] * numpy.sinh(x) @tangent_(numpy.sinh) def tsinh(z, x): d[z] = d[x] * numpy.cosh(x) @tangent_(numpy.tanh) def ttanh(z, x): cx = numpy.cosh(x) d[z] = d[x] / (cx * cx) @tangent_(numpy.arccos) def tarccos(z, x): d[z] = -d[x] / numpy.sqrt(1.0 - x * x) @tangent_(numpy.arcsin) def tarcsin(z, x): d[z] = d[x] / numpy.sqrt(1.0 - x * x) @tangent_(numpy.arctan) def tarctan(z, x): d[z] = d[x] / (1.0 + x * x) @tangent_(numpy.exp) def texp(z, x): d[z] = d[x] * z @tangent_(numpy.log) def tlog(z, x): d[z] = d[x] / x @tangent_(numpy.sqrt) def tsqrt(z, x): d[z] = d[x] / (2 * z) @tangent_(numpy.dot) def tdot(z, x, y): d[z] = numpy.dot(d[x], y) + numpy.dot(x, d[y]) @tangent_(numpy.atleast_1d) def tatleast_1d(z, x): d[z] = numpy.atleast_1d(d[x]) @tangent_(numpy.atleast_2d) def tatleast_2d(z, x): d[z] = numpy.atleast_2d(d[x]) @tangent_(numpy.atleast_3d) def tatleast_3d(z, x): d[z] = numpy.atleast_3d(d[x]) @tangent_(numpy.transpose) def ttranspose(z, x): d[z] = numpy.transpose(d[x]) @tangent_(numpy.sum) def tsum(y, x, axis=None, dtype=None, keepdims=False): d[y] = numpy.sum(d[x], axis=axis, dtype=dtype, keepdims=keepdims) @tangent_(numpy.mean) def tmean( y, x, axis=None, dtype=None, keepdims=False): d[y] 
= numpy.mean(d[x], axis=axis, dtype=dtype, keepdims=keepdims) @tangent_(numpy.multiply) def tmultiply(z, x, y): d[z] = numpy.multiply(d[x], y) + numpy.multiply(x, d[y]) @tangent_(numpy.arange) def tarange(z, stop): d[z] = numpy.zeros_like(z) @tangent_(numpy.ndim) def tndim(z, x): d[z] = numpy.ndim(d[x]) @tangent_(numpy.rollaxis) def trollaxis(z, a, axis, start=0): d[z] = numpy.rollaxis(d[a], axis, start) @tangent_(numpy.shape) def tshape(z, x): d[z] = numpy.shape(d[x]) @tangent_(numpy.array) def tarray(z, x): d[z] = numpy.array(d[x]) # # Tangent tangents # @tangent_(tangent.add_grad) def tadd_grad(z, x, y): d[z] = tangent.add_grad(d[x], d[y]) @tangent_(tangent.init_grad) def tinit_grad(z, x, allow_lazy_initializer=False): d[z] = tangent.init_grad(d[x], allow_lazy_initializer=False) @tangent_(tangent.push) def tpush(x, stack, op_id): tangent.push(d[stack], d[x], d[op_id]) @tangent_(tangent.push_stack) def tpush_stack(x, stack, op_id): tangent.push_stack(d[stack], d[x], d[op_id]) @tangent_(tangent.pop) def tpop(x, stack, op_id): d[x] = tangent.pop(d[stack], d[op_id]) @tangent_(tangent.pop_stack) def tpop_stack(x, stack, op_id): d[x] = tangent.pop_stack(d[stack], d[op_id]) @tangent_(tangent.unbroadcast) def tunbroadcast(z, x, y): d[z] = tangent.unbroadcast(d[x], d[y]) @tangent_(tangent.Stack) def tstack(z): d[z] = tangent.Stack() @tangent_(tangent.astype) def tastype(z, x, y): d[z] = tangent.astype(d[x], d[y]) @tangent_(tangent.unreduce) def tunreduce(z, array, shape, axis, keepdims): d[z] = tangent.unreduce(d[array], d[shape], axis, keepdims) # Until we've written the adjoints of all functions we want to support, # we will throw an explicit "no tangent found" error for those we have not # finished. UNIMPLEMENTED will contain the list of all of these unimplemented # tangent functions UNIMPLEMENTED_TANGENTS = grads.get_module_functions( (numpy, numpy.fft, numpy.linalg, numpy.random, math)) - set(tangents)
google/tangent
tangent/tangents.py
Python
apache-2.0
6,571
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Copyright 2017 Fedele Mantuano (https://www.linkedin.com/in/fmantuano/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import logging import copy import os import unittest import mailparser from pyfaup.faup import Faup from context import mails from context import utils phishing = mails.phishing base_path = os.path.realpath(os.path.dirname(__file__)) mail_thug = os.path.join(base_path, 'samples', 'mail_thug') mail_form = os.path.join(base_path, 'samples', 'mail_form') mail_test_5 = os.path.join(base_path, 'samples', 'mail_test_5') mail_test_6 = os.path.join(base_path, 'samples', 'mail_test_6') logging.getLogger().addHandler(logging.NullHandler()) class TestPhishing(unittest.TestCase): faup = Faup() def setUp(self): parser = mailparser.parse_from_file(mail_thug) self.email = parser.mail self.attachments = parser.attachments parser = mailparser.parse_from_file(mail_form) self.email_form = parser.mail body = self.email_form.get("body") self.urls = utils.urls_extractor(body, self.faup) d = {"generic": "conf/keywords/targets.example.yml", "custom": "conf/keywords/targets_english.example.yml"} self.targets = utils.load_keywords_dict(d) d = {"generic": "conf/keywords/subjects.example.yml", "custom": "conf/keywords/subjects_english.example.yml"} self.subjects = utils.load_keywords_list(d) def test_ParserError(self): parser = mailparser.parse_from_file(mail_test_6) body = parser.mail.get("body") flag_form = phishing.check_form(body) 
self.assertFalse(flag_form) def test_none_values(self): email = copy.deepcopy(self.email) email.pop("body", None) email.pop("subjects", None) email.pop("from", None) phishing.check_phishing( email=email, attachments=self.attachments, urls_body=self.urls, urls_attachments=self.urls, target_keys=self.targets, subject_keys=self.subjects) def test_check_form(self): body = self.email_form.get("body") flag_form = phishing.check_form(body) self.assertTrue(flag_form) body = self.email.get("body") flag_form = phishing.check_form(body) self.assertFalse(flag_form) def test_form_value_error(self): parser = mailparser.parse_from_file(mail_test_5) body = parser.mail.get("body") flag_form = phishing.check_form(body) self.assertFalse(flag_form) def test_check_urls(self): flag = False if any(phishing.check_urls(self.urls, i) for i in self.targets.values()): flag = True self.assertTrue(flag) def test_check_phishing(self): results = phishing.check_phishing( email=self.email, attachments=self.attachments, urls_body=self.urls, urls_attachments=self.urls, target_keys=self.targets, subject_keys=self.subjects) self.assertIsInstance(results, dict) self.assertEqual(results["score"], 123) self.assertIn("filename_attachments", results["score_expanded"]) self.assertIn("mail_subject", results["score_expanded"]) self.assertIn("mail_body", results["score_expanded"]) self.assertIn("mail_from", results["score_expanded"]) self.assertIn("urls_body", results["score_expanded"]) self.assertIn("urls_attachments", results["score_expanded"]) self.assertIn("Test", results["targets"]) self.assertTrue(results["with_phishing"]) def test_check_phishing_form(self): results = phishing.check_phishing( email=self.email_form, attachments=self.attachments, urls_body=self.urls, urls_attachments=self.urls, target_keys=self.targets, subject_keys=self.subjects) self.assertIn("mail_form", results["score_expanded"]) if __name__ == '__main__': unittest.main(verbosity=2)
SpamScope/spamscope
tests/test_phishing.py
Python
apache-2.0
4,631
import logging from typing import Union, cast import great_expectations.exceptions as ge_exceptions from great_expectations.data_context.store import ( CheckpointStore, ConfigurationStore, StoreBackend, ) from great_expectations.data_context.types.base import BaseYamlConfig, CheckpointConfig from great_expectations.data_context.types.resource_identifiers import ( ConfigurationIdentifier, ) from great_expectations.data_context.util import build_store_from_config logger = logging.getLogger(__name__) def build_configuration_store( class_name: str, store_name: str, store_backend: Union[StoreBackend, dict], *, module_name: str = "great_expectations.data_context.store", overwrite_existing: bool = False, **kwargs, ) -> ConfigurationStore: logger.debug( f"Starting data_context/store/util.py#build_configuration_store for store_name {store_name}" ) if store_backend is not None and issubclass(type(store_backend), StoreBackend): store_backend = store_backend.config elif not isinstance(store_backend, dict): raise ge_exceptions.DataContextError( "Invalid configuration: A store_backend needs to be a dictionary or inherit from the StoreBackend class." 
) store_backend.update(**kwargs) store_config: dict = { "store_name": store_name, "module_name": module_name, "class_name": class_name, "overwrite_existing": overwrite_existing, "store_backend": store_backend, } configuration_store: ConfigurationStore = build_store_from_config( store_config=store_config, module_name=module_name, runtime_environment=None, ) return configuration_store def build_checkpoint_store_using_store_backend( store_name: str, store_backend: Union[StoreBackend, dict], overwrite_existing: bool = False, ) -> CheckpointStore: return cast( CheckpointStore, build_configuration_store( class_name="CheckpointStore", module_name="great_expectations.data_context.store", store_name=store_name, store_backend=store_backend, overwrite_existing=overwrite_existing, ), ) def save_config_to_store_backend( class_name: str, module_name: str, store_name: str, store_backend: Union[StoreBackend, dict], configuration_key: str, configuration: BaseYamlConfig, ): config_store: ConfigurationStore = build_configuration_store( class_name=class_name, module_name=module_name, store_name=store_name, store_backend=store_backend, overwrite_existing=True, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=configuration_key, ) config_store.set(key=key, value=configuration) def load_config_from_store_backend( class_name: str, module_name: str, store_name: str, store_backend: Union[StoreBackend, dict], configuration_key: str, ) -> BaseYamlConfig: config_store: ConfigurationStore = build_configuration_store( class_name=class_name, module_name=module_name, store_name=store_name, store_backend=store_backend, overwrite_existing=False, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=configuration_key, ) return config_store.get(key=key) def delete_config_from_store_backend( class_name: str, module_name: str, store_name: str, store_backend: Union[StoreBackend, dict], configuration_key: str, ): config_store: ConfigurationStore = 
build_configuration_store( class_name=class_name, module_name=module_name, store_name=store_name, store_backend=store_backend, overwrite_existing=True, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=configuration_key, ) config_store.remove_key(key=key) def save_checkpoint_config_to_store_backend( store_name: str, store_backend: Union[StoreBackend, dict], checkpoint_name: str, checkpoint_configuration: CheckpointConfig, ): config_store: CheckpointStore = build_checkpoint_store_using_store_backend( store_name=store_name, store_backend=store_backend, overwrite_existing=True, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=checkpoint_name, ) config_store.set(key=key, value=checkpoint_configuration) def load_checkpoint_config_from_store_backend( store_name: str, store_backend: Union[StoreBackend, dict], checkpoint_name: str, ) -> CheckpointConfig: config_store: CheckpointStore = build_checkpoint_store_using_store_backend( store_name=store_name, store_backend=store_backend, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=checkpoint_name, ) try: return config_store.get(key=key) except ge_exceptions.InvalidBaseYamlConfigError as exc: logger.error(exc.messages) raise ge_exceptions.InvalidCheckpointConfigError( "Error while processing DataContextConfig.", exc ) def delete_checkpoint_config_from_store_backend( store_name: str, store_backend: Union[StoreBackend, dict], checkpoint_name: str, ): config_store: CheckpointStore = build_checkpoint_store_using_store_backend( store_name=store_name, store_backend=store_backend, ) key: ConfigurationIdentifier = ConfigurationIdentifier( configuration_key=checkpoint_name, ) config_store.remove_key(key=key)
great-expectations/great_expectations
great_expectations/data_context/store/util.py
Python
apache-2.0
5,702
import abc import random import fudge from contextlib import contextmanager from ..support.models import * class BackendTestCaseMixin(object): __metaclass__ = abc.ABCMeta @abc.abstractproperty # pragma: no cover def backend_class(self): """backend_class = TestThisBackend""" def __init__(self, *args, **kwargs): super(BackendTestCaseMixin, self).__init__(*args, **kwargs) self.backend = self.backend_class() self.name = "full_page" basemodel = Foobar() self.root_model_path = 'layout/%s/%s/' % ( basemodel._meta.app_label, basemodel._meta.object_name.lower()) @staticmethod @contextmanager def model_meta_randomizer(model, attr): original = getattr(model._meta, attr) value = "random_%d" % random.randint(100, 200) setattr(model._meta, attr, value) yield value setattr(model._meta, attr, original) def test_requires_a_model_instance(self): with self.assertRaises(TypeError): self.backend.get_layout_template_name(Foobar, self.name) def test_returns_proper_path(self): expected = ['%s%s.html' % (self.root_model_path, self.name)] result = self.backend.get_layout_template_name(Foobar(), self.name) self.assertEqual(expected, result) def test_renderer_can_specify_base_path(self): model = Foobar() with fudge.patched_context(self.backend, "base_layout_directory", "different"): result = self.backend.get_layout_template_name(model, self.name) expected = ['different/%s/%s/%s.html' % ( model._meta.app_label, model._meta.object_name.lower(), self.name)] self.assertEqual(expected, result) def test_missing_file_is_okay(self): model = Foobar() file_doesnt_exist = "fake_template" expected = ['layout/%s/%s/%s.html' % ( model._meta.app_label, model._meta.object_name.lower(), file_doesnt_exist)] result = self.backend.get_layout_template_name(model, file_doesnt_exist) self.assertEqual(expected, result) def test_uses_app_label_in_template_name(self): model = Foobar() with self.model_meta_randomizer(model, 'app_label') as app_label: expected = ['layout/%s/%s/%s.html' % ( app_label, 
model._meta.object_name.lower(), self.name)] result = self.backend.get_layout_template_name(model, self.name) self.assertEqual(expected, result) def test_uses_model_name_in_template_name(self): model = Foobar() with self.model_meta_randomizer(model, 'object_name') as object_name: expected = ['layout/%s/%s/%s.html' % ( model._meta.app_label, object_name, self.name)] result = self.backend.get_layout_template_name(model, self.name) self.assertEqual(expected, result) def test_uses_name_in_template_name(self): name = "random_%d" % random.randint(100, 200) expected = ['%s%s.html' % (self.root_model_path, name)] result = self.backend.get_layout_template_name(Foobar(), name) self.assertEqual(expected, result) def test_proper_model_inheritance_order(self): model = SubFoobar() model_path = 'layout/%s/%s/' % \ (model._meta.app_label, model._meta.object_name.lower()) expected = [ '%s%s.html' % (model_path, self.name), '%s%s.html' % (self.root_model_path, self.name)] result = self.backend.get_layout_template_name(model, self.name) self.assertEqual(expected, result) def test_abstract_models_are_used(self): concrete = ConcreteFoo() abstract = AbstractFoo() concrete_path = 'layout/%s/%s/' % \ (concrete._meta.app_label, concrete._meta.object_name.lower()) abstract_path = 'layout/%s/%s/' % \ (abstract._meta.app_label, abstract._meta.object_name.lower()) expected = [ '%s%s.html' % (concrete_path, self.name), '%s%s.html' % (abstract_path, self.name), '%s%s.html' % (self.root_model_path, self.name)] result = self.backend.get_layout_template_name(concrete, self.name) self.assertEqual(expected, result) def test_proxy_models_are_used(self): model = ProxyFoo() model_path = 'layout/%s/%s/' % \ (model._meta.app_label, model._meta.object_name.lower()) expected = [ '%s%s.html' % (model_path, self.name), '%s%s.html' % (self.root_model_path, self.name)] result = self.backend.get_layout_template_name(model, self.name) self.assertEqual(expected, result)
armstrong/armstrong.core.arm_layout
tests/backends/_common.py
Python
apache-2.0
4,747
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from datetime import datetime from pprint import pformat from six import iteritems class License(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ License - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id': 'int', 'full_name': 'str', 'full_content': 'str', 'ref_url': 'str', 'short_name': 'str', 'field_handler': 'FieldHandler' } self.attribute_map = { 'id': 'id', 'full_name': 'fullName', 'full_content': 'fullContent', 'ref_url': 'refUrl', 'short_name': 'shortName', 'field_handler': 'fieldHandler' } self._id = None self._full_name = None self._full_content = None self._ref_url = None self._short_name = None self._field_handler = None @property def id(self): """ Gets the id of this License. :return: The id of this License. :rtype: int """ return self._id @id.setter def id(self, id): """ Sets the id of this License. :param id: The id of this License. :type: int """ self._id = id @property def full_name(self): """ Gets the full_name of this License. :return: The full_name of this License. 
:rtype: str """ return self._full_name @full_name.setter def full_name(self, full_name): """ Sets the full_name of this License. :param full_name: The full_name of this License. :type: str """ self._full_name = full_name @property def full_content(self): """ Gets the full_content of this License. :return: The full_content of this License. :rtype: str """ return self._full_content @full_content.setter def full_content(self, full_content): """ Sets the full_content of this License. :param full_content: The full_content of this License. :type: str """ self._full_content = full_content @property def ref_url(self): """ Gets the ref_url of this License. :return: The ref_url of this License. :rtype: str """ return self._ref_url @ref_url.setter def ref_url(self, ref_url): """ Sets the ref_url of this License. :param ref_url: The ref_url of this License. :type: str """ self._ref_url = ref_url @property def short_name(self): """ Gets the short_name of this License. :return: The short_name of this License. :rtype: str """ return self._short_name @short_name.setter def short_name(self, short_name): """ Sets the short_name of this License. :param short_name: The short_name of this License. :type: str """ self._short_name = short_name @property def field_handler(self): """ Gets the field_handler of this License. :return: The field_handler of this License. :rtype: FieldHandler """ return self._field_handler @field_handler.setter def field_handler(self, field_handler): """ Sets the field_handler of this License. :param field_handler: The field_handler of this License. 
:type: FieldHandler """ self._field_handler = field_handler def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, datetime): result[attr] = str(value.date()) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str()
thauser/pnc-cli
pnc_cli/swagger_client/models/license.py
Python
apache-2.0
5,572
"""Dev server used for running a chalice app locally. This is intended only for local development purposes. """ from __future__ import print_function import re import threading import time import uuid import base64 import functools import warnings from collections import namedtuple import json from six.moves.BaseHTTPServer import HTTPServer from six.moves.BaseHTTPServer import BaseHTTPRequestHandler from six.moves.socketserver import ThreadingMixIn from typing import ( List, Any, Dict, Tuple, Callable, Optional, Union, ) # noqa from chalice.app import Chalice # noqa from chalice.app import CORSConfig # noqa from chalice.app import ChaliceAuthorizer # noqa from chalice.app import CognitoUserPoolAuthorizer # noqa from chalice.app import RouteEntry # noqa from chalice.app import Request # noqa from chalice.app import AuthResponse # noqa from chalice.app import BuiltinAuthConfig # noqa from chalice.config import Config # noqa from chalice.compat import urlparse, parse_qs MatchResult = namedtuple('MatchResult', ['route', 'captured', 'query_params']) EventType = Dict[str, Any] ContextType = Dict[str, Any] HeaderType = Dict[str, Any] ResponseType = Dict[str, Any] HandlerCls = Callable[..., 'ChaliceRequestHandler'] ServerCls = Callable[..., 'HTTPServer'] class Clock(object): def time(self): # type: () -> float return time.time() def create_local_server(app_obj, config, host, port): # type: (Chalice, Config, str, int) -> LocalDevServer app_obj.__class__ = LocalChalice return LocalDevServer(app_obj, config, host, port) class LocalARNBuilder(object): ARN_FORMAT = ('arn:aws:execute-api:{region}:{account_id}' ':{api_id}/{stage}/{method}/{resource_path}') LOCAL_REGION = 'mars-west-1' LOCAL_ACCOUNT_ID = '123456789012' LOCAL_API_ID = 'ymy8tbxw7b' LOCAL_STAGE = 'api' def build_arn(self, method, path): # type: (str, str) -> str # In API Gateway the method and URI are separated by a / so typically # the uri portion omits the leading /. 
In the case where the entire # url is just '/' API Gateway adds a / to the end so that the arn end # with a '//'. if path != '/': path = path[1:] return self.ARN_FORMAT.format( region=self.LOCAL_REGION, account_id=self.LOCAL_ACCOUNT_ID, api_id=self.LOCAL_API_ID, stage=self.LOCAL_STAGE, method=method, resource_path=path ) class ARNMatcher(object): def __init__(self, target_arn): # type: (str) -> None self._arn = target_arn def _resource_match(self, resource): # type: (str) -> bool # Arn matching supports two special case characetrs that are not # escapable. * represents a glob which translates to a non-greedy # match of any number of characters. ? which is any single character. # These are easy to translate to a regex using .*? and . respectivly. escaped_resource = re.escape(resource) resource_regex = escaped_resource.replace(r'\?', '.').replace( r'\*', '.*?') resource_regex = '^%s$' % resource_regex return re.match(resource_regex, self._arn) is not None def does_any_resource_match(self, resources): # type: (List[str]) -> bool for resource in resources: if self._resource_match(resource): return True return False class RouteMatcher(object): def __init__(self, route_urls): # type: (List[str]) -> None # Sorting the route_urls ensures we always check # the concrete routes for a prefix before the # variable/capture parts of the route, e.g # '/foo/bar' before '/foo/{capture}' self.route_urls = sorted(route_urls) def match_route(self, url): # type: (str) -> MatchResult """Match the url against known routes. This method takes a concrete route "/foo/bar", and matches it against a set of routes. These routes can use param substitution corresponding to API gateway patterns. For example:: match_route('/foo/bar') -> '/foo/{name}' """ # Otherwise we need to check for param substitution parsed_url = urlparse(url) query_params = parse_qs(parsed_url.query, keep_blank_values=True) path = parsed_url.path # API Gateway removes the trailing slash if the route is not the root # path. 
We do the same here so our route matching works the same way. if path != '/' and path.endswith('/'): path = path[:-1] parts = path.split('/') captured = {} for route_url in self.route_urls: url_parts = route_url.split('/') if len(parts) == len(url_parts): for i, j in zip(parts, url_parts): if j.startswith('{') and j.endswith('}'): captured[j[1:-1]] = i continue if i != j: break else: return MatchResult(route_url, captured, query_params) raise ValueError("No matching route found for: %s" % url) class LambdaEventConverter(object): LOCAL_SOURCE_IP = '127.0.0.1' """Convert an HTTP request to an event dict used by lambda.""" def __init__(self, route_matcher, binary_types=None): # type: (RouteMatcher, List[str]) -> None self._route_matcher = route_matcher if binary_types is None: binary_types = [] self._binary_types = binary_types def _is_binary(self, headers): # type: (Dict[str,Any]) -> bool return headers.get('content-type', '') in self._binary_types def create_lambda_event(self, method, path, headers, body=None): # type: (str, str, Dict[str, str], str) -> EventType view_route = self._route_matcher.match_route(path) event = { 'requestContext': { 'httpMethod': method, 'resourcePath': view_route.route, 'identity': { 'sourceIp': self.LOCAL_SOURCE_IP }, 'path': path.split('?')[0], }, 'headers': {k.lower(): v for k, v in headers.items()}, 'pathParameters': view_route.captured, 'stageVariables': {}, } if view_route.query_params: event['multiValueQueryStringParameters'] = view_route.query_params else: # If no query parameters are provided, API gateway maps # this to None so we're doing this for parity. 
event['multiValueQueryStringParameters'] = None if self._is_binary(headers) and body is not None: event['body'] = base64.b64encode(body).decode('ascii') event['isBase64Encoded'] = True else: event['body'] = body return event class LocalGatewayException(Exception): CODE = 0 def __init__(self, headers, body=None): # type: (HeaderType, Optional[bytes]) -> None self.headers = headers self.body = body class InvalidAuthorizerError(LocalGatewayException): CODE = 500 class ForbiddenError(LocalGatewayException): CODE = 403 class NotAuthorizedError(LocalGatewayException): CODE = 401 class LambdaContext(object): def __init__(self, function_name, memory_size, max_runtime_ms=3000, time_source=None): # type: (str, int, int, Optional[Clock]) -> None if time_source is None: time_source = Clock() self._time_source = time_source self._start_time = self._current_time_millis() self._max_runtime = max_runtime_ms # Below are properties that are found on the real LambdaContext passed # by lambda and their associated documentation. # Name of the Lambda function that is executing. self.function_name = function_name # The Lambda function version that is executing. If an alias is used # to invoke the function, then function_version will be the version # the alias points to. # Chalice local obviously does not support versioning so it will always # be set to $LATEST. self.function_version = '$LATEST' # The ARN used to invoke this function. It can be function ARN or # alias ARN. An unqualified ARN executes the $LATEST version and # aliases execute the function version it is pointing to. self.invoked_function_arn = '' # Memory limit, in MB, you configured for the Lambda function. You set # the memory limit at the time you create a Lambda function and you # can change it later. self.memory_limit_in_mb = memory_size # AWS request ID associated with the request. This is the ID returned # to the client that called the invoke method. 
self.aws_request_id = str(uuid.uuid4()) # The name of the CloudWatch log group where you can find logs written # by your Lambda function. self.log_group_name = '' # The name of the CloudWatch log stream where you can find logs # written by your Lambda function. The log stream may or may not # change for each invocation of the Lambda function. # # The value is null if your Lambda function is unable to create a log # stream, which can happen if the execution role that grants necessary # permissions to the Lambda function does not include permissions for # the CloudWatch Logs actions. self.log_stream_name = '' # The last two attributes have the following comment in the # documentation: # Information about the client application and device when invoked # through the AWS Mobile SDK, it can be null. # Chalice local doens't need to set these since they are specifically # for the mobile SDK. self.identity = None self.client_context = None def _current_time_millis(self): # type: () -> float return self._time_source.time() * 1000 def get_remaining_time_in_millis(self): # type: () -> float runtime = self._current_time_millis() - self._start_time return self._max_runtime - runtime LocalAuthPair = Tuple[EventType, LambdaContext] class LocalGatewayAuthorizer(object): """A class for running user defined authorizers in local mode.""" def __init__(self, app_object): # type: (Chalice) -> None self._app_object = app_object self._arn_builder = LocalARNBuilder() def authorize(self, raw_path, lambda_event, lambda_context): # type: (str, EventType, LambdaContext) -> LocalAuthPair method = lambda_event['requestContext']['httpMethod'] route_entry = self._route_for_event(lambda_event) if not route_entry: return lambda_event, lambda_context authorizer = route_entry.authorizer if not authorizer: return lambda_event, lambda_context # If authorizer is Cognito then try to parse the JWT and simulate an # APIGateway validated request if isinstance(authorizer, CognitoUserPoolAuthorizer): if 
"headers" in lambda_event\ and "authorization" in lambda_event["headers"]: token = lambda_event["headers"]["authorization"] claims = self._decode_jwt_payload(token) try: cognito_username = claims["cognito:username"] except KeyError: # If a key error is raised when trying to get the cognito # username then it is a machine-to-machine communication. # This kind of cognito authorization flow is not # supported in local mode. We can ignore it here to allow # users to test their code local with a different cognito # authorization flow. warnings.warn( '%s for machine-to-machine communicaiton is not ' 'supported in local mode. All requests made against ' 'a route will be authorized to allow local testing.' % authorizer.__class__.__name__ ) return lambda_event, lambda_context auth_result = {"context": {"claims": claims}, "principalId": cognito_username} lambda_event = self._update_lambda_event(lambda_event, auth_result) if not isinstance(authorizer, ChaliceAuthorizer): # Currently the only supported local authorizer is the # BuiltinAuthConfig type. Anything else we will err on the side of # allowing local testing by simply admiting the request. Otherwise # there is no way for users to test their code in local mode. warnings.warn( '%s is not a supported in local mode. All requests made ' 'against a route will be authorized to allow local testing.' 
% authorizer.__class__.__name__ ) return lambda_event, lambda_context arn = self._arn_builder.build_arn(method, raw_path) auth_event = self._prepare_authorizer_event(arn, lambda_event, lambda_context) auth_result = authorizer(auth_event, lambda_context) if auth_result is None: raise InvalidAuthorizerError( {'x-amzn-RequestId': lambda_context.aws_request_id, 'x-amzn-ErrorType': 'AuthorizerConfigurationException'}, b'{"message":null}' ) authed = self._check_can_invoke_view_function(arn, auth_result) if authed: lambda_event = self._update_lambda_event(lambda_event, auth_result) else: raise ForbiddenError( {'x-amzn-RequestId': lambda_context.aws_request_id, 'x-amzn-ErrorType': 'AccessDeniedException'}, (b'{"Message": ' b'"User is not authorized to access this resource"}')) return lambda_event, lambda_context def _check_can_invoke_view_function(self, arn, auth_result): # type: (str, ResponseType) -> bool policy = auth_result.get('policyDocument', {}) statements = policy.get('Statement', []) allow_resource_statements = [] for statement in statements: if statement.get('Effect') == 'Allow' and \ statement.get('Action') == 'execute-api:Invoke': for resource in statement.get('Resource'): allow_resource_statements.append(resource) arn_matcher = ARNMatcher(arn) return arn_matcher.does_any_resource_match(allow_resource_statements) def _route_for_event(self, lambda_event): # type: (EventType) -> Optional[RouteEntry] # Authorizer had to be made into an Any type since mypy couldn't # detect that app.ChaliceAuthorizer was callable. resource_path = lambda_event.get( 'requestContext', {}).get('resourcePath') http_method = lambda_event['requestContext']['httpMethod'] try: route_entry = self._app_object.routes[resource_path][http_method] except KeyError: # If a key error is raised when trying to get the route entry # then this route does not support this method. A method error # will be raised by the chalice handler method. 
We can ignore it # here by returning no authorizer to avoid duplicating the logic. return None return route_entry def _update_lambda_event(self, lambda_event, auth_result): # type: (EventType, ResponseType) -> EventType auth_context = auth_result['context'] auth_context.update({ 'principalId': auth_result['principalId'] }) lambda_event['requestContext']['authorizer'] = auth_context return lambda_event def _prepare_authorizer_event(self, arn, lambda_event, lambda_context): # type: (str, EventType, LambdaContext) -> EventType """Translate event for an authorizer input.""" authorizer_event = lambda_event.copy() authorizer_event['type'] = 'TOKEN' try: authorizer_event['authorizationToken'] = authorizer_event.get( 'headers', {})['authorization'] except KeyError: raise NotAuthorizedError( {'x-amzn-RequestId': lambda_context.aws_request_id, 'x-amzn-ErrorType': 'UnauthorizedException'}, b'{"message":"Unauthorized"}') authorizer_event['methodArn'] = arn return authorizer_event def _decode_jwt_payload(self, jwt): # type: (str) -> Dict payload_segment = jwt.split(".", 2)[1] payload = base64.urlsafe_b64decode(self._base64_pad(payload_segment)) return json.loads(payload) def _base64_pad(self, value): # type: (str) -> str rem = len(value) % 4 if rem > 0: value += "=" * (4 - rem) return value class LocalGateway(object): """A class for faking the behavior of API Gateway.""" def __init__(self, app_object, config): # type: (Chalice, Config) -> None self._app_object = app_object self._config = config self.event_converter = LambdaEventConverter( RouteMatcher(list(app_object.routes)), self._app_object.api.binary_types ) self._authorizer = LocalGatewayAuthorizer(app_object) def _generate_lambda_context(self): # type: () -> LambdaContext if self._config.lambda_timeout is None: timeout = None else: timeout = self._config.lambda_timeout * 1000 return LambdaContext( function_name=self._config.function_name, memory_size=self._config.lambda_memory_size, max_runtime_ms=timeout ) def 
_generate_lambda_event(self, method, path, headers, body): # type: (str, str, HeaderType, Optional[str]) -> EventType lambda_event = self.event_converter.create_lambda_event( method=method, path=path, headers=headers, body=body, ) return lambda_event def _has_user_defined_options_method(self, lambda_event): # type: (EventType) -> bool route_key = lambda_event['requestContext']['resourcePath'] return 'OPTIONS' in self._app_object.routes[route_key] def handle_request(self, method, path, headers, body): # type: (str, str, HeaderType, Optional[str]) -> ResponseType lambda_context = self._generate_lambda_context() try: lambda_event = self._generate_lambda_event( method, path, headers, body) except ValueError: # API Gateway will return a different error on route not found # depending on whether or not we have an authorization token in our # request. Since we do not do that check until we actually find # the authorizer that we will call we do not have that information # available at this point. Instead we just check to see if that # header is present and change our response if it is. This will # need to be refactored later if we decide to more closely mirror # how API Gateway does their auth and routing. error_headers = {'x-amzn-RequestId': lambda_context.aws_request_id, 'x-amzn-ErrorType': 'UnauthorizedException'} auth_header = headers.get('authorization') if auth_header is None: auth_header = headers.get('Authorization') if auth_header is not None: raise ForbiddenError( error_headers, (b'{"message": "Authorization header requires ' b'\'Credential\'' b' parameter. Authorization header requires \'Signature\'' b' parameter. Authorization header requires ' b'\'SignedHeaders\' parameter. Authorization header ' b'requires existence of either a \'X-Amz-Date\' or a' b' \'Date\' header. 
Authorization=%s"}' % auth_header.encode('ascii'))) raise ForbiddenError( error_headers, b'{"message": "Missing Authentication Token"}') # This can either be because the user's provided an OPTIONS method # *or* this is a preflight request, which chalice automatically # responds to without invoking a user defined route. if method == 'OPTIONS' and \ not self._has_user_defined_options_method(lambda_event): # No options route was defined for this path. API Gateway should # automatically generate our CORS headers. options_headers = self._autogen_options_headers(lambda_event) return { 'statusCode': 200, 'headers': options_headers, 'multiValueHeaders': {}, 'body': None } # The authorizer call will be a noop if there is no authorizer method # defined for route. Otherwise it will raise a ForbiddenError # which will be caught by the handler that called this and a 403 or # 401 will be sent back over the wire. lambda_event, lambda_context = self._authorizer.authorize( path, lambda_event, lambda_context) response = self._app_object(lambda_event, lambda_context) response = self._handle_binary(response) return response def _autogen_options_headers(self, lambda_event): # type:(EventType) -> HeaderType route_key = lambda_event['requestContext']['resourcePath'] route_dict = self._app_object.routes[route_key] route_methods = list(route_dict.keys()) # Chalice ensures that routes with multiple views have the same # CORS configuration, so if any view has a CORS Config we can use # that config since they will all be the same. cors_config = route_dict[route_methods[0]].cors cors_headers = cors_config.get_access_control_headers() # We need to add OPTIONS since it is not a part of the CORSConfig # object. APIGateway handles this entirely based on the API definition. # So our local version needs to add this manually to our set of allowed # headers. 
route_methods.append('OPTIONS') # The Access-Control-Allow-Methods header is not added by the # CORSConfig object it is added to the API Gateway route during # deployment, so we need to manually add those headers here. cors_headers.update({ 'Access-Control-Allow-Methods': '%s' % ','.join(route_methods) }) return cors_headers def _handle_binary(self, response): # type: (Dict[str,Any]) -> Dict[str,Any] if response.get('isBase64Encoded'): body = base64.b64decode(response['body']) response['body'] = body return response class ChaliceRequestHandler(BaseHTTPRequestHandler): """A class for mapping raw HTTP events to and from LocalGateway.""" protocol_version = 'HTTP/1.1' def __init__(self, request, client_address, server, app_object, config): # type: (bytes, Tuple[str, int], HTTPServer, Chalice, Config) -> None self.local_gateway = LocalGateway(app_object, config) BaseHTTPRequestHandler.__init__( self, request, client_address, server) # type: ignore def _parse_payload(self): # type: () -> Tuple[HeaderType, Optional[str]] body = None content_length = int(self.headers.get('content-length', '0')) if content_length > 0: body = self.rfile.read(content_length) converted_headers = dict(self.headers) return converted_headers, body def _generic_handle(self): # type: () -> None headers, body = self._parse_payload() try: response = self.local_gateway.handle_request( method=self.command, path=self.path, headers=headers, body=body ) status_code = response['statusCode'] headers = response['headers'].copy() headers.update(response['multiValueHeaders']) body = response['body'] self._send_http_response(status_code, headers, body) except LocalGatewayException as e: self._send_error_response(e) def _send_error_response(self, error): # type: (LocalGatewayException) -> None code = error.CODE headers = error.headers body = error.body self._send_http_response(code, headers, body) def _send_http_response(self, code, headers, body): # type: (int, HeaderType, Optional[Union[str,bytes]]) -> None if 
body is None: self._send_http_response_no_body(code, headers) else: self._send_http_response_with_body(code, headers, body) def _send_http_response_with_body(self, code, headers, body): # type: (int, HeaderType, Union[str,bytes]) -> None self.send_response(code) if not isinstance(body, bytes): body = body.encode('utf-8') self.send_header('Content-Length', str(len(body))) content_type = headers.pop( 'Content-Type', 'application/json') self.send_header('Content-Type', content_type) self._send_headers(headers) self.wfile.write(body) do_GET = do_PUT = do_POST = do_HEAD = do_DELETE = do_PATCH = do_OPTIONS = \ _generic_handle def _send_http_response_no_body(self, code, headers): # type: (int, HeaderType) -> None headers['Content-Length'] = '0' self.send_response(code) self._send_headers(headers) def _send_headers(self, headers): # type: (HeaderType) -> None for header_name, header_value in headers.items(): if isinstance(header_value, list): for value in header_value: self.send_header(header_name, value) else: self.send_header(header_name, header_value) self.end_headers() class ThreadedHTTPServer(ThreadingMixIn, HTTPServer): """Threading mixin to better support browsers. When a browser sends a GET request to Chalice it keeps the connection open for reuse. In the single threaded model this causes Chalice local to become unresponsive to all clients other than that browser socket. Even sending a header requesting that the client close the connection is not good enough, the browswer will simply open another one and sit on it. 
""" daemon_threads = True class LocalDevServer(object): def __init__(self, app_object, config, host, port, handler_cls=ChaliceRequestHandler, server_cls=ThreadedHTTPServer): # type: (Chalice, Config, str, int, HandlerCls, ServerCls) -> None self.app_object = app_object self.host = host self.port = port self._wrapped_handler = functools.partial( handler_cls, app_object=app_object, config=config) self.server = server_cls((host, port), self._wrapped_handler) def handle_single_request(self): # type: () -> None self.server.handle_request() def serve_forever(self): # type: () -> None print("Serving on http://%s:%s" % (self.host, self.port)) self.server.serve_forever() def shutdown(self): # type: () -> None # This must be called from another thread of else it # will deadlock. self.server.shutdown() class HTTPServerThread(threading.Thread): """Thread that manages starting/stopping local HTTP server. This is a small wrapper around a normal threading.Thread except that it adds shutdown capability of the HTTP server, which is not part of the normal threading.Thread interface. """ def __init__(self, server_factory): # type: (Callable[[], LocalDevServer]) -> None threading.Thread.__init__(self) self._server_factory = server_factory self._server = None # type: Optional[LocalDevServer] self.daemon = True def run(self): # type: () -> None self._server = self._server_factory() self._server.serve_forever() def shutdown(self): # type: () -> None if self._server is not None: self._server.shutdown() class LocalChalice(Chalice): _THREAD_LOCAL = threading.local() # This is a known mypy bug where you can't override instance # variables with properties. So this should be type safe, which # is why we're adding the type: ignore comments here. 
# See: https://github.com/python/mypy/issues/4125 @property # type: ignore def current_request(self): # type: ignore # type: () -> Request return self._THREAD_LOCAL.current_request @current_request.setter def current_request(self, value): # type: ignore # type: (Request) -> None self._THREAD_LOCAL.current_request = value
awslabs/chalice
chalice/local.py
Python
apache-2.0
29,826
# ========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ==========================================================================*/ # a short program to check the value returned by the GetNameOfClass() methods import itk import sys itk.auto_progress(2) # must force the load to return all the names with dir(itk) itk.force_load() # itk.ImageToImageFilter def wrongClassName(cl, name): o = cl.New() # be sure that the type of the instantiated object is the same # than the one of the class. It can be different if the class # is an "abstract" one and don't provide any New() method. # In that case, the one of the superclass is used. return o.GetNameOfClass() != name and itk.class_(o) == cl # a list of classes to exclude. 
Typically, the classes with a custom New() # method, which return a subclass of the current class exclude = [ "ForwardFFTImageFilter", "Forward1DFFTImageFilter", "InverseFFTImageFilter", "Inverse1DFFTImageFilter", "OutputWindow", "MultiThreaderBase", "FFTComplexToComplexImageFilter", "ComplexToComplexFFTImageFilter", "ComplexToComplex1DImageFilter", "templated_class", "HalfHermitianToRealInverseFFTImageFilter", "RealToHalfHermitianForwardFFTImageFilter", "CustomColormapFunction", "ScanlineFilterCommon", # Segfault "cvar", ] wrongName = 0 totalName = 0 for t in dir(itk): if t not in exclude: T = itk.__dict__[t] # first case - that's a templated class if isinstance(T, itk.Vector.__class__) and len(T) > 0: # use only the first specialization - all of them return the same # name i = T.values()[0] # GetNameOfClass() is a virtual method of the LightObject class, # so we must instantiate an object with the New() method if "New" in dir(i) and "GetNameOfClass" in dir(i): totalName += 1 if wrongClassName(i, t): msg = f"{T}: wrong class name: {t}" print(msg, file=sys.stderr) wrongName += 1 else: if "New" in dir(T) and "GetNameOfClass" in dir(T): totalName += 1 if wrongClassName(T, t): msg = f"{T}: wrong class name: {t}" print(msg, file=sys.stderr) o = T.New() print(itk.class_(o), file=sys.stderr) print(o.GetNameOfClass(), file=sys.stderr) wrongName += 1 print(f"{totalName} classes checked.") if wrongName: print(f"{wrongName} classes are not providing the correct name.", file=sys.stderr) sys.exit(1)
vfonov/ITK
Wrapping/Generators/Python/Tests/getNameOfClass.py
Python
apache-2.0
3,326
import pytest import time import ray from ray import serve def test_redeploy_start_time(serve_instance): """Check that redeploying a deployment doesn't reset its start time.""" controller = serve.api._global_client._controller @serve.deployment def test(_): return "1" test.deploy() deployment_info_1, route_1 = ray.get(controller.get_deployment_info.remote("test")) start_time_ms_1 = deployment_info_1.start_time_ms time.sleep(0.1) @serve.deployment def test(_): return "2" test.deploy() deployment_info_2, route_2 = ray.get(controller.get_deployment_info.remote("test")) start_time_ms_2 = deployment_info_2.start_time_ms assert start_time_ms_1 == start_time_ms_2 if __name__ == "__main__": import sys sys.exit(pytest.main(["-v", "-s", __file__]))
ray-project/ray
python/ray/serve/tests/test_controller.py
Python
apache-2.0
842
#!/usr/bin/env python # # Update a redis server cache when an evenement is trigger # in MySQL replication log # from pymysqlreplication import BinLogStreamReader from pymysqlreplication.row_event import * mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''} import json import cherrypy class Streamer(object): def __init__(self): self.stream = BinLogStreamReader(connection_settings = mysql_settings, only_events = [DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent], blocking = True, resume_stream = True) def index(self): cherrypy.response.headers['Content-Type'] = 'text/plain' def content(): for binlogevent in self.stream: for row in binlogevent.rows: if isinstance(binlogevent, DeleteRowsEvent): yield json.dumps({ "action": "delete", "id": row["values"]["id"]}) + "\n" elif isinstance(binlogevent, UpdateRowsEvent): yield json.dumps({ "action": "update", "id": row["after_values"]["id"], "doc": row["after_values"]}) + "\n" elif isinstance(binlogevent, WriteRowsEvent): yield json.dumps({ "action": "insert", "id": row["values"]["id"], "doc": row["values"]}) + "\n" return content() index.exposed = True index._cp_config = {"response.stream": True} cherrypy.quickstart(Streamer())
scharron/elasticsearch-river-mysql
http_stream/http_stream.py
Python
apache-2.0
1,681
# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Cisco Zone Driver is responsible to manage access control using FC zoning for Cisco FC fabrics. This is a concrete implementation of FCZoneDriver interface implementing add_connection and delete_connection interfaces. **Related Flags** :zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True :zone_name_prefix: Used by: class: 'FCZoneDriver'. Defaults to 'openstack' """ from oslo.utils import excutils from oslo.utils import importutils from oslo_concurrency import lockutils from oslo_config import cfg import six from cinder import exception from cinder.i18n import _, _LE, _LI from cinder.openstack.common import log as logging from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver from cinder.zonemanager.utils import get_formatted_wwn LOG = logging.getLogger(__name__) cisco_opts = [ cfg.StrOpt('cisco_sb_connector', default='cinder.zonemanager.drivers.cisco' '.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI', help='Southbound connector for zoning operation'), ] CONF = cfg.CONF CONF.register_opts(cisco_opts, 'fc-zone-manager') class CiscoFCZoneDriver(FCZoneDriver): """Cisco FC zone driver implementation. OpenStack Fibre Channel zone driver to manage FC zoning in Cisco SAN fabrics. 
Version history: 1.0 - Initial Cisco FC zone driver """ VERSION = "1.0.0" def __init__(self, **kwargs): super(CiscoFCZoneDriver, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(cisco_opts) # Adding a hack to handle parameters from super classes # in case configured with multi backends. fabric_names = self.configuration.safe_get('fc_fabric_names') activate = self.configuration.safe_get('cisco_zone_activate') prefix = self.configuration.safe_get('cisco_zone_name_prefix') base_san_opts = [] if not fabric_names: base_san_opts.append( cfg.StrOpt('fc_fabric_names', default=None, help='Comma separated list of fibre channel ' 'fabric names. This list of names is used to' ' retrieve other SAN credentials for connecting' ' to each SAN fabric' )) if not activate: base_san_opts.append( cfg.BoolOpt('cisco_zone_activate', default=True, help='Indicates whether zone should ' 'be activated or not')) if not prefix: base_san_opts.append( cfg.StrOpt('cisco_zone_name_prefix', default="openstack", help="A prefix to be used when naming zone")) if len(base_san_opts) > 0: CONF.register_opts(base_san_opts) self.configuration.append_config_values(base_san_opts) fabric_names = [x.strip() for x in self. configuration.fc_fabric_names.split(',')] # There can be more than one SAN in the network and we need to # get credentials for each SAN. if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) @lockutils.synchronized('cisco', 'fcfabric-', True) def add_connection(self, fabric, initiator_target_map): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. 
:param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Add connection for Fabric:%s", fabric) LOG.info(_LI("CiscoFCZoneDriver - Add connection " "for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} initiator = initiator_key.lower() t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for t in t_list: target = t.lower() zone_members = [get_formatted_wwn(initiator), get_formatted_wwn(target)] zone_name = (self. configuration.cisco_zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone exists. LOG.info(_LI("Zone exists in I-T mode. 
" "Skipping zone creation %s"), zone_name) elif zoning_policy == 'initiator': zone_members = [get_formatted_wwn(initiator)] for t in t_list: target = t.lower() zone_members.append(get_formatted_wwn(target)) zone_name = self.configuration.cisco_zone_name_prefix \ + initiator.replace(':', '') if len(zone_names) > 0 and (zone_name in zone_names): zone_members = zone_members + filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) zone_map[zone_name] = zone_members else: msg = _("Zoning Policy: %s, not" " recognized") % zoning_policy LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.info(_LI("Zone map to add: %s"), zone_map) if len(zone_map) > 0: conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except exception.CiscoZoningCliException as cisco_ex: msg = _("Exception: %s") % six.text_type(cisco_ex) raise exception.FCZoneDriverException(msg) except Exception as e: LOG.error(_LE("Exception: %s") % six.text_type(e)) msg = (_("Failed to add zoning configuration %s") % six.text_type(e)) raise exception.FCZoneDriverException(msg) LOG.debug("Zones added successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) @lockutils.synchronized('cisco', 'fcfabric-', True) def delete_connection(self, fabric, initiator_target_map): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. 
:param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric:%s", fabric) LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"), initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push # changes to fabric. This operation could result in an update # for zone config with new member list or deleting zones from # active cfg. LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. 
for t in t_list: target = t.lower() zone_name = ( self.configuration.cisco_zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) LOG.debug("Zone name to del: %s", zone_name) if (len(zone_names) > 0 and (zone_name in zone_names)): # delete zone. LOG.debug("Added zone to delete to list: %s", zone_name) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(get_formatted_wwn(target)) zone_name = self.configuration.cisco_zone_name_prefix \ + initiator.replace(':', '') if (zone_names and (zone_name in zone_names)): filtered_members = filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name]) # The assumption here is that initiator is always # there in the zone as it is 'initiator' policy. # We find the filtered list and if it is non-empty, # add initiator to it and update zone if filtered # list is empty, we remove that zone. LOG.debug("Zone delete - I mode: filtered targets:%s", filtered_members) if filtered_members: filtered_members.append(formatted_initiator) LOG.debug("Filtered zone members to update: %s", filtered_members) zone_map[zone_name] = filtered_members LOG.debug("Filtered zone Map to update: %s", zone_map) else: zones_to_delete.append(zone_name) else: LOG.info(_LI("Zoning Policy: %s, not recognized"), zoning_policy) LOG.debug("Final Zone map to update: %s", zone_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) # Update zone membership. if zone_map: conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) # Delete zones ~sk. 
if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ('%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = ('%s%s%s' % ( zone_name_string, ';', zones_to_delete[i])) conn.delete_zones(zone_name_string, self.configuration. cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except Exception as e: msg = _("Exception: %s") % six.text_type(e) LOG.error(msg) msg = _("Failed to update or delete zoning configuration") raise exception.FCZoneDriverException(msg) LOG.debug("Zones deleted successfully: %s", zone_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fabrics = [x.strip() for x in self. configuration.fc_fabric_names.split(',')] LOG.debug("Fabric List: %s", fabrics) LOG.debug("Target wwn List: %s", target_wwn_list) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append(get_formatted_wwn(t.lower())) LOG.debug("Formatted Target wwn List: %s", formatted_target_list) for fabric_name in fabrics: fabric_ip = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_port') zoning_vsan = self.fabric_configs[fabric_name].safe_get( 'cisco_zoning_vsan') # Get name server data from fabric and get the targets # logged in. 
nsinfo = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) nsinfo = conn.get_nameserver_info() LOG.debug("show fcns database info from fabric:%s", nsinfo) conn.cleanup() except exception.CiscoZoningCliException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error getting show fcns database " "info: %s"), six.text_type(ex)) except Exception as e: msg = (_("Failed to get show fcns database info:%s") % six.text_type(e)) LOG.error(msg) raise exception.FCZoneDriverException(msg) visible_targets = filter( lambda x: x in formatted_target_list, nsinfo) if visible_targets: LOG.info(_LI("Filtered targets for SAN is: %s"), {fabric_name: visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): visible_targets[idx] = six.text_type( visible_targets[idx]).replace(':', '') fabric_map[fabric_name] = visible_targets else: LOG.debug("No targets are in the fcns info for SAN %s", fabric_name) LOG.debug("Return SAN context output:%s", fabric_map) return fabric_map def get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets active zoneset config for vsan.""" cfgmap = {} conn = None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) cfgmap = conn.get_active_zone_set() conn.cleanup() except Exception as e: msg = (_("Failed to access active zoning configuration:%s") % six.text_type(e)) LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Active zone set from fabric: %s", cfgmap) return cfgmap def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets zoneset status and mode.""" statusmap = {} conn = 
None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) statusmap = conn.get_zoning_status() conn.cleanup() except Exception as e: msg = (_("Failed to access zoneset status:%s") % six.text_type(e)) LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zoneset status from fabric: %s", statusmap) return statusmap
hguemar/cinder
cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py
Python
apache-2.0
23,043
#from flask.templating import render_template # Also installed redis from app import app from flask import Flask, request, url_for, Response, redirect from extended_client import extended_client import json from jinja2 import Environment, PackageLoader import logging from time import sleep #To access JMX Rest api import requests #To allow calling of sh commands from python import commands #Threading purposes import threading #For async tasks from celery import Celery #For doing msg_out rate calculations import math #For the timing of things import datetime #messages_in_topic_per_second = 'java -cp $JAVA_HOME/lib/tools.jar:../target/scala-2.10/cjmx.jar cjmx.Main 3628 \"mbeans \'kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,*\' select *\"' #For getting the process id of kafka #import os #PIDs = os.system("ps aux | grep \"kafka.Kafka\" | grep -v grep | awk '{print $2}'") #For getting ipaddress import socket ip = socket.gethostbyname(socket.gethostname()) + "" host = {} host["ip"] = ip #Jinja templating env = Environment(loader=PackageLoader('app','templates')) ext_client=None json_data=None json_nodes=None zk=None json_topics=None remote_server = {} remote_server["host"]= "John" local = "local" remote = "remote" #CSS File #reading_from={} reading_from="" #Store the offsets for each topic all consumers consume from #Objects for keeping track of rates for CONSUMERS prev_consumer_info = {} prev_consumer_counts = {} #Store the accumulated offset accumulated_topic_rates = {} consumers = "" #Stores information for msgs_out second_counter = 0 seconds_in_a_day = 86400 #(60*60*24) #Objects for keeping track of rates for TOPICS topic_sums = {} prev_topic_info = {} prev_topic_counts = {} #Proxy server proxy = None #reading_from["data"] = None # # # FUNCTIONS # # # The thing that the user sees @app.route('/') @app.route('/index') def index(): print "Index called" template = env.get_template('index.html') title = "Fufuka" client_url = ""#ext_client.url_port return 
template.render(page_title=title, zk_client=client_url) # Gets all the form data from the "Start visualization page" @app.route('/', methods=['POST']) def index_return_values(): print "/ with data. Form received" start = datetime.datetime.now() #hostname = request.local dictionary = request.form print "Dict: " + str(dictionary) + " :" + str(len(dictionary)) #print list(v for k,v in dictionary.iteritems() if 'jmx' in k) if len(dictionary) > 1: #Dealing with a remote connection print "Remotely" global reading_from #reading_from["data"] = str(remote) reading_from = str(remote) hostname = request.form.get("hostname", None) zkhostnamePort = request.form.get("zkhostnameport", None) proxy = request.form.get("proxy", None) print "Connecting to: " + hostname print "With zk at: " + zkhostnamePort global proxy print "Proxy: " + proxy global hostandport #Set the remote host remote_server["host"] = str(hostname) #Set all the JMX ports that need to be listened to jmx_ports = list(v for k,v in dictionary.iteritems() if 'jmx' in k) remote_server["ports"] = [] for port in jmx_ports: print "JMX ports: " + str(port) remote_server["ports"].append(str(port)) else: #Dealing with a local connection global reading_from #reading_from["data"] = str(local) reading_from = str(local) print "Local" zkhostnamePort = request.form.get("zkhostnameport", None) print "Connecting to: " + zkhostnamePort # Process data for getting to zk instance # # split = zkhostnamePort.index(':') hostname = zkhostnamePort[:split] port = int(zkhostnamePort[split+1:]) #Start an instance of the extended_client global ext_client ext_client = extended_client(hostname, port) #Start zookeeper client global zk zk = ext_client.zk zk.start() #Once the returned values are found, set them all #Get consumers and producers topics = ext_client.show_all_topics(zk) #Populate topic holder for t in topics: topic_sums[t] = 0 prev_topic_info[t] = {} prev_topic_counts[t] = [] global json_topics json_topics = json.dumps(topics) #Get the 
json data and store it global json_data json_data = json.dumps(ext_client.get_json(zk)) global json_nodes json_nodes = json.dumps(ext_client.get_nodes_json(zk)) json_edges = json.dumps(ext_client.get_edges_json(zk)) end = datetime.datetime.now() print "Total time to load zk information: " + str(end-start) return redirect("/zk") # Main viewing area for zks @app.route('/zk') def zk_client(): print "/zk called" #Set the consumers then continously calculate their offsets print "Creating consumer holders:" start_time = datetime.datetime.now() global consumers consumers = ext_client.show_all_consumers(zk) #Populate consumer holders for c in consumers: prev_consumer_info[c] = {} prev_consumer_counts[c] = [] for c in consumers: topics = ext_client.show_topics_consumed(zk, c) for t in topics: prev_consumer_info[c][t] = {} #print prev_consumer_info end_time = datetime.datetime.now() calculate_offsets() #Set the template of the page template = env.get_template('zk_client.html') #brokers = ext_client.show_brokers_ids(zk) #Get the information of the current zookeeper instance data = {} data["zkinfo"] = str(ext_client.url_port) print "Total con: " + str(len(consumers)) print "Total time to load /zk page: " + str(end_time-start_time) return template.render(data=data)#consumers=consumers, brokers=brokers, producers=producers, topics=topics)#, r=r.content) # Loads the d3 graph onto the iframe @app.route('/test') def test_2(): print "/test called" start = datetime.datetime.now() template = env.get_template('test2_graph.html') js_url = url_for('static', filename='js/loadGraph.js') # graph={} # graph["nodes"] = json_nodes # graph["edges"] = json_edges data = {} data["json_data"] = json_data data["json_nodes"] = json_nodes data["json_topics"] = json_topics data["js_url"] = js_url data["host"] = host data["remote_server"] = remote_server data["reading_from"] = reading_from data["largest_weight"] = ext_client.get_largest_weight(zk) data["smallest_weight"] = 
ext_client.get_smallest_weight(zk) data["proxy"] = proxy sendData = json.dumps(data) # print "---------------------------" # print "---------------------------" # print "---------------------------" end = datetime.datetime.now() print "Total time to load /test page: " + str(end-start) #print data return template.render(data=sendData)#json_data=json_data, json_nodes=json_nodes, json_topics=json_topics, js_url=js_url, host=host, remote_server=remote_server, readingFrom=reading_from) # Method to return offset rates def get_rate(rate_type, prevData): one_minute = 60 if rate_type == "minute": #Get the minute rate if len(prevData) > one_minute: #print " Min rate " #print "L: " + str(prevData[second_counter+1]) + " S: " + str(prevData[second_counter-one_minute]) #min_rate = abs(prevData[second_counter+1] - prevData[second_counter-one_minute]) min_rate = abs(prevData[second_counter] - prevData[second_counter-one_minute])/(one_minute + 0.0) return min_rate else: min_rate = 0 return min_rate if rate_type == "mean": #Get the mean rate global second_counter if second_counter > 0: #print " Mean rate" #Method 1 #global predata_sum #mean_rate = predata_sum/(second_counter+0.0) #Method 2 # print "L: " + str(prevData[second_counter+1]) + " S: " + str(prevData[0]) # mean_rate = abs(prevData[second_counter+1] - prevData[0])/(second_counter+0.0) #Method 3 # print " ArrLen: " + str(len(prevData)) # print " SC: " + str(second_counter) # print " L: " + str(prevData[second_counter])+ " S: " + str(prevData[0]) mean_rate = abs(prevData[second_counter] - prevData[0])/(second_counter+0.0) #print " MeanR " + str(mean_rate) return mean_rate else: mean_rate = -1 return mean_rate # Threaded method which calculates the offsets def calculate_offsets(): #Get individual offsets of a consumer for c in consumers: global prev_consumer_info #prev_consumer_info[c] = {} topics = ext_client.show_topics_consumed(zk, c) for t in topics: # # # Consumer Rates # # # Get the offsets for every consumer and 
correpsonding topic offset = ext_client.get_consumer_offset(zk, c, t) #Append count to the array holder prev_consumer_counts[c].append(offset) #Get the msg_out_minute_rate for this topic min_rate = get_rate("minute", prev_consumer_counts[c]) #print "Min: " + str(min_rate) mean_rate = get_rate("mean", prev_consumer_counts[c]) #print "Mean: " + str(mean_rate) if mean_rate == -1: mean_rate = 0 #Update the holder for this topic global prev_consumer_info prev_consumer_info[c][t]["count"] = offset prev_consumer_info[c][t]["min_rate"] = min_rate prev_consumer_info[c][t]["mean_rate"] = mean_rate # # # Topic rates # # #Get the count for this topic count = ext_client.get_accumulated_topic_offset(zk, t) #Update the sum for this topic topic_sums[t] = topic_sums[t] + count #Append count to the array holder prev_topic_counts[t].append(count) #Get the msg_out_minute_rate for this topic min_rate = get_rate("minute", prev_topic_counts[t]) mean_rate = get_rate("mean", prev_topic_counts[t]) if mean_rate == -1: mean_rate = 0 #Update the holder for this topic global prev_topic_info prev_topic_info[t]["count"] = count prev_topic_info[t]["min_rate"] = min_rate prev_topic_info[t]["mean_rate"] = mean_rate global second_counter second_counter = second_counter + 1 #Reset the rate calculations every 24hrs if second_counter == seconds_in_a_day: second_counter = 0 threading.Timer(1, calculate_offsets).start() # Returns the consumer offsets @app.route('/getconsumerrates') def get_consumer_offsets(): return json.dumps(prev_consumer_info) # Returns the accumulated offsets for each topic @app.route('/getaccumulatedrates') def get_accumulated_offsets(): return json.dumps(prev_topic_info) # Takes care of the currently selected node @app.route('/current_node') def draw_node(): print "Draw node called" template = env.get_template('node.html') return template.render(json_data=json_data) @app.route('/orgraph') def or_graph(): template = env.get_template('orgraph.html') return 
template.render(json_data=json_data)
johankaito/fufuka
microblog/app/views.py
Python
apache-2.0
10,442
"""ParameterConfig wraps ParameterConfig and ParameterSpec protos.""" import collections import copy import enum import math from typing import Generator, List, Optional, Sequence, Tuple, Union from absl import logging import attr from vizier.pyvizier.shared import trial class ParameterType(enum.IntEnum): """Valid Values for ParameterConfig.type.""" DOUBLE = 1 INTEGER = 2 CATEGORICAL = 3 DISCRETE = 4 def is_numeric(self) -> bool: return self in [self.DOUBLE, self.INTEGER, self.DISCRETE] class ScaleType(enum.IntEnum): """Valid Values for ParameterConfig.scale_type.""" LINEAR = 1 LOG = 2 REVERSE_LOG = 3 UNIFORM_DISCRETE = 4 class ExternalType(enum.IntEnum): """Valid Values for ParameterConfig.external_type.""" INTERNAL = 0 BOOLEAN = 1 INTEGER = 2 FLOAT = 3 # A sequence of possible internal parameter values. MonotypeParameterSequence = Union[Sequence[Union[int, float]], Sequence[str]] MonotypeParameterList = Union[List[Union[int, float]], List[str]] def _validate_bounds(bounds: Union[Tuple[int, int], Tuple[float, float]]): """Validates the bounds.""" if len(bounds) != 2: raise ValueError('Bounds must have length 2. Given: {}'.format(bounds)) lower = bounds[0] upper = bounds[1] if not all([math.isfinite(v) for v in (lower, upper)]): raise ValueError( 'Both "lower" and "upper" must be finite. Given: (%f, %f)' % (lower, upper)) if lower > upper: raise ValueError( 'Lower cannot be greater than upper: given lower={} upper={}'.format( lower, upper)) def _get_feasible_points_and_bounds( feasible_values: Sequence[float] ) -> Tuple[List[float], Union[Tuple[int, int], Tuple[float, float]]]: """Validates and converts feasible values to floats.""" if not all([math.isfinite(p) for p in feasible_values]): raise ValueError('Feasible values must all be finite. 
Given: {}' % feasible_values) feasible_points = list(sorted(feasible_values)) bounds = (feasible_points[0], feasible_points[-1]) return feasible_points, bounds def _get_categories(categories: Sequence[str]) -> List[str]: """Returns the categories.""" return sorted(list(categories)) def _get_default_value( param_type: ParameterType, default_value: Union[float, int, str]) -> Union[float, int, str]: """Validates and converts the default_value to the right type.""" if (param_type in (ParameterType.DOUBLE, ParameterType.DISCRETE) and (isinstance(default_value, float) or isinstance(default_value, int))): return float(default_value) elif (param_type == ParameterType.INTEGER and (isinstance(default_value, float) or isinstance(default_value, int))): if isinstance(default_value, int): return default_value else: # Check if the float rounds nicely. default_int_value = round(default_value) if not math.isclose(default_value, default_int_value): raise ValueError('default_value for an INTEGER parameter should be an ' 'integer, got float: [{}]'.format(default_value)) return default_int_value elif (param_type == ParameterType.CATEGORICAL and isinstance(default_value, str)): return default_value raise ValueError( 'default_value has an incorrect type. ParameterType has type {}, ' 'but default_value has type {}'.format(param_type.name, type(default_value))) @attr.s(auto_attribs=True, frozen=True, init=True, slots=True) class ParameterConfig: """A Vizier ParameterConfig. Use ParameterConfig.factory to create a valid instance. """ _name: str = attr.ib( init=True, validator=attr.validators.instance_of(str), kw_only=True) _type: ParameterType = attr.ib( init=True, validator=attr.validators.instance_of(ParameterType), repr=lambda v: v.name if v is not None else 'None', kw_only=True) # Only one of _feasible_values, _bounds will be set at any given time. 
_bounds: Optional[Union[Tuple[int, int], Tuple[float, float]]] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.deep_iterable( member_validator=attr.validators.instance_of((int, float)), iterable_validator=attr.validators.instance_of(tuple))), kw_only=True) _feasible_values: Optional[MonotypeParameterList] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.deep_iterable( member_validator=attr.validators.instance_of((int, float, str)), iterable_validator=attr.validators.instance_of((list, tuple)))), kw_only=True) _scale_type: Optional[ScaleType] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.instance_of(ScaleType)), repr=lambda v: v.name if v is not None else 'None', kw_only=True) _default_value: Optional[Union[float, int, str]] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.instance_of((float, int, str))), kw_only=True) _external_type: Optional[ExternalType] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.instance_of(ExternalType)), repr=lambda v: v.name if v is not None else 'None', kw_only=True) # Parent values for this ParameterConfig. If set, then this is a child # ParameterConfig. _matching_parent_values: Optional[MonotypeParameterList] = attr.ib( init=True, validator=attr.validators.optional( attr.validators.deep_iterable( member_validator=attr.validators.instance_of((int, float, str)), iterable_validator=attr.validators.instance_of((list, tuple)))), kw_only=True) # Children ParameterConfig. If set, then this is a parent ParameterConfig. _child_parameter_configs: Optional[List['ParameterConfig']] = attr.ib( init=True, kw_only=True) # Pytype treats instances of EnumTypeWrapper as types, but they can't be # evaluated at runtime, so a Union[] of proto enums has to be a forward # reference below. 
@classmethod def factory( cls, name: str, *, bounds: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None, feasible_values: Optional[MonotypeParameterSequence] = None, children: Optional[Sequence[Tuple[MonotypeParameterSequence, 'ParameterConfig']]] = None, scale_type: Optional[ScaleType] = None, default_value: Optional[Union[float, int, str]] = None, external_type: Optional[ExternalType] = ExternalType.INTERNAL ) -> 'ParameterConfig': """Factory method. Args: name: The parameter's name. Cannot be empty. bounds: REQUIRED for INTEGER or DOUBLE type. Specifies (min, max). The type of (min, max) determines the created ParameterConfig's type. feasible_values: REQUIRED for DISCRETE or CATEGORICAL type. The elements' type determines the created ParameterConfig's type. children: sequence of tuples formatted as: (matching_parent_values, ParameterConfig). See cs/learning_vizier.service.ParameterConfig.child_parameter_configs for details. ONLY THE TYPES ARE VALIDATED. If the child ParameterConfig protos already have parent values set, they will be overridden by the provided matching_parent_values. scale_type: Scaling to be applied. NOT VALIDATED. default_value: A default value for the Parameter. external_type: An annotation indicating the type this parameter should be cast to. Returns: A ParameterConfig object which wraps a partially validated proto. Raises: ValueError: Exactly one of feasible_values and bounds must be convertible to Boolean true. Bounds and numeric feasible_values must be finite. Bounds and feasible_values, if provided, must consist of elements of the same type. TypeError: If children's matching_parent_values are not compatible with the ParameterConfig being created. 
""" if not name: raise ValueError('Parameter name cannot be empty.') if bool(feasible_values) == bool(bounds): raise ValueError( 'While creating Parameter with name={}: exactly one of ' '"feasible_values" or "bounds" must be provided, but given ' 'feasible_values={} and bounds={}.'.format(name, feasible_values, bounds)) if feasible_values: if len(set(feasible_values)) != len(feasible_values): counter = collections.Counter(feasible_values) duplicate_dict = {k: v for k, v in counter.items() if v > 1} raise ValueError( 'Feasible values cannot have duplicates: {}'.format(duplicate_dict)) if all(isinstance(v, (float, int)) for v in feasible_values): inferred_type = ParameterType.DISCRETE feasible_values, bounds = _get_feasible_points_and_bounds( feasible_values) elif all(isinstance(v, str) for v in feasible_values): inferred_type = ParameterType.CATEGORICAL feasible_values = _get_categories(feasible_values) else: raise ValueError( 'Feasible values must all be numeric or strings. Given {}'.format( feasible_values)) else: # bounds were specified. if isinstance(bounds[0], int) and isinstance(bounds[1], int): inferred_type = ParameterType.INTEGER _validate_bounds(bounds) elif isinstance(bounds[0], float) and isinstance(bounds[1], float): inferred_type = ParameterType.DOUBLE _validate_bounds(bounds) else: raise ValueError( 'Bounds must both be integers or doubles. 
Given: {}'.format(bounds)) if default_value is not None: default_value = _get_default_value(inferred_type, default_value) pc = cls( name=name, type=inferred_type, bounds=bounds, feasible_values=feasible_values, scale_type=scale_type, default_value=default_value, external_type=external_type, matching_parent_values=None, child_parameter_configs=None) if children: pc = pc.add_children(children) return pc @property def name(self) -> str: return self._name @property def type(self) -> ParameterType: return self._type @property def external_type(self) -> ExternalType: return self._external_type @property def scale_type(self) -> Optional[ScaleType]: return self._scale_type @property def bounds(self) -> Union[Tuple[float, float], Tuple[int, int]]: """Returns the bounds, if set, or raises a ValueError.""" if self.type == ParameterType.CATEGORICAL: raise ValueError('Accessing bounds of a categorical parameter: %s' % self.name) return self._bounds @property def matching_parent_values(self) -> MonotypeParameterList: """Returns the matching parent values, if this is a child parameter.""" if not self._matching_parent_values: return [] return copy.copy(self._matching_parent_values) @property def child_parameter_configs(self) -> List['ParameterConfig']: if not self._child_parameter_configs: return [] return copy.deepcopy(self._child_parameter_configs) def _del_child_parameter_configs(self): """Deletes the current child ParameterConfigs.""" object.__setattr__(self, '_child_parameter_configs', None) @property def clone_without_children(self) -> 'ParameterConfig': """Returns the clone of self, without child_parameter_configs.""" clone = copy.deepcopy(self) clone._del_child_parameter_configs() # pylint: disable='protected-access' return clone @property def feasible_values(self) -> Union[List[int], List[float], List[str]]: if self.type in (ParameterType.DISCRETE, ParameterType.CATEGORICAL): if not self._feasible_values: return [] return copy.copy(self._feasible_values) elif self.type == 
ParameterType.INTEGER: return list(range(self.bounds[0], self.bounds[1] + 1)) raise ValueError('feasible_values is invalid for type: %s' % self.type) @property def default_value(self) -> Optional[Union[int, float, str]]: """Returns the default value, or None if not set.""" return self._default_value def _set_matching_parent_values(self, parent_values: MonotypeParameterSequence): """Sets the given matching parent values in this object, without validation. Args: parent_values: Parent values for which this child ParameterConfig is active. Existing values will be replaced. """ object.__setattr__(self, '_matching_parent_values', list(parent_values)) def _set_child_parameter_configs(self, children: List['ParameterConfig']): """Sets the given child ParameterConfigs in this object, without validation. Args: children: The children to set in this object. Existing children will be replaced. """ object.__setattr__(self, '_child_parameter_configs', children) def add_children( self, new_children: Sequence[Tuple[MonotypeParameterSequence, 'ParameterConfig']] ) -> 'ParameterConfig': """Clones the ParameterConfig and adds new children to it. Args: new_children: A sequence of tuples formatted as: (matching_parent_values, ParameterConfig). If the child ParameterConfig have pre-existing parent values, they will be overridden. Returns: A parent parameter config, with children set. 
Raises: ValueError: If the child configs are invalid TypeError: If matching parent values are invalid """ parent = copy.deepcopy(self) if not new_children: return parent for child_pair in new_children: if len(child_pair) != 2: raise ValueError('Each element in new_children must be a tuple of ' '(Sequence of valid parent values, ParameterConfig),' ' given: {}'.format(child_pair)) logging.debug('add_children: new_children=%s', new_children) child_parameter_configs = parent.child_parameter_configs for unsorted_parent_values, child in new_children: parent_values = sorted(unsorted_parent_values) child_copy = copy.deepcopy(child) if parent.type == ParameterType.DISCRETE: if not all(isinstance(v, (float, int)) for v in parent_values): raise TypeError('Parent is DISCRETE-typed, but a child is specifying ' 'one or more non float/int parent values: child={} ' ', parent_values={}'.format(child, parent_values)) child_copy._set_matching_parent_values(parent_values) # pylint: disable='protected-access' elif parent.type == ParameterType.CATEGORICAL: if not all(isinstance(v, str) for v in parent_values): raise TypeError('Parent is CATEGORICAL-typed, but a child is ' 'specifying one or more non float/int parent values: ' 'child={}, parent_values={}'.format( child, parent_values)) child_copy._set_matching_parent_values(parent_values) # pylint: disable='protected-access' elif parent.type == ParameterType.INTEGER: # Allow {int, float}->float conversion but block str->float conversion. 
int_values = [int(v) for v in parent_values] if int_values != parent_values: raise TypeError( 'Parent is INTEGER-typed, but a child is specifying one or more ' 'non-integral parent values: {}'.format(parent_values)) child_copy._set_matching_parent_values(int_values) # pylint: disable='protected-access' else: raise ValueError('DOUBLE type cannot have child parameters') child_parameter_configs.extend([child_copy]) parent._set_child_parameter_configs(child_parameter_configs) # pylint: disable='protected-access' return parent def continuify(self) -> 'ParameterConfig': """Returns a newly created DOUBLE parameter with the same range.""" if self.type == ParameterType.DOUBLE: return copy.deepcopy(self) elif not ParameterType.is_numeric(self.type): raise ValueError( 'Cannot convert a non-numeric parameter to DOUBLE: {}'.format(self)) elif self._child_parameter_configs: raise ValueError( 'Cannot convert a parent parameter to DOUBLE: {}'.format(self)) scale_type = self.scale_type if scale_type == ScaleType.UNIFORM_DISCRETE: logging.log_every_n( logging.WARNING, 'Converting a UNIFORM_DISCRETE scaled discrete parameter ' 'to DOUBLE: %s', 10, self) scale_type = None default_value = self.default_value if default_value is not None: default_value = float(default_value) return ParameterConfig.factory( self.name, bounds=(float(self.bounds[0]), float(self.bounds[1])), scale_type=scale_type, default_value=default_value) @classmethod def merge(cls, one: 'ParameterConfig', other: 'ParameterConfig') -> 'ParameterConfig': """Merge two ParameterConfigs. Args: one: ParameterConfig with no child parameters. other: Must have the same type as one, and may not have child parameters. Returns: For Categorical, Discrete or Integer ParameterConfigs, the resulting config will be the union of all feasible values. For Double ParameterConfigs, the resulting config will have [min_value, max_value] set to the smallest and largest bounds. 
Raises: ValueError: If any of the input configs has child parameters, or if the two parameters have different types. """ if one.child_parameter_configs or other.child_parameter_configs: raise ValueError( 'Cannot merge parameters with child_parameter_configs: %s and %s' % one, other) if one.type != other.type: raise ValueError('Type conflicts between {} and {}'.format( one.type.name, other.type.name)) if one.scale_type != other.scale_type: logging.warning('Scale type conflicts while merging %s and %s', one, other) if one.type in (ParameterType.CATEGORICAL, ParameterType.DISCRETE): new_feasible_values = list( set(one.feasible_values + other.feasible_values)) return ParameterConfig.factory( name=one.name, feasible_values=new_feasible_values, scale_type=one.scale_type) elif one.type in (ParameterType.INTEGER, ParameterType.DOUBLE): original_min, original_max = one.bounds other_min, other_max = other.bounds new_bounds = (min(original_min, other_min), max(original_max, other_max)) return ParameterConfig.factory( name=one.name, bounds=new_bounds, scale_type=one.scale_type) raise ValueError('Unknown type {}. This is currently' 'an unreachable code.'.format(one.type)) def traverse( self, show_children: bool = False) -> Generator['ParameterConfig', None, None]: """DFS Generator for parameter configs. Args: show_children: If True, every generated ParameterConfig has child_parameter_configs. For example, if 'foo' has two child configs 'bar1' and 'bar2', then traversing 'foo' with show_children=True generates (foo, with bar1,bar2 as children), (bar1), and (bar2). If show_children=False, it generates (foo, without children), (bar1), and (bar2). Yields: DFS on all parameter configs. 
""" if show_children: yield self else: yield self.clone_without_children for child in self.child_parameter_configs: yield from child.traverse(show_children) def contains( self, value: Union[trial.ParameterValueTypes, trial.ParameterValue]) -> bool: """Check if the `value` is a valid value for this parameter config.""" if not isinstance(value, trial.ParameterValue): value = trial.ParameterValue(value) if self.type == ParameterType.DOUBLE: return self.bounds[0] <= value.as_float and value.as_float <= self.bounds[ 1] elif self.type == ParameterType.INTEGER: if value.as_int != value.as_float: return False return self.bounds[0] <= value.as_int and value.as_int <= self.bounds[1] elif self.type == ParameterType.DISCRETE: return value.as_float in self.feasible_values elif self.type == ParameterType.CATEGORICAL: return value.as_str in self.feasible_values else: raise NotImplementedError(f'Cannot determine whether {value} is feasible' f'for Unknown parameter type {self.type}.\n' f'Full config: {repr(self)}') @property def num_feasible_values(self) -> Union[float, int]: if self.type == ParameterType.DOUBLE: return float('inf') elif self.type == ParameterType.INTEGER: return self.bounds[1] - self.bounds[0] + 1 else: return len(self.feasible_values)
google/vizier
vizier/pyvizier/shared/parameter_config.py
Python
apache-2.0
21,349
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent """ from resource_management import * class MahoutServiceCheck(Script): def service_check(self, env): import params env.set_params(params) mahout_command = format("mahout seqdirectory --input /user/{smokeuser}/mahoutsmokeinput/sample-mahout-test.txt " "--output /user/{smokeuser}/mahoutsmokeoutput/ --charset utf-8") test_command = format("fs -test -e /user/{smokeuser}/mahoutsmokeoutput/_SUCCESS") File( format("{tmp_dir}/sample-mahout-test.txt"), content = "Test text which will be converted to sequence file.", mode = 0755 ) params.HdfsResource(format("/user/{smokeuser}"), type="directory", action="create_on_execute", owner=params.smokeuser, mode=params.smoke_hdfs_user_mode, ) params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeoutput"), action="delete_on_execute", type="directory", ) params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeinput"), action="create_on_execute", type="directory", owner=params.smokeuser, ) params.HdfsResource(format("/user/{smokeuser}/mahoutsmokeinput/sample-mahout-test.txt"), action="create_on_execute", type="file", owner=params.smokeuser, source=format("{tmp_dir}/sample-mahout-test.txt") ) params.HdfsResource(None, action="execute") if params.security_enabled: kinit_cmd = 
format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") Execute(kinit_cmd, user=params.smokeuser) Execute( mahout_command, tries = 3, try_sleep = 5, environment={'MAHOUT_HOME': params.mahout_home,'JAVA_HOME': params.java64_home}, path = format('/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'), user = params.smokeuser ) ExecuteHadoop( test_command, tries = 10, try_sleep = 6, user = params.smokeuser, conf_dir = params.hadoop_conf_dir, bin_dir = params.hadoop_bin_dir ) if __name__ == "__main__": MahoutServiceCheck().execute()
arenadata/ambari
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/service_check.py
Python
apache-2.0
3,213
import datetime

import mock
from django.utils import timezone
from mock import Mock, call, PropertyMock
from django.test import TestCase
from django.contrib.sessions.models import Session

from mysite.celery import send_outcome, check_anonymous


class CeleryTasksTest(TestCase):
    """Unit tests for the periodic/async tasks in ``mysite.celery``."""

    @mock.patch('mysite.celery.UserSession.objects.filter')
    @mock.patch('mysite.celery.User.objects.filter')
    def test_check_anonymous_user_session_no_session(self, mock_User_filter,
                                                     mock_UserSession_filter):
        """A temporary user whose session row is gone is deleted outright."""
        fake_user = Mock(id=1)
        user_delete = Mock()
        fake_user.delete = user_delete
        mock_User_filter.return_value = [fake_user]

        fake_user_session = Mock(id=2)
        # Reading ``user_session.session`` raises DoesNotExist, emulating a
        # session row that has already been purged from the sessions table.
        session_prop = PropertyMock(
            return_value=3,
            side_effect=Session.DoesNotExist('Object Does not exist'))
        type(fake_user_session).session = session_prop
        mock_UserSession_filter.return_value = [fake_user_session]

        check_anonymous()

        user_delete.assert_called_once_with()
        mock_User_filter.assert_called_with(groups__name='Temporary')
        mock_UserSession_filter.assert_called_with(
            user__groups__name='Temporary')

    @mock.patch('mysite.celery.UserSession.objects.filter')
    @mock.patch('mysite.celery.User.objects.filter')
    def test_check_anonymous_user_session_has_session(self, mock_User_filter,
                                                      mock_UserSession_filter):
        """An expired session and its temporary user are both removed."""
        fake_user = Mock(id=1)
        user_delete = Mock()
        fake_user.delete = user_delete
        mock_User_filter.return_value = [fake_user]

        fake_user_session = Mock(id=2)
        # The session expired a day ago, so the task must clean it up.
        fake_user_session.session.expire_date = (
            timezone.now() - datetime.timedelta(days=1))
        session_delete = Mock()
        session_user_delete = Mock()
        fake_user_session.session.delete = session_delete
        fake_user_session.user.delete = session_user_delete
        mock_UserSession_filter.return_value = [fake_user_session]

        check_anonymous()

        session_delete.assert_called_once_with()
        session_user_delete.assert_called_once_with()
        user_delete.assert_called_once_with()
        mock_User_filter.assert_called_with(groups__name='Temporary')
        mock_UserSession_filter.assert_called_with(
            user__groups__name='Temporary')

    @mock.patch('mysite.celery.GradedLaunch.objects.get')
    @mock.patch('mysite.celery.send_score_update')
    def test_send_outcome(self, mock_send_score_update,
                          mock_GradedLaunch_get):
        """``send_outcome`` looks up the launch by id and forwards the score."""
        launch = Mock()
        mock_GradedLaunch_get.return_value = launch

        send_outcome('0', assignment_id=1)

        mock_GradedLaunch_get.assert_called_once_with(id=1)
        mock_send_score_update.assert_called_once_with(launch, '0')
raccoongang/socraticqs2
mysite/mysite/tests/celery.py
Python
apache-2.0
2,987
# Copyright 2015 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Isilon specific NAS backend plugin.
"""
import os

from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
from requests.exceptions import HTTPError
import six

from manila.common import constants as const
from manila import exception
from manila.i18n import _
from manila.share.drivers.dell_emc.plugins import base
from manila.share.drivers.dell_emc.plugins.isilon import isilon_api

CONF = cfg.CONF
VERSION = "0.1.0"

LOG = log.getLogger(__name__)


class IsilonStorageConnection(base.StorageConnection):
    """Implements Isilon specific functionality for EMC Manila driver."""

    def __init__(self, *args, **kwargs):
        super(IsilonStorageConnection, self).__init__(*args, **kwargs)
        self._server = None
        self._port = None
        self._username = None
        self._password = None
        self._server_url = None
        self._connect_resp = None
        self._root_dir = None
        self._verify_ssl_cert = None
        self._containers = {}
        self._shares = {}
        self._snapshots = {}

        self._isilon_api = None
        self._isilon_api_class = isilon_api.IsilonApi
        self.driver_handles_share_servers = False

    def _get_container_path(self, share):
        """Return path to a container."""
        return os.path.join(self._root_dir, share['name'])

    def create_share(self, context, share, share_server):
        """Is called to create share."""
        if share['share_proto'] == 'NFS':
            location = self._create_nfs_share(share)
        elif share['share_proto'] == 'CIFS':
            location = self._create_cifs_share(share)
        else:
            message = (_('Unsupported share protocol: %(proto)s.') %
                       {'proto': share['share_proto']})
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

        # apply directory quota based on share size
        max_share_size = share['size'] * units.Gi
        self._isilon_api.quota_create(
            self._get_container_path(share), 'directory', max_share_size)

        return location

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server):
        """Creates a share from the snapshot."""
        # Create share at new location
        location = self.create_share(context, share, share_server)

        # Clone snapshot to new location
        fq_target_dir = self._get_container_path(share)
        self._isilon_api.clone_snapshot(snapshot['name'], fq_target_dir)

        return location

    def _create_nfs_share(self, share):
        """Is called to create nfs share."""
        container_path = self._get_container_path(share)
        self._isilon_api.create_directory(container_path)

        share_created = self._isilon_api.create_nfs_export(container_path)
        if not share_created:
            message = (
                _('The requested NFS share "%(share)s" was not created.') %
                {'share': share['name']})
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        location = '{0}:{1}'.format(self._server, container_path)
        return location

    def _create_cifs_share(self, share):
        """Is called to create cifs share."""

        # Create the directory
        container_path = self._get_container_path(share)
        self._isilon_api.create_directory(container_path)
        self._isilon_api.create_smb_share(share['name'], container_path)
        share_path = '\\\\{0}\\{1}'.format(self._server, share['name'])
        return share_path

    def create_snapshot(self, context, snapshot, share_server):
        """Is called to create snapshot."""
        snapshot_path = os.path.join(self._root_dir, snapshot['share_name'])
        self._isilon_api.create_snapshot(snapshot['name'], snapshot_path)

    def delete_share(self, context, share, share_server):
        """Is called to remove share."""
        if share['share_proto'] == 'NFS':
            self._delete_nfs_share(share)
        elif share['share_proto'] == 'CIFS':
            self._delete_cifs_share(share)
        else:
            message = (_('Unsupported share type: %(type)s.') %
                       {'type': share['share_proto']})
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

    def _delete_nfs_share(self, share):
        """Is called to remove nfs share."""
        share_id = self._isilon_api.lookup_nfs_export(
            self._root_dir + '/' + share['name'])

        if share_id is None:
            # Nothing to do: deleting a share that is already gone is not an
            # error, but it is worth a warning.
            lw = ('Attempted to delete NFS Share "%s", but the share does '
                  'not appear to exist.')
            LOG.warning(lw, share['name'])
        else:
            # attempt to delete the share
            export_deleted = self._isilon_api.delete_nfs_share(share_id)
            if not export_deleted:
                message = _('Error deleting NFS share: %s') % share['name']
                LOG.error(message)
                raise exception.ShareBackendException(msg=message)

    def _delete_cifs_share(self, share):
        """Is called to remove CIFS share."""
        smb_share = self._isilon_api.lookup_smb_share(share['name'])
        if smb_share is None:
            # Same best-effort semantics as the NFS delete path.
            lw = ('Attempted to delete CIFS Share "%s", but the share does '
                  'not appear to exist.')
            LOG.warning(lw, share['name'])
        else:
            share_deleted = self._isilon_api.delete_smb_share(share['name'])
            if not share_deleted:
                message = _('Error deleting CIFS share: %s') % share['name']
                LOG.error(message)
                raise exception.ShareBackendException(msg=message)

    def delete_snapshot(self, context, snapshot, share_server):
        """Is called to remove snapshot."""
        self._isilon_api.delete_snapshot(snapshot['name'])

    def ensure_share(self, context, share, share_server):
        """Invoked to ensure that share is exported."""

    def extend_share(self, share, new_size, share_server=None):
        """Extends a share."""
        new_quota_size = new_size * units.Gi
        self._isilon_api.quota_set(
            self._get_container_path(share), 'directory', new_quota_size)

    def allow_access(self, context, share, access, share_server):
        """Allow access to the share."""
        if share['share_proto'] == 'NFS':
            self._nfs_allow_access(share, access)
        elif share['share_proto'] == 'CIFS':
            self._cifs_allow_access(share, access)
        else:
            message = _(
                'Unsupported share protocol: %s. Only "NFS" and '
                '"CIFS" are currently supported share protocols.') % share[
                'share_proto']
            LOG.error(message)
            raise exception.InvalidShare(reason=message)

    def _nfs_allow_access(self, share, access):
        """Allow access to nfs share."""
        access_type = access['access_type']
        if access_type != 'ip':
            # NOTE: message previously read "NFSprotocol." due to a missing
            # space in the implicit string concatenation.
            message = _('Only "ip" access type allowed for the NFS '
                        'protocol.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        export_path = self._get_container_path(share)
        access_ip = access['access_to']
        access_level = access['access_level']
        share_id = self._isilon_api.lookup_nfs_export(export_path)

        share_access_group = 'clients'
        if access_level == const.ACCESS_LEVEL_RO:
            share_access_group = 'read_only_clients'

        # Get current allowed clients
        export = self._get_existing_nfs_export(share_id)
        current_clients = export[share_access_group]

        # Format of ips could be '10.0.0.2', or '10.0.0.2, 10.0.0.0/24'
        ips = [access_ip] + list(current_clients)
        export_params = {share_access_group: ips}
        url = '{0}/platform/1/protocols/nfs/exports/{1}'.format(
            self._server_url, share_id)
        resp = self._isilon_api.request('PUT', url, data=export_params)
        resp.raise_for_status()

    def _cifs_allow_access(self, share, access):
        """Allow access to a CIFS share by ip or by user."""
        access_type = access['access_type']
        access_to = access['access_to']
        access_level = access['access_level']
        if access_type == 'ip':
            access_ip = access['access_to']
            self._cifs_allow_access_ip(access_ip, share, access_level)
        elif access_type == 'user':
            self._cifs_allow_access_user(access_to, share, access_level)
        else:
            message = _('Only "ip" and "user" access types allowed for '
                        'CIFS protocol.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

    def _cifs_allow_access_ip(self, ip, share, access_level):
        """Append an 'allow:<ip>' entry to the share's host ACL."""
        if access_level == const.ACCESS_LEVEL_RO:
            message = _('Only RW Access allowed for CIFS Protocol when using '
                        'the "ip" access type.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        allowed_ip = 'allow:' + ip
        smb_share = self._isilon_api.lookup_smb_share(share['name'])
        host_acl = smb_share['host_acl']
        # Adding the same ip twice would be redundant; only PUT on change.
        if allowed_ip not in host_acl:
            host_acl.append(allowed_ip)
            data = {'host_acl': host_acl}
            url = ('{0}/platform/1/protocols/smb/shares/{1}'
                   .format(self._server_url, share['name']))
            r = self._isilon_api.request('PUT', url, data=data)
            r.raise_for_status()

    def _cifs_allow_access_user(self, user, share, access_level):
        """Grant an SMB user RW or RO permission on the share."""
        if access_level == const.ACCESS_LEVEL_RW:
            smb_permission = isilon_api.SmbPermission.rw
        elif access_level == const.ACCESS_LEVEL_RO:
            smb_permission = isilon_api.SmbPermission.ro
        else:
            message = _('Only "RW" and "RO" access levels are supported.')
            LOG.error(message)
            raise exception.InvalidShareAccess(reason=message)

        self._isilon_api.smb_permissions_add(share['name'], user,
                                             smb_permission)

    def deny_access(self, context, share, access, share_server):
        """Deny access to the share."""
        if share['share_proto'] == 'NFS':
            self._nfs_deny_access(share, access)
        elif share['share_proto'] == 'CIFS':
            self._cifs_deny_access(share, access)

    def _nfs_deny_access(self, share, access):
        """Deny access to nfs share."""
        if access['access_type'] != 'ip':
            return

        denied_ip = access['access_to']
        access_level = access['access_level']
        share_access_group = 'clients'
        if access_level == const.ACCESS_LEVEL_RO:
            share_access_group = 'read_only_clients'

        # Get list of currently allowed client ips
        export_id = self._isilon_api.lookup_nfs_export(
            self._get_container_path(share))
        if export_id is None:
            message = _('Share %s should have been created, but was not '
                        'found.') % share['name']
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        export = self._get_existing_nfs_export(export_id)
        try:
            clients = export[share_access_group]
        except KeyError:
            message = (_('Export %(export_name)s should have contained the '
                         'JSON key %(json_key)s, but this key was not found.')
                       % {'export_name': share['name'],
                          'json_key': share_access_group})
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        allowed_ips = set(clients)

        # Idiomatic membership test (was allowed_ips.__contains__(denied_ip)).
        if denied_ip in allowed_ips:
            allowed_ips.remove(denied_ip)
            data = {share_access_group: list(allowed_ips)}
            url = ('{0}/platform/1/protocols/nfs/exports/{1}'
                   .format(self._server_url, six.text_type(export_id)))
            r = self._isilon_api.request('PUT', url, data=data)
            r.raise_for_status()

    def _get_existing_nfs_export(self, export_id):
        """Fetch an NFS export by id, raising if it unexpectedly vanished."""
        export = self._isilon_api.get_nfs_export(export_id)
        if export is None:
            message = _('NFS share with export id %d should have been '
                        'created, but was not found.') % export_id
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        return export

    def _cifs_deny_access(self, share, access):
        """Deny CIFS access by ip or by user."""
        access_type = access['access_type']
        if access_type == 'ip':
            self._cifs_deny_access_ip(access['access_to'], share)
        elif access_type == 'user':
            self._cifs_deny_access_user(share, access)
        else:
            # Unknown access types are logged but not raised: there is
            # nothing on the backend to undo.
            message = _('Access type for CIFS deny access request was '
                        '"%(access_type)s". Only "user" and "ip" access types '
                        'are supported for CIFS protocol access.') % {
                'access_type': access_type}
            LOG.warning(message)

    def _cifs_deny_access_ip(self, denied_ip, share):
        """Deny access to cifs share."""
        share_json = self._isilon_api.lookup_smb_share(share['name'])
        host_acl_list = share_json['host_acl']
        allow_ip = 'allow:' + denied_ip
        if allow_ip in host_acl_list:
            host_acl_list.remove(allow_ip)
            share_params = {"host_acl": host_acl_list}
            url = ('{0}/platform/1/protocols/smb/shares/{1}'
                   .format(self._server_url, share['name']))
            resp = self._isilon_api.request('PUT', url, data=share_params)
            resp.raise_for_status()

    def _cifs_deny_access_user(self, share, access):
        """Remove an SMB user's permission entry from the share."""
        self._isilon_api.smb_permissions_remove(share['name'], access[
            'access_to'])

    def check_for_setup_error(self):
        """Check for setup error."""

    def connect(self, emc_share_driver, context):
        """Connect to an Isilon cluster."""
        self._server = emc_share_driver.configuration.safe_get(
            "emc_nas_server")
        self._port = (
            int(emc_share_driver.configuration.safe_get(
                "emc_nas_server_port")))
        self._server_url = ('https://' + self._server + ':' +
                            six.text_type(self._port))
        self._username = emc_share_driver.configuration.safe_get(
            "emc_nas_login")
        self._password = emc_share_driver.configuration.safe_get(
            "emc_nas_password")
        self._root_dir = emc_share_driver.configuration.safe_get(
            "emc_nas_root_dir")
        # TODO(Shaun Edwards): make verify ssl a config variable?
        self._verify_ssl_cert = False
        self._isilon_api = self._isilon_api_class(
            self._server_url,
            auth=(self._username, self._password),
            verify_ssl_cert=self._verify_ssl_cert)
        if not self._isilon_api.is_path_existent(self._root_dir):
            self._isilon_api.create_directory(self._root_dir, recursive=True)

    def update_share_stats(self, stats_dict):
        """TODO."""
        # TODO(Shaun Edwards): query capacity, set storage_protocol,
        # QoS support?
        stats_dict['driver_version'] = VERSION

    def get_network_allocations_number(self):
        """Returns number of network allocations for creating VIFs."""
        # TODO(Shaun Edwards)
        return 0

    def setup_server(self, network_info, metadata=None):
        """Set up and configures share server with given network parameters."""
        # TODO(Shaun Edwards): Look into supporting share servers

    def teardown_server(self, server_details, security_services=None):
        """Teardown share server."""
        # TODO(Shaun Edwards): Look into supporting share servers

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update share access."""
        # Guard against unsupported protocols: previously ``state_map`` was
        # unbound (NameError) when the protocol was neither NFS nor CIFS.
        state_map = {}
        if share['share_proto'] == 'NFS':
            state_map = self._update_access_nfs(share, access_rules)
        if share['share_proto'] == 'CIFS':
            state_map = self._update_access_cifs(share, access_rules)
        return state_map

    def _update_access_nfs(self, share, access_rules):
        """Updates access on a NFS share."""
        nfs_rw_ips = set()
        nfs_ro_ips = set()
        rule_state_map = {}
        # Start every rule in 'error'; flip to 'active' only after the
        # backend accepts the bulk update.
        for rule in access_rules:
            rule_state_map[rule['access_id']] = {
                'state': 'error'
            }
        for rule in access_rules:
            if rule['access_level'] == const.ACCESS_LEVEL_RW:
                nfs_rw_ips.add(rule['access_to'])
            elif rule['access_level'] == const.ACCESS_LEVEL_RO:
                nfs_ro_ips.add(rule['access_to'])

        export_id = self._isilon_api.lookup_nfs_export(
            self._get_container_path(share))
        if export_id is None:
            # share does not exist on backend (set all rules to error state)
            return rule_state_map

        data = {
            'clients': list(nfs_rw_ips),
            'read_only_clients': list(nfs_ro_ips)
        }
        url = ('{0}/platform/1/protocols/nfs/exports/{1}'
               .format(self._server_url, six.text_type(export_id)))
        r = self._isilon_api.request('PUT', url, data=data)
        try:
            r.raise_for_status()
        except HTTPError:
            return rule_state_map

        # if we finish the bulk rule update with no error set rules to active
        for rule in access_rules:
            rule_state_map[rule['access_id']]['state'] = 'active'
        return rule_state_map

    def _update_access_cifs(self, share, access_rules):
        """Clear access on a CIFS share."""
        cifs_ip_set = set()
        users = set()
        for rule in access_rules:
            if rule['access_type'] == 'ip':
                cifs_ip_set.add('allow:' + rule['access_to'])
            elif rule['access_type'] == 'user':
                users.add(rule['access_to'])

        smb_share = self._isilon_api.lookup_smb_share(share['name'])
        backend_smb_user_permissions = smb_share['permissions']

        # Drop backend user permissions that are no longer in the rule set.
        perms_to_remove = []
        for perm in backend_smb_user_permissions:
            if perm['trustee']['name'] not in users:
                perms_to_remove.append(perm)
        for perm in perms_to_remove:
            backend_smb_user_permissions.remove(perm)

        data = {
            'host_acl': list(cifs_ip_set),
            'permissions': backend_smb_user_permissions,
        }
        url = ('{0}/platform/1/protocols/smb/shares/{1}'
               .format(self._server_url, share['name']))
        r = self._isilon_api.request('PUT', url, data=data)
        try:
            r.raise_for_status()
        except HTTPError:
            # clear access rules failed so set all access rules to error state
            rule_state_map = {}
            for rule in access_rules:
                rule_state_map[rule['access_id']] = {
                    'state': 'error'
                }
            return rule_state_map

        # add access rules that don't exist on backend
        rule_state_map = {}
        for rule in access_rules:
            rule_state_map[rule['access_id']] = {
                'state': 'error'
            }
            try:
                if rule['access_type'] == 'ip':
                    self._cifs_allow_access_ip(rule['access_to'], share,
                                               rule['access_level'])
                    rule_state_map[rule['access_id']]['state'] = 'active'
                elif rule['access_type'] == 'user':
                    backend_users = set()
                    for perm in backend_smb_user_permissions:
                        backend_users.add(perm['trustee']['name'])
                    if rule['access_to'] not in backend_users:
                        self._cifs_allow_access_user(
                            rule['access_to'], share, rule['access_level'])
                    rule_state_map[rule['access_id']]['state'] = 'active'
                else:
                    continue
            except exception.ManilaException:
                # Leave this rule in 'error' and keep processing the rest.
                pass
        return rule_state_map
bswartz/manila
manila/share/drivers/dell_emc/plugins/isilon/isilon.py
Python
apache-2.0
20,977
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Config, GPT2Tokenizer, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow

from .test_generation_flax_utils import FlaxGenerationTesterMixin
from .test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gpt2.modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model

if is_torch_available():
    import torch


class FlaxGPT2ModelTester:
    """Builds small GPT2 configs/inputs and checks cached-decoding paths."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # GPT2 has no dedicated special tokens; reuse the last vocab id.
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self, gradient_checkpointing=False):
        """Return a tiny GPT2 config plus random ids and attention mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPT2Config(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            n_ctx=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            gradient_checkpointing=gradient_checkpointing,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Adapt ``prepare_config_and_inputs`` output to the common-test shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Decoding with an init_cache'd past must match a plain forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        # Feed everything except the last token, carrying the cache forward.
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        # Then feed only the last token against the accumulated cache.
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same cached-decoding check, but with a caller-supplied attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        # Pad the mask out to the full decoder length with zeros.
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPT2ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):

    all_model_classes = (FlaxGPT2Model, FlaxGPT2LMHeadModel) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPT2ModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @slow
    def test_batch_generation(self):
        # Left-padded batch so greedy generation lines up across rows.
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="</s>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="jax", padding=True, truncation=True)

        model = FlaxGPT2LMHeadModel.from_pretrained("gpt2")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of words. I'm going to try to explain what I mean.",
            "Hey, I'm not sure if I'm going to be able to do",
        ]

        self.assertListEqual(output_string, expected_string)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slighly differently
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                # Randomize how much of each row is masked so the causal-mask
                # interaction is exercised, identically on both frameworks.
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                # Round-trip through a saved PyTorch checkpoint.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slighly differently
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                # Round-trip through a saved Flax checkpoint.
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("gpt2", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
huggingface/pytorch-transformers
tests/test_modeling_flax_gpt2.py
Python
apache-2.0
14,464
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import inspect

from oslo_utils import reflection
import wrapt

from debtcollector import _utils


def remove(f=None, message=None, version=None, removal_version=None,
           stacklevel=3):
    """Decorates a function, method, or class to emit a deprecation warning

    :param str message: A message to include in the deprecation warning
    :param str version: Specify what version the removed function is present
                        in
    :param str removal_version: What version the function will be removed. If
                                '?' is used this implies an undefined future
                                version
    :param int stacklevel: How many entries deep in the call stack before
                           ignoring
    """
    # Invoked as ``@remove(...)``: remember the options and wait for the
    # decorated target to arrive.
    if f is None:
        return functools.partial(remove, message=message, version=version,
                                 removal_version=removal_version,
                                 stacklevel=stacklevel)

    @wrapt.decorator
    def wrapper(f, instance, args, kwargs):
        # On py3.x every callable exposes ``__qualname__`` (which already
        # embeds the owning class); on py2.x it is absent and a readable
        # name must be reconstructed via ``reflection``.
        try:
            f_name = f.__qualname__
            qualified = True
            if inspect.isclass(f):
                _prefix_pre = "Using class"
            else:
                _prefix_pre = "Using function/method"
        except AttributeError:
            f_name = f.__name__
            qualified = False

        if qualified:
            # __qualname__ is already fully descriptive.
            function_name = f_name
        else:
            _prefix_pre = "Using function/method"
            if instance is None:
                if inspect.isclass(f):
                    # Decorator was used on a class.
                    _prefix_pre = "Using class"
                    module_name = inspect.getmodule(f).__name__
                    # Fully qualify the name unless defined in __main__.
                    f_name = reflection.get_class_name(
                        f, fully_qualified=(module_name != '__main__'))
                else:
                    # Decorator was used on a plain function.
                    module_name = inspect.getmodule(f).__name__
                    if module_name != '__main__':
                        f_name = reflection.get_callable_name(f)
                base_name = None
            else:
                # Decorator was used on a classmethod or instancemethod.
                base_name = reflection.get_class_name(instance,
                                                      fully_qualified=False)
            if base_name:
                function_name = ".".join([base_name, f_name])
            else:
                function_name = f_name

        _prefix = _prefix_pre + " %s is deprecated" % function_name
        out_message = _utils.generate_message(
            _prefix, version=version, removal_version=removal_version,
            message=message)
        _utils.deprecation(out_message, stacklevel)
        return f(*args, **kwargs)

    return wrapper(f)
citrix-openstack-build/debtcollector
debtcollector/removals.py
Python
apache-2.0
3,792
__source__ = 'https://leetcode.com/problems/minimum-window-substring/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/minimum-window-substring.py
# Time:  O(n)
# Space: O(k), k is the number of different characters
# Hashtable
#
# Description: Leetcode # 76. Minimum Window Substring
#
# Given a string S and a string T, find the minimum window in S
# which will contain all the characters in T in complexity O(n).
#
# For example,
# S = "ADOBECODEBANC"
# T = "ABC"
# Minimum window is "BANC".
#
# Note:
# If there is no such window in S that covers all characters in T, return the emtpy string "".
#
# If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
# Companies
# LinkedIn Snapchat Uber Facebook
# Related Topics
# Hash Table Two Pointers String
# Similar Questions
# Substring with Concatenation of All Words Minimum Size Subarray Sum
# Sliding Window Maximum Permutation in String Smallest Range
#
import unittest
import collections


def _char_index(char):
    """Map 'a'-'z' to 0-25 and 'A'-'Z' to 26-51 in the 52-slot radix.

    BUG FIX: the original used ``ord(char) - ord('a')`` for every letter,
    which gives negative indices for uppercase letters (e.g. 'A' -> -32);
    Python's negative indexing made those alias lowercase slots ('A'
    collided with 'u'), so mixed-case inputs could match the wrong chars.
    """
    if char.islower():
        return ord(char) - ord('a')
    return ord(char) - ord('A') + 26


class Solution:
    # @return a string
    def minWindow(self, S, T):
        """Return the minimum window of S containing all characters of T.

        Counting-array sliding window: expand the right edge, and once all
        of T is covered, shrink the left edge while it stays covered.
        """
        current_count = [0] * 52   # Radix a-zA-Z -> 52
        expected_count = [0] * 52
        for char in T:
            expected_count[_char_index(char)] += 1

        i, count, start, min_width, min_start = 0, 0, 0, float("inf"), 0
        # `xrange` replaced by index arithmetic compatible with Py2 and Py3.
        while i < len(S):
            idx = _char_index(S[i])
            current_count[idx] += 1
            # Only count occurrences that are still needed.
            if current_count[idx] <= expected_count[idx]:
                count += 1
            if count == len(T):
                # Shrink from the left: drop chars not in T or over-counted.
                while expected_count[_char_index(S[start])] == 0 or \
                        current_count[_char_index(S[start])] > \
                        expected_count[_char_index(S[start])]:
                    current_count[_char_index(S[start])] -= 1
                    start += 1
                if min_width > i - start + 1:
                    min_width = i - start + 1
                    min_start = start
            i += 1
        if min_width == float("inf"):
            return ""
        return S[min_start:min_width + min_start]


class Solution2:
    def minWindow(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: str

        Same sliding window using a defaultdict of remaining needs.
        (``xrange`` replaced with ``range`` for Py2/Py3 compatibility.)
        """
        res = ""
        need = collections.defaultdict(int)
        for i in range(len(t)):
            need[t[i]] += 1
        cnt = 0
        min_len = float("inf")
        s_idx = 0
        for i in range(len(s)):
            if s[i] in need:
                need[s[i]] -= 1
                if need[s[i]] >= 0:
                    cnt += 1
            while cnt == len(t):
                if s[s_idx] in need:
                    need[s[s_idx]] += 1
                    if need[s[s_idx]] > 0:
                        if min_len > i - s_idx + 1:
                            min_len = i - s_idx + 1
                            res = s[s_idx: i + 1]
                        cnt -= 1
                s_idx += 1
        return res


class TestMethods(unittest.TestCase):
    def test_Local(self):
        self.assertEqual(1, 1)
        self.assertEqual("BANC", Solution2().minWindow("ADOBECODEBANC", "ABC"))
        self.assertEqual("BANC", Solution().minWindow("ADOBECODEBANC", "ABC"))
        # Mixed-case regression for the old negative-index aliasing bug.
        self.assertEqual("", Solution().minWindow("ua", "Aa"))


if __name__ == '__main__':
    unittest.main()

Java = '''
# Thought: https://leetcode.com/problems/minimum-window-substring/solution/

# 25ms 52.80%
class Solution {
    public String minWindow(String s, String t) {
        String res = "";
        if(s == null || t == null || s.length() == 0 || t.length() == 0) return res;
        int minLen = Integer.MAX_VALUE;
        Map<Character, Integer> map = new HashMap<>();
        for( int i = 0; i < t.length(); i++){
            if(!map.containsKey(t.charAt(i))){
                map.put(t.charAt(i), 0);
            }
            map.put(t.charAt(i), map.get(t.charAt(i)) + 1);
        }
        int cnt = 0;
        int prev = 0;
        for(int i = 0 ; i < s.length(); i++){
            char c = s.charAt(i);
            if(map.containsKey(c)){
                map.put(c, map.get(c) - 1);
                if(map.get(c) >= 0){
                    cnt += 1;
                }
                while(cnt == t.length()){
                    char p = s.charAt(prev);
                    if(map.containsKey(p)){
                        map.put(p, map.get(p) + 1);
                        if(map.get(p) > 0){
                            if(minLen > i - prev + 1){
                                minLen = i - prev + 1;
                                res = s.substring(prev, i + 1);
                            }
                            cnt --;
                        }
                    }
                    prev ++;
                }
            }
        }
        return res;
    }
}

#76.01% 7ms
class Solution {
    public String minWindow(String s, String t) {
        int lenS = s.length();
        int lenT = t.length();
        if (lenS == 0 || lenT == 0) {
            return "";
        }
        int[] sCount = new int[128];
        int[] tCount = new int[128];
        int count = lenT;
        int[] result = new int[] {-1, -1};
        int start = 0;
        for (int i = 0; i < lenT; i++) {
            tCount[t.charAt(i)]++;
        }
        for (int i = 0; i < lenS; i++) {
            char c = s.charAt(i);
            sCount[c]++;
            if (sCount[c] <= tCount[c]) {
                count--;
            }
            if (count == 0) {
                while (true) {
                    char remove = s.charAt(start);
                    if (sCount[remove] <= tCount[remove]) {
                        break;
                    }
                    sCount[remove]--;
                    start++;
                }
                if (result[0] < 0 || result[1] - result[0] > i + 1 - start) {
                    result[0] = start;
                    result[1] = i + 1;
                }
                sCount[s.charAt(start++)]--;
                count++;
            }
        }
        return result[0] < 0 ? "" : s.substring(result[0], result[1]);
    }
}

# 4ms 96.63%
class Solution {
    public String minWindow(String s, String t) {
        if (s == null || t == null || s.length() == 0 || t.length() == 0) {
            return null;
        }
        int start = -1;
        int end = s.length() + 1;
        int left = 0;
        int windowSize = 0;
        int[] count = new int[256];
        for (char c : t.toCharArray()) {
            count[c]++;
        }
        for (int i = 0; i < s.length(); ++i) {
            if(--count[s.charAt(i)] >= 0) {
                windowSize++;
            }
            if (windowSize == t.length()) {
                while (++count[s.charAt(left)] <= 0) {
                    left++;
                }
                if (i - left < end - start) {
                    start = left;
                    end = i;
                }
                left++;
                windowSize--;
            }
        }
        return start == -1 ? "" : s.substring(start, end + 1);
    }
}
'''
JulyKikuAkita/PythonPrac
cs15211/MinimumWindowSubstring.py
Python
apache-2.0
7,107
# coding: utf-8
from __future__ import unicode_literals
from django.contrib import admin

from core.admin import SubcategoryAdmin, DetailAdmin
from .models import FinishDetail, FinishSubcategory


def _restrict_to_detail_type(form, field_name):
    """Limit ``field_name``'s queryset on ``form`` to this app's DETAIL_TYPE.

    Extracted from the two admin classes below, which previously duplicated
    this filtering logic verbatim. DETAIL_TYPE is imported lazily, matching
    the original in-method imports (presumably to avoid a circular import
    at app-load time — confirm before moving it to module level).

    :param form: a ModelForm class as returned by ``ModelAdmin.get_form``
    :param field_name: name of the relational field to filter
    :return: the same form class, with the field's queryset narrowed
    """
    from . import DETAIL_TYPE
    if field_name in form.base_fields:
        field = form.base_fields[field_name]
        field.queryset = field.queryset.filter(type=DETAIL_TYPE)
    return form


class FinishSubcategoryAdmin(SubcategoryAdmin):
    """Admin for finish subcategories; only shows categories of DETAIL_TYPE."""

    def get_form(self, request, obj=None, **kwargs):
        form = super(FinishSubcategoryAdmin, self).get_form(request, obj, **kwargs)
        return _restrict_to_detail_type(form, 'category')


admin.site.register(FinishSubcategory, FinishSubcategoryAdmin)


class FinishDetailAdmin(DetailAdmin):
    """Admin for finish details; only shows subcategories of DETAIL_TYPE."""

    def get_form(self, request, obj=None, **kwargs):
        form = super(FinishDetailAdmin, self).get_form(request, obj, **kwargs)
        return _restrict_to_detail_type(form, 'subcategory')


admin.site.register(FinishDetail, FinishDetailAdmin)
michaupl/materialsapp
finishes/admin.py
Python
apache-2.0
1,110
#!/usr/bin/python3
# CGI smoke test for keybase saltpack signature checking via bmdjson.
import cgi
import cgitb
import datetime
import json
import os
import re
import requests
import subprocess
import sys
import time

from bmdjson import check_address


def print_result(sig_result):
    """Print a check_address() result dict as "[key] = value" lines.

    Sorting by key keeps the display order stable (sig_addr, sig_by,
    sig_good). Extracted because the original script duplicated this loop
    (and its comments) for both the PASS and FAIL cases.
    """
    for key, value in sorted(sig_result.items()):
        print("[" + str(key) + "] = ", value)


print("Content-Type: text/plain\n")
print("testing keybase")
print()

# A well-formed signature: expected to verify.
print("PASS:")
signature = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdyH7rvq v5weRa0zkSjiJmm 8dzt8BnSF7QPfAy AmWtlYORgWXP5hk aXmzZHPBPoIRpYD qsXcl0JX7RT65NS KLnnW8kwG9ujBNt r2bd6GNLnp4xVMr btCVAG2TMDpNhVf yXSbZmzQDnE6mIM Y4oS4YGVbw244Je Bc7lmO6225Gu6tj HgIwRnLz975GBZU Bc3GLDyRpvTEGXr AzRtx0gMk2FzHxf 2oimZKG. END KEYBASE SALTPACK SIGNED MESSAGE."
print_result(check_address(signature))

print()

# A corrupted signature: expected to fail verification.
print("FAIL: Bad String")
signature2 = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdy27rvq v5weRa0zkDL3e9k D1e7HgTLY1WFWdi UfZI1s56lquWUJu lBvdIblMbFGwTGa M9oYSI9cU7KjGW9 2JOGghIjQX3Fqw5 xsvEpPo9pEuA25J Ut0J0Fur0C3F8oZ n50PAvVWVmb0iEP 5MNUBEMHMo5DTtF OhK66v3FFwu0qJe 8R35q5A5ycevVsR pdaOBQQ1VGcNIlF 9YU6a0Wi5kd85JH rjSupUZ. END KEYBASE SALTPACK SIGNED MESSAGE."
print_result(check_address(signature2))

print()
print("end.")
joezippy/paywall
test-keybase.py
Python
apache-2.0
1,556
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test for ds_tc_resnet model in session mode."""

import numpy as np
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.layers.modes import Modes
from kws_streaming.models import utils
import kws_streaming.models.ds_tc_resnet as ds_tc_resnet
from kws_streaming.train import inference


class DsTcResnetTest(tf.test.TestCase):
  """Test ds_tc_resnet model in non streaming and streaming modes.

  Both streaming tests compare their output against the non-streaming
  prediction computed once in setUp, so any drift in the streaming
  conversion shows up as an assertAllClose failure.
  """

  def setUp(self):
    super(DsTcResnetTest, self).setUp()
    # TF1-style session is required by utils.model_to_tflite below.
    config = tf1.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf1.Session(config=config)
    tf1.keras.backend.set_session(self.sess)
    # Learning phase 0 = inference (disables dropout/BN training behavior).
    tf.keras.backend.set_learning_phase(0)
    # Fixed seed so the random input (and any random init) is reproducible.
    test_utils.set_seed(123)

    self.params = utils.ds_tc_resnet_model_params(True)

    self.model = ds_tc_resnet.model(self.params)
    self.model.summary()

    self.input_data = np.random.rand(self.params.batch_size,
                                     self.params.desired_samples)

    # run non streaming inference
    self.non_stream_out = self.model.predict(self.input_data)

  def test_ds_tc_resnet_stream(self):
    """Test for tf streaming with internal state."""
    # prepare tf streaming model
    model_stream = utils.to_streaming_inference(
        self.model, self.params, Modes.STREAM_INTERNAL_STATE_INFERENCE)
    model_stream.summary()

    # run streaming inference
    stream_out = inference.run_stream_inference_classification(
        self.params, model_stream, self.input_data)
    self.assertAllClose(stream_out, self.non_stream_out, atol=1e-5)

  def test_ds_tc_resnet_stream_tflite(self):
    """Test for tflite streaming with external state."""
    tflite_streaming_model = utils.model_to_tflite(
        self.sess, self.model, self.params,
        Modes.STREAM_EXTERNAL_STATE_INFERENCE)

    interpreter = tf.lite.Interpreter(model_content=tflite_streaming_model)
    interpreter.allocate_tensors()

    # before processing new test sequence we reset model state
    # (external state = every state tensor is an explicit input, zeroed here)
    inputs = []
    for detail in interpreter.get_input_details():
      inputs.append(np.zeros(detail['shape'], dtype=np.float32))

    stream_out = inference.run_stream_inference_classification_tflite(
        self.params, interpreter, self.input_data, inputs)
    self.assertAllClose(stream_out, self.non_stream_out, atol=1e-5)


if __name__ == '__main__':
  tf1.disable_eager_execution()
  tf.test.main()
google-research/google-research
kws_streaming/models/ds_tc_resnet_test.py
Python
apache-2.0
3,076
# Copyright 2020 The Kubeflow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from kfp import components from kfp import dsl from kfp import compiler component_op_1 = components.load_component_from_text(""" name: Write to GCS inputs: - {name: text, type: String, description: 'Content to be written to GCS'} outputs: - {name: output_gcs_path, type: GCSPath, description: 'GCS file path'} implementation: container: image: google/cloud-sdk:slim command: - sh - -c - | set -e -x echo "$0" | gsutil cp - "$1" - {inputValue: text} - {outputUri: output_gcs_path} """) component_op_2 = components.load_component_from_text(""" name: Read from GCS inputs: - {name: input_gcs_path, type: GCSPath, description: 'GCS file path'} implementation: container: image: google/cloud-sdk:slim command: - sh - -c - | set -e -x gsutil cat "$0" - {inputUri: input_gcs_path} """) @dsl.pipeline(name='simple-two-step-pipeline', pipeline_root='dummy_root') def my_pipeline(text: str = 'Hello world!'): component_1 = component_op_1(text=text).set_display_name('Producer') component_2 = component_op_2( input_gcs_path=component_1.outputs['output_gcs_path']) component_2.set_display_name('Consumer') if __name__ == '__main__': compiler.Compiler().compile( pipeline_func=my_pipeline, pipeline_parameters={'text': 'Hello KFP!'}, package_path=__file__.replace('.py', '.json'))
kubeflow/pipelines
sdk/python/kfp/compiler_cli_tests/test_data/two_step_pipeline.py
Python
apache-2.0
2,004
from __future__ import absolute_import, unicode_literals

import os
import os.path

from freight.constants import PROJECT_ROOT
from freight.exceptions import CommandError


class UnknownRevision(CommandError):
    # Raised by subclasses when a requested revision cannot be resolved.
    pass


class Vcs(object):
    """Abstract base for version-control backends operating in a workspace.

    Subclasses implement clone/update/checkout/describe. NOTE(review): this
    is Python 2 code (`iteritems`, `basestring`) — do not run under Py3
    without porting.
    """

    ssh_connect_path = os.path.join(PROJECT_ROOT, 'bin', 'ssh-connect')

    def __init__(self, workspace, url, username=None):
        self.url = url
        self.username = username
        self.workspace = workspace

        self._path_exists = None

    @property
    def path(self):
        # The checkout lives at the workspace's path.
        return self.workspace.path

    def get_default_env(self):
        """Extra environment variables for VCS commands; subclasses extend."""
        return {}

    def run(self, command, capture=False, workspace=None, *args, **kwargs):
        """Run a VCS command in `workspace`, returning stripped output if str.

        :param command: the command to execute
        :param capture: if True, capture and return the command's output
        :param workspace: defaults to this Vcs instance's workspace
        """
        if workspace is None:
            workspace = self.workspace

        # If the checkout doesn't exist yet, don't cd into a missing dir.
        if not self.exists(workspace=workspace):
            kwargs.setdefault('cwd', None)

        env = kwargs.pop('env', {})
        for key, value in self.get_default_env().iteritems():
            env.setdefault(key, value)
        env.setdefault('FREIGHT_SSH_REPO', self.url)
        kwargs['env'] = env

        if capture:
            handler = workspace.capture
        else:
            handler = workspace.run

        rv = handler(command, *args, **kwargs)
        if isinstance(rv, basestring):
            return rv.strip()
        return rv

    def exists(self, workspace=None):
        # Existence of the checkout directory is the proxy for "cloned".
        if workspace is None:
            workspace = self.workspace
        return os.path.exists(workspace.path)

    def clone_or_update(self):
        # Idempotent entry point: clone on first use, update thereafter.
        if self.exists():
            self.update()
        else:
            self.clone()

    def clone(self):
        raise NotImplementedError

    def update(self):
        raise NotImplementedError

    def checkout(self, ref):
        raise NotImplementedError

    def describe(self, ref):
        """
        Given a `ref` return the fully qualified version.
        """
        raise NotImplementedError

    def get_default_revision(self):
        raise NotImplementedError
jkimbo/freight
freight/vcs/base.py
Python
apache-2.0
1,988
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion
from tensorflow.python.util.tf_export import tf_export


@tf_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
  """Leaky version of a Rectified Linear Unit.

  It allows a small gradient when the unit is not active:
  `f(x) = alpha * x for x < 0`,
  `f(x) = x for x >= 0`.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Same shape as the input.

  Arguments:
      alpha: float >= 0. Negative slope coefficient.

  """

  def __init__(self, alpha=0.3, **kwargs):
    super(LeakyReLU, self).__init__(**kwargs)
    self.supports_masking = True
    # Cast once so the slope matches the backend float dtype.
    self.alpha = K.cast_to_floatx(alpha)

  def call(self, inputs):
    # K.relu implements the leaky variant when alpha != 0.
    return K.relu(inputs, alpha=self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(LeakyReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    # Elementwise activation: shape is unchanged.
    return input_shape


@tf_export('keras.layers.PReLU')
class PReLU(Layer):
  """Parametric Rectified Linear Unit.

  It follows:
  `f(x) = alpha * x for x < 0`,
  `f(x) = x for x >= 0`,
  where `alpha` is a learned array with the same shape as x.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Same shape as the input.

  Arguments:
      alpha_initializer: initializer function for the weights.
      alpha_regularizer: regularizer for the weights.
      alpha_constraint: constraint for the weights.
      shared_axes: the axes along which to share learnable
          parameters for the activation function.
          For example, if the incoming feature maps
          are from a 2D convolution
          with output shape `(batch, height, width, channels)`,
          and you wish to share parameters across space
          so that each filter only has one set of parameters,
          set `shared_axes=[1, 2]`.

  """

  def __init__(self,
               alpha_initializer='zeros',
               alpha_regularizer=None,
               alpha_constraint=None,
               shared_axes=None,
               **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    # Normalize shared_axes to a list (or None) for build().
    if shared_axes is None:
      self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
      self.shared_axes = [shared_axes]
    else:
      self.shared_axes = list(shared_axes)

  @shape_type_conversion
  def build(self, input_shape):
    # alpha has the input's shape minus the batch axis; shared axes are
    # collapsed to size 1 (and marked broadcastable for the Theano path).
    param_shape = list(input_shape[1:])
    self.param_broadcast = [False] * len(param_shape)
    if self.shared_axes is not None:
      for i in self.shared_axes:
        param_shape[i - 1] = 1  # -1: param_shape excludes the batch axis
        self.param_broadcast[i - 1] = True
    self.alpha = self.add_weight(
        shape=param_shape,
        name='alpha',
        initializer=self.alpha_initializer,
        regularizer=self.alpha_regularizer,
        constraint=self.alpha_constraint)
    # Set input spec
    axes = {}
    if self.shared_axes:
      for i in range(1, len(input_shape)):
        if i not in self.shared_axes:
          axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True

  def call(self, inputs, mask=None):
    pos = K.relu(inputs)
    if K.backend() == 'theano':
      # (inputs - abs(inputs)) * 0.5 is the negative part of inputs.
      neg = (
          K.pattern_broadcast(self.alpha, self.param_broadcast) *
          (inputs - K.abs(inputs)) * 0.5)
    else:
      neg = -self.alpha * K.relu(-inputs)
    return pos + neg

  def get_config(self):
    config = {
        'alpha_initializer': initializers.serialize(self.alpha_initializer),
        'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
        'alpha_constraint': constraints.serialize(self.alpha_constraint),
        'shared_axes': self.shared_axes
    }
    base_config = super(PReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


@tf_export('keras.layers.ELU')
class ELU(Layer):
  """Exponential Linear Unit.

  It follows:
  `f(x) =  alpha * (exp(x) - 1.) for x < 0`,
  `f(x) = x for x >= 0`.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Same shape as the input.

  Arguments:
      alpha: scale for the negative factor.

  """

  def __init__(self, alpha=1.0, **kwargs):
    super(ELU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha = K.cast_to_floatx(alpha)

  def call(self, inputs):
    return K.elu(inputs, self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(ELU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


@tf_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
  """Thresholded Rectified Linear Unit.

  It follows:
  `f(x) = x for x > theta`,
  `f(x) = 0 otherwise`.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Same shape as the input.

  Arguments:
      theta: float >= 0. Threshold location of activation.

  """

  def __init__(self, theta=1.0, **kwargs):
    super(ThresholdedReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.theta = K.cast_to_floatx(theta)

  def call(self, inputs, mask=None):
    # Multiply by a 0/1 mask rather than branching (graph-friendly).
    return inputs * K.cast(K.greater(inputs, self.theta), K.floatx())

  def get_config(self):
    config = {'theta': float(self.theta)}
    base_config = super(ThresholdedReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


@tf_export('keras.layers.Softmax')
class Softmax(Layer):
  """Softmax activation function.

  Input shape:
      Arbitrary. Use the keyword argument `input_shape`
      (tuple of integers, does not include the samples axis)
      when using this layer as the first layer in a model.

  Output shape:
      Same shape as the input.

  Arguments:
      axis: Integer, axis along which the softmax normalization is applied.
  """

  def __init__(self, axis=-1, **kwargs):
    super(Softmax, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis

  def call(self, inputs):
    return activations.softmax(inputs, axis=self.axis)

  def get_config(self):
    config = {'axis': self.axis}
    base_config = super(Softmax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
Xeralux/tensorflow
tensorflow/python/keras/_impl/keras/layers/advanced_activations.py
Python
apache-2.0
8,770
import torch

from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model


class FakeModelForTestingNormalizationBiasVerification(Model):
    """Minimal conv + batch-norm model for normalization-bias verification tests.

    A Conv2d feeding a BatchNorm2d; the conv's bias is redundant in that
    configuration, so `use_bias` lets tests construct both the flagged
    (bias=True) and the clean (bias=False) variant.
    """

    def __init__(self, use_bias=True):
        # Empty Vocabulary: this model never looks anything up; it only
        # satisfies the allennlp Model constructor.
        super().__init__(vocab=Vocabulary())
        self.conv = torch.nn.Conv2d(3, 5, kernel_size=1, bias=use_bias)
        self.bn = torch.nn.BatchNorm2d(5)

    def forward(self, x):
        # x: (B, 3, H, W)
        out = self.bn(self.conv(x))
        # Sum to a scalar so the output dict has the "loss" key allennlp
        # training loops expect.
        return {"loss": out.sum()}
allenai/allennlp
allennlp/common/testing/confidence_check_test.py
Python
apache-2.0
488
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: redgem
   :platform: Unix, Windows
   :synopsis: RedGEM Algorithm

.. moduleauthor:: pyTFA team

Model class
"""

from pytfa.redgem.network_expansion import NetworkExpansion
from pytfa.redgem.lumpgem import LumpGEM
from cobra import Reaction
from .utils import remove_blocked_reactions, set_medium
import yaml


class RedGEM():
    """Driver for the RedGEM reduction workflow: network expansion + lumping."""

    def __init__(self, gem, parameters_path, inplace=False):
        """
        :param gem: the genome-scale model to reduce
        :param parameters_path: path to a YAML parameter file
        :param inplace: if True, no deepcopy is performed: the modifications
            are applied directly onto the gem
        """
        self.read_parameters(parameters_path)
        prepared_gem = set_medium(gem, self.params['medium'], inplace)

        self._gem = prepared_gem
        # This copy is the one used to perform the lumping
        self._source_gem = prepared_gem.copy()

        self.logger = self._gem.logger

        self.fill_default_params()
        self.set_solver()

    def read_parameters(self, parameters_path):
        """Load the YAML parameter file into ``self.params``."""
        with open(parameters_path, 'r') as stream:
            try:
                self.params = yaml.safe_load(stream)
                print("Opened parameters file")
            except yaml.YAMLError as exc:
                print(exc)

    def fill_default_params(self):
        """Fill in defaults for parameters missing from the YAML file."""
        # BUG FIX: this used to test the key "inorganic" (singular), which is
        # never present, so a user-supplied "inorganics" list was always
        # overwritten by the automatic computation.
        if "inorganics" not in self.params or self.params["inorganics"] == "auto":
            self.logger.info("Automatically computing inorganics to use")
            self.params["inorganics"] = self._extract_inorganics()
        if "growth_rate" not in self.params or self.params["growth_rate"] == "auto":
            self.logger.info("Setting minimal growth rate to 95% of the TFA solution")
            obj_val = self._source_gem.slim_optimize()
            self.logger.info("Setting minimal growth rate to {}".format(obj_val))
            self.params["growth_rate"] = 0.95 * obj_val
        if "force_solve" not in self.params:
            self.params["force_solve"] = False
        if "timeout" not in self.params:
            self.logger.info("Using default timeout : 3600s")
            self.params["timeout"] = 3600
        if "feasibility" not in self.params:
            self.logger.info("Using default solver feasibility : 1e-9")
            self.params["feasibility"] = 1e-9
        else:
            # numbers like 1e-9 are detected as strings by the yaml module;
            # to enable their use, we cast them into floats
            try:
                self.params["feasibility"] = float(self.params["feasibility"])
            except ValueError as v:
                self.logger.error(v)

    def set_solver(self):
        """Propagate the configured solver to both working models."""
        if "solver" not in self.params or self.params["solver"].lower() == "auto":
            return None
        elif 'gurobi' in self.params["solver"].lower():
            solver = 'gurobi'
        elif 'cplex' in self.params["solver"].lower():
            solver = 'cplex'
        elif 'glpk' in self.params["solver"].lower():
            solver = 'glpk'
        else:
            solver = self.params["solver"]

        self._gem.solver = solver
        self._source_gem.solver = solver

    def run(self):
        """Execute the reduction and return the reduced model.

        Steps: network expansion around the core subsystems, lump
        computation on the source model, then assembly of the final reduced
        network (biomass + exchanges + transports + core + lumps), and an
        optional blocked-reaction cleanup by FVA.
        """
        # Extracting parameters
        core_subsystems = self.params["core_subsystems"]
        extracellular_system = self.params["extracellular_system"]
        biomass_rxn_ids = self.params["biomass_rxns"]
        biomass_rxns = [self._gem.reactions.get_by_id(x) for x in biomass_rxn_ids]
        main_bio_rxn = biomass_rxns[0]
        growth_rate = self.params["growth_rate"]
        small_metabolites = self.params["small_metabolites"]
        cofactor_pairs = self.params["cofactor_pairs"]
        # Flatten cofactor_pairs list
        cofactors = [cofactor for pair in cofactor_pairs for cofactor in pair]
        inorganics = self.params["inorganics"]
        d = self.params["d"]
        n = self.params["n"]
        lump_method = self.params["lump_method"]
        force_solve = self.params["force_solve"]
        timeout = self.params["timeout"]

        # Not every solver exposes these knobs; degrade gracefully.
        try:
            self._gem.solver.configuration.tolerances.feasibility = self.params["feasibility"]
            self._gem.solver.configuration.tolerances.integrality = self.params["feasibility"]
        except AttributeError as e:
            self.logger.error('Solver {} is not compatible with tolerance parameters'
                              .format(self._gem.solver))
        try:
            self._source_gem.solver.configuration.tolerances.feasibility = self.params["feasibility"]
            self._source_gem.solver.configuration.tolerances.integrality = self.params["feasibility"]
        except AttributeError as e:
            self.logger.error('Solver {} is not compatible with tolerance parameters'
                              .format(self._source_gem.solver))

        self.logger.info("Computing network expansion...")
        expander = NetworkExpansion(self._gem, core_subsystems,
                                    extracellular_system,
                                    cofactors, small_metabolites, inorganics,
                                    d, n)
        reduced_gem = expander.run()
        self.logger.info("Done.")

        # Add the expansion to core reactions
        core_reactions = reduced_gem.reactions

        self.logger.info("Computing lumps...")
        lumper = LumpGEM(self._source_gem, core_reactions, self.params)
        lumps = lumper.compute_lumps(force_solve, method=lump_method)
        self.logger.info("Done.")

        self.logger.info("Create final network...")
        to_add = [x for x in biomass_rxns
                  + lumper._exchanges
                  + lumper._transports
                  + lumper._rcore
                  if x.id not in reduced_gem.reactions]
        reduced_gem.add_reactions(to_add)

        for rxns in lumps.values():
            the_lumps = [add_lump(reduced_gem, rxn, id_suffix='_{}'.format(e))
                         for e, rxn in enumerate(rxns)]
            # reduced_gem.add_reactions(rxns)
        self.logger.info("Done.")

        reduced_gem.objective = main_bio_rxn
        # Force at least the configured growth rate while testing for blocks.
        reduced_gem.reactions.get_by_id(main_bio_rxn.id).lower_bound = growth_rate

        if self.params['remove_blocked_reactions']:
            self.logger.info('Detecting blocked reactions')
            # Remove blocked reactions
            nrxn_1 = len(reduced_gem.reactions)
            self.removed_reactions = remove_blocked_reactions(reduced_gem)
            nrxn_2 = len(reduced_gem.reactions)
            self.logger.info('Removed {} blocked reaction with '
                             'FVA post-processing'.format(nrxn_1 - nrxn_2))

        if main_bio_rxn.id not in reduced_gem.reactions:
            raise RuntimeError('Main Biomass reaction appears blocked')

        # For debugging purposes
        self.lumper = lumper
        # Release the temporary growth-rate constraint before returning.
        main_bio_rxn.lower_bound = 0

        return reduced_gem

    def _extract_inorganics(self):
        """
        Extract inorganics from self._gem based on their formula

        :return: list of inorganics metabolites
        """
        inorganics = []
        for met in self._gem.metabolites:
            if not met.elements == {}:  # Edge case
                # met is inorganic if it has 0 carbon in its formula
                if (not 'C' in met.elements) or met.elements['C'] <= 0:
                    inorganics.append(met.id)
        return inorganics


def add_lump(model, lump_object, id_suffix=''):
    """Materialize a lump as a cobra Reaction on `model` and return it.

    :param model: the model receiving the new reaction
    :param lump_object: a lump holding id_, metabolites, GPR and subnetwork
    :param id_suffix: appended to the lump id to keep ids unique
    """
    new = Reaction(id=lump_object.id_ + id_suffix)
    model.add_reaction(new)
    new.add_metabolites(lump_object.metabolites)
    new.gene_reaction_rule = lump_object.gene_reaction_rule
    new.subnetwork = lump_object.subnetwork
    return new
EPFL-LCSB/pytfa
pytfa/redgem/redgem.py
Python
apache-2.0
7,632
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation metrics."""
import dataclasses
import logging
from lsmmdma.mmdma_functions import compute_sqpairwise_distances
import math
import numpy as np
from pykeops.torch import LazyTensor
import scipy
from sklearn.neighbors import KNeighborsClassifier
import torch
from typing import Optional, List


@dataclasses.dataclass()
class OutputSupervised():
  """Bundle of evaluation results returned by `Evaluation`.

  A value of -1 indicates that the corresponding metric was not computed.
  """
  foscttm: float  # Fraction Of Samples Closer Than the True Match.
  top1: float  # Fraction of true matches among the top 1% neighbours.
  top5: float  # Fraction of true matches among the top 5% neighbours.
  no: float  # Neighbourhood overlap (vector of fractions when computed).
  lta: float  # Label transfer accuracy.


# Evaluation.
@dataclasses.dataclass()
class Evaluation():
  """Computes evaluation metrics, knowing the ground truth alignment.

  Five metrics are being computed:
  - FOSCTTM (Fraction Of Samples Closer Than the True Match): it gives the
  average number of samples from one view that are closer to the true match
  in the other view, in the learned low dimensional space. As this metrics is
  not symmetrical if we exchange the first and second views, the results are
  averaged calculating the FOSCTTM in both directions. Two implementations
  are available, `_foscttm` and `_foscttm_keops`, the latter scaling to
  several hundreds of samples.
  - topK (top1 and top5): it computes the fraction of samples from one view
  correctly assigned among top K% nearest neighbours of the true match from
  the other view. As this metrics is not symmetrical if we exchange the first
  and second views, the results are averaged calculating top1 in both
  directions. K is set to be 1 and 5 by default.
  - Neighbourhood Overlap: it computes the fraction of samples from one view
  correctly assigned among the top K nearest neighbours of the true match
  from the other view, for all K. The output is a vector of dimension K.
  - Label Transfer Accuracy: assigns the label to samples from the
  second_view (resp. first_view) based on the 5-nearest neighbours from the
  first_view (resp. second_view). Calculates accuracy of the predicted
  labels.
  """
  # Permutation mapping the second view onto the first; None disables the
  # alignment-based metrics (FOSCTTM, topK, NO).
  ground_truth_alignment: Optional[np.ndarray] = None
  # Per-view sample labels; None disables the label transfer accuracy.
  cell_labels: Optional[List[np.ndarray]] = None
  n_neighbours: int = 5
  device: torch.device = torch.device('cuda')
  # When True, only FOSCTTM (and LTA if labels are given) is computed.
  short: bool = False

  def compute_all_evaluation(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor
      ) -> OutputSupervised:
    """Computes evaluation measures, knowing the ground truth alignment.

    Each metric is computed best-effort: failures (typically OOM) are logged
    and the metric keeps its sentinel value -1.
    """
    if self.ground_truth_alignment is not None:
      second_view_aligned = second_view[self.ground_truth_alignment]
    n = first_view.shape[0]
    m = second_view.shape[0]

    # Sentinel -1 marks "not computed".
    foscttm = -1
    top1 = -1
    top5 = -1
    neigh_overlap = -1
    lta = -1

    if self.ground_truth_alignment is not None:
      try:
        # The dense implementation is quadratic in memory; switch to the
        # KeOps lazy implementation for larger aligned sets.
        if n == m and n < 5000:
          foscttm = self._foscttm(first_view, second_view_aligned)
        elif n == m:
          foscttm = self._foscttm_keops(first_view, second_view_aligned)
        else:
          logging.warning(f'FOSCTTM was not computed because {n} != {m}.')
      # NOTE: narrowed from a bare `except:` so that KeyboardInterrupt and
      # SystemExit are no longer swallowed.
      except Exception:
        logging.warning(
            'FOSCTTM was not computed and most likely led to an OOM issue.')

      if not self.short:
        try:
          top1 = self._topx_keops(
              first_view, second_view_aligned, topk=1, percentage=True)
        except Exception:
          logging.warning(
              'Top1 can not be computed with a number of samples <100.')
        try:
          top5 = self._topx_keops(
              first_view, second_view_aligned, topk=5, percentage=True)
        except Exception:
          logging.warning(
              'Top5 can not be computed with a number of samples <20.')
        try:
          if n == m:
            # For large problems, process the first view by batches to
            # bound the size of the pairwise distance matrix.
            if n * m < 1e7:
              neigh_overlap = self._neighbour_overlap(
                  first_view, second_view_aligned)
            else:
              logging.warning("""Switching to batch version""")
              neigh_overlap = self._neighbour_overlap_batch(
                  first_view, second_view_aligned)
        except Exception:
          logging.warning(f"""Neighbourhood overlap was not computed, either
              because {n} != {m} or because it might have led to an OOM
              issue""")
      else:
        logging.info("""TopK and NO were not computed.""")

    if self.cell_labels is not None:
      try:
        # LTA is direction-dependent; average both transfer directions.
        lta1 = self._label_transfer_accuracy(
            first_view, second_view, self.cell_labels, k=self.n_neighbours)
        lta2 = self._label_transfer_accuracy(
            second_view, first_view, self.cell_labels[::-1],
            k=self.n_neighbours)
        lta = (lta1 + lta2) / 2.0
      except Exception:
        logging.warning("""LTA was not computed because it might have led to
            an OOM issue""")
    return OutputSupervised(foscttm, top1, top5, neigh_overlap, lta)

  def _foscttm(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor
      ) -> float:
    """Computes the fraction of samples closer to the true match based on
    squared euclidean distances between samples.

    Dense implementation: materialises the full n x n distance matrix.
    """
    n = first_view.shape[0]
    # Assumes the views are aligned: the diagonal holds the distance of each
    # sample to its true match.
    distances = compute_sqpairwise_distances(first_view, second_view)
    # Count, in both directions, cross-view samples closer than the match.
    fraction = (
        torch.sum(distances < torch.diag(distances))
        + torch.sum(torch.t(distances) < torch.diag(distances))
        ) / (2 * n * (n - 1))
    return fraction.item()

  def _foscttm_keops(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor,
      ) -> float:
    """Computes the fraction of samples closer to the true match based on
    squared euclidean distances between samples.

    KeOps implementation: the n x n distance matrix stays symbolic, so this
    scales to much larger n than `_foscttm`.
    """
    n, d = first_view.shape
    first_view_i = LazyTensor(first_view.view(n, 1, d))
    second_view_j = LazyTensor(second_view.view(1, n, d))
    # Symbolic (n, n) matrix of squared distances.
    distance_ij = ((first_view_i - second_view_j)**2).sum(dim=2)
    # Distance of each sample to its true match (the "diagonal").
    diagonal = ((first_view - second_view)**2).sum(axis=1)
    diagonal1 = LazyTensor(diagonal.view(-1, 1, 1))
    diagonal2 = LazyTensor(diagonal.view(1, -1, 1))
    # sign().relu() yields 1 where the cross-pair is closer than the match.
    cttm1 = (diagonal1 - distance_ij).sign().relu()
    cttm2 = (diagonal2 - distance_ij).sign().relu()
    cttm1 = cttm1.sum(1)
    cttm2 = cttm2.sum(1)
    return (cttm1.sum() + cttm2.sum()) / (n * (n - 1) * 2)

  def _topx_keops(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor,
      topk: int = 1,
      percentage: bool = True
      ) -> float:
    """Computes fraction of samples correctly assigned among topk (%)
    nearest neighbours.

    When `percentage` is True, `topk` is interpreted as a percentage of the
    number of samples; otherwise as an absolute neighbour count.
    """

    def get_count_knn(distance: LazyTensor, n_sample, dim: int = 0):
      # Grid <-> Samples, (M**2, K) integer tensor.
      if percentage:
        indknn = distance.argKmin(int(topk * n_sample / 100), dim=dim)
      else:
        indknn = distance.argKmin(topk, dim=dim)
      # A zero difference means sample i found its true match (index i).
      frac = indknn - torch.arange(n_sample).reshape(-1, 1).to(self.device)
      return torch.count_nonzero(frac == 0).item()

    n = first_view.shape[0]
    m = second_view.shape[0]
    first_view_i = LazyTensor(first_view[:, None, :])  # (M**2, 1, 2)
    second_view_j = LazyTensor(second_view[None, :, :])  # (1, N, 2)
    # (M**2, N) symbolic matrix of squared distances.
    distance_ij = ((first_view_i - second_view_j) ** 2).sum(-1)
    # Average over both assignment directions.
    count_nn0 = get_count_knn(distance_ij, n, dim=0)
    count_nn1 = get_count_knn(distance_ij, m, dim=1)
    return (count_nn0 / m + count_nn1 / n) / 2.0

  def _neighbour_overlap(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor,
      ) -> np.ndarray:
    """Computes the neighbourhood overlap metric.

    The Neighbourhood Overlap is the fraction of samples that have among
    their k-neighbours the true match. This function assumes that the two
    datasets are aligned (sample1 in first_view corresponds to sample1 in
    second_view) and have the same number of cells.

    Returns a vector of length n: entry k is the fraction of samples whose
    true match ranks within their k nearest neighbours.
    """
    n = first_view.shape[0]
    distances = compute_sqpairwise_distances(first_view, second_view)
    # Rank of the true match among each sample's neighbours.
    ranking = torch.diagonal(torch.argsort(distances, dim=1))
    cumul_rank = [torch.sum(ranking <= rank).item() for rank in range(n)]
    cumul_frac = np.array(cumul_rank) / n
    return cumul_frac

  def _neighbour_overlap_batch(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor,
      batch_size: int = 1024,
      ) -> np.ndarray:
    """Computes the neighbourhood overlap metrics by batches of the
    first_view.

    The Neighbourhood Overlap is the fraction of samples that have among
    their k-neighbours the true match. This function assumes that the two
    datasets are aligned (sample1 in first_view corresponds to sample1 in
    second_view) and have the same number of cells.
    """
    n = first_view.shape[0]
    n_batch = math.ceil(n / batch_size)
    cumul_frac = np.zeros(n)
    for i in range(n_batch):
      # Only a (batch_size, n) slice of the distance matrix is materialised.
      distances = compute_sqpairwise_distances(
          first_view[i * batch_size: (i+1) * batch_size], second_view)
      ranking = torch.diagonal(torch.argsort(distances, dim=1))
      cumul_rank = [torch.sum(ranking <= rank).item() for rank in range(n)]
      cumul_frac += np.array(cumul_rank)
    return cumul_frac / n

  def _label_transfer_accuracy(
      self,
      first_view: torch.FloatTensor,
      second_view: torch.FloatTensor,
      cell_labels: List[np.ndarray],
      k: int = 5):
    """Computes the Label Transfer Accuracy metrics.

    Fits a k-NN classifier on first_view with cell_labels[0] and scores its
    predictions on second_view against cell_labels[1].
    """
    first_view = first_view.detach().cpu()
    second_view = second_view.detach().cpu()
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(first_view, cell_labels[0])
    predictions = knn.predict(second_view)
    count = 0
    for label1, label2 in zip(predictions, cell_labels[1]):
      if label1 == label2:
        count += 1
    return count / second_view.shape[0]
google-research/large_scale_mmdma
lsmmdma/metrics.py
Python
apache-2.0
10,204
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

class Bracket(object):
	"""Node of an intrusive circular doubly linked list.

	A bracket is "orphaned" when it is not linked into any list
	(both prev and next are None).
	"""

	def __init__(self, data):
		self.data = data
		self.prev = None
		self.next = None

	def delete(self):
		"""Unlinks this bracket from its list, leaving it orphaned."""
		assert not self.isOrphaned()
		self.prev.next = self.next
		self.next.prev = self.prev
		self.prev = None
		self.next = None

	def insertAfter(self, other):
		"""Links the orphaned bracket *other* directly after this one."""
		assert other.isOrphaned()
		other.prev = self
		other.next = self.next
		self.next.prev = other
		self.next = other

	def isOrphaned(self):
		# PEP 8: compare against None with identity, not equality.
		return self.prev is None and self.next is None

	def __repr__(self):
		return "Bracket(%r)" % self.data


class BracketList(object):
	"""Circular doubly linked list of brackets with a sentinel root node.

	The sentinel makes push/delete branch-free and allows O(1) concat.
	"""

	def __init__(self):
		# Sentinel node; an empty list points the root at itself.
		self.root = Bracket(None)
		self.root.next = self.root
		self.root.prev = self.root
		self.__size = 0

	def size(self):
		"""Returns the number of brackets in the list."""
		return self.__size

	def push(self, bracket):
		"""Inserts *bracket* at the front (top) of the list."""
		self.root.insertAfter(bracket)
		self.__size += 1

	def top(self):
		"""Returns the most recently pushed bracket.

		On an empty list this returns the sentinel root, matching the
		original behavior; callers should check size() first.
		"""
		return self.root.next

	def delete(self, bracket):
		"""Removes *bracket* (which must belong to this list)."""
		bracket.delete()
		self.__size -= 1

	def forwards(self):
		"""Yields brackets from newest (top) to oldest."""
		current = self.root.next
		# Identity comparison is intentional: the sentinel terminates the
		# traversal, and Bracket has no custom __eq__.
		while current is not self.root:
			yield current
			current = current.next

	def backwards(self):
		"""Yields brackets from oldest to newest (top)."""
		current = self.root.prev
		while current is not self.root:
			yield current
			current = current.prev

	def concat(self, other):
		"""Appends all brackets of *other* to this list, emptying *other*.

		Runs in O(1): only the boundary links are rewired.
		"""
		# Join the lists.
		self.root.prev.next = other.root.next
		other.root.next.prev = self.root.prev

		# Connect the end of the new list to the root.
		self.root.prev = other.root.prev
		other.root.prev.next = self.root

		self.__size += other.__size

		# Reset the other root so *other* is a valid empty list.
		other.root.next = other.root
		other.root.prev = other.root
		other.__size = 0
ncbray/pystream
sandbox/sese/bracket.py
Python
apache-2.0
2,128
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Docker JSON parser."""

import unittest

from plaso.lib import definitions
from plaso.parsers import docker

from tests.parsers import test_lib


class DockerJSONUnitTest(test_lib.ParserTestCase):
  """Tests for the Docker JSON parser."""

  def testParseContainerLog(self):
    """Tests the _ParseContainerLogJSON function."""
    container_identifier = (
        'e7d0b7ea5ccf08366e2b0c8afa2318674e8aefe802315378125d2bb83fe3110c')

    parser = docker.DockerJSONParser()
    path_segments = [
        'docker', 'containers', container_identifier, 'container-json.log']
    storage_writer = self._ParseFile(path_segments, parser)

    # One event is expected per log line in the test fixture.
    self.assertEqual(storage_writer.number_of_events, 10)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    # Timestamps with more than microsecond precision in the fixture are
    # truncated by the parser, hence the repeated trailing values.
    expected_timestamps = [
        '2016-01-07 16:49:10.000000',
        '2016-01-07 16:49:10.200000',
        '2016-01-07 16:49:10.230000',
        '2016-01-07 16:49:10.237000',
        '2016-01-07 16:49:10.237200',
        '2016-01-07 16:49:10.237220',
        '2016-01-07 16:49:10.237222',
        '2016-01-07 16:49:10.237222',  # losing sub microsec info
        '2016-01-07 16:49:10.237222',
        '2016-01-07 16:49:10.237222']

    # All events in this fixture carry the same log line and source; only
    # the timestamp differs, so a single expected-values dict is reused.
    expected_event_values = {
        'container_id': container_identifier,
        'data_type': 'docker:json:container:log',
        'log_line': (
            '\x1b]0;root@e7d0b7ea5ccf: '
            '/home/plaso\x07root@e7d0b7ea5ccf:/home/plaso# ls\r\n'),
        'log_source': 'stdout'}

    for index, event in enumerate(events):
      self.CheckTimestamp(event.timestamp, expected_timestamps[index])
      self.CheckEventValues(storage_writer, event, expected_event_values)

  def testParseContainerConfig(self):
    """Tests the _ParseContainerConfigJSON function."""
    container_identifier = (
        'e7d0b7ea5ccf08366e2b0c8afa2318674e8aefe802315378125d2bb83fe3110c')

    parser = docker.DockerJSONParser()
    path_segments = [
        'docker', 'containers', container_identifier, 'config.json']
    storage_writer = self._ParseFile(path_segments, parser)

    # The config fixture yields exactly two events: container started and
    # container created.
    self.assertEqual(storage_writer.number_of_events, 2)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    expected_event_values = {
        'action': 'Container Started',
        'container_id': container_identifier,
        'container_name': 'e7d0b7ea5ccf',
        'data_type': 'docker:json:container',
        'date_time': '2016-01-07 16:49:08.674873'}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)

    expected_event_values = {
        'action': 'Container Created',
        'container_id': container_identifier,
        'container_name': 'e7d0b7ea5ccf',
        'data_type': 'docker:json:container',
        'date_time': '2016-01-07 16:49:08.507979'}

    self.CheckEventValues(storage_writer, events[1], expected_event_values)

  def testParseLayerConfig(self):
    """Tests the _ParseLayerConfigJSON function."""
    layer_identifier = (
        '3c9a9d7cc6a235eb2de58ca9ef3551c67ae42a991933ba4958d207b29142902b')

    parser = docker.DockerJSONParser()
    path_segments = ['docker', 'graph', layer_identifier, 'json']
    storage_writer = self._ParseFile(path_segments, parser)

    # A layer config fixture produces a single "layer added" event.
    self.assertEqual(storage_writer.number_of_events, 1)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    expected_event_values = {
        'command': (
            '/bin/sh -c sed -i \'s/^#\\s*\\(deb.*universe\\)$/\\1/g\' '
            '/etc/apt/sources.list'),
        'data_type': 'docker:json:layer',
        'date_time': '2015-10-12 17:27:03.079273',
        'layer_id': layer_identifier,
        'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)


if __name__ == '__main__':
  unittest.main()
kiddinn/plaso
tests/parsers/docker.py
Python
apache-2.0
4,229
# -*- coding: utf-8 -*-

from framework.tasks import app
from framework.tasks.handlers import enqueue_task
from website import settings

from . import piwik


@app.task(bind=True, max_retries=5, default_retry_delay=60)
def _update_node(self, node_id, updated_fields=None):
    """Celery task: push a node's updated fields to piwik in a transaction.

    Retries up to ``max_retries`` times with ``default_retry_delay`` between
    attempts when the piwik update fails.
    """
    # Imported locally to avoid circular imports at module load time.
    from framework.transactions.context import TokuTransaction
    from website import models

    loaded_node = models.Node.load(node_id)
    try:
        with TokuTransaction():
            piwik._update_node_object(loaded_node, updated_fields)
    except Exception as exc:
        # Hand the failure back to celery's retry machinery.
        raise self.retry(exc=exc)


def update_node(node_id, updated_fields):
    """Update node analytics, asynchronously when celery is enabled."""
    if not settings.USE_CELERY:
        # Synchronous fallback for deployments without a task queue.
        _update_node(node_id, updated_fields)
        return
    enqueue_task(_update_node.s(node_id, updated_fields))
AndrewSallans/osf.io
framework/analytics/tasks.py
Python
apache-2.0
822
# coding=utf-8

from ..base import BitbucketBase


class BitbucketCloudBase(BitbucketBase):
    """Base wrapper for Bitbucket Cloud REST objects.

    Adds type validation of the wrapped payload, link extraction and
    transparent iteration over paged API responses.
    """

    def __init__(self, url, *args, **kwargs):
        """
        Init the rest api wrapper

        :param url: string: The base url used for the rest api.
        :param *args: list: The fixed arguments for the AtlassianRestApi.
        :param **kwargs: dict: The keyword arguments for the AtlassianRestApi.

        :return: nothing
        """
        # ``expected_type`` is consumed here so it is not forwarded to the
        # base class constructor.
        expected_type = kwargs.pop("expected_type", None)
        super(BitbucketCloudBase, self).__init__(url, *args, **kwargs)
        # Fail fast when the payload's "type" field does not match what the
        # subclass expects (e.g. "repository").
        if expected_type is not None and not expected_type == self.get_data("type"):
            raise ValueError("Expected type of data is [{}], got [{}].".format(expected_type, self.get_data("type")))

    def get_link(self, link):
        """
        Get a link from the data.

        :param link: string: The link identifier

        :return: The requested link href or None if it isn't present
        """
        links = self.get_data("links")
        if links is None or link not in links:
            return None
        return links[link]["href"]

    def _get_paged(
        self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False
    ):
        """
        Used to get the paged data

        :param url: string: The url to retrieve
        :param params: dict (default is None): The parameters
        :param data: dict (default is None): The data
        :param flags: string[] (default is None): The flags
        :param trailing: bool (default is None): If True, a trailing slash is added to the url
        :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root
        :param paging_workaround: bool (default is False): If True, the paging is done on our own because of
                                  https://jira.atlassian.com/browse/BCLOUD-13806

        :return: A generator object for the data elements
        """
        if params is None:
            params = {}
        if paging_workaround:
            # Drive the page counter ourselves instead of trusting the
            # server-provided "next" link (see BCLOUD-13806 above).
            params["page"] = 1
        while True:
            response = super(BitbucketCloudBase, self).get(
                url,
                trailing=trailing,
                params=params,
                data=data,
                flags=flags,
                absolute=absolute,
            )
            # An empty "values" list marks the end of the paged data.
            if len(response.get("values", [])) == 0:
                return
            for value in response["values"]:
                yield value
            if paging_workaround:
                params["page"] += 1
            else:
                url = response.get("next")
                if url is None:
                    break
                # From now on we have absolute URLs with parameters
                absolute = True
                # Params are now provided by the url
                params = {}
                # Trailing should not be added as it is already part of the url
                trailing = False

        return
AstroTech/atlassian-python-api
atlassian/bitbucket/cloud/base.py
Python
apache-2.0
3,104
#!/usr/bin/env python3
#
# (C) Copyright 2015 by Marek Hakala <hakala.marek@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class FileLoader(object):
    """Reads the entire contents of a file into memory as bytes."""

    def __init__(self, filename):
        # Path of the file to read; nothing is opened until loadFile().
        self.filename = filename

    def loadFile(self):
        """Reads the whole file into memory.

        Uses a context manager so the handle is closed even if read()
        raises, fixing the handle leak of the previous open()/read()
        sequence that relied on the caller always invoking closeFile().
        """
        with open(self.filename, mode='rb') as opened:
            # Keep the handle attribute for backward compatibility with
            # callers that inspect self.file or call closeFile().
            self.file = opened
            self.fileContent = opened.read()

    def closeFile(self):
        # Retained for backward compatibility; the handle is already closed
        # by loadFile() and close() is idempotent.
        self.file.close()

    def getContent(self):
        """Returns the bytes read by the last loadFile() call."""
        return self.fileContent
marekhakala/temperaturehub
Thermometer_DUMMY_server/classes/file_loader.py
Python
apache-2.0
957
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.8.2 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import kubernetes.client from kubernetes.client.rest import ApiException from kubernetes.client.models.v1beta1_user_info import V1beta1UserInfo class TestV1beta1UserInfo(unittest.TestCase): """ V1beta1UserInfo unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1beta1UserInfo(self): """ Test V1beta1UserInfo """ # FIXME: construct object with mandatory attributes with example values #model = kubernetes.client.models.v1beta1_user_info.V1beta1UserInfo() pass if __name__ == '__main__': unittest.main()
mbohlool/client-python
kubernetes/test/test_v1beta1_user_info.py
Python
apache-2.0
953
# Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for NetApp API layer """ import ddt from lxml import etree import mock from oslo_utils import netutils import paramiko import six from six.moves import urllib from cinder import exception from cinder.i18n import _ from cinder import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as zapi_fakes) from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api @ddt.ddt class NetAppApiServerTests(test.TestCase): """Test case for NetApp API server methods""" def setUp(self): self.root = netapp_api.NaServer('127.0.0.1') super(NetAppApiServerTests, self).setUp() @ddt.data(None, 'ftp') def test_set_transport_type_value_error(self, transport_type): """Tests setting an invalid transport type""" self.assertRaises(ValueError, self.root.set_transport_type, transport_type) @ddt.data({'params': {'transport_type': 'http', 'server_type_filer': 'filer'}}, {'params': {'transport_type': 'http', 'server_type_filer': 'xyz'}}, {'params': {'transport_type': 'https', 'server_type_filer': 'filer'}}, {'params': {'transport_type': 'https', 'server_type_filer': 'xyz'}}) @ddt.unpack def test_set_transport_type_valid(self, 
params): """Tests setting a valid transport type""" self.root._server_type = params['server_type_filer'] mock_invoke = self.mock_object(self.root, 'set_port') self.root.set_transport_type(params['transport_type']) expected_call_args = zapi_fakes.FAKE_CALL_ARGS_LIST self.assertIn(mock_invoke.call_args, expected_call_args) @ddt.data('stor', 'STORE', '') def test_set_server_type_value_error(self, server_type): """Tests Value Error on setting the wrong server type""" self.assertRaises(ValueError, self.root.set_server_type, server_type) @ddt.data('!&', '80na', '') def test_set_port__value_error(self, port): """Tests Value Error on trying to set port with a non-integer""" self.assertRaises(ValueError, self.root.set_port, port) @ddt.data('!&', '80na', '') def test_set_timeout_value_error(self, timeout): """Tests Value Error on trying to set port with a non-integer""" self.assertRaises(ValueError, self.root.set_timeout, timeout) @ddt.data({'params': {'major': 1, 'minor': '20a'}}, {'params': {'major': '20a', 'minor': 1}}, {'params': {'major': '!*', 'minor': '20a'}}) @ddt.unpack def test_set_api_version_value_error(self, params): """Tests Value Error on setting non-integer version""" self.assertRaises(ValueError, self.root.set_api_version, **params) def test_set_api_version_valid(self): """Tests Value Error on setting non-integer version""" args = {'major': '20', 'minor': 1} expected_call_args_list = [mock.call('20'), mock.call(1)] mock_invoke = self.mock_object(six, 'text_type', return_value='str') self.root.set_api_version(**args) self.assertEqual(expected_call_args_list, mock_invoke.call_args_list) @ddt.data({'params': {'result': zapi_fakes.FAKE_RESULT_API_ERR_REASON}}, {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_INVALID}}, {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_VALID}}) @ddt.unpack def test_invoke_successfully_naapi_error(self, params): """Tests invoke successfully raising NaApiError""" self.mock_object(self.root, 'send_http_request', 
return_value=params['result']) self.assertRaises(netapp_api.NaApiError, self.root.invoke_successfully, zapi_fakes.FAKE_NA_ELEMENT) def test_invoke_successfully_no_error(self): """Tests invoke successfully with no errors""" self.mock_object(self.root, 'send_http_request', return_value=zapi_fakes.FAKE_RESULT_SUCCESS) self.assertEqual(zapi_fakes.FAKE_RESULT_SUCCESS.to_string(), self.root.invoke_successfully( zapi_fakes.FAKE_NA_ELEMENT).to_string()) def test__create_request(self): """Tests method _create_request""" self.root._ns = zapi_fakes.FAKE_XML_STR self.root._api_version = '1.20' self.mock_object(self.root, '_enable_tunnel_request') self.mock_object(netapp_api.NaElement, 'add_child_elem') self.mock_object(netapp_api.NaElement, 'to_string', return_value=zapi_fakes.FAKE_XML_STR) mock_invoke = self.mock_object(urllib.request, 'Request') self.root._create_request(zapi_fakes.FAKE_NA_ELEMENT, True) self.assertTrue(mock_invoke.called) @ddt.data({'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_5}}, {'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_14}}) @ddt.unpack def test__enable_tunnel_request__value_error(self, params): """Tests value errors with creating tunnel request""" self.assertRaises(ValueError, params['server']._enable_tunnel_request, 'test') def test__enable_tunnel_request_valid(self): """Tests creating tunnel request with correct values""" netapp_elem = zapi_fakes.FAKE_NA_ELEMENT server = zapi_fakes.FAKE_NA_SERVER_API_1_20 mock_invoke = self.mock_object(netapp_elem, 'add_attr') expected_call_args = [mock.call('vfiler', 'filer'), mock.call('vfiler', 'server')] server._enable_tunnel_request(netapp_elem) self.assertEqual(expected_call_args, mock_invoke.call_args_list) def test__parse_response__naapi_error(self): """Tests NaApiError on no response""" self.assertRaises(netapp_api.NaApiError, self.root._parse_response, None) def test__parse_response_no_error(self): """Tests parse function with appropriate response""" mock_invoke = self.mock_object(etree, 
'XML', return_value='xml') self.root._parse_response(zapi_fakes.FAKE_XML_STR) mock_invoke.assert_called_with(zapi_fakes.FAKE_XML_STR) def test__build_opener_not_implemented_error(self): """Tests whether certificate style authorization raises Exception""" self.root._auth_style = 'not_basic_auth' self.assertRaises(NotImplementedError, self.root._build_opener) def test__build_opener_valid(self): """Tests whether build opener works with valid parameters""" self.root._auth_style = 'basic_auth' mock_invoke = self.mock_object(urllib.request, 'build_opener') self.root._build_opener() self.assertTrue(mock_invoke.called) @ddt.data(None, zapi_fakes.FAKE_XML_STR) def test_send_http_request_value_error(self, na_element): """Tests whether invalid NaElement parameter causes error""" self.assertRaises(ValueError, self.root.send_http_request, na_element) def test_send_http_request_http_error(self): """Tests handling of HTTPError""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', side_effect=urllib.error.HTTPError(url='', hdrs='', fp=None, code='401', msg='httperror')) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) def test_send_http_request_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) mock_log = self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', side_effect=Exception) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) self.assertEqual(1, mock_log.exception.call_count) def 
test_send_http_request_valid(self): """Tests the method send_http_request with valid parameters""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root, '_get_result', return_value=zapi_fakes.FAKE_NA_ELEMENT) opener_mock = self.mock_object(self.root._opener, 'open') opener_mock.read.side_effect = ['resp1', 'resp2'] self.root.send_http_request(na_element) @ddt.data('192.168.1.0', '127.0.0.1', '0.0.0.0', '::ffff:8', 'fdf8:f53b:82e4::53', '2001::1', 'fe80::200::abcd', '2001:0000:4136:e378:8000:63bf:3fff:fdd2') def test__get_url(self, host): port = '80' root = netapp_api.NaServer(host, port=port) protocol = root.TRANSPORT_TYPE_HTTP url = root.URL_FILER if netutils.is_valid_ipv6(host): host = netutils.escape_ipv6(host) result = '%s://%s:%s/%s' % (protocol, host, port, url) url = root._get_url() self.assertEqual(result, url) class NetAppApiElementTransTests(test.TestCase): """Test case for NetApp API element translations.""" def test_translate_struct_dict_unique_key(self): """Tests if dict gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} root.translate_struct(child) self.assertEqual(3, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('v2', root.get_child_content('e2')) self.assertEqual('v3', root.get_child_content('e3')) def test_translate_struct_dict_nonunique_key(self): """Tests if list/dict gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] root.translate_struct(child) self.assertEqual(3, len(root.get_children())) children = root.get_children() for c in children: if c.get_name() == 'e1': self.assertIn(c.get_content(), ['v1', 'v3']) else: 
self.assertEqual('v2', c.get_content()) def test_translate_struct_list(self): """Tests if list gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ['e1', 'e2'] root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_struct_tuple(self): """Tests if tuple gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ('e1', 'e2') root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_invalid_struct(self): """Tests if invalid data structure raises exception.""" root = netapp_api.NaElement('root') child = 'random child element' self.assertRaises(ValueError, root.translate_struct, child) def test_setter_builtin_types(self): """Tests str, int, float get converted to NaElement.""" root = netapp_api.NaElement('root') root['e1'] = 'v1' root['e2'] = 1 root['e3'] = 2.0 root['e4'] = 8 self.assertEqual(4, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('1', root.get_child_content('e2')) self.assertEqual('2.0', root.get_child_content('e3')) self.assertEqual('8', root.get_child_content('e4')) def test_setter_na_element(self): """Tests na_element gets appended as child.""" root = netapp_api.NaElement('root') root['e1'] = netapp_api.NaElement('nested') self.assertEqual(1, len(root.get_children())) e1 = root.get_child_by_name('e1') self.assertIsInstance(e1, netapp_api.NaElement) self.assertIsInstance(e1.get_child_by_name('nested'), netapp_api.NaElement) def test_setter_child_dict(self): """Tests dict is appended as child to root.""" root = netapp_api.NaElement('root') root['d'] = {'e1': 'v1', 'e2': 'v2'} e1 = root.get_child_by_name('d') self.assertIsInstance(e1, netapp_api.NaElement) sub_ch = e1.get_children() 
self.assertEqual(2, len(sub_ch)) for c in sub_ch: self.assertIn(c.get_name(), ['e1', 'e2']) if c.get_name() == 'e1': self.assertEqual('v1', c.get_content()) else: self.assertEqual('v2', c.get_content()) def test_setter_child_list_tuple(self): """Tests list/tuple are appended as child to root.""" root = netapp_api.NaElement('root') root['l'] = ['l1', 'l2'] root['t'] = ('t1', 't2') l = root.get_child_by_name('l') self.assertIsInstance(l, netapp_api.NaElement) t = root.get_child_by_name('t') self.assertIsInstance(t, netapp_api.NaElement) for le in l.get_children(): self.assertIn(le.get_name(), ['l1', 'l2']) for te in t.get_children(): self.assertIn(te.get_name(), ['t1', 't2']) def test_setter_no_value(self): """Tests key with None value.""" root = netapp_api.NaElement('root') root['k'] = None self.assertIsNone(root.get_child_content('k')) def test_setter_invalid_value(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: root['k'] = netapp_api.NaServer('localhost') except Exception as e: if not isinstance(e, TypeError): self.fail(_('Error not a TypeError.')) def test_setter_invalid_key(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: root[None] = 'value' except Exception as e: if not isinstance(e, KeyError): self.fail(_('Error not a KeyError.')) def test_getter_key_error(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') self.mock_object(root, 'get_child_by_name', return_value=None) self.mock_object(root, 'has_attr', return_value=None) self.assertRaises(KeyError, netapp_api.NaElement.__getitem__, root, '123') def test_getter_na_element_list(self): """Tests returning NaElement list""" root = netapp_api.NaElement('root') root['key'] = ['val1', 'val2'] self.assertEqual(root.get_child_by_name('key').get_name(), root.__getitem__('key').get_name()) def test_getter_child_text(self): """Tests NaElement having no children""" root = netapp_api.NaElement('root') 
root.set_content('FAKE_CONTENT') self.mock_object(root, 'get_child_by_name', return_value=root) self.assertEqual('FAKE_CONTENT', root.__getitem__('root')) def test_getter_child_attr(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') root.add_attr('val', 'FAKE_VALUE') self.assertEqual('FAKE_VALUE', root.__getitem__('val')) def test_add_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, 'create_node_with_children', return_value=zapi_fakes.FAKE_INVOKE_DATA) mock_invoke = self.mock_object(root, 'add_child_elem') root.add_node_with_children('options') mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA) def test_create_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(root, 'add_new_child', return_value='abc') result_xml = str(root.create_node_with_children( 'options', test1=zapi_fakes.FAKE_XML_STR, test2=zapi_fakes.FAKE_XML_STR)) # No ordering is guaranteed for elements in this XML. 
self.assertTrue(result_xml.startswith("<options>"), result_xml) self.assertTrue("<test1>abc</test1>" in result_xml, result_xml) self.assertTrue("<test2>abc</test2>" in result_xml, result_xml) self.assertTrue(result_xml.rstrip().endswith("</options>"), result_xml) def test_add_new_child(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, '_convert_entity_refs', return_value=zapi_fakes.FAKE_INVOKE_DATA) root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA) self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string()) def test_get_attr_names_empty_attr(self): """Tests _elements.attrib being empty""" root = netapp_api.NaElement('root') self.assertEqual([], root.get_attr_names()) def test_get_attr_names(self): """Tests _elements.attrib being non-empty""" root = netapp_api.NaElement('root') root.add_attr('attr1', 'a1') root.add_attr('attr2', 'a2') self.assertEqual(['attr1', 'attr2'], root.get_attr_names()) @ddt.ddt class SSHUtilTests(test.TestCase): """Test Cases for SSH API invocation.""" def setUp(self): super(SSHUtilTests, self).setUp() self.mock_object(netapp_api.SSHUtil, '_init_ssh_pool') self.sshutil = netapp_api.SSHUtil('127.0.0.1', 'fake_user', 'fake_password') def test_execute_command(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files( paramiko.ChannelFile) self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') stdout_read = self.mock_object(stdout, 'read', return_value='') self.sshutil.execute_command(ssh, 'ls') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with() def test_execute_read_exception(self): ssh = mock.Mock(paramiko.SSHClient) exec_command = self.mock_object(ssh, 'exec_command') exec_command.side_effect = paramiko.SSHException('Failure') wait_on_stdout = 
self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(paramiko.SSHException, self.sshutil.execute_command, ssh, 'ls') wait_on_stdout.assert_not_called() @ddt.data('Password:', 'Password: ', 'Password: \n\n') def test_execute_command_with_prompt(self, response): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', return_value=response) stdin_write = self.mock_object(stdin, 'write') self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.sshutil.execute_command_with_prompt(ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) stdin_write.assert_called_once_with('easypass' + '\n') def test_execute_command_unexpected_response(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', return_value='bad response') self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(exception.VolumeBackendAPIException, self.sshutil.execute_command_with_prompt, ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) def test_wait_on_stdout(self): stdout = mock.Mock() stdout.channel = mock.Mock(paramiko.Channel) exit_status = self.mock_object(stdout.channel, 'exit_status_ready', return_value=False) self.sshutil._wait_on_stdout(stdout, 1) exit_status.assert_any_call() self.assertGreater(exit_status.call_count, 2) def _mock_ssh_channel_files(self, channel): stdin = mock.Mock() stdin.channel = mock.Mock(channel) stdout = mock.Mock() stdout.channel = 
mock.Mock(channel) stderr = mock.Mock() stderr.channel = mock.Mock(channel) return stdin, stdout, stderr
Datera/cinder
cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py
Python
apache-2.0
23,756
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random augment.

RandAugment applies a fixed number of randomly chosen image augmentation ops,
each parameterized by a single scalar "magnitude" level in [0, 1].
"""

import tensorflow as tf

from augment import augment_ops

# Names of the ops RandAugment samples from; each must have an entry in both
# augment_ops.NAME_TO_FUNC and LEVEL_TO_ARG below.
# Reference for Imagenet:
# https://cs.corp.google.com/piper///depot/google3/learning/brain/research/meta_architect/image/image_processing.py?rcl=275474938&l=2950
IMAGENET_AUG_OPS = [
    'AutoContrast',
    'Equalize',
    'Invert',
    'Rotate',
    'Posterize',
    'Solarize',
    'Color',
    'Contrast',
    'Brightness',
    'Sharpness',
    'ShearX',
    'ShearY',
    'TranslateX',
    'TranslateY',
    'SolarizeAdd',
    'Identity',
]

# Levels in this file are assumed to be floats in [0, 1] range
# If you need quantization or integer levels, this should be controlled
# in client code.
MAX_LEVEL = 1.

# Constant which is used when computing translation argument from level
TRANSLATE_CONST = 100.


def _randomly_negate_tensor(tensor):
  """With 50% prob turn the tensor negative."""
  # floor(uniform[0,1) + 0.5) is 0 or 1 with equal probability -> Bernoulli.
  should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
  final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
  return final_tensor


def _rotate_level_to_arg(level):
  """Maps level in [0, MAX_LEVEL] to a rotation angle in [-30, 30] degrees."""
  level = (level/MAX_LEVEL) * 30.
  # Flip level to negative with 50% chance
  level = _randomly_negate_tensor(level)
  return (level,)


def _enhance_level_to_arg(level):
  """Maps level to an enhancement factor in [0.1, 1.9] (1.0 = no change)."""
  return ((level/MAX_LEVEL) * 1.8 + 0.1,)


def _shear_level_to_arg(level):
  """Maps level to a shear amount in [-0.3, 0.3]."""
  level = (level/MAX_LEVEL) * 0.3
  # Flip level to negative with 50% chance
  level = _randomly_negate_tensor(level)
  return (level,)


def _translate_level_to_arg(level):
  """Maps level to a translation in [-TRANSLATE_CONST, TRANSLATE_CONST] px."""
  level = (level/MAX_LEVEL) * TRANSLATE_CONST
  # Flip level to negative with 50% chance
  level = _randomly_negate_tensor(level)
  return (level,)


def _posterize_level_to_arg(level):
  # NOTE(review): int() on `level` requires eager execution when `level` is a
  # tensor (as produced by RandAugment._get_level) — confirm callers run
  # eagerly or pass a Python float.
  return (int((level/MAX_LEVEL) * 4),)


def _solarize_level_to_arg(level):
  # Threshold in [0, 256]; same eager-only caveat as _posterize_level_to_arg.
  return (int((level/MAX_LEVEL) * 256),)


def _solarize_add_level_to_arg(level):
  # Additive amount in [0, 110]; same eager-only caveat as above.
  return (int((level/MAX_LEVEL) * 110),)


def _ignore_level_to_arg(level):
  """For ops that take no magnitude argument (e.g. AutoContrast, Identity)."""
  del level
  return ()


def _divide_level_by_max_level_arg(level):
  """Passes the normalized level straight through as the single op argument."""
  return (level/MAX_LEVEL,)


# Maps op name -> function converting a scalar level into the op's extra args.
# Note: 'Blur', 'Smooth' and 'Rescale' are not in IMAGENET_AUG_OPS; presumably
# they are used by other clients of this table — confirm before removing.
LEVEL_TO_ARG = {
    'AutoContrast': _ignore_level_to_arg,
    'Equalize': _ignore_level_to_arg,
    'Invert': _ignore_level_to_arg,
    'Rotate': _rotate_level_to_arg,
    'Posterize': _posterize_level_to_arg,
    'Solarize': _solarize_level_to_arg,
    'SolarizeAdd': _solarize_add_level_to_arg,
    'Color': _enhance_level_to_arg,
    'Contrast': _enhance_level_to_arg,
    'Brightness': _enhance_level_to_arg,
    'Sharpness': _enhance_level_to_arg,
    'ShearX': _shear_level_to_arg,
    'ShearY': _shear_level_to_arg,
    'TranslateX': _translate_level_to_arg,
    'TranslateY': _translate_level_to_arg,
    'Identity': _ignore_level_to_arg,
    'Blur': _divide_level_by_max_level_arg,
    'Smooth': _divide_level_by_max_level_arg,
    'Rescale': _divide_level_by_max_level_arg,
}


class RandAugment(object):
  """Random augment with fixed magnitude."""

  def __init__(self,
               num_layers=2,
               prob_to_apply=None,
               magnitude=None,
               num_levels=10):
    """Initialized rand augment.

    Args:
      num_layers: number of augmentation layers, i.e. how many times to do
        augmentation.
      prob_to_apply: probability to apply on each layer.
        If None then always apply.
      magnitude: default magnitude in range [0, 1],
        if None then magnitude will be chosen randomly.
      num_levels: number of levels for quantization of the magnitude.
    """
    self.num_layers = num_layers
    self.prob_to_apply = (
        float(prob_to_apply) if prob_to_apply is not None else None)
    self.num_levels = int(num_levels) if num_levels else None
    self.level = float(magnitude) if magnitude is not None else None

  def _get_level(self):
    """Returns a scalar float32 level in [0, 1] (fixed or sampled)."""
    if self.level is not None:
      # Fixed magnitude was supplied at construction time.
      return tf.convert_to_tensor(self.level)
    if self.num_levels is None:
      # Continuous magnitude: uniform in [0, 1).
      return tf.random.uniform(shape=[], dtype=tf.float32)
    else:
      # Quantized magnitude: integer in [0, num_levels], rescaled to [0, 1].
      level = tf.random.uniform(
          shape=[], maxval=self.num_levels + 1, dtype=tf.int32)
      return tf.cast(level, tf.float32) / self.num_levels

  def _apply_one_layer(self, image):
    """Applies one level of augmentation to the image."""
    level = self._get_level()
    branch_fns = []
    for augment_op_name in IMAGENET_AUG_OPS:
      augment_fn = augment_ops.NAME_TO_FUNC[augment_op_name]
      level_to_args_fn = LEVEL_TO_ARG[augment_op_name]

      # Default arguments bind the current loop values at definition time,
      # avoiding the late-binding closure pitfall.
      def _branch_fn(image=image,
                     augment_fn=augment_fn,
                     level_to_args_fn=level_to_args_fn):
        args = [image] + list(level_to_args_fn(level))
        return augment_fn(*args)

      branch_fns.append(_branch_fn)
    # Pick one op uniformly at random and run only that branch.
    branch_index = tf.random.uniform(
        shape=[], maxval=len(branch_fns), dtype=tf.int32)
    aug_image = tf.switch_case(branch_index, branch_fns,
                               default=lambda: image)
    if self.prob_to_apply is not None:
      # Keep the augmented image only with probability prob_to_apply.
      return tf.cond(
          tf.random.uniform(shape=[], dtype=tf.float32) < self.prob_to_apply,
          lambda: aug_image,
          lambda: image)
    else:
      return aug_image

  def __call__(self, image, aug_image_key='image'):
    """Augments `image` and returns a dict of outputs.

    Args:
      image: input image tensor.
      aug_image_key: dict key under which to store the augmented image;
        if None, no augmentation is performed.

    Returns:
      Dict containing the augmented image under `aug_image_key` and, when
      `aug_image_key != 'image'`, the untouched input under 'image'.
    """
    output_dict = {}
    if aug_image_key is not None:
      aug_image = image
      # Apply num_layers independently sampled augmentation ops in sequence.
      for _ in range(self.num_layers):
        aug_image = self._apply_one_layer(aug_image)
      output_dict[aug_image_key] = aug_image
    if aug_image_key != 'image':
      output_dict['image'] = image
    return output_dict
google-research/fixmatch
imagenet/augment/rand_augment.py
Python
apache-2.0
5,934
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.views.generic import View
from django.http import HttpResponse
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from reportlab.platypus import (Flowable, Paragraph, SimpleDocTemplate,
                                Spacer)

from .models import TablaSolicitud
from .models import Bitacora
from .models import TablaAlumno

# Create your views here.


class ConsultarDocumento(View):
    """Renders the form where a student enters the folio (code) to look up."""

    template_name = "consultarDocumento.html"

    def get(self, request):
        return render(request, self.template_name)


class VerDocumento(View):
    """Shows every solicitud belonging to the student with the given folio.

    On an unknown folio, stores an error message in the session and
    redirects back to the lookup form.
    """

    template_name = "verDocumento.html"
    model = TablaAlumno
    model2 = TablaSolicitud

    def get(self, request, folio):
        # Clear any error left over from a previous lookup attempt.
        self.request.session['errorConsulta'] = None
        context = dict()
        try:
            alumno = self.model.objects.get(codigo=folio)
        except self.model.DoesNotExist:
            # Only the "no such student" case is an expected user error;
            # the previous bare `except:` hid every other failure too.
            self.request.session['errorConsulta'] = \
                "Es incorrecto el código insertado"
            return redirect('consultar')
        context['solicitudes'] = self.model2.objects.filter(
            alumno_id=alumno.id)
        return render(request, self.template_name, context)


class VerPdf(View):
    """Streams the enrollment-certificate PDF for one solicitud."""

    template_name = "verPdf.html"
    model = TablaSolicitud
    model2 = TablaAlumno

    def get(self, request, id, solicitudId):
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = \
            'attachment; filename="Documento.pdf"'
        p = canvas.Canvas(response)
        # Lookup kept even though `alumno` is unused below: it validates that
        # the code exists (raises DoesNotExist -> error page otherwise).
        alumno = self.model2.objects.get(codigo=id)
        solicitud = self.model.objects.get(id=solicitudId)

        # Coordinates are PDF points with the origin at the bottom-left.
        p.setFont("Helvetica", 16)
        p.drawCentredString(260, 800, "INSTITUTO POLITECNICO NACIONAL")
        p.drawCentredString(260, 770, "ESCUELA SUPERIOR DE COMPUTO")
        # Typo fixed in the printed header: "SERVIVIOS" -> "SERVICIOS".
        p.drawCentredString(
            280, 740,
            "SUBDIRECCION DE SERVICIOS EDUCATIVOS E INTEGRACION SOCIAL")
        p.line(120, 700, 580, 700)
        p.setFont("Helvetica", 12)
        p.drawCentredString(260, 715, "DEPARTAMENTO DE GESTION ESCOLAR")
        p.drawCentredString(260, 700, str(solicitud.documento))
        p.drawCentredString(100, 695, "A QUIEN CORRESPONDA:")
        p.drawCentredString(100, 670, "HACE CONSTAR QUE EL ALUMNO")
        p.drawCentredString(260, 650, str(solicitud.alumno))
        p.drawCentredString(100, 630, "CON NUMERO DE BOLETA")
        p.drawCentredString(230, 630, str(solicitud.alumno.boleta))
        p.drawCentredString(380, 630, "ESTA INSCRITO EN ESTE PLANTEL")
        p.drawCentredString(200, 600, str(solicitud.fecha))
        p.drawCentredString(200, 610, str(solicitud.estado))
        p.drawCentredString(200, 620, str(solicitud.folio))

        p.showPage()
        p.save()
        return response
CallmeTorre/Idalia
ESCOM/ConsultarDocumento/views.py
Python
apache-2.0
2,955
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TensorFlow utils for loss function implementations.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from lsi.nnutils import helpers as nn_helpers
import tensorflow as tf


def event_prob(layer_masks):
  """Per-pixel layer assignment probs using an ordered multiplication.

  Args:
    layer_masks: L X [...] X 1, indicating which layer pixels are valid
  Returns:
    layer_probs: L X [...] X 1, indicating prob. of layer assignment
    escape_probs: 1 X [...] X 1, indicating prob that no layer is assigned
  """
  with tf.name_scope('event_prob'):
    eps = 1e-6
    # Clip so that masks are not exactly 0 or 1.
    layer_masks = tf.clip_by_value(layer_masks, eps, 1 - eps)
    # Work in log-space for numerical stability. The probability that layer l
    # "claims" a pixel is m_l * prod_{k < l}(1 - m_k): the cumsum over axis 0
    # includes the current layer's own log(1 - m), so it is subtracted back
    # out to leave only the product over strictly earlier layers.
    log_inv_m = tf.log(1 - layer_masks)
    log_prob = tf.cumsum(log_inv_m, axis=0) - log_inv_m + tf.log(layer_masks)
    layer_probs = tf.exp(log_prob, name='layer_probs')
    # Whatever probability mass no layer claimed is the "escape" probability.
    # NOTE(review): keep_dims is the deprecated TF1 spelling of keepdims.
    escape_probs = 1 - tf.reduce_sum(layer_probs, axis=0, keep_dims=True)
  return layer_probs, escape_probs


def decreasing_disp_loss(layer_disps):
  """Penalizes if successive disparities across layers increase.

  Args:
    layer_disps: L X [...] X 1, laywewise per pixel disparity
  Returns:
    err: scalar error
  """
  n_layers = layer_disps.get_shape().as_list()[0]
  if n_layers == 1:
    # Nothing to compare; note this returns a python int, not a tensor.
    return 0
  # Compare each layer to its predecessor. The predecessor is detached
  # (stop_gradient) so the penalty only pushes later layers' disparity down,
  # rather than pushing earlier layers' disparity up.
  disps_pre = layer_disps[0:n_layers - 1]
  disps_pre = tf.stop_gradient(disps_pre)
  disps_post = layer_disps[1:n_layers]
  # relu keeps only violations (disparity increasing with layer index).
  disps_incr = tf.nn.relu(disps_post - disps_pre)
  return tf.reduce_mean(disps_incr)


def zbuffer_composition_loss(
    layer_imgs, layer_masks, layer_disps, trg_imgs,
    bg_layer_disp=0, max_disp=1, zbuf_scale=10):
  """Depth+Mask based composition loss between predictions and target.

  First computes per-pixel layer assignment probs using depth+masks based
  normalization, and then penalizes inconsistency in a weighed manner.
  Assumes a default white background image (to penalize the ray escaping).

  Args:
    layer_imgs: are L X [...] X C, typically RGB images per layer
    layer_masks: L X [...] X 1, indicating which layer pixels are valid
    layer_disps: L X [...] X 1, laywewise per pixel disparity
    trg_imgs: [...] X C targets
    bg_layer_disp: Assumed disparity for the bg plane
    max_disp: Used for normalization
    zbuf_scale: Denominator for exponentiation of negative depths
  Returns:
    err: scalar error
  """
  # Add a layer with white color, disp=max_disp.
  shape_bg_img = layer_imgs.get_shape().as_list()
  shape_bg_img[0] = 1
  shape_bg_mask = layer_masks.get_shape().as_list()
  shape_bg_mask[0] = 1
  with tf.name_scope('zbuffer_composition_loss'):
    # Append an always-visible white background layer at bg_layer_disp so
    # that rays escaping all predicted layers still incur a cost.
    bg_img = tf.ones(shape_bg_img)
    bg_mask = tf.ones(shape_bg_mask)
    bg_disp = tf.ones(shape_bg_mask) * bg_layer_disp
    layer_imgs = tf.concat([layer_imgs, bg_img], 0)
    layer_masks = tf.concat([layer_masks, bg_mask], 0)
    layer_disps = tf.concat([layer_disps, bg_disp], 0)
    # Soft visibility weights from normalized disparity, gated by the masks.
    # NOTE(review): exact weighting semantics live in
    # nn_helpers.zbuffer_weights — confirm there before relying on them.
    layer_probs = nn_helpers.zbuffer_weights(
        layer_disps / max_disp, scale=zbuf_scale) * layer_masks
    # Normalize across layers so the per-pixel weights sum to 1
    # (divide_safe guards against an all-zero column).
    probs_sum = tf.reduce_sum(layer_probs, axis=0, keep_dims=True)
    layer_probs = nn_helpers.divide_safe(layer_probs, probs_sum)
    # Probability-weighted squared error against the target image.
    layerwise_cost = tf.square(layer_imgs - trg_imgs) * layer_probs
    layerwise_cost = tf.reduce_sum(layerwise_cost, axis=0)
    layerwise_cost = 0.5 * tf.reduce_mean(layerwise_cost)
  return layerwise_cost
google/layered-scene-inference
lsi/loss/loss.py
Python
apache-2.0
4,153
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 03 09:37:33 2018

@author: tih
"""
import os
import sys
from DataAccess import DownloadData


def main(Dir, Startdate='', Enddate='', latlim=None, lonlim=None, Waitbar=1):
    """
    This function downloads monthly ETmonitor data

    Keyword arguments:
    Dir -- 'C:/file/to/path/'
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -60 and 70)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    Waitbar -- 1 (default) shows a progress bar during the download
    """
    # Default extents created per call instead of as mutable default
    # arguments (backward compatible: passing nothing keeps the old bounds).
    if latlim is None:
        latlim = [-60, 70]
    if lonlim is None:
        lonlim = [-180, 180]

    # Single-argument print() is valid on both Python 2 and Python 3,
    # unlike the original Python-2-only `print '...'` statement.
    print('\nDownload monthly ETmonitor Ew data for the period %s till %s'
          % (Startdate, Enddate))

    # "ew" selects the Ew product variant of ETmonitor
    # (presumably soil/water evaporation; confirm in DataAccess.DownloadData).
    Type = "ew"

    # Download data
    DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Type, Waitbar)

if __name__ == '__main__':
    # Bug fix: `main(sys.argv)` passed the whole argv LIST as `Dir`.
    # Unpack the actual command-line arguments positionally instead.
    main(*sys.argv[1:])
wateraccounting/wa
Collect/ETmonitor/Ew_monthly.py
Python
apache-2.0
803
# Copyright 2013 Openstack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import datetime import mock import netaddr from neutron.common import exceptions as ex from quark.db import models from quark import exceptions as q_ex from quark.plugin_modules import floating_ips from quark.tests import test_quark_plugin class TestRemoveFloatingIPs(test_quark_plugin.TestQuarkPlugin): @contextlib.contextmanager def _stubs(self, flip=None): flip_model = None if flip: flip_model = models.IPAddress() flip_model.update(flip) with contextlib.nested( mock.patch("quark.db.api.floating_ip_find"), mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip"), mock.patch("quark.db.api.port_disassociate_ip"), mock.patch("quark.db.api.ip_address_deallocate"), mock.patch("quark.ipam.QuarkIpam.deallocate_ip_address"), mock.patch("quark.drivers.unicorn_driver.UnicornDriver" ".remove_floating_ip") ) as (flip_find, db_fixed_ip_disassoc, db_port_disassoc, db_dealloc, mock_dealloc, mock_remove_flip): flip_find.return_value = flip_model yield def test_delete_floating_by_ip_address_id(self): flip = dict(id=1, address=3232235876, address_readable="192.168.1.100", subnet_id=1, network_id=2, version=4, used_by_tenant_id=1, network=dict(ipam_strategy="ANY")) with self._stubs(flip=flip): self.plugin.delete_floatingip(self.context, 1) def test_delete_floating_by_when_ip_address_does_not_exists_fails(self): with self._stubs(): with 
self.assertRaises(q_ex.FloatingIpNotFound): self.plugin.delete_floatingip(self.context, 1) class TestFloatingIPUtilityMethods(test_quark_plugin.TestQuarkPlugin): def test_get_next_available_fixed_ip_with_single_fixed_ip(self): port = models.Port() port.update(dict(id=1)) fixed_ip_addr = netaddr.IPAddress('192.168.0.1') fixed_ip = models.IPAddress() fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) port.ip_addresses.append(fixed_ip) next_fixed_ip = floating_ips._get_next_available_fixed_ip(port) self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1') def test_get_next_available_fixed_ip_with_mult_fixed_ips(self): port = models.Port() port.update(dict(id=1)) for ip_addr in ["192.168.0.1", "192.168.0.2", "192.168.0.3"]: fixed_ip_addr = netaddr.IPAddress(ip_addr) fixed_ip = models.IPAddress() fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) port.ip_addresses.append(fixed_ip) next_fixed_ip = floating_ips._get_next_available_fixed_ip(port) self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1') def test_get_next_available_fixed_ip_with_no_avail_fixed_ips(self): port = models.Port() port.update(dict(id=1)) fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = models.IPAddress() fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) flip_addr = netaddr.IPAddress("10.0.0.1") flip = models.IPAddress() flip.update(dict(address_type="floating", address=int(flip_addr), version=4, address_readable=str(flip_addr), allocated_at=datetime.datetime.now())) flip.fixed_ip = fixed_ip port.ip_addresses.append(fixed_ip) port.ip_addresses.append(flip) fixed_ip_addr = netaddr.IPAddress("192.168.0.2") fixed_ip = models.IPAddress() 
fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) flip_addr = netaddr.IPAddress("10.0.0.2") flip = models.IPAddress() flip.update(dict(address_type="floating", address=int(flip_addr), version=4, address_readable=str(flip_addr), allocated_at=datetime.datetime.now())) flip.fixed_ip = fixed_ip port.ip_addresses.append(fixed_ip) port.ip_addresses.append(flip) next_fixed_ip = floating_ips._get_next_available_fixed_ip(port) self.assertEqual(next_fixed_ip, None) def test_get_next_available_fixed_ip_with_avail_fixed_ips(self): port = models.Port() port.update(dict(id=1)) fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = models.IPAddress() fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) flip_addr = netaddr.IPAddress("10.0.0.1") flip = models.IPAddress() flip.update(dict(address_type="floating", address=int(flip_addr), version=4, address_readable=str(flip_addr), allocated_at=datetime.datetime.now())) flip.fixed_ip = fixed_ip port.ip_addresses.append(fixed_ip) port.ip_addresses.append(flip) fixed_ip_addr = netaddr.IPAddress("192.168.0.2") fixed_ip = models.IPAddress() fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) port.ip_addresses.append(fixed_ip) port.ip_addresses.append(flip) next_fixed_ip = floating_ips._get_next_available_fixed_ip(port) self.assertEqual(next_fixed_ip["address_readable"], "192.168.0.2") class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin): @contextlib.contextmanager def _stubs(self, flip=None, port=None, ips=None, network=None): port_model = None if port: port_model = models.Port() port_model.update(dict(port=port)) if ips: for ip in ips: ip_model = models.IPAddress() ip_model.update(ip) addr_type = 
ip.get("address_type") if addr_type == "floating" and "fixed_ip_addr" in ip: fixed_ip = models.IPAddress() fixed_ip.update(next(ip_addr for ip_addr in ips if (ip_addr["address_readable"] == ip["fixed_ip_addr"]))) ip_model.fixed_ip = fixed_ip port_model.ip_addresses.append(ip_model) flip_model = None if flip: flip_model = models.IPAddress() flip_model.update(flip) net_model = None if network: net_model = models.Network() net_model.update(network) def _alloc_ip(context, new_addr, net_id, port_m, *args, **kwargs): new_addr.append(flip_model) def _port_assoc(context, ports, addr, enable_port=None): addr.ports = ports return addr def _flip_fixed_ip_assoc(context, addr, fixed_ip): addr.fixed_ip = fixed_ip return addr with contextlib.nested( mock.patch("quark.db.api.floating_ip_find"), mock.patch("quark.db.api.network_find"), mock.patch("quark.db.api.port_find"), mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"), mock.patch("quark.drivers.unicorn_driver.UnicornDriver" ".register_floating_ip"), mock.patch("quark.db.api.port_associate_ip"), mock.patch("quark.db.api.floating_ip_associate_fixed_ip") ) as (flip_find, net_find, port_find, alloc_ip, mock_reg_flip, port_assoc, fixed_ip_assoc): flip_find.return_value = flip_model net_find.return_value = net_model port_find.return_value = port_model alloc_ip.side_effect = _alloc_ip port_assoc.side_effect = _port_assoc fixed_ip_assoc.side_effect = _flip_fixed_ip_assoc yield def test_create_with_a_port(self): floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(id=1, address=int(floating_ip_addr), version=4, address_readable=str(floating_ip_addr), subnet_id=1, network_id=2, used_by_tenant_id=1) network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())] port = 
dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(flip=floating_ip, port=port, ips=fixed_ips, network=network): request = dict(floating_network_id=network["id"], port_id=port["id"]) flip = self.plugin.create_floatingip(self.context, dict(floatingip=request)) self.assertEqual(flip["floating_ip_address"], "10.0.0.1") self.assertEqual(flip["fixed_ip_address"], "192.168.0.1") def test_create_without_a_port(self): floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(id=1, address=int(floating_ip_addr), version=4, address_readable=str(floating_ip_addr), subnet_id=1, network_id=2, used_by_tenant_id=1) network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())] with self._stubs(flip=floating_ip, port=None, ips=fixed_ips, network=network): request = dict(floating_network_id=network["id"], port_id=None) flip = self.plugin.create_floatingip(self.context, dict(floatingip=request)) self.assertEqual(flip["floating_ip_address"], "10.0.0.1") self.assertEqual(flip.get("fixed_ip_address"), None) def test_create_with_fixed_ip_specified(self): floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(id=1, address=int(floating_ip_addr), version=4, address_readable=str(floating_ip_addr), subnet_id=1, network_id=2, used_by_tenant_id=1) network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ips = [] for ip_addr in ["192.168.0.1", "192.168.0.2"]: fixed_ip_addr = netaddr.IPAddress(ip_addr) fixed_ips.append(dict(address_type="fixed", version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())) port = dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(flip=floating_ip, port=port, ips=fixed_ips, network=network): request = 
dict(floating_network_id=network["id"], port_id=port["id"], fixed_ip_address="192.168.0.2") flip = self.plugin.create_floatingip(self.context, dict(floatingip=request)) self.assertEqual(flip["floating_ip_address"], "10.0.0.1") self.assertEqual(flip["fixed_ip_address"], "192.168.0.2") def test_create_with_floating_ip_specified(self): floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(id=1, address=int(floating_ip_addr), version=4, address_readable=str(floating_ip_addr), subnet_id=1, network_id=2, used_by_tenant_id=1) network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr), version=4, address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())] port = dict(id=2) with self._stubs(flip=floating_ip, port=port, ips=fixed_ips, network=network): request = dict(floating_network_id=network["id"], port_id=port["id"], floating_ip_address="10.0.0.1") flip = self.plugin.create_floatingip(self.context, dict(floatingip=request)) self.assertEqual(flip["floating_ip_address"], "10.0.0.1") self.assertEqual(flip["fixed_ip_address"], "192.168.0.1") def test_create_without_network_id_fails(self): with self._stubs(): with self.assertRaises(ex.BadRequest): request = dict(port_id=2, floating_ip_address="10.0.0.1") self.plugin.create_floatingip(self.context, dict(floatingip=request)) def test_create_with_invalid_network_fails(self): with self._stubs(): with self.assertRaises(ex.NetworkNotFound): request = dict(floating_network_id=123, port_id=2, floating_ip_address="10.0.0.1") self.plugin.create_floatingip(self.context, dict(floatingip=request)) def test_create_with_invalid_port_fails(self): network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") with self._stubs(network=network): with self.assertRaises(ex.PortNotFound): request = dict(floating_network_id=network["id"], port_id=2, 
floating_ip_address="10.0.0.1") self.plugin.create_floatingip(self.context, dict(floatingip=request)) def test_create_with_invalid_fixed_ip_for_port_fails(self): network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ips = [dict(address_type="fixed", version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now())] port = dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(port=port, ips=fixed_ips, network=network): with self.assertRaises( q_ex.FixedIpDoesNotExistsForPort): request = dict(floating_network_id=network["id"], port_id=port["id"], fixed_ip_address="192.168.0.2") flip = self.plugin.create_floatingip(self.context, dict(floatingip=request)) self.assertEqual(flip["address_readable"], "10.0.0.1") self.assertEqual(flip.fixed_ip["address_readable"], "192.168.0.2") def test_create_with_port_and_fixed_ip_with_existing_flip_fails(self): network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = dict(address_type="fixed", version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now()) floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(address_type="floating", version=4, address=int(floating_ip_addr), address_readable=str(floating_ip_addr), allocated_at=datetime.datetime.now(), fixed_ip_addr="192.168.0.1") ips = [fixed_ip, floating_ip] port = dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(port=port, ips=ips, network=network): with self.assertRaises( q_ex.PortAlreadyContainsFloatingIp): request = dict(floating_network_id=network["id"], port_id=port["id"], fixed_ip_address="192.168.0.1") self.plugin.create_floatingip(self.context, dict(floatingip=request)) def test_create_when_port_has_no_fixed_ips_fails(self): network = 
dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") port = dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(port=port, network=network): with self.assertRaises( q_ex.NoAvailableFixedIpsForPort): request = dict(floating_network_id=network["id"], port_id=port["id"]) self.plugin.create_floatingip(self.context, dict(floatingip=request)) def test_create_when_port_has_no_available_fixed_ips_fails(self): network = dict(id="00000000-0000-0000-0000-000000000000", ipam_strategy="ANY") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = dict(address_type="fixed", version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now()) floating_ip_addr = netaddr.IPAddress("10.0.0.1") floating_ip = dict(address_type="floating", version=4, address=int(floating_ip_addr), address_readable=str(floating_ip_addr), allocated_at=datetime.datetime.now(), fixed_ip_addr="192.168.0.1") ips = [fixed_ip, floating_ip] port = dict(id="abcdefgh-1111-2222-3333-1234567890ab") with self._stubs(port=port, ips=ips, network=network): with self.assertRaises( q_ex.NoAvailableFixedIpsForPort): request = dict(floating_network_id=network["id"], port_id=port["id"]) self.plugin.create_floatingip(self.context, dict(floatingip=request)) class TestUpdateFloatingIPs(test_quark_plugin.TestQuarkPlugin): @contextlib.contextmanager def _stubs(self, flip=None, curr_port=None, new_port=None, ips=None): curr_port_model = None if curr_port: curr_port_model = models.Port() curr_port_model.update(curr_port) new_port_model = None if new_port: new_port_model = models.Port() new_port_model.update(new_port) if ips: for ip in ips: ip_model = models.IPAddress() ip_model.update(ip) addr_type = ip.get("address_type") if addr_type == "floating" and "fixed_ip_addr" in ip: fixed_ip = models.IPAddress() fixed_ip.update(next(ip_addr for ip_addr in ips if (ip_addr["address_readable"] == ip["fixed_ip_addr"]))) ip_model.fixed_ip = fixed_ip 
new_port_model.ip_addresses.append(ip_model) flip_model = None if flip: flip_model = models.IPAddress() flip_model.update(flip) if curr_port_model: flip_model.ports = [curr_port_model] fixed_ip = flip.get("fixed_ip_address") if fixed_ip: addr = netaddr.IPAddress(fixed_ip) fixed_ip_model = models.IPAddress() fixed_ip_model.update(dict(address_readable=fixed_ip, address=int(addr), version=4, address_type="fixed")) flip_model.fixed_ip = fixed_ip_model def _find_port(context, id, **kwargs): return (curr_port_model if (curr_port_model and id == curr_port_model.id) else new_port_model) def _flip_assoc(context, addr, fixed_ip): addr.fixed_ip = fixed_ip return addr def _flip_disassoc(context, addr): addr.fixed_ip = None return addr def _port_assoc(context, ports, addr, enable_ports=None): addr.ports = ports return addr def _port_dessoc(context, ports, addr): addr.associations = [] addr.ports = [] return addr with contextlib.nested( mock.patch("quark.db.api.floating_ip_find"), mock.patch("quark.db.api.port_find"), mock.patch("quark.drivers.unicorn_driver.UnicornDriver" ".register_floating_ip"), mock.patch("quark.drivers.unicorn_driver.UnicornDriver" ".update_floating_ip"), mock.patch("quark.drivers.unicorn_driver.UnicornDriver" ".remove_floating_ip"), mock.patch("quark.db.api.port_associate_ip"), mock.patch("quark.db.api.port_disassociate_ip"), mock.patch("quark.db.api.floating_ip_associate_fixed_ip"), mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip") ) as (flip_find, port_find, reg_flip, update_flip, rem_flip, port_assoc, port_dessoc, flip_assoc, flip_dessoc): flip_find.return_value = flip_model port_find.side_effect = _find_port port_assoc.side_effect = _port_assoc port_dessoc.side_effect = _port_dessoc flip_assoc.side_effect = _flip_assoc flip_dessoc.side_effect = _flip_disassoc yield def test_update_with_new_port_and_no_previous_port(self): new_port = dict(id="2") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = dict(address_type="fixed", 
version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now()) ips = [fixed_ip] addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, new_port=new_port, ips=ips): content = dict(port_id=new_port["id"]) ret = self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) self.assertEqual(ret["fixed_ip_address"], "192.168.0.1") self.assertEqual(ret["port_id"], new_port["id"]) def test_update_with_new_port(self): curr_port = dict(id="1") new_port = dict(id="2") fixed_ip_addr = netaddr.IPAddress("192.168.0.1") fixed_ip = dict(address_type="fixed", version=4, address=int(fixed_ip_addr), address_readable=str(fixed_ip_addr), allocated_at=datetime.datetime.now()) ips = [fixed_ip] addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, curr_port=curr_port, new_port=new_port, ips=ips): content = dict(port_id=new_port["id"]) ret = self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) self.assertEqual(ret["fixed_ip_address"], "192.168.0.1") self.assertEqual(ret["port_id"], new_port["id"]) def test_update_with_no_port(self): curr_port = dict(id="1") addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, curr_port=curr_port): content = dict(port_id=None) ret = self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) self.assertEqual(ret.get("fixed_ip_address"), None) self.assertEqual(ret.get("port_id"), None) def test_update_with_non_existent_port_should_fail(self): addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip): with 
self.assertRaises(ex.PortNotFound): content = dict(port_id="123") self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) def test_update_with_port_with_no_fixed_ip_avail_should_fail(self): new_port = dict(id="123") addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, new_port=new_port): with self.assertRaises(q_ex.NoAvailableFixedIpsForPort): content = dict(port_id="123") self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) def test_update_with_same_port_should_fail(self): new_port = dict(id="123") curr_port = dict(id="123") addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, new_port=new_port, curr_port=curr_port): with self.assertRaises(q_ex.PortAlreadyAssociatedToFloatingIp): content = dict(port_id="123") self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) def test_update_when_port_has_a_different_flip_should_fail(self): new_port = dict(id="123") floating_ip_addr = netaddr.IPAddress("192.168.0.1") floating_ip = dict(address_type="floating", version=4, address=int(floating_ip_addr), address_readable=str(floating_ip_addr), allocated_at=datetime.datetime.now()) ips = [floating_ip] curr_port = dict(id="456") addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), address_readable=str(addr)) with self._stubs(flip=flip, new_port=new_port, curr_port=curr_port, ips=ips): with self.assertRaises(q_ex.PortAlreadyContainsFloatingIp): content = dict(port_id="123") self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) def test_update_with_no_port_and_no_previous_port_should_fail(self): addr = netaddr.IPAddress("10.0.0.1") flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr), 
address_readable=str(addr)) with self._stubs(flip=flip): with self.assertRaises(q_ex.FloatingIpUpdateNoPortIdSupplied): content = dict(port_id=None) self.plugin.update_floatingip(self.context, flip["id"], dict(floatingip=content)) def test_update_with_missing_port_id_param_should_fail(self): with self._stubs(): with self.assertRaises(ex.BadRequest): content = {} self.plugin.update_floatingip(self.context, "123", dict(floatingip=content))
alanquillin/quark
quark/tests/plugin_modules/test_floating_ips.py
Python
apache-2.0
30,810
# Copyright (c) 2015 Scality SA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr', ], pbr=True, )
scality/scality-manila-utils
setup.py
Python
apache-2.0
663
class Solution(object):
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack.

        Follows the LeetCode problem 28 contract: an empty needle yields 0,
        and -1 is returned when needle does not occur in haystack.

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        # str.find implements exactly this contract (empty needle -> 0,
        # not found -> -1) and runs in C, replacing the hand-rolled
        # O(n*m) slice-and-compare loop.
        return haystack.find(needle)
scream7/leetcode
algorithms/python/28.py
Python
apache-2.0
315
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Shared business logic.
"""
from barbican.common import utils
from barbican.model import models

LOG = utils.getLogger(__name__)


def get_or_create_project(project_id, project_repo):
    """Return the project matching project_id, creating it when absent.

    :param project_id: The external-to-Barbican ID for this project.
    :param project_repo: Project repository.
    :return: Project model instance
    """
    existing = project_repo.find_by_external_project_id(
        project_id, suppress_exception=True)
    if existing:
        return existing

    # Nothing on record for this external ID: build a fresh ACTIVE
    # project and persist it before handing it back.
    LOG.debug('Creating project for %s', project_id)
    created = models.Project()
    created.external_id = project_id
    created.status = models.States.ACTIVE
    project_repo.create_from(created)
    return created
jmvrbanac/barbican
barbican/common/resources.py
Python
apache-2.0
1,430
from __future__ import annotations import logging from collections import OrderedDict import scipy.sparse import numpy as np from typing import ( Any, Dict, Text, List, Tuple, Callable, Set, Optional, Type, Union, ) from rasa.engine.graph import ExecutionContext, GraphComponent from rasa.engine.recipes.default_recipe import DefaultV1Recipe from rasa.engine.storage.resource import Resource from rasa.engine.storage.storage import ModelStorage from rasa.nlu.tokenizers.spacy_tokenizer import ( POS_TAG_KEY, SpacyTokenizer, ) from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer from rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer from rasa.nlu.constants import TOKENS_NAMES from rasa.shared.constants import DOCS_URL_COMPONENTS from rasa.shared.nlu.training_data.training_data import TrainingData from rasa.shared.nlu.training_data.message import Message from rasa.shared.nlu.constants import TEXT from rasa.shared.exceptions import InvalidConfigException import rasa.shared.utils.io import rasa.utils.io logger = logging.getLogger(__name__) END_OF_SENTENCE = "EOS" BEGIN_OF_SENTENCE = "BOS" FEATURES = "features" @DefaultV1Recipe.register( DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=True ) class LexicalSyntacticFeaturizer(SparseFeaturizer, GraphComponent): """Extracts and encodes lexical syntactic features. Given a sequence of tokens, this featurizer produces a sequence of features where the `t`-th feature encodes lexical and syntactic information about the `t`-th token and it's surrounding tokens. In detail: The lexical syntactic features can be specified via a list of configurations `[c_0, c_1, ..., c_n]` where each `c_i` is a list of names of lexical and syntactic features (e.g. `low`, `suffix2`, `digit`). 
For a given tokenized text, the featurizer will consider a window of size `n` around each token and evaluate the given list of configurations as follows: - It will extract the features listed in `c_m` where `m = (n-1)/2` if n is even and `n/2` from token `t` - It will extract the features listed in `c_{m-1}`,`c_{m-2}` ... , from the last, second to last, ... token before token `t`, respectively. - It will extract the features listed `c_{m+1}`, `c_{m+1}`, ... for the first, second, ... token `t`, respectively. It will then combine all these features into one feature for position `t`. Example: If we specify `[['low'], ['upper'], ['prefix2']]`, then for each position `t` the `t`-th feature will encode whether the token at position `t` is upper case, where the token at position `t-1` is lower case and the first two characters of the token at position `t+1`. """ FILENAME_FEATURE_TO_IDX_DICT = "feature_to_idx_dict.pkl" # NOTE: "suffix5" of the token "is" will be "is". Hence, when combining multiple # prefixes, short words will be represented/encoded repeatedly. 
_FUNCTION_DICT: Dict[Text, Callable[[Token], Union[Text, bool, None]]] = { "low": lambda token: token.text.islower(), "title": lambda token: token.text.istitle(), "prefix5": lambda token: token.text[:5], "prefix2": lambda token: token.text[:2], "suffix5": lambda token: token.text[-5:], "suffix3": lambda token: token.text[-3:], "suffix2": lambda token: token.text[-2:], "suffix1": lambda token: token.text[-1:], "pos": lambda token: token.data.get(POS_TAG_KEY, None), "pos2": lambda token: token.data.get(POS_TAG_KEY, [])[:2] if POS_TAG_KEY in token.data else None, "upper": lambda token: token.text.isupper(), "digit": lambda token: token.text.isdigit(), } SUPPORTED_FEATURES = sorted( set(_FUNCTION_DICT.keys()).union([END_OF_SENTENCE, BEGIN_OF_SENTENCE]) ) @classmethod def _extract_raw_features_from_token( cls, feature_name: Text, token: Token, token_position: int, num_tokens: int, ) -> Text: """Extracts a raw feature from the token at the given position. Args: feature_name: the name of a supported feature token: the token from which we want to extract the feature token_position: the position of the token inside the tokenized text num_tokens: the total number of tokens in the tokenized text Returns: the raw feature value as text """ if feature_name not in cls.SUPPORTED_FEATURES: raise InvalidConfigException( f"Configured feature '{feature_name}' not valid. Please check " f"'{DOCS_URL_COMPONENTS}' for valid configuration parameters." 
) if feature_name == END_OF_SENTENCE: return str(token_position == num_tokens - 1) if feature_name == BEGIN_OF_SENTENCE: return str(token_position == 0) return str(cls._FUNCTION_DICT[feature_name](token)) @classmethod def required_components(cls) -> List[Type]: """Components that should be included in the pipeline before this component.""" return [Tokenizer] @staticmethod def get_default_config() -> Dict[Text, Any]: """Returns the component's default config.""" return { **SparseFeaturizer.get_default_config(), FEATURES: [ ["low", "title", "upper"], ["BOS", "EOS", "low", "upper", "title", "digit"], ["low", "title", "upper"], ], } def __init__( self, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, feature_to_idx_dict: Optional[Dict[Tuple[int, Text], Dict[Text, int]]] = None, ) -> None: """Instantiates a new `LexicalSyntacticFeaturizer` instance.""" super().__init__(execution_context.node_name, config) # graph component self._model_storage = model_storage self._resource = resource self._execution_context = execution_context # featurizer specific self._feature_config = self._config[FEATURES] self._set_feature_to_idx_dict( feature_to_idx_dict or {}, check_consistency_with_config=True ) @classmethod def validate_config(cls, config: Dict[Text, Any]) -> None: """Validates that the component is configured properly.""" if FEATURES not in config: return # will be replaced with default feature_config = config[FEATURES] message = ( f"Expected configuration of `features` to be a list of lists that " f"that contain names of lexical and syntactic features " f"(i.e. {cls.SUPPORTED_FEATURES}). " f"Received {feature_config} instead. 
" ) try: configured_feature_names = set( feature_name for pos_config in feature_config for feature_name in pos_config ) except TypeError as e: raise InvalidConfigException(message) from e if configured_feature_names.difference(cls.SUPPORTED_FEATURES): raise InvalidConfigException(message) def _set_feature_to_idx_dict( self, feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]], check_consistency_with_config: bool = False, ) -> None: """Sets the "feature" to index mapping. Here, "feature" denotes the combination of window position, feature name, and feature_value. Args: feature_to_idx_dict: mapping from tuples of window position and feature name to a mapping from feature values to indices check_consistency_with_config: whether the consistency with the current `self.config` should be checked """ self._feature_to_idx_dict = feature_to_idx_dict self._number_of_features = sum( [ len(feature_values.values()) for feature_values in self._feature_to_idx_dict.values() ] ) if check_consistency_with_config: known_features = set(self._feature_to_idx_dict.keys()) not_in_config = known_features.difference( ( (window_idx, feature_name) for window_idx, feature_names in enumerate(self._feature_config) for feature_name in feature_names ) ) if not_in_config: rasa.shared.utils.io.raise_warning( f"A feature to index mapping has been loaded that does not match " f"the configured features. The given mapping configures " f" (position in window, feature_name): {not_in_config}. " f" These are not specified in the given config " f" {self._feature_config}. " f"Continuing with constant values for these features. " ) def train(self, training_data: TrainingData) -> Resource: """Trains the featurizer. 
Args: training_data: the training data Returns: the resource from which this trained component can be loaded """ self.warn_if_pos_features_cannot_be_computed(training_data) feature_to_idx_dict = self._create_feature_to_idx_dict(training_data) self._set_feature_to_idx_dict(feature_to_idx_dict=feature_to_idx_dict) if not self._feature_to_idx_dict: rasa.shared.utils.io.raise_warning( "No lexical syntactic features could be extracted from the training " "data. In order for this component to work you need to define " "`features` that can be found in the given training data." ) self.persist() return self._resource def warn_if_pos_features_cannot_be_computed( self, training_data: TrainingData ) -> None: """Warn if part-of-speech features are needed but not given.""" training_example = next( ( message for message in training_data.training_examples if message.get(TOKENS_NAMES[TEXT], []) ), Message(), ) tokens_example = training_example.get(TOKENS_NAMES[TEXT], []) configured_feature_names = set( feature_name for pos_config in self._feature_config for feature_name in pos_config ) if {"pos", "pos2"}.intersection( configured_feature_names ) and not tokens_example[0].data.get(POS_TAG_KEY, []): rasa.shared.utils.io.raise_warning( f"Expected training data to include tokens with part-of-speech tags" f"because the given configuration includes part-of-speech features " f"`pos` and/or `pos2`. " f"Please add a {SpacyTokenizer.__name__} to your " f"configuration if you want to use the part-of-speech-features in the" f"{self.__class__.__name__}. " f"Continuing without the part-of-speech-features." ) def _create_feature_to_idx_dict( self, training_data: TrainingData ) -> Dict[Tuple[int, Text], Dict[Text, int]]: """Create a nested dictionary of all feature values. Returns: a nested mapping that maps from tuples of positions (in the window) and supported feature names to "raw feature to index" mappings, i.e. 
mappings that map the respective raw feature values to unique indices (where `unique` means unique with respect to all indices in the *nested* mapping) """ # collect all raw feature values feature_vocabulary: Dict[Tuple[int, Text], Set[Text]] = dict() for example in training_data.training_examples: tokens = example.get(TOKENS_NAMES[TEXT], []) sentence_features = self._map_tokens_to_raw_features(tokens) for token_features in sentence_features: for position_and_feature_name, feature_value in token_features.items(): feature_vocabulary.setdefault(position_and_feature_name, set()).add( feature_value ) # assign a unique index to each feature value return self._build_feature_to_index_map(feature_vocabulary) def _map_tokens_to_raw_features( self, tokens: List[Token] ) -> List[Dict[Tuple[int, Text], Text]]: """Extracts the raw feature values. Args: tokens: a tokenized text Returns: a list of feature dictionaries for each token in the given list where each feature dictionary maps a tuple containing - a position (in the window) and - a supported feature name to the corresponding raw feature value """ sentence_features = [] # in case of an even number we will look at one more word before, # e.g. 
window size 4 will result in a window range of # [-2, -1, 0, 1] (0 = current word in sentence) window_size = len(self._feature_config) half_window_size = window_size // 2 window_range = range(-half_window_size, half_window_size + window_size % 2) assert len(window_range) == window_size for anchor in range(len(tokens)): token_features: Dict[Tuple[int, Text], Text] = {} for window_position, relative_position in enumerate(window_range): absolute_position = anchor + relative_position # skip, if current_idx is pointing to a non-existing token if absolute_position < 0 or absolute_position >= len(tokens): continue token = tokens[absolute_position] for feature_name in self._feature_config[window_position]: token_features[ (window_position, feature_name) ] = self._extract_raw_features_from_token( token=token, feature_name=feature_name, token_position=absolute_position, num_tokens=len(tokens), ) sentence_features.append(token_features) return sentence_features @staticmethod def _build_feature_to_index_map( feature_vocabulary: Dict[Tuple[int, Text], Set[Text]] ) -> Dict[Tuple[int, Text], Dict[Text, int]]: """Creates a nested dictionary for mapping raw features to indices. Args: feature_vocabulary: a mapping from tuples of positions (in the window) and supported feature names to the set of possible feature values Returns: a nested mapping that maps from tuples of positions (in the window) and supported feature names to "raw feature to index" mappings, i.e. 
mappings that map the respective raw feature values to unique indices (where `unique` means unique with respect to all indices in the *nested* mapping) """ # Note that this will only sort the top level keys - and we keep # doing it to ensure consistently with what was done before) ordered_feature_vocabulary: OrderedDict[ Tuple[int, Text], Set[Text] ] = OrderedDict(sorted(feature_vocabulary.items())) # create the nested mapping feature_to_idx_dict: Dict[Tuple[int, Text], Dict[Text, int]] = {} offset = 0 for ( position_and_feature_name, feature_values, ) in ordered_feature_vocabulary.items(): sorted_feature_values = sorted(feature_values) feature_to_idx_dict[position_and_feature_name] = { feature_value: feature_idx for feature_idx, feature_value in enumerate( sorted_feature_values, start=offset ) } offset += len(feature_values) return feature_to_idx_dict def process(self, messages: List[Message]) -> List[Message]: """Featurizes all given messages in-place. Args: messages: messages to be featurized. Returns: The same list with the same messages after featurization. """ for message in messages: self._process_message(message) return messages def process_training_data(self, training_data: TrainingData) -> TrainingData: """Processes the training examples in the given training data in-place. Args: training_data: the training data Returns: same training data after processing """ self.process(training_data.training_examples) return training_data def _process_message(self, message: Message) -> None: """Featurizes the given message in-place. Args: message: a message to be featurized """ if not self._feature_to_idx_dict: rasa.shared.utils.io.raise_warning( f"The {self.__class__.__name__} {self._identifier} has not been " f"trained properly yet. " f"Continuing without adding features from this featurizer." 
) return tokens = message.get(TOKENS_NAMES[TEXT]) if tokens: sentence_features = self._map_tokens_to_raw_features(tokens) sparse_matrix = self._map_raw_features_to_indices(sentence_features) self.add_features_to_message( # FIXME: create sentence feature and make `sentence` non optional sequence=sparse_matrix, sentence=None, attribute=TEXT, message=message, ) def _map_raw_features_to_indices( self, sentence_features: List[Dict[Tuple[int, Text], Any]] ) -> scipy.sparse.coo_matrix: """Converts the raw features to one-hot encodings. Requires the "feature" to index dictionary, i.e. the featurizer must have been trained. Args: sentence_features: a list of feature dictionaries where the `t`-th feature dictionary maps a tuple containing - a position (in the window) and - a supported feature name to the raw feature value extracted from the window around the `t`-th token. Returns: a sparse matrix where the `i`-th row is a multi-hot vector that encodes the raw features extracted from the window around the `i`-th token """ rows = [] cols = [] shape = (len(sentence_features), self._number_of_features) for token_idx, token_features in enumerate(sentence_features): for position_and_feature_name, feature_value in token_features.items(): mapping = self._feature_to_idx_dict.get(position_and_feature_name) if not mapping: continue feature_idx = mapping.get(feature_value, -1) if feature_idx > -1: rows.append(token_idx) cols.append(feature_idx) rows = np.array(rows) cols = np.array(cols) data = np.ones(len(rows)) return scipy.sparse.coo_matrix((data, (rows, cols)), shape=shape) @classmethod def create( cls, config: Dict[Text, Any], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, ) -> LexicalSyntacticFeaturizer: """Creates a new untrained component (see parent class for full docstring).""" return cls(config, model_storage, resource, execution_context) @classmethod def load( cls, config: Dict[Text, Any], model_storage: ModelStorage, resource: 
Resource, execution_context: ExecutionContext, **kwargs: Any, ) -> LexicalSyntacticFeaturizer: """Loads trained component (see parent class for full docstring).""" try: with model_storage.read_from(resource) as model_path: feature_to_idx_dict = rasa.utils.io.json_unpickle( model_path / cls.FILENAME_FEATURE_TO_IDX_DICT, encode_non_string_keys=True, ) return cls( config=config, model_storage=model_storage, resource=resource, execution_context=execution_context, feature_to_idx_dict=feature_to_idx_dict, ) except ValueError: logger.debug( f"Failed to load `{cls.__class__.__name__}` from model storage. " f"Resource '{resource.name}' doesn't exist." ) return cls( config=config, model_storage=model_storage, resource=resource, execution_context=execution_context, ) def persist(self) -> None: """Persist this model (see parent class for full docstring).""" if not self._feature_to_idx_dict: return None with self._model_storage.write_to(self._resource) as model_path: rasa.utils.io.json_pickle( model_path / self.FILENAME_FEATURE_TO_IDX_DICT, self._feature_to_idx_dict, encode_non_string_keys=True, )
RasaHQ/rasa_nlu
rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py
Python
apache-2.0
21,849
from flask import Flask

# Single-endpoint demo application.
app = Flask(__name__)


@app.route("/")
def hello():
    """Root endpoint: return a static greeting."""
    return "Hello World!"


if __name__ == "__main__":
    # Listen on all interfaces so the app is reachable from outside a
    # container; debug/threaded settings match the original dev setup.
    app.run(
        host='0.0.0.0',
        port=5000,
        debug=True,
        threaded=True,
    )
ipedrazas/dotmarks-api
src/app.py
Python
apache-2.0
197
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2010 Openstack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ import datetime from nova import db from nova import exception from nova import flags from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state from nova.compute import vm_states from nova.api.ec2 import ec2utils FLAGS = flags.FLAGS flags.DEFINE_integer('service_down_time', 60, 'maximum time since last checkin for up service') flags.DECLARE('instances_path', 'nova.compute.manager') class NoValidHost(exception.Error): """There is no valid host for the command.""" pass class WillNotSchedule(exception.Error): """The specified host is not up or doesn't exist.""" pass class Scheduler(object): """The base class that all Scheduler clases should inherit from.""" def __init__(self): self.zone_manager = None def set_zone_manager(self, zone_manager): """Called by the Scheduler Service to supply a ZoneManager.""" self.zone_manager = zone_manager @staticmethod def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): """Return the list of hosts that have a running service for topic.""" services = db.service_get_all_by_topic(context, topic) return [service.host for service in services if self.service_is_up(service)] def schedule(self, context, topic, *_args, **_kwargs): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_live_migration(self, context, instance_id, dest, block_migration=False): """Live migration scheduling method. :param context: :param instance_id: :param dest: destination host :return: The host where instance is running currently. Then scheduler send request that host. """ # Whether instance exists and is running. instance_ref = db.instance_get(context, instance_id) # Checking instance. self._live_migration_src_check(context, instance_ref) # Checking destination host. self._live_migration_dest_check(context, instance_ref, dest, block_migration) # Common checking. self._live_migration_common_check(context, instance_ref, dest, block_migration) # Changing instance_state. values = {"vm_state": vm_states.MIGRATING} db.instance_update(context, instance_id, values) # Changing volume state for volume_ref in instance_ref['volumes']: db.volume_update(context, volume_ref['id'], {'status': 'migrating'}) # Return value is necessary to send request to src # Check _schedule() in detail. src = instance_ref['host'] return src def _live_migration_src_check(self, context, instance_ref): """Live migration check routine (for src host). :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object """ # Checking instance is running. 
if instance_ref['power_state'] != power_state.RUNNING: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.InstanceNotRunning(instance_id=instance_id) # Checing volume node is running when any volumes are mounted # to the instance. if len(instance_ref['volumes']) != 0: services = db.service_get_all_by_topic(context, 'volume') if len(services) < 1 or not self.service_is_up(services[0]): raise exception.VolumeServiceUnavailable() # Checking src host exists and compute node src = instance_ref['host'] services = db.service_get_all_compute_by_host(context, src) # Checking src host is alive. if not self.service_is_up(services[0]): raise exception.ComputeServiceUnavailable(host=src) def _live_migration_dest_check(self, context, instance_ref, dest, block_migration): """Live migration check routine (for destination host). :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host """ # Checking dest exists and compute node. dservice_refs = db.service_get_all_compute_by_host(context, dest) dservice_ref = dservice_refs[0] # Checking dest host is alive. if not self.service_is_up(dservice_ref): raise exception.ComputeServiceUnavailable(host=dest) # Checking whether The host where instance is running # and dest is not same. src = instance_ref['host'] if dest == src: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.UnableToMigrateToSelf(instance_id=instance_id, host=dest) # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, instance_ref, dest, block_migration) def _live_migration_common_check(self, context, instance_ref, dest, block_migration): """Live migration common check routine. 
Below checkings are followed by http://wiki.libvirt.org/page/TodoPreMigrationChecks :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host :param block_migration if True, check for block_migration. """ # Checking shared storage connectivity # if block migration, instances_paths should not be on shared storage. try: self.mounted_on_same_shared_storage(context, instance_ref, dest) if block_migration: reason = _("Block migration can not be used " "with shared storage.") raise exception.InvalidSharedStorage(reason=reason, path=dest) except exception.FileNotFound: if not block_migration: src = instance_ref['host'] ipath = FLAGS.instances_path logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " "same shared storage between %(src)s " "and %(dest)s.") % locals()) raise # Checking dest exists. dservice_refs = db.service_get_all_compute_by_host(context, dest) dservice_ref = dservice_refs[0]['compute_node'][0] # Checking original host( where instance was launched at) exists. try: oservice_refs = db.service_get_all_compute_by_host(context, instance_ref['launched_on']) except exception.NotFound: raise exception.SourceHostUnavailable() oservice_ref = oservice_refs[0]['compute_node'][0] # Checking hypervisor is same. orig_hypervisor = oservice_ref['hypervisor_type'] dest_hypervisor = dservice_ref['hypervisor_type'] if orig_hypervisor != dest_hypervisor: raise exception.InvalidHypervisorType() # Checkng hypervisor version. orig_hypervisor = oservice_ref['hypervisor_version'] dest_hypervisor = dservice_ref['hypervisor_version'] if orig_hypervisor > dest_hypervisor: raise exception.DestinationHypervisorTooOld() # Checking cpuinfo. 
try: rpc.call(context, db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": 'compare_cpu', "args": {'cpu_info': oservice_ref['cpu_info']}}) except rpc.RemoteError: src = instance_ref['host'] logging.exception(_("host %(dest)s is not compatible with " "original host %(src)s.") % locals()) raise def assert_compute_node_has_enough_resources(self, context, instance_ref, dest, block_migration): """Checks if destination host has enough resource for live migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host :param block_migration: if True, disk checking has been done """ self.assert_compute_node_has_enough_memory(context, instance_ref, dest) if not block_migration: return self.assert_compute_node_has_enough_disk(context, instance_ref, dest) def assert_compute_node_has_enough_memory(self, context, instance_ref, dest): """Checks if destination host has enough memory for live migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host """ # Getting total available memory and disk of host avail = self._get_compute_info(context, dest, 'memory_mb') # Getting total used memory and disk of host # It should be sum of memories that are assigned as max value, # because overcommiting is risky. 
used = 0 instance_refs = db.instance_get_all_by_host(context, dest) used_list = [i['memory_mb'] for i in instance_refs] if used_list: used = reduce(lambda x, y: x + y, used_list) mem_inst = instance_ref['memory_mb'] avail = avail - used if avail <= mem_inst: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) reason = _("Unable to migrate %(instance_id)s to %(dest)s: " "Lack of memory(host:%(avail)s <= " "instance:%(mem_inst)s)") raise exception.MigrationError(reason=reason % locals()) def assert_compute_node_has_enough_disk(self, context, instance_ref, dest): """Checks if destination host has enough disk for block migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host """ # Getting total available memory and disk of host avail = self._get_compute_info(context, dest, 'local_gb') # Getting total used memory and disk of host # It should be sum of disks that are assigned as max value # because overcommiting is risky. 
used = 0 instance_refs = db.instance_get_all_by_host(context, dest) used_list = [i['local_gb'] for i in instance_refs] if used_list: used = reduce(lambda x, y: x + y, used_list) disk_inst = instance_ref['local_gb'] avail = avail - used if avail <= disk_inst: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) reason = _("Unable to migrate %(instance_id)s to %(dest)s: " "Lack of disk(host:%(avail)s " "<= instance:%(disk_inst)s)") raise exception.MigrationError(reason=reason % locals()) def _get_compute_info(self, context, host, key): """get compute node's infomation specified by key :param context: security context :param host: hostname(must be compute node) :param key: column name of compute_nodes :return: value specified by key """ compute_node_ref = db.service_get_all_compute_by_host(context, host) compute_node_ref = compute_node_ref[0]['compute_node'][0] return compute_node_ref[key] def mounted_on_same_shared_storage(self, context, instance_ref, dest): """Check if the src and dest host mount same shared storage. At first, dest host creates temp file, and src host can see it if they mounts same shared storage. Then src host erase it. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host """ src = instance_ref['host'] dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest) src_t = db.queue_get_for(context, FLAGS.compute_topic, src) try: # create tmpfile at dest host filename = rpc.call(context, dst_t, {"method": 'create_shared_storage_test_file'}) # make sure existence at src host. ret = rpc.call(context, src_t, {"method": 'check_shared_storage_test_file', "args": {'filename': filename}}) if not ret: raise exception.FileNotFound(file_path=filename) except exception.FileNotFound: raise finally: rpc.call(context, dst_t, {"method": 'cleanup_shared_storage_test_file', "args": {'filename': filename}})
nii-cloud/dodai-compute
nova/scheduler/driver.py
Python
apache-2.0
14,688
from django.test import TestCase
from restclients.mock_http import MockHTTP
from myuw.util.cache_implementation import MyUWCache
from restclients.models import CacheEntryTimed
from datetime import timedelta

CACHE = 'myuw.util.cache_implementation.MyUWCache'

# Default cache lifetime under test, in minutes.
FOUR_HOURS = 60 * 4


def _ok_response():
    """Build a canned 200 response carrying the body "xx"."""
    response = MockHTTP()
    response.status = 200
    response.data = "xx"
    return response


class TestCustomCachePolicy(TestCase):
    """Exercise MyUWCache's per-service time-to-live policies."""

    def test_sws_default_policies(self):
        """SWS responses use the default four-hour cache window."""
        with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
            cache = MyUWCache()
            url = '/student/myuwcachetest1'

            self.assertEqual(cache.getCache('sws', url, {}), None)
            cache.processResponse("sws", url, _ok_response())
            hit = cache.getCache('sws', url, {})
            self.assertEqual(hit["response"].data, 'xx')

            entry = CacheEntryTimed.objects.get(service="sws", url=url)
            saved = entry.time_saved

            # Still served at 3 hours 58 minutes ...
            entry.time_saved = saved - timedelta(minutes=FOUR_HOURS - 2)
            entry.save()
            self.assertNotEqual(cache.getCache('sws', url, {}), None)

            # ... but expired at 4 hours 1 minute.
            entry.time_saved = saved - timedelta(minutes=FOUR_HOURS + 1)
            entry.save()
            self.assertEqual(cache.getCache('sws', url, {}), None)

    def test_sws_term_policy(self):
        """SWS term resources get a roughly month-long cache window."""
        with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
            cache = MyUWCache()
            url = '/student/v5/term/1014,summer.json'

            self.assertEqual(cache.getCache('sws', url, {}), None)
            cache.processResponse("sws", url, _ok_response())
            hit = cache.getCache('sws', url, {})
            self.assertEqual(hit["response"].data, 'xx')

            entry = CacheEntryTimed.objects.get(service="sws", url=url)
            saved = entry.time_saved

            # Still served after 29 days ...
            entry.time_saved = saved - timedelta(days=29)
            entry.save()
            self.assertNotEqual(cache.getCache('sws', url, {}), None)

            # ... but expired after 31 days.
            entry.time_saved = saved - timedelta(days=31)
            entry.save()
            self.assertEqual(cache.getCache('sws', url, {}), None)

    def test_myplan_default(self):
        """MyPlan responses are never served from cache."""
        with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
            cache = MyUWCache()
            url = '/api/plan/xx'

            self.assertEqual(cache.getCache('myplan', url, {}), None)
            cache.processResponse("myplan", url, _ok_response())
            # Even a freshly stored response is not returned.
            self.assertEqual(cache.getCache('myplan', url, {}), None)

    def test_default_policies(self):
        """Unknown services fall back to the four-hour default policy."""
        with self.settings(RESTCLIENTS_DAO_CACHE_CLASS=CACHE):
            cache = MyUWCache()
            url = '/student/myuwcachetest1'

            self.assertEqual(cache.getCache('no_such', url, {}), None)
            cache.processResponse("no_such", url, _ok_response())
            hit = cache.getCache('no_such', url, {})
            self.assertEqual(hit["response"].data, 'xx')

            entry = CacheEntryTimed.objects.get(service="no_such", url=url)
            saved = entry.time_saved

            # Still served at 3 hours 58 minutes ...
            entry.time_saved = saved - timedelta(minutes=FOUR_HOURS - 2)
            entry.save()
            self.assertNotEqual(cache.getCache('no_such', url, {}), None)

            # ... but expired at 4 hours 1 minute.
            entry.time_saved = saved - timedelta(minutes=FOUR_HOURS + 1)
            entry.save()
            self.assertEqual(cache.getCache('no_such', url, {}), None)
fanglinfang/myuw
myuw/test/cache.py
Python
apache-2.0
5,375
# Copyright 2017 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from botocore.exceptions import ClientError import boto3 import click import json from c7n.credentials import assumed_session from c7n.utils import get_retry, dumps, chunks from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime, timedelta from dateutil.tz import tzutc, tzlocal from dateutil.parser import parse import fnmatch import functools import jsonschema import logging import time import os import operator from tabulate import tabulate import yaml from c7n.executor import MainThreadExecutor MainThreadExecutor.async = False logging.basicConfig(level=logging.INFO) logging.getLogger('c7n.worker').setLevel(logging.DEBUG) logging.getLogger('botocore').setLevel(logging.WARNING) log = logging.getLogger('c7n-log-exporter') CONFIG_SCHEMA = { '$schema': 'http://json-schema.org/schema#', 'id': 'http://schema.cloudcustodian.io/v0/logexporter.json', 'definitions': { 'destination': { 'type': 'object', 'additionalProperties': False, 'required': ['bucket'], 'properties': { 'bucket': {'type': 'string'}, 'prefix': {'type': 'string'}, }, }, 'account': { 'type': 'object', 'additionalProperties': False, 'required': ['role', 'groups'], 'properties': { 'name': {'type': 'string'}, 'role': {'oneOf': [ {'type': 'array', 'items': {'type': 'string'}}, {'type': 'string'}]}, 'groups': { 'type': 'array', 'items': {'type': 'string'} } } } }, 'type': 'object', 
'additionalProperties': False, 'required': ['accounts', 'destination'], 'properties': { 'accounts': { 'type': 'array', 'items': {'$ref': '#/definitions/account'} }, 'destination': {'$ref': '#/definitions/destination'} } } def debug(func): @functools.wraps(func) def run(*args, **kw): try: return func(*args, **kw) except SystemExit: raise except Exception: import traceback import pdb import sys traceback.print_exc() pdb.post_mortem(sys.exc_info()[-1]) raise return run @click.group() def cli(): """c7n cloudwatch log group exporter""" @cli.command() @click.option('--config', type=click.Path()) def validate(config): """validate config file""" with open(config) as fh: content = fh.read() try: data = yaml.safe_load(content) except Exception: log.error("config file: %s is not valid yaml", config) raise try: jsonschema.validate(data, CONFIG_SCHEMA) except Exception: log.error("config file: %s is not valid", config) raise log.info("config file valid, accounts:%d", len(data['accounts'])) return data @cli.command() @click.option('--config', type=click.Path(), required=True) @click.option('--start', required=True) @click.option('--end') @click.option('-a', '--accounts', multiple=True) @click.option('--debug', is_flag=True, default=False) def run(config, start, end, accounts): """run export across accounts and log groups specified in config.""" config = validate.callback(config) destination = config.get('destination') start = start and parse(start) or start end = end and parse(end) or datetime.now() executor = debug and MainThreadExecutor or ThreadPoolExecutor with executor(max_workers=32) as w: futures = {} for account in config.get('accounts', ()): if accounts and account['name'] not in accounts: continue futures[ w.submit(process_account, account, start, end, destination)] = account for f in as_completed(futures): account = futures[f] if f.exception(): log.error("Error on account %s err: %s", account['name'], f.exception()) log.info("Completed %s", account['name']) def 
lambdafan(func): """simple decorator that will auto fan out async style in lambda. outside of lambda, this will invoke synchrously. """ if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ: return func @functools.wraps(func) def scaleout(*args, **kw): client = boto3.client('lambda') client.invoke( FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'], InvocationType='Event', Payload=dumps({ 'event': 'fanout', 'function': func.__name__, 'args': args, 'kwargs': kw}), Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION']) return scaleout @lambdafan def process_account(account, start, end, destination, incremental=True): session = get_session(account['role']) client = session.client('logs') paginator = client.get_paginator('describe_log_groups') all_groups = [] for p in paginator.paginate(): all_groups.extend([g for g in p.get('logGroups', ())]) group_count = len(all_groups) groups = filter_creation_date( filter_group_names(all_groups, account['groups']), start, end) if incremental: groups = filter_last_write(client, groups, start) account_id = session.client('sts').get_caller_identity()['Account'] prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id log.info("account:%s matched %d groups of %d", account.get('name', account_id), len(groups), group_count) if not groups: log.warning("account:%s no groups matched, all groups \n %s", account.get('name', account_id), "\n ".join( [g['logGroupName'] for g in all_groups])) t = time.time() for g in groups: export.callback( g, destination['bucket'], prefix, g['exportStart'], end, account['role'], name=account['name']) log.info("account:%s exported %d log groups in time:%0.2f", account.get('name') or account_id, len(groups), time.time() - t) def get_session(role, session_name="c7n-log-exporter", session=None): if role == 'self': session = boto3.Session() elif isinstance(role, basestring): session = assumed_session(role, session_name) elif isinstance(role, list): session = None for r in role: session = assumed_session(r, 
session_name, session=session) else: session = boto3.Session() return session def filter_group_names(groups, patterns): """Filter log groups by shell patterns. """ group_names = [g['logGroupName'] for g in groups] matched = set() for p in patterns: matched.update(fnmatch.filter(group_names, p)) return [g for g in groups if g['logGroupName'] in matched] def filter_creation_date(groups, start, end): """Filter log groups by their creation date. Also sets group specific value for start to the minimum of creation date or start. """ results = [] for g in groups: created = datetime.fromtimestamp(g['creationTime'] / 1000.0) if created > end: continue if created > start: g['exportStart'] = created else: g['exportStart'] = start results.append(g) return results def filter_last_write(client, groups, start): """Filter log groups where the last write was before the start date. """ retry = get_retry(('ThrottlingException',)) def process_group(group_set): matched = [] for g in group_set: streams = retry( client.describe_log_streams, logGroupName=g['logGroupName'], orderBy='LastEventTime', limit=1, descending=True) if not streams.get('logStreams'): continue stream = streams['logStreams'][0] if stream['storedBytes'] == 0 and datetime.fromtimestamp( stream['creationTime'] / 1000) > start: matched.append(g) elif 'lastIngestionTime' in stream and datetime.fromtimestamp( stream['lastIngestionTime'] / 1000) > start: matched.append(g) return matched results = [] with ThreadPoolExecutor(max_workers=3) as w: futures = {} for group_set in chunks(groups, 10): futures[w.submit(process_group, group_set)] = group_set for f in as_completed(futures): if f.exception(): log.error( "Error processing groupset:%s error:%s", group_set, f.exception()) results.extend(f.result()) return results def filter_extant_exports(client, bucket, prefix, days, start, end=None): """Filter days where the bucket already has extant export keys. 
""" end = end or datetime.now() # days = [start + timedelta(i) for i in range((end-start).days)] try: tag_set = client.get_object_tagging(Bucket=bucket, Key=prefix).get('TagSet', []) except ClientError as e: if e.response['Error']['Code'] != 'NoSuchKey': raise tag_set = [] tags = {t['Key']: t['Value'] for t in tag_set} if 'LastExport' not in tags: return sorted(days) last_export = parse(tags['LastExport']) if last_export.tzinfo is None: last_export = last_export.replace(tzinfo=tzutc()) return [d for d in sorted(days) if d > last_export] @cli.command() @click.option('--config', type=click.Path(), required=True) @click.option('-a', '--accounts', multiple=True) def access(config, accounts=()): """Check iam permissions for log export access in each account""" config = validate.callback(config) accounts_report = [] def check_access(account): accounts_report.append(account) session = get_session(account['role']) identity = session.client('sts').get_caller_identity() account['account_id'] = identity['Account'] account.pop('groups') account.pop('role') client = session.client('iam') policy_arn = identity['Arn'] if policy_arn.count('/') > 1: policy_arn = policy_arn.rsplit('/', 1)[0] if ':sts:' in policy_arn: policy_arn = policy_arn.replace(':sts', ':iam') if ':assumed-role' in policy_arn: policy_arn = policy_arn.replace(':assumed-role', ':role') evaluation = client.simulate_principal_policy( PolicySourceArn=policy_arn, ActionNames=['logs:CreateExportTask'])['EvaluationResults'] account['access'] = evaluation[0]['EvalDecision'] with ThreadPoolExecutor(max_workers=16) as w: futures = {} for account in config.get('accounts', ()): if accounts and account['name'] not in accounts: continue futures[w.submit(check_access, account)] = None for f in as_completed(futures): pass accounts_report.sort(key=operator.itemgetter('access'), reverse=True) print(tabulate(accounts_report, headers='keys')) def GetHumanSize(size, precision=2): # interesting discussion on 1024 vs 1000 as base # 
https://en.wikipedia.org/wiki/Binary_prefix suffixes = ['B','KB','MB','GB','TB', 'PB'] suffixIndex = 0 while size > 1024: suffixIndex += 1 size = size / 1024.0 return "%.*f %s" % (precision, size, suffixes[suffixIndex]) @cli.command() @click.option('--config', type=click.Path(), required=True) @click.option('-a', '--accounts', multiple=True) @click.option('--day', required=True, help="calculate sizes for this day") @click.option('--group', required=True) @click.option('--human/--no-human', default=True) def size(config, accounts=(), day=None, group=None, human=True): """size of exported records for a given day.""" config = validate.callback(config) destination = config.get('destination') client = boto3.Session().client('s3') day = parse(day) def export_size(client, account): paginator = client.get_paginator('list_objects_v2') count = 0 size = 0 session = get_session(account['role']) account_id = session.client('sts').get_caller_identity()['Account'] prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id prefix = "%s/%s/%s" % (prefix, group, day.strftime("%Y/%m/%d")) account['account_id'] = account_id for page in paginator.paginate( Bucket=destination['bucket'], Prefix=prefix): for k in page.get('Contents', ()): size += k['Size'] count += 1 return (count, size) total_size = 0 accounts_report = [] logging.getLogger('botocore').setLevel(logging.ERROR) with ThreadPoolExecutor(max_workers=16) as w: futures = {} for account in config.get('accounts'): if accounts and account['name'] not in accounts: continue futures[w.submit(export_size, client, account)] = account for f in as_completed(futures): account = futures[f] count, size = f.result() account.pop('role') account.pop('groups') total_size += size if human: account['size'] = GetHumanSize(size) else: account['size'] = size account['count'] = count accounts_report.append(account) accounts_report.sort(key=operator.itemgetter('count'), reverse=True) print(tabulate(accounts_report, headers='keys')) 
log.info("total size:%s", GetHumanSize(total_size)) @cli.command() @click.option('--config', type=click.Path(), required=True) @click.option('-g', '--group', required=True) @click.option('-a', '--accounts', multiple=True) @click.option('--dryrun/--no-dryrun', is_flag=True, default=False) def sync(config, group, accounts=(), dryrun=False): """sync last recorded export to actual Use --dryrun to check status. """ config = validate.callback(config) destination = config.get('destination') client = boto3.Session().client('s3') for account in config.get('accounts', ()): if accounts and account['name'] not in accounts: continue session = get_session(account['role']) account_id = session.client('sts').get_caller_identity()['Account'] prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id prefix = "%s/%s" % (prefix, group) exports = get_exports(client, destination['bucket'], prefix + "/") role = account.pop('role') if isinstance(role, basestring): account['account_id'] = role.split(':')[4] else: account['account_id'] = role[-1].split(':')[4] account.pop('groups') if exports: last_export = exports.pop() account['export'] = last_export else: account['export'] = 'missing' last_export = None try: tag_set = client.get_object_tagging( Bucket=destination['bucket'], Key=prefix).get('TagSet', []) except: tag_set = [] tags = {t['Key']: t['Value'] for t in tag_set} tagged_last_export = None if 'LastExport' in tags: le = parse(tags['LastExport']) tagged_last_export = (le.year, le.month, le.day) account['sync'] = tagged_last_export else: account['sync'] = account['export'] != 'missing' and 'sync' or 'missing' if last_export is None: continue if tagged_last_export == last_export or account['export'] == 'missing': continue if dryrun: continue client.put_object( Bucket=destination['bucket'], Key=prefix, Body=json.dumps({}), ACL="bucket-owner-full-control", ServerSideEncryption="AES256") export_time = datetime.now().replace(tzinfo=tzlocal()).astimezone(tzutc()) export_time = 
export_time.replace( year=last_export[0], month=last_export[1], day=last_export[2], minute=0, second=0, microsecond=0, hour=0) client.put_object_tagging( Bucket=destination['bucket'], Key=prefix, Tagging={ 'TagSet': [{ 'Key': 'LastExport', 'Value': export_time.isoformat()}]}) accounts_report = [] for a in config.get('accounts'): if accounts and a['name'] not in accounts: continue if isinstance(a['sync'], tuple): a['sync'] = "%s/%s/%s" % (a['sync']) if isinstance(a['export'], tuple): a['export'] = "%s/%s/%s" % (a['export']) accounts_report.append(a) accounts_report.sort(key=operator.itemgetter('export'), reverse=True) print(tabulate(accounts_report, headers='keys')) @cli.command() @click.option('--config', type=click.Path(), required=True) @click.option('-g', '--group', required=True) @click.option('-a', '--accounts', multiple=True) def status(config, group, accounts=()): """report current export state status""" config = validate.callback(config) destination = config.get('destination') client = boto3.Session().client('s3') for account in config.get('accounts', ()): if accounts and account['name'] not in accounts: continue session = get_session(account['role']) account_id = session.client('sts').get_caller_identity()['Account'] prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id prefix = "%s/flow-log" % prefix role = account.pop('role') if isinstance(role, basestring): account['account_id'] = role.split(':')[4] else: account['account_id'] = role[-1].split(':')[4] account.pop('groups') try: tag_set = client.get_object_tagging( Bucket=destination['bucket'], Key=prefix).get('TagSet', []) except: account['export'] = 'missing' continue tags = {t['Key']: t['Value'] for t in tag_set} if 'LastExport' not in tags: account['export'] = 'empty' else: last_export = parse(tags['LastExport']) account['export'] = last_export.strftime('%Y/%m/%d') accounts = [a for a in config.get('accounts') if a in accounts or not accounts] 
accounts.sort(key=operator.itemgetter('export'), reverse=True) print(tabulate(accounts, headers='keys')) def get_exports(client, bucket, prefix, latest=True): """Find exports for a given account """ keys = client.list_objects_v2( Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', []) found = [] years = [] for y in keys: part = y['Prefix'].rsplit('/', 2)[-2] if not part.isdigit(): continue year = int(part) years.append(year) if not years: return [] years.sort(reverse=True) if latest: years = [years[0]] for y in years: keys = client.list_objects_v2( Bucket=bucket, Prefix="%s/%d/" % (prefix.strip('/'), y), Delimiter='/').get('CommonPrefixes', []) months = [] for m in keys: part = m['Prefix'].rsplit('/', 2)[-2] if not part.isdigit(): continue month = int(part) date_key = (y, month) months.append(month) months.sort(reverse=True) if not months: continue if latest: months = [months[0]] for m in months: keys = client.list_objects_v2( Bucket=bucket, Prefix="%s/%d/%s/" % ( prefix.strip('/'), y, ('%d' % m).rjust(2, '0')), Delimiter='/').get('CommonPrefixes', []) for d in keys: part = d['Prefix'].rsplit('/', 2)[-2] if not part.isdigit(): continue day = int(part) date_key = (y, m, day) found.append(date_key) found.sort(reverse=True) if latest: found = [found[0]] return found @cli.command() @click.option('--group', required=True) @click.option('--bucket', required=True) @click.option('--prefix') @click.option('--start', required=True, help="export logs from this date") @click.option('--end') @click.option('--role', help="sts role to assume for log group access") @click.option('--poll-period', type=float, default=300) # @click.option('--bucket-role', help="role to scan destination bucket") # @click.option('--stream-prefix) @lambdafan def export(group, bucket, prefix, start, end, role, poll_period=120, session=None, name=""): """export a given log group to s3""" start = start and isinstance(start, basestring) and parse(start) or start end = (end and 
isinstance(start, basestring) and parse(end) or end or datetime.now()) start = start.replace(tzinfo=tzlocal()).astimezone(tzutc()) end = end.replace(tzinfo=tzlocal()).astimezone(tzutc()) if session is None: session = get_session(role) client = session.client('logs') for _group in client.describe_log_groups()['logGroups']: if _group['logGroupName'] == group: break else: raise ValueError('Log group not found.') group = _group if prefix: prefix = "%s/%s" % (prefix.rstrip('/'), group['logGroupName'].strip('/')) else: prefix = group['logGroupName'] named_group = "%s:%s" % (name, group['logGroupName']) log.info( "Log exporting group:%s start:%s end:%s bucket:%s prefix:%s size:%s", named_group, start.strftime('%Y/%m/%d'), end.strftime('%Y/%m/%d'), bucket, prefix, group['storedBytes']) t = time.time() days = [(start + timedelta(i)).replace( minute=0, hour=0, second=0, microsecond=0) for i in range((end - start).days)] day_count = len(days) s3 = boto3.Session().client('s3') days = filter_extant_exports(s3, bucket, prefix, days, start, end) log.info("Group:%s filtering s3 extant keys from %d to %d start:%s end:%s", named_group, day_count, len(days), days[0] if days else '', days[-1] if days else '') t = time.time() retry = get_retry(('SlowDown',)) for idx, d in enumerate(days): date = d.replace(minute=0, microsecond=0, hour=0) export_prefix = "%s%s" % (prefix, date.strftime("/%Y/%m/%d")) params = { 'taskName': "%s-%s" % ("c7n-log-exporter", date.strftime("%Y-%m-%d")), 'logGroupName': group['logGroupName'], 'fromTime': int(time.mktime( date.replace( minute=0, microsecond=0, hour=0).timetuple()) * 1000), 'to': int(time.mktime( date.replace( minute=59, hour=23, microsecond=0).timetuple()) * 1000), 'destination': bucket, 'destinationPrefix': export_prefix } # if stream_prefix: # params['logStreamPrefix'] = stream_prefix try: s3.head_object(Bucket=bucket, Key=prefix) except ClientError as e: if e.response['Error']['Code'] != '404': # Not Found raise s3.put_object( Bucket=bucket, 
Key=prefix, Body=json.dumps({}), ACL="bucket-owner-full-control", ServerSideEncryption="AES256") t = time.time() counter = 0 while True: counter += 1 try: result = client.create_export_task(**params) except ClientError as e: if e.response['Error']['Code'] == 'LimitExceededException': time.sleep(poll_period) # log every 30m of export waiting if counter % 6 == 0: log.debug( "group:%s day:%s waiting for %0.2f minutes", named_group, d.strftime('%Y-%m-%d'), (counter * poll_period) / 60.0) continue raise retry( s3.put_object_tagging, Bucket=bucket, Key=prefix, Tagging={ 'TagSet': [{ 'Key': 'LastExport', 'Value': d.isoformat()}]}) break log.info( "Log export time:%0.2f group:%s day:%s bucket:%s prefix:%s task:%s", time.time() - t, named_group, d.strftime("%Y-%m-%d"), bucket, params['destinationPrefix'], result['taskId']) log.info( ("Exported log group:%s time:%0.2f days:%d start:%s" " end:%s bucket:%s prefix:%s"), named_group, time.time() - t, len(days), start.strftime('%Y/%m/%d'), end.strftime('%Y/%m/%d'), bucket, prefix) if __name__ == '__main__': cli()
sixfeetup/cloud-custodian
tools/c7n_logexporter/c7n_logexporter/exporter.py
Python
apache-2.0
26,318
from string import digits, ascii_letters

# Alphabet for the base-62 numeral system: '0'-'9', 'a'-'z', 'A'-'Z'.
valid_values = list(digits + ascii_letters)
radix = len(valid_values)  # numeral-system base (62)
# Reverse lookup table: character -> digit value.
# O(1) per character versus list.index's O(n) scan.
_char_to_value = {char: value for value, char in enumerate(valid_values)}


def convert(number):
    """Return the base-62 string representation of a non-negative integer.

    Fixes the zero edge case: the previous implementation returned ''
    for 0 because the while loop never ran; 0 now encodes as '0'.
    """
    if number == 0:
        return valid_values[0]
    result = []  # remainders collected least-significant digit first
    while number:
        result.append(valid_values[number % radix])
        number //= radix
    # append + reversed avoids the O(n) front-insert of the original.
    return ''.join(reversed(result))


def inverse(number):
    """Return the integer encoded by a base-62 string (inverse of convert)."""
    result = 0
    for char in number:  # most-significant digit first (Horner's method)
        result = result * radix + _char_to_value[char]
    return result
ksigorodetskaya/Python_Ifmo
Part1/url-shorter/url_shorter/converter.py
Python
apache-2.0
675
""" Created on Sep 14, 2015 @author: Mikhail """ from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.expected_conditions import visibility_of_element_located, visibility_of from selenium.common.exceptions import TimeoutException __author__ = 'Mikhail' class Page(object): def __init__(self, driver, url): self.driver = driver self.url = url self.wait = WebDriverWait(self.driver, 5) def open_page(self, url): self.driver.get(url) def is_element_visible_by_locator(self, locator): try: self.wait.until(visibility_of_element_located(locator)) except TimeoutException: return False return True def is_element_visible(self, element): try: self.wait.until(visibility_of(element)) except TimeoutException: return False return True
MikeLaptev/sandbox_python
mera/selenium_training_automation/pages/page.py
Python
apache-2.0
913
import requests from requests.auth import HTTPBasicAuth def get_data(config): auth = HTTPBasicAuth(config['authentication']['username'], config['authentication']['password']) resp = requests.get(config['host'] + '/api/queues', auth=auth) queues = resp.json() data = {} for queue in queues: name = queue['name'] message_stats = queue.get('message_stats', {}) queue_size = queue.get('messages') ack_rate = (message_stats.get('ack_details') or {}).get('rate') nack_rate = (message_stats.get('redeliver_details') or {}).get('rate') (inactive_threshold, active_threshold, nack_threshold) = (50, 5000, 1) for qs_name, qs_threshold in config['queue_sizes'].items(): if name.startswith(qs_name): (inactive_threshold, active_threshold, nack_threshold) = qs_threshold data[name + ' queue'] = { 'state': 'FAIL' if (queue_size > inactive_threshold and (ack_rate < 2 or ack_rate is None) or queue_size > active_threshold or nack_rate > nack_threshold) else 'OK', 'message': 'size is %d, ack rate is %.2f, nack rate is %.2f' % (queue_size if queue_size else 0, ack_rate if ack_rate else 0, nack_rate if nack_rate else 0) } return data
kierenbeckett/sentinel
sentinel/alert_plugins/rabbit_queues.py
Python
apache-2.0
1,275
# pylint: disable=missing-docstring
# this fails on Python 2.6 but Slurm environment is 2.7
import unittest
from datetime import datetime

from reporting.plugins.slurm import SlurmInput

# Shared sacct fixture consumed by every test below.
SACCT_FIXTURE = 'tests/sacct-with-start-end.txt'


class SlurmTestCase(unittest.TestCase):
    """Test cases for slurm module"""

    def test_all_heros(self):
        """Slurm plugin: no user other than hero should be in test/sacct-with-start-end.txt"""
        result = SlurmInput(path=SACCT_FIXTURE).get_data()
        for record in result['jobs']:
            self.assertTrue(record['user'].startswith('hero'))

    def test_get_data(self):
        """Slurm plugin: get_data method should return a message in correct structure"""
        result = SlurmInput(path=SACCT_FIXTURE).get_data()
        for top_key in ('hostname', 'timestamp', 'jobs'):
            self.assertIn(top_key, result)
        self.assertTrue(isinstance(result['jobs'], list))
        first_job = result['jobs'][0]
        for field in ('job_id', 'partition', 'user', 'start', 'end', 'cpu_seconds'):
            self.assertIn(field, first_job)

    def test_read_data(self):
        """Slurm plugin: _read_data should only return job summary not steps, those do not have User value"""
        messages = SlurmInput._read_data(SACCT_FIXTURE)
        # Every returned message must carry a non-blank 'user' field.
        missing_user = [m for m in messages
                        if 'user' not in m or not len(m['user'].strip())]
        self.assertEqual(len(missing_user), 0)

    def test_convert_to_timestamp(self):
        """Slurm plugin: _convert_to_timestamp should convert iso datetime to timestamp string correctly"""
        iso_fmt = '%Y-%m-%dT%H:%M:%S'
        reference = datetime.utcnow().strftime(iso_fmt)
        round_tripped = datetime.utcfromtimestamp(
            SlurmInput._convert_to_timestamp(reference)).strftime(iso_fmt)
        self.assertEqual(reference, round_tripped)
eResearchSA/reporting-producers
tests/test_plugin_slurm.py
Python
apache-2.0
2,011
#BOOTSTRAP CODE try: from urllib3.request import urlopen except ImportError: from urllib import urlopen import random handle = urlopen("https://raw.githubusercontent.com/Jumpscale/jumpscale_core7/master/install/InstallTools.py?%s"%random.randint(1, 10000000)) #this is to protect against caching proxy servers exec(handle.read()) #look at methods in https://github.com/Jumpscale/jumpscale_core7/blob/master/install/InstallTools.py to see what can be used #there are some easy methods to allow git manipulation, copy of files, execution of items #there are many more functions available in jumpscale print "prepare jumpscale docker" do.installDocker() url="https://github.com/Jumpscale/docker" do.pullGitRepo(url,dest=None,login=None,passwd=None,depth=None,ignorelocalchanges=False,reset=False,branch="master") cmd="cd /opt/code/github/jumpscale/docker/image_js;docker build -t despiegk/js ." do.executeInteractive(cmd) # from JumpScale import j #j.system....
Jumpscale/docker
install/bootstrap_js.py
Python
apache-2.0
978
# -*- coding: utf-8 -*- # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # google-cloud-domains documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) # For plugins that can not read conf.py. # See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 sys.path.insert(0, os.path.abspath(".")) __version__ = "" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", "recommonmark", ] # autodoc/autosummary flags autoclass_content = "both" autodoc_default_options = {"members": True} autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' # The root toctree document. root_doc = "index" # General information about the project. project = "google-cloud-domains" copyright = "2019, Google" author = "Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ "_build", "**/.nox/**/*", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", "samples/snippets/README.rst", ] # The reST default role (used for this markup: `text`) to use for all # documents. 
# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Google Cloud Client Libraries for google-cloud-domains", "github_user": "googleapis", "github_repo": "python-domains", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", "code_font_family": "'Roboto Mono', 'Consolas', monospace", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. 
".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-domains-doc" # -- Options for warnings ------------------------------------------------------ suppress_warnings = [ # Temporarily suppress this to avoid "more than one target found for # cross-reference" warning, which are intractable for us to avoid while in # a mono-repo. # See https://github.com/sphinx-doc/sphinx/blob # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 "ref.python" ] # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( root_doc, "google-cloud-domains.tex", "google-cloud-domains Documentation", author, "manual", ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
# latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( root_doc, "google-cloud-domains", "google-cloud-domains Documentation", [author], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( root_doc, "google-cloud-domains", "google-cloud-domains Documentation", author, "google-cloud-domains", "google-cloud-domains Library", "APIs", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { "python": ("https://python.readthedocs.org/en/latest/", None), "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), } # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = False napoleon_use_param = True napoleon_use_rtype = True
googleapis/python-domains
docs/conf.py
Python
apache-2.0
12,378
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.tf.scatter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def _AsType(v, vtype): return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v) def _NumpyAdd(ref, indices, updates): # Since numpy advanced assignment does not support repeated indices, # we run a simple loop to perform scatter_add. 
for i, indx in np.ndenumerate(indices): ref[indx] += updates[i] def _NumpyAddScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] += update def _NumpySub(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] -= updates[i] def _NumpySubScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] -= update def _NumpyMul(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] *= updates[i] def _NumpyMulScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] *= update def _NumpyDiv(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] /= updates[i] def _NumpyDivScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] /= update def _NumpyMin(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] = np.minimum(ref[indx], updates[i]) def _NumpyMinScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] = np.minimum(ref[indx], update) def _NumpyMax(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] = np.maximum(ref[indx], updates[i]) def _NumpyMaxScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] = np.maximum(ref[indx], update) def _NumpyUpdate(ref, indices, updates): for i, indx in np.ndenumerate(indices): ref[indx] = updates[i] def _NumpyUpdateScalar(ref, indices, update): for _, indx in np.ndenumerate(indices): ref[indx] = update _TF_OPS_TO_NUMPY = { state_ops.scatter_update: _NumpyUpdate, state_ops.scatter_add: _NumpyAdd, state_ops.scatter_sub: _NumpySub, state_ops.scatter_mul: _NumpyMul, state_ops.scatter_div: _NumpyDiv, state_ops.scatter_min: _NumpyMin, state_ops.scatter_max: _NumpyMax, } _TF_OPS_TO_NUMPY_SCALAR = { state_ops.scatter_update: _NumpyUpdateScalar, state_ops.scatter_add: _NumpyAddScalar, state_ops.scatter_sub: _NumpySubScalar, state_ops.scatter_mul: _NumpyMulScalar, state_ops.scatter_div: _NumpyDivScalar, 
state_ops.scatter_min: _NumpyMinScalar, state_ops.scatter_max: _NumpyMaxScalar, } class ScatterTest(test.TestCase): def _VariableRankTest(self, tf_scatter, vtype, itype, repeat_indices=False, updates_are_scalar=False): np.random.seed(8) with self.cached_session(use_gpu=True): for indices_shape in (), (2,), (3, 7), (3, 4, 7): for extra_shape in (), (5,), (5, 9): # Generate random indices with no duplicates for easy numpy comparison size = np.prod(indices_shape, dtype=itype) first_dim = 3 * size indices = np.arange(first_dim) np.random.shuffle(indices) indices = indices[:size] if size > 1 and repeat_indices: # Add some random repeats. indices = indices[:size // 2] for _ in range(size - size // 2): # Randomly append some repeats. indices = np.append(indices, indices[np.random.randint(size // 2)]) np.random.shuffle(indices) indices = indices.reshape(indices_shape) if updates_are_scalar: updates = _AsType(np.random.randn(), vtype) else: updates = _AsType( np.random.randn(*(indices_shape + extra_shape)), vtype) # Clips small values to avoid division by zero. 
def clip_small_values(x): threshold = 1e-4 sign = np.sign(x) if isinstance(x, np.int32): threshold = 1 sign = np.random.choice([-1, 1]) return threshold * sign if np.abs(x) < threshold else x updates = np.vectorize(clip_small_values)(updates) old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype) # Scatter via numpy new = old.copy() if updates_are_scalar: np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter] else: np_scatter = _TF_OPS_TO_NUMPY[tf_scatter] np_scatter(new, indices, updates) # Scatter via tensorflow ref = variables.VariableV1(old) ref.initializer.run() tf_scatter(ref, indices, updates).eval() self.assertAllClose(ref.eval(), new) def _VariableRankTests(self, tf_scatter, repeat_indices=False, updates_are_scalar=False): vtypes = [np.float32, np.float64] if tf_scatter != state_ops.scatter_div: vtypes.append(np.int32) for vtype in vtypes: for itype in (np.int32, np.int64): self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices, updates_are_scalar) def testVariableRankUpdate(self): self._VariableRankTests(state_ops.scatter_update, False) def testVariableRankAdd(self): self._VariableRankTests(state_ops.scatter_add, False) def testVariableRankSub(self): self._VariableRankTests(state_ops.scatter_sub, False) def testVariableRankMul(self): self._VariableRankTests(state_ops.scatter_mul, False) def testVariableRankDiv(self): self._VariableRankTests(state_ops.scatter_div, False) def testVariableRankMin(self): self._VariableRankTests(state_ops.scatter_min, False) def testVariableRankMax(self): self._VariableRankTests(state_ops.scatter_max, False) def testRepeatIndicesAdd(self): self._VariableRankTests(state_ops.scatter_add, True) def testRepeatIndicesSub(self): self._VariableRankTests(state_ops.scatter_sub, True) def testRepeatIndicesMul(self): self._VariableRankTests(state_ops.scatter_mul, True) def testRepeatIndicesDiv(self): self._VariableRankTests(state_ops.scatter_div, True) def testRepeatIndicesMin(self): 
self._VariableRankTests(state_ops.scatter_min, True) def testRepeatIndicesMax(self): self._VariableRankTests(state_ops.scatter_max, True) def testVariableRankUpdateScalar(self): self._VariableRankTests(state_ops.scatter_update, False, True) def testVariableRankAddScalar(self): self._VariableRankTests(state_ops.scatter_add, False, True) def testVariableRankSubScalar(self): self._VariableRankTests(state_ops.scatter_sub, False, True) def testVariableRankMulScalar(self): self._VariableRankTests(state_ops.scatter_mul, False, True) def testVariableRankDivScalar(self): self._VariableRankTests(state_ops.scatter_div, False, True) def testVariableRankMinScalar(self): self._VariableRankTests(state_ops.scatter_min, False, True) def testVariableRankMaxScalar(self): self._VariableRankTests(state_ops.scatter_max, False, True) def testRepeatIndicesAddScalar(self): self._VariableRankTests(state_ops.scatter_add, True, True) def testRepeatIndicesSubScalar(self): self._VariableRankTests(state_ops.scatter_sub, True, True) def testRepeatIndicesMulScalar(self): self._VariableRankTests(state_ops.scatter_mul, True, True) def testRepeatIndicesDivScalar(self): self._VariableRankTests(state_ops.scatter_div, True, True) def testRepeatIndicesMinScalar(self): self._VariableRankTests(state_ops.scatter_min, True, True) def testRepeatIndicesMaxScalar(self): self._VariableRankTests(state_ops.scatter_max, True, True) def testBooleanScatterUpdate(self): if not test.is_gpu_available(): with self.session(use_gpu=False) as session: var = variables.Variable([True, False]) update0 = state_ops.scatter_update(var, 1, True) update1 = state_ops.scatter_update( var, constant_op.constant( 0, dtype=dtypes.int64), False) var.initializer.run() session.run([update0, update1]) self.assertAllEqual([False, True], self.evaluate(var)) def testScatterOutOfRangeCpu(self): for op, _ in _TF_OPS_TO_NUMPY.items(): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) updates = np.array([-3, -4, -5]).astype(np.float32) if 
not test.is_gpu_available(): with self.session(use_gpu=False): ref = variables.VariableV1(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([2, 0, 5]) op(ref, indices, updates).eval() # Test some out of range errors. indices = np.array([-1, 0, 5]) with self.assertRaisesOpError( r'indices\[0\] = -1 is not in \[0, 6\)'): op(ref, indices, updates).eval() indices = np.array([2, 0, 6]) with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'): op(ref, indices, updates).eval() # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU. def _disabledTestScatterOutOfRangeGpu(self): if test.is_gpu_available(): return for op, _ in _TF_OPS_TO_NUMPY.items(): params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32) updates = np.array([-3, -4, -5]).astype(np.float32) # With GPU, the code ignores indices that are out of range. # We don't test the implementation; just test there's no failures. with self.cached_session(force_gpu=True): ref = variables.Variable(params) ref.initializer.run() # Indices all in range, no problem. indices = np.array([2, 0, 5]) op(ref, indices, updates).eval() # Indicies out of range should not fail. indices = np.array([-1, 0, 5]) op(ref, indices, updates).eval() indices = np.array([2, 0, 6]) op(ref, indices, updates).eval() if __name__ == '__main__': test.main()
brchiu/tensorflow
tensorflow/python/kernel_tests/scatter_ops_test.py
Python
apache-2.0
11,256
import logging


def init_logger():
    """Configure the 'redberry' logger with a DEBUG-level console handler.

    Fix: the original attached a new StreamHandler on every call, so calling
    init_logger() more than once duplicated every log line. The handler is
    now added only when the logger has none, making the function idempotent.
    """
    logger = logging.getLogger('redberry')
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        formatter = logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
        console = logging.StreamHandler()
        console.setFormatter(formatter)
        logger.addHandler(console)
michaelcho/redberry
redberry/utils/logger.py
Python
apache-2.0
327
import os, sys import unittest import vtk, qt, ctk, slicer from slicer.ScriptedLoadableModule import * import logging import csv from slicer.util import VTKObservationMixin import platform import time import urllib import shutil from CommonUtilities import * from packaging import version def _setSectionResizeMode(header, *args, **kwargs): if version.parse(qt.Qt.qVersion()) < version.parse("5.0.0"): header.setResizeMode(*args, **kwargs) else: header.setSectionResizeMode(*args, **kwargs) # # ShapeAnalysisModule # class ShapeAnalysisModule(ScriptedLoadableModule): """Uses ScriptedLoadableModule base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def __init__(self, parent): ScriptedLoadableModule.__init__(self, parent) self.parent.title = "Shape Analysis Module" self.parent.categories = ["SPHARM"] self.parent.dependencies = [] self.parent.contributors = ["Laura Pascal (Kitware Inc.), Beatriz Paniagua (Kitware Inc.), Hina Shah (Kitware Inc.)"] self.parent.helpText = """ SPHARM-PDM is a tool that computes point-based models using a parametric boundary description for the computing of Shape Analysis. """ self.parent.acknowledgementText = """ This work was supported by NIH NIBIB R01EB021391 (Shape Analysis Toolbox for Medical Image Computing Projects). 
""" # # ShapeAnalysisModuleWidget # class ShapeAnalysisModuleWidget(ScriptedLoadableModuleWidget): """Uses ScriptedLoadableModuleWidget base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def setup(self): ScriptedLoadableModuleWidget.setup(self) # # Global variables # self.Logic = ShapeAnalysisModuleLogic() self.progressbars_layout = None # # Interface # loader = qt.QUiLoader() self.moduleName = 'ShapeAnalysisModule' scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower()) scriptedModulesPath = os.path.dirname(scriptedModulesPath) path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName) qfile = qt.QFile(path) qfile.open(qt.QFile.ReadOnly) widget = loader.load(qfile, self.parent) self.layout = self.parent.layout() self.widget = widget self.layout.addWidget(widget) # Global variables of the Interface # Group Project IO self.CollapsibleButton_GroupProjectIO = self.getWidget('CollapsibleButton_GroupProjectIO') self.GroupProjectInputDirectory = self.getWidget('DirectoryButton_GroupProjectInputDirectory') self.GroupProjectOutputDirectory = self.getWidget('DirectoryButton_GroupProjectOutputDirectory') self.Debug = self.getWidget('checkBox_Debug') # Post Processed Segmentation self.CollapsibleButton_SegPostProcess = self.getWidget('CollapsibleButton_SegPostProcess') self.OverwriteSegPostProcess = self.getWidget('checkBox_OverwriteSegPostProcess') self.label_RescaleSegPostProcess = self.getWidget('label_RescaleSegPostProcess') self.RescaleSegPostProcess = self.getWidget('checkBox_RescaleSegPostProcess') self.sx = self.getWidget('SliderWidget_sx') self.sy = self.getWidget('SliderWidget_sy') self.sz = self.getWidget('SliderWidget_sz') self.label_sx = self.getWidget('label_sx') self.label_sy = self.getWidget('label_sy') self.label_sz = self.getWidget('label_sz') self.LabelState = self.getWidget('checkBox_LabelState') self.label_ValueLabelNumber = 
self.getWidget('label_ValueLabelNumber') self.ValueLabelNumber = self.getWidget('SliderWidget_ValueLabelNumber') # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh = self.getWidget('CollapsibleButton_GenParaMesh') self.OverwriteGenParaMesh = self.getWidget('checkBox_OverwriteGenParaMesh') self.NumberofIterations = self.getWidget('SliderWidget_NumberofIterations') # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh = self.getWidget('CollapsibleButton_ParaToSPHARMMesh') self.OverwriteParaToSPHARMMesh = self.getWidget('checkBox_OverwriteParaToSPHARMMesh') self.SubdivLevelValue = self.getWidget('SliderWidget_SubdivLevelValue') self.SPHARMDegreeValue = self.getWidget('SliderWidget_SPHARMDegreeValue') self.thetaIterationValue = self.getWidget('spinBox_thetaIterationValue') self.phiIterationValue = self.getWidget('spinBox_phiIterationValue') self.medialMesh = self.getWidget('checkBox_medialMesh') # Advanced Post Processed Segmentation self.CollapsibleButton_AdvancedPostProcessedSegmentation = self.getWidget('CollapsibleButton_AdvancedPostProcessedSegmentation') self.GaussianFiltering = self.getWidget('checkBox_GaussianFiltering') self.label_VarianceX = self.getWidget('label_VarianceX') self.VarianceX = self.getWidget('SliderWidget_VarianceX') self.label_VarianceY = self.getWidget('label_VarianceY') self.VarianceY = self.getWidget('SliderWidget_VarianceY') self.label_VarianceZ = self.getWidget('label_VarianceZ') self.VarianceZ = self.getWidget('SliderWidget_VarianceZ') # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh = self.getWidget('CollapsibleButton_AdvancedParametersToSPHARMMesh') self.useRegTemplate = self.getWidget('checkBox_useRegTemplate') self.label_regTemplate = self.getWidget('label_regTemplate') self.regTemplate = self.getWidget('PathLineEdit_regTemplate') self.useFlipTemplate = self.getWidget('checkBox_useFlipTemplate') self.label_flipTemplate = self.getWidget('label_flipTemplate') 
self.flipTemplate = self.getWidget('PathLineEdit_flipTemplate') self.choiceOfFlip = self.getWidget('comboBox_choiceOfFlip') self.sameFlipForAll = self.getWidget('checkBox_sameFlipForAll') self.tableWidget_ChoiceOfFlip = self.getWidget('tableWidget_ChoiceOfFlip') # Visualization self.CollapsibleButton_Visualization = self.getWidget('CollapsibleButton_Visualization') self.visualizationInSPV = self.getWidget('pushButton_visualizationInSPV') self.CheckableComboBox_visualization = self.getWidget('CheckableComboBox_visualization') self.tableWidget_visualization = self.getWidget('tableWidget_visualization') # Apply CLIs self.ApplyButton = self.getWidget('applyButton') self.progress_layout = self.getWidget('progress_layout') # Connections # Group Project IO self.CollapsibleButton_GroupProjectIO.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_GroupProjectIO)) self.GroupProjectInputDirectory.connect('directoryChanged(const QString &)', self.onInputDirectoryChanged) self.GroupProjectOutputDirectory.connect('directoryChanged(const QString &)', self.onOutputDirectoryChanged) self.Debug.connect('clicked(bool)', self.onDebug) # Post Processed Segmentation self.CollapsibleButton_SegPostProcess.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_SegPostProcess)) self.OverwriteSegPostProcess.connect('clicked(bool)', self.onOverwriteFilesSegPostProcess) self.RescaleSegPostProcess.connect('stateChanged(int)', self.onSelectSpacing) self.sx.connect('valueChanged(double)', self.onSxValueChanged) self.sy.connect('valueChanged(double)', self.onSyValueChanged) self.sz.connect('valueChanged(double)', self.onSzValueChanged) self.LabelState.connect('clicked(bool)', self.onSelectValueLabelNumber) self.ValueLabelNumber.connect('valueChanged(double)', self.onLabelNumberValueChanged) # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( 
self.CollapsibleButton_GenParaMesh)) self.OverwriteGenParaMesh.connect('clicked(bool)', self.onOverwriteFilesGenParaMesh) self.NumberofIterations.connect('valueChanged(double)', self.onNumberofIterationsValueChanged) # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_ParaToSPHARMMesh)) self.OverwriteParaToSPHARMMesh.connect('clicked(bool)', self.onOverwriteFilesParaToSPHARMMesh) self.SubdivLevelValue.connect('valueChanged(double)', self.onSubdivLevelValueChanged) self.SPHARMDegreeValue.connect('valueChanged(double)', self.onSPHARMDegreeValueChanged) self.thetaIterationValue.connect('valueChanged(int)', self.onThetaIterationValueChanged) self.phiIterationValue.connect('valueChanged(int)', self.onPhiIterationValueChanged) self.medialMesh.connect('clicked(bool)', self.onMedialMeshValueChanged) # Advanced Post Processed Segmentation self.CollapsibleButton_AdvancedPostProcessedSegmentation.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_AdvancedPostProcessedSegmentation)) self.GaussianFiltering.connect('clicked(bool)', self.onSelectGaussianVariance) self.VarianceX.connect('valueChanged(double)', self.onVarianceXValueChanged) self.VarianceY.connect('valueChanged(double)', self.onVarianceYValueChanged) self.VarianceZ.connect('valueChanged(double)', self.onVarianceZValueChanged) # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_AdvancedParametersToSPHARMMesh)) self.useRegTemplate.connect('clicked(bool)', self.onEnableRegTemplate) self.regTemplate.connect('currentPathChanged(const QString)', self.onRegTemplateValueChanged) self.useFlipTemplate.connect('clicked(bool)', self.onEnableFlipTemplate) self.flipTemplate.connect('currentPathChanged(const QString)', self.onFlipTemplateValueChanged) 
self.choiceOfFlip.connect('currentIndexChanged(int)', self.onChoiceOfFlipValueChanged) self.sameFlipForAll.connect('clicked(bool)', self.onEnableFlipChoices) # Visualization self.CollapsibleButton_Visualization.connect('clicked()', lambda: self.onSelectedCollapsibleButtonOpen( self.CollapsibleButton_Visualization)) self.CheckableComboBox_visualization.connect('checkedIndexesChanged()', self.onCheckableComboBoxValueChanged) self.visualizationInSPV.connect('clicked(bool)', self.onSPHARMMeshesVisualizationInSPV) # Apply CLIs self.ApplyButton.connect('clicked(bool)', self.onApplyButton) slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene) # Widget Configuration # Table for the Flip Options self.tableWidget_ChoiceOfFlip.setColumnCount(2) self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip ']) self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False) # Progress Bar self.progress_layout.addWidget(self.Logic.ProgressBar) # Table for the visualization in SPV self.tableWidget_visualization.setColumnCount(2) self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization ']) self.tableWidget_visualization.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_visualization.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_visualization.verticalHeader().setVisible(False) # Configuration of the parameters of the widget self.Logic.parameters.setTableForChoiceOfFlip(self.tableWidget_ChoiceOfFlip) def 
enter(self): if not hasattr(slicer.modules, 'shapepopulationviewer') and not hasattr(slicer.modules, 'launcher'): messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("Shape Population Viewer is not installed!") messageBox.setInformativeText("To install Shape Population Viewer in order to display the SPHARM meshes outputs generated by Shape Analysis Module, you can:\n" "Solution 1: \n" " - Install it via the Extensions Managers\n" " - Restart 3DSlicer\n" "Solution 2: \n" " - Download it on https://www.nitrc.org/projects/shapepopviewer/\n" " - Add the folder where you stored it in Edit/Application Settings/Modules/Add\n" " - Restart 3DSlicer") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() else: self.CollapsibleButton_Visualization.enabled = True def onCloseScene(self, obj, event): # Group Project IO self.CollapsibleButton_GroupProjectIO.setChecked(True) self.Logic.InputCases = [] self.GroupProjectInputDirectory.directory = slicer.app.slicerHome self.GroupProjectOutputDirectory.directory = slicer.app.slicerHome self.Debug.setChecked(False) # Post Processed Segmentation self.CollapsibleButton_SegPostProcess.setChecked(False) self.OverwriteSegPostProcess.setChecked(False) self.RescaleSegPostProcess.setChecked(True) self.sx.setValue(0.5) self.sy.setValue(0.5) self.sz.setValue(0.5) self.LabelState.setChecked(False) self.ValueLabelNumber.setValue(0) # Generate Mesh Parameters self.CollapsibleButton_GenParaMesh.setChecked(False) self.OverwriteGenParaMesh.setChecked(False) self.NumberofIterations.setValue(1000) # Parameters to SPHARM Mesh self.CollapsibleButton_ParaToSPHARMMesh.setChecked(False) self.OverwriteParaToSPHARMMesh.setChecked(False) self.SubdivLevelValue.setValue(10) self.SPHARMDegreeValue.setValue(15) self.thetaIterationValue.setValue(100) self.phiIterationValue.setValue(100) self.medialMesh.setChecked(False) # Advanced Post Processed Segmentation 
self.CollapsibleButton_AdvancedPostProcessedSegmentation.setChecked(False) self.GaussianFiltering.setChecked(False) self.VarianceX.setValue(10) self.VarianceY.setValue(10) self.VarianceZ.setValue(10) # Advanced Parameters to SPHARM Mesh self.CollapsibleButton_AdvancedParametersToSPHARMMesh.setChecked(False) self.useRegTemplate.setChecked(False) self.regTemplate.setCurrentPath(" ") self.useFlipTemplate.setChecked(False) self.flipTemplate.setCurrentPath(" ") self.choiceOfFlip.setCurrentIndex(0) self.choiceOfFlip.enabled = True self.sameFlipForAll.setChecked(True) self.tableWidget_ChoiceOfFlip.enabled = False self.tableWidget_ChoiceOfFlip.clear() self.tableWidget_ChoiceOfFlip.setColumnCount(2) self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip ']) self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False) # Visualization self.CollapsibleButton_Visualization.setChecked(False) self.CheckableComboBox_visualization.model().clear() self.tableWidget_visualization.clear() self.tableWidget_visualization.setColumnCount(2) self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization ']) self.tableWidget_visualization.setColumnWidth(0, 400) horizontalHeader = self.tableWidget_visualization.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) self.tableWidget_visualization.verticalHeader().setVisible(False) # Apply if self.ApplyButton.text == "Cancel": self.ApplyButton.click() self.Logic.ProgressBar.hide() if self.progressbars_layout: 
self.CLIProgressBars.hide() # Functions to recover the widget in the .ui file def getWidget(self, objectName): return self.findWidget(self.widget, objectName) def findWidget(self, widget, objectName): if widget.objectName == objectName: return widget else: for w in widget.children(): resulting_widget = self.findWidget(w, objectName) if resulting_widget: return resulting_widget return None # Only one tab can be displayed at the same time: # When one tab is opened all the other tabs are closed def onSelectedCollapsibleButtonOpen(self, selectedCollapsibleButton): if selectedCollapsibleButton.isChecked(): collapsibleButtonList = [self.CollapsibleButton_GroupProjectIO, self.CollapsibleButton_SegPostProcess, self.CollapsibleButton_GenParaMesh, self.CollapsibleButton_ParaToSPHARMMesh, self.CollapsibleButton_AdvancedPostProcessedSegmentation, self.CollapsibleButton_AdvancedParametersToSPHARMMesh, self.CollapsibleButton_Visualization] for collapsibleButton in collapsibleButtonList: collapsibleButton.setChecked(False) selectedCollapsibleButton.setChecked(True) # # Group Project IO # def onInputDirectoryChanged(self): inputDirectory = self.GroupProjectInputDirectory.directory.encode('utf-8') # Update of the input directory path self.Logic.parameters.setInputDirectory(inputDirectory) # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in os.listdir(inputDirectory): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) if file.endswith(".nii") or file.endswith(".nii.gz"): self.RescaleSegPostProcess.setCheckState(qt.Qt.Unchecked) self.label_RescaleSegPostProcess.enabled = False self.RescaleSegPostProcess.enabled = False # Update of the output directory path def onOutputDirectoryChanged(self): outputDirectory = self.GroupProjectOutputDirectory.directory.encode('utf-8') 
self.Logic.parameters.setOutputDirectory(outputDirectory) # Update of the debug parameter def onDebug(self): self.Logic.parameters.setDebug(self.Debug.checkState()) # # Post Processed Segmentation # def onOverwriteFilesSegPostProcess(self): # Update of the overwrite boolean for the Post Processed Segmentation step self.Logic.parameters.setOverwriteSegPostProcess(self.OverwriteSegPostProcess.checkState()) if self.OverwriteSegPostProcess.checkState(): # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>Applying the overwrite option to Post Processed Segmentation step will also apply to the next steps</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Check the overwrite option for the next steps self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState()) self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) def onSelectSpacing(self): # Update of the rescale boolean for the Post Processed Segmentation step self.Logic.parameters.setRescaleSegPostProcess(self.RescaleSegPostProcess.checkState()) # Enable/Disable the spacing x,y, and z parameters in the UI self.label_sx.enabled = self.RescaleSegPostProcess.checkState() self.label_sy.enabled = self.RescaleSegPostProcess.checkState() self.label_sz.enabled = self.RescaleSegPostProcess.checkState() self.sx.enabled = self.RescaleSegPostProcess.checkState() self.sy.enabled = self.RescaleSegPostProcess.checkState() self.sz.enabled = self.RescaleSegPostProcess.checkState() # Update of the spacing x parameter for the Post Processed Segmentation step def onSxValueChanged(self): self.Logic.parameters.setSx(self.sx.value) # Update of the spacing y parameter for the Post Processed Segmentation step def 
onSyValueChanged(self): self.Logic.parameters.setSy(self.sy.value) # Update of the spacing z parameter for the Post Processed Segmentation step def onSzValueChanged(self): self.Logic.parameters.setSz(self.sz.value) # Enable/Disable the label number value in the UI def onSelectValueLabelNumber(self): self.label_ValueLabelNumber.enabled = self.LabelState.checkState() self.ValueLabelNumber.enabled = self.LabelState.checkState() # Update of the label parameter for the Post Processed Segmentation step def onLabelNumberValueChanged(self): self.Logic.parameters.setLabelNumber(self.ValueLabelNumber.value) # # Generate Mesh Parameters # def onOverwriteFilesGenParaMesh(self): # If the overwrite option for GenParaMesh is unchecked if not self.OverwriteGenParaMesh.checkState(): # If the overwrite option for the previous step is checked, the overwrite option need to be checked for this step too if self.OverwriteSegPostProcess.checkState(): self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked) # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # If the overwrite option for GenParaMesh is checked else: # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>Applying the overwrite option to Generate Mesh Parameters step will also apply to the next steps</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Check the overwrite option for the next step self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) # Update of the overwrite boolean for the Generate Mesh 
Parameters step self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState()) # Update of the iterations parameter for the Generate Mesh Parameters step def onNumberofIterationsValueChanged(self): self.Logic.parameters.setNumberofIterations(self.NumberofIterations.value) # # Parameters to SPHARM Mesh # def onOverwriteFilesParaToSPHARMMesh(self): # If the overwrite option for ParaToSPHARMMesh is unchecked if not self.OverwriteParaToSPHARMMesh.checkState(): # If the overwrite option for a previous step is checked, the overwrite option need to be checked for this step too if self.OverwriteSegPostProcess.checkState() or self.OverwriteGenParaMesh.checkState(): self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked) # Message for the user messageBox = ctk.ctkMessageBox() messageBox.setWindowTitle(' /!\ WARNING /!\ ') messageBox.setIcon(messageBox.Warning) messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>") messageBox.setStandardButtons(messageBox.Ok) messageBox.exec_() # Update of the overwrite boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState()) # Update of the sub-division parameter for the Parameters to SPHARM Mesh step def onSubdivLevelValueChanged(self): self.Logic.parameters.setSubdivLevelValue(self.SubdivLevelValue.value) # Update of the SPHARM degree parameter for the Parameters to SPHARM Mesh step def onSPHARMDegreeValueChanged(self): self.Logic.parameters.setSPHARMDegreeValue(self.SPHARMDegreeValue.value) # Update of the theta iteration parameter for the Parameters to SPHARM Mesh step def onThetaIterationValueChanged(self): self.Logic.parameters.setThetaIterationValue(self.thetaIterationValue.value) # Update of the phi iteration parameter for the Parameters to SPHARM Mesh step def onPhiIterationValueChanged(self): 
self.Logic.parameters.setPhiIterationValue(self.phiIterationValue.value) # Update of the medial mesh boolean for the Parameters to SPHARM Mesh step def onMedialMeshValueChanged(self): self.Logic.parameters.setMedialMesh(self.medialMesh.checkState()) # # Advanced Post Processed Segmentation # def onSelectGaussianVariance(self): # Update of the gaussian variance boolean for the Post Processed Segmentation step self.Logic.parameters.setGaussianFiltering(self.GaussianFiltering.checkState()) # Enable/Disable the gaussian variance parameters in the UI self.label_VarianceX.enabled = self.GaussianFiltering.checkState() self.VarianceX.enabled = self.GaussianFiltering.checkState() self.label_VarianceY.enabled = self.GaussianFiltering.checkState() self.VarianceY.enabled = self.GaussianFiltering.checkState() self.label_VarianceZ.enabled = self.GaussianFiltering.checkState() self.VarianceZ.enabled = self.GaussianFiltering.checkState() # Update of the variance x parameter for the Post Processed Segmentation step def onVarianceXValueChanged(self): self.Logic.parameters.setVarianceX(self.VarianceX.value) # Update of the variance y parameter for the Post Processed Segmentation step def onVarianceYValueChanged(self): self.Logic.parameters.setVarianceY(self.VarianceY.value) # Update of the variance z parameter for the Post Processed Segmentation step def onVarianceZValueChanged(self): self.Logic.parameters.setVarianceZ(self.VarianceZ.value) # # Advanced Parameters to SPHARM Mesh # def onEnableRegTemplate(self): # Update of the registration template boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setUseRegTemplate(self.useRegTemplate.checkState()) # Enable/Disable the registration template path in the UI self.label_regTemplate.enabled = self.useRegTemplate.checkState() self.regTemplate.enabled = self.useRegTemplate.checkState() # Update of the registration template path for the Parameters to SPHARM Mesh step def onRegTemplateValueChanged(self): 
self.Logic.parameters.setRegTemplate(self.regTemplate.currentPath) def onEnableFlipTemplate(self): # Update of the flip template boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setUseFlipTemplate(self.useFlipTemplate.checkState()) # Enable/Disable the flip template path in the UI self.label_flipTemplate.enabled = self.useFlipTemplate.checkState() self.flipTemplate.enabled = self.useFlipTemplate.checkState() # Update of the flip template path for the Parameters to SPHARM Mesh step def onFlipTemplateValueChanged(self): self.Logic.parameters.setFlipTemplate(self.flipTemplate.currentPath) # Update of the flip parameter for the Parameters to SPHARM Mesh step def onChoiceOfFlipValueChanged(self): self.Logic.parameters.setChoiceOfFlip(self.choiceOfFlip.currentIndex) def onEnableFlipChoices(self): # Update of the flip option boolean for the Parameters to SPHARM Mesh step self.Logic.parameters.setSameFlipForAll(self.sameFlipForAll.checkState()) self.choiceOfFlip.enabled = self.sameFlipForAll.checkState() self.tableWidget_ChoiceOfFlip.enabled = not self.sameFlipForAll.checkState() if not self.sameFlipForAll.checkState(): self.fillTableForFlipOptions() # # Apply CLIs # def onApplyButton(self): # Run workflow if not self.Logic.Node.IsBusy(): # Check the registration template file if self.useRegTemplate.checkState(): if not os.path.exists(self.regTemplate.currentPath) or not self.regTemplate.currentPath.endswith(".vtk"): slicer.util.errorDisplay("Invalid registration template file in Advanced Parameters to SPHARM Mesh Tab") return # Check the flip template file if self.useFlipTemplate.checkState(): if not os.path.exists(self.flipTemplate.currentPath) or not self.flipTemplate.currentPath.endswith(".coef"): slicer.util.errorDisplay("Invalid flip template file in Advanced Parameters to SPHARM Mesh Tab") return # Empty the output folders if the overwrite options are checked self.Logic.cleanOutputFolders() # Change the apply buttons logging.info('Widget: Running 
ShapeAnalysisModule') self.ApplyButton.setText("Cancel") self.Logic.addObserver(self.Logic.Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onLogicModified) self.Logic.Node.SetStatus(self.Logic.Node.Scheduled) self.Logic.allCaseStartTime = time.time() self.Logic.ShapeAnalysisCases() # Cancel Workflow else: logging.info("Widget: Cancelling ShapeAnalysisModule") self.ApplyButton.setEnabled(False) self.Logic.Cancel() def onLogicModified(self, logic_node, event): status = logic_node.GetStatusString() logging.info('-- %s : ShapeAnalysisModule', status) # if not busy (completed, error, cancelled) if not logic_node.IsBusy(): self.Logic.removeObserver(logic_node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onLogicModified) # Create Error Message if status == 'Completed with errors' or status == 'Cancelled': logging.error(self.Logic.ErrorMessage) qt.QMessageBox.critical(slicer.util.mainWindow(), 'ShapeAnalysisModule', self.Logic.ErrorMessage) elif status == 'Completed': self.configurationVisualization() # Empty lists self.Logic.pipeline = {} self.Logic.completed = {} # Change the apply buttons self.ApplyButton.setEnabled(True) self.ApplyButton.setText("Run ShapeAnalysisModule") # if running, create some progress bars for each cases elif status == 'Running': self.Logic.ProgressBar.show() if self.progressbars_layout: self.CLIProgressBars.hide() self.CLIProgressBars = ctk.ctkCollapsibleGroupBox() self.CLIProgressBars.setTitle('Detail') self.progress_layout.addWidget(self.CLIProgressBars) self.progressbars_layout = qt.QVBoxLayout(self.CLIProgressBars) for i in range(len(self.Logic.pipeline)): self.progressbars_layout.addWidget(self.Logic.pipeline[i].ProgressBar) # Function to update the checkable comboBox and the table's checkBoxes in the visualization tab according of the check of one checkBox in the checkable comboBox def onCheckableComboBoxValueChanged(self): currentText = self.CheckableComboBox_visualization.currentText currentIndex 
= self.CheckableComboBox_visualization.currentIndex currentItem = self.CheckableComboBox_visualization.model().item(currentIndex, 0) # ******* Update the CheckableComboBox ******* # # Check/Uncheck the "Case i: case_name [..]" checkboxes in the checkacle comboBox if currentText == "All Models": self.checkedItems("SPHARM", currentItem.checkState()) elif currentText == "All SPHARM Models": self.checkedItems("SPHARM Models", currentItem.checkState()) elif currentText == "All SPHARM Ellipse Aligned Models": self.checkedItems("SPHARM Ellipse Aligned Models", currentItem.checkState()) elif currentText == "All SPHARM Medial Meshes": self.checkedItems("SPHARM Medial Meshes", currentItem.checkState()) elif currentText == "All SPHARM Procrustes Aligned Models": self.checkedItems("SPHARM Procrustes Aligned Models", currentItem.checkState()) # Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox self.checkedAllItems() self.CheckableComboBox_visualization.blockSignals(False) # ******* Update the checkboxes in the table ******* # for row in range(0, self.tableWidget_visualization.rowCount): actionOnCheckBox = False label = self.tableWidget_visualization.cellWidget(row, 0) outputRootname = label.text if currentText == "All Models": actionOnCheckBox = True elif currentText == "All SPHARM Models": if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True elif currentText == "All SPHARM Ellipse Aligned Models": if not outputRootname.find("SPHARM_ellalign") == -1: actionOnCheckBox = True elif currentText == "All SPHARM Medial Meshes": if not outputRootname.find("SPHARMMedialMesh") == -1: actionOnCheckBox = True elif currentText == "All SPHARM Procrustes Aligned Models": if not outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True else: for inputFilename in self.Logic.InputCases: inputRootname = 
inputFilename.split('/')[-1].split('.')[0] if not currentText.find(inputRootname) == -1: if not currentText.find("SPHARM Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Ellipse Aligned Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_ellalign") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Medial Meshes") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARMMedialMesh") == -1: actionOnCheckBox = True elif not currentText.find("SPHARM Procrustes Aligned Models") == -1: if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_procalign") == -1: actionOnCheckBox = True # check/uncheck the checkBox at (row,1) if actionOnCheckBox: widget = self.tableWidget_visualization.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] checkBox.blockSignals(True) item = self.CheckableComboBox_visualization.model().item(currentIndex, 0) if item.checkState(): checkBox.setChecked(True) else: checkBox.setChecked(False) checkBox.blockSignals(False) # Function to update the checkboxes in the checkbable comboBox in the visualization tab according of the check of a checBox in the visualization tab def onCheckBoxTableValueChanged(self): self.CheckableComboBox_visualization.blockSignals(True) list = self.CheckableComboBox_visualization.model() table = self.tableWidget_visualization allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") # If == -1 "All SPHARM Medial Meshes" checkBox doesn't exist allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") # If == -1 "All 
SPHARM Procrustes Aligned Models" checkBox doesn't exist for i in range(len(self.Logic.InputCases)): allCaseSPHARMModelsChecked = True allCaseSPHARMEllalignModelsChecked = True allCaseSPHARMMedialMeshesChecked = True allCaseSPHARMProcrustesAlignedModelsChecked = True inputRootname = self.Logic.InputCases[i].split('/')[-1].split('.')[0] for row in range(0,table.rowCount): label = table.cellWidget(row, 0) outputRootname = label.text if not outputRootname.find(inputRootname) == -1: widget = table.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] if not checkBox.checkState(): if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1: allCaseSPHARMModelsChecked = False if not outputRootname.find("SPHARM_ellalign") == -1: allCaseSPHARMEllalignModelsChecked = False if not allSPHARMMesdialMeshesIndex == -1: if not outputRootname.find("SPHARMMedialMesh") == -1: allCaseSPHARMMedialMeshesChecked = False if not allSPHARMProcrustesAlignedModelsIndex == -1: if not outputRootname.find("SPHARM_procalign") == -1: allCaseSPHARMProcrustesAlignedModelsChecked = False # Check/uncheck checbox case according of the checkbox in the table text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Models" self.checkedCaseItem(text, allCaseSPHARMModelsChecked) text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Ellipse Aligned Models" self.checkedCaseItem(text, allCaseSPHARMEllalignModelsChecked) if not allSPHARMMesdialMeshesIndex == -1: text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Medial Meshes" self.checkedCaseItem(text, allCaseSPHARMMedialMeshesChecked) if not allSPHARMProcrustesAlignedModelsIndex == -1: text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Procrustes Aligned Models" self.checkedCaseItem(text, allCaseSPHARMProcrustesAlignedModelsChecked) # Check/Uncheck the "All [..]" checkboxes in the checkacle 
comboBox self.checkedAllItems() self.CheckableComboBox_visualization.blockSignals(False) # Visualization of the SPHARM Mesh outputs in Shape Population Viewer def onSPHARMMeshesVisualizationInSPV(self): # Creation of a CSV file to load the vtk files in ShapePopulationViewer filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv' self.Logic.creationCSVFileForSPV(self.tableWidget_visualization, filePathCSV) # Creation of the parameters of SPV parameters = {} parameters["CSVFile"] = filePathCSV # If a binary of SPV has been installed if hasattr(slicer.modules, 'shapepopulationviewer'): SPV = slicer.modules.shapepopulationviewer # If SPV has been installed via the Extension Manager elif hasattr(slicer.modules, 'launcher'): SPV = slicer.modules.launcher # Launch SPV slicer.cli.run(SPV, None, parameters, wait_for_completion=True) # Deletion of the CSV files in the Slicer temporary directory if os.path.exists(filePathCSV): os.remove(filePathCSV) # Function to fill the flip options table for all the SPHARM mesh outputs # - Column 0: filename of the input files # - Column 1: comboBox with the flip corresponding to the output file def fillTableForFlipOptions(self): table = self.tableWidget_ChoiceOfFlip row = 0 for basename in self.Logic.InputCases: table.setRowCount(row + 1) # Column 0: rootname = basename.split('/')[-1].split('.')[0] labelVTKFile = qt.QLabel(rootname) labelVTKFile.setAlignment(0x84) table.setCellWidget(row, 0, labelVTKFile) # Column 1: widget = qt.QWidget() layout = qt.QHBoxLayout(widget) comboBox = qt.QComboBox() comboBox.addItems(['No Flip', 'Flip Along Axis of x and y', 'Flip Along Axis of y and z', 'Flip Along Axis of x and z', 'Flip Along Axis of x', 'Flip Along Axis of y', 'Flip Along Axis of x, y and z', 'Flip Along Axis of z', 'All']) comboBox.setCurrentIndex(self.choiceOfFlip.currentIndex) layout.addWidget(comboBox) layout.setAlignment(0x84) layout.setContentsMargins(0, 0, 0, 0) widget.setLayout(layout) 
table.setCellWidget(row, 1, widget) row = row + 1 # Function to configure the checkable comboBox and the table of the visualization tab def configurationVisualization(self): # Configuration of the checkable comboBox checkableComboBox = self.CheckableComboBox_visualization # clean the checkable comboBox list = checkableComboBox.model() list.clear() # add items according of the SPHARM Mesh computed by ParaToSPHARMMesh checkableComboBox.blockSignals(True) checkableComboBox.addItem("All Models") checkableComboBox.addItem("All SPHARM Models") checkableComboBox.addItem("All SPHARM Ellipse Aligned Models") if self.medialMesh.checkState(): checkableComboBox.addItem("All SPHARM Medial Meshes") if self.useRegTemplate.checkState(): checkableComboBox.addItem("All SPHARM Procrustes Aligned Models") # Fill the checkable comboBox for i in range(len(self.Logic.InputCases)): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Models") checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Ellipse Aligned Models") if self.medialMesh.checkState(): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Medial Meshes") if self.useRegTemplate.checkState(): checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Procrustes Aligned Models") checkableComboBox.blockSignals(False) # Configuration of the table # column 0: filename of the SPHARM Meshes generated by ParaToSPHARMMesh # column 1: checkbox that allows to the user to select what output he wants to display in Shape Population Viewer table = self.tableWidget_visualization outputDirectory = self.GroupProjectOutputDirectory.directory.encode('utf-8') SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/" row = 0 for filename in os.listdir(SPHARMMeshOutputDirectory): 
if filename.endswith(".vtk") and not filename.endswith("_para.vtk") and not filename.endswith("SPHARMMedialAxis.vtk"): table.setRowCount(row + 1) # Column 0: labelVTKFile = qt.QLabel(os.path.splitext(filename)[0]) labelVTKFile.setAlignment(0x84) table.setCellWidget(row, 0, labelVTKFile) # Column 1: widget = qt.QWidget() layout = qt.QHBoxLayout(widget) checkBox = qt.QCheckBox() layout.addWidget(checkBox) layout.setAlignment(0x84) layout.setContentsMargins(0, 0, 0, 0) widget.setLayout(layout) table.setCellWidget(row, 1, widget) checkBox.connect('stateChanged(int)', self.onCheckBoxTableValueChanged) row = row + 1 # Functions to update the checkable comboBox in the visualization tab # Check/Uncheck checkBoxes with the label 'text' def checkedItems(self, text, checkState): list = self.CheckableComboBox_visualization.model() for i in range(1, list.rowCount()): item = list.item(i, 0) if not item.text().find(text) == -1: item.setCheckState(checkState) # Check/Uncheck "All [..]" checkBoxes in the checkable comboBox def checkedAllItems(self): list = self.CheckableComboBox_visualization.model() allIndex = self.CheckableComboBox_visualization.findText("All Models") allItem = list.item(allIndex, 0) allSPHARMIndex = self.CheckableComboBox_visualization.findText("All SPHARM Models") allSPHARMItem = list.item(allSPHARMIndex, 0) allSPHARMEllalignIndex = self.CheckableComboBox_visualization.findText("All SPHARM Ellipse Aligned Models") allSPHARMEllalignItem = list.item(allSPHARMEllalignIndex, 0) allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") if not allSPHARMMesdialMeshesIndex == -1: allSPHARMMesdialMeshesItem = list.item(allSPHARMMesdialMeshesIndex, 0) allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") if not allSPHARMProcrustesAlignedModelsIndex == -1: allSPHARMProcrustesAlignedModelsItem = list.item(allSPHARMProcrustesAlignedModelsIndex, 0) # 
Check/Uncheck "All SPHARM Models" checkBox self.checkedAllItem("- SPHARM Models", allSPHARMItem) # Check/Uncheck "All SPHARM Ellipse Aligned Models" checkBox self.checkedAllItem("- SPHARM Ellipse Aligned Models", allSPHARMEllalignItem) # Check/Uncheck "All SPHARM Medial Mesh" checkBox if not allSPHARMMesdialMeshesIndex == -1: self.checkedAllItem("- SPHARM Medial Meshes", allSPHARMMesdialMeshesItem) # Check/Uncheck "All SPHARM Procrustes Aligned Models" checkBox if not allSPHARMProcrustesAlignedModelsIndex == -1: self.checkedAllItem("- SPHARM Procrustes Aligned Models", allSPHARMProcrustesAlignedModelsItem) # Check/Uncheck "All Models" checkBox if allSPHARMEllalignItem.checkState() and allSPHARMItem.checkState(): if allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1: allItem.setCheckState(qt.Qt.Checked) return elif not allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMMesdialMeshesItem.checkState() and allSPHARMProcrustesAlignedModelsItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return elif not allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMMesdialMeshesItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return elif allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1: if allSPHARMProcrustesAlignedModelsItem.checkState(): allItem.setCheckState(qt.Qt.Checked) return allItem.setCheckState(qt.Qt.Unchecked) # Check/Uncheck "Case i: case_name - SPHARM [..]" checkBox in the checkable comboBox def checkedCaseItem(self, text, doCheck): list = self.CheckableComboBox_visualization.model() item = list.findItems(text)[0] if doCheck: item.setCheckState(qt.Qt.Checked) else: item.setCheckState(qt.Qt.Unchecked) # Check/Uncheck "All [..]" (except "All Models") checkBox in the checkable comboBox def checkedAllItem(self, text, item): if self.areAllCasesChecked(text): item.setCheckState(qt.Qt.Checked) else: 
item.setCheckState(qt.Qt.Unchecked) # Specify if all the "Case i: case_name - SPHARM [..]" checkBoxes of one type of Model are checked def areAllCasesChecked(self, text): list = self.CheckableComboBox_visualization.model() isChecked = True for i in range(3, list.rowCount()): item = list.item(i, 0) if not item.text().find(text) == -1: if not item.checkState(): isChecked = False return isChecked def clearFlipOptionsTable(self): table = self.tableWidget_ChoiceOfFlip table.clear() table.setColumnCount(2) table.setHorizontalHeaderLabels([' Files ', ' Choice of Flip ']) table.setColumnWidth(0, 400) horizontalHeader = table.horizontalHeader() horizontalHeader.setStretchLastSection(False) _setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch) _setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents) table.verticalHeader().setVisible(False) # # ShapeAnalysisModuleParameters # class ShapeAnalysisModuleParameters(object): def __init__(self): # self.waitForCompletion = False # Group Project IO self.inputDirectory = " " self.outputDirectory = " " self.debug = False # Post Processed Segmentation self.OverwriteSegPostProcess = False self.RescaleSegPostProcess = True self.sx = 0.5 self.sy = 0.5 self.sz = 0.5 self.labelNumber = 0 # Generate Mesh Parameters self.OverwriteGenParaMesh = False self.NumberofIterations = 1000 # Parameters to SPHARM Mesh self.OverwriteParaToSPHARMMesh = False self.SubdivLevelValue = 10 self.SPHARMDegreeValue = 15 self.thetaIterationValue = 100 self.phiIterationValue = 100 self.medialMesh = False self.tableWidget_ChoiceOfFlip = None # Advanced Post Processed Segmentation self.GaussianFiltering = False self.VarianceX = 10 self.VarianceY = 10 self.VarianceZ = 10 # Advanced Parameters to SPHARM Mesh self.useRegTemplate = False self.regTemplate = " " self.useFlipTemplate = False self.flipTemplate = " " self.choiceOfFlip = 0 self.sameFlipForAll = True def setWaitForCompletion(self, bool): self.waitForCompletion = bool def 
setInputDirectory(self, path): self.inputDirectory = path def setOutputDirectory(self, path): self.outputDirectory = path def setDebug(self, bool): self.debug = bool def setOverwriteSegPostProcess(self, bool): self.OverwriteSegPostProcess = bool def setRescaleSegPostProcess(self, bool): self.RescaleSegPostProcess = bool def setSx(self, value): self.sx = value def setSy(self, value): self.sy = value def setSz(self, value): self.sz = value def setLabelNumber(self, value): self.labelNumber = value def setOverwriteGenParaMesh(self, bool): self.OverwriteGenParaMesh = bool def setNumberofIterations(self, value): self.NumberofIterations = value def setOverwriteParaToSPHARMMesh(self, bool): self.OverwriteParaToSPHARMMesh = bool def setSubdivLevelValue(self, value): self.SubdivLevelValue = value def setSPHARMDegreeValue(self, value): self.SPHARMDegreeValue = value def setThetaIterationValue(self, value): self.thetaIterationValue = value def setPhiIterationValue(self, value): self.phiIterationValue = value def setMedialMesh(self, bool): self.medialMesh = bool def setTableForChoiceOfFlip(self, table): self.tableWidget_ChoiceOfFlip = table def setGaussianFiltering(self, bool): self.GaussianFiltering = bool def setVarianceX(self, value): self.VarianceX = value def setVarianceY(self, value): self.VarianceY = value def setVarianceZ(self, value): self.VarianceZ = value def setUseRegTemplate(self, bool): self.useRegTemplate = bool def setRegTemplate(self, path): self.regTemplate = path def setUseFlipTemplate(self, bool): self.useFlipTemplate = bool def setFlipTemplate(self, path): self.flipTemplate = path def setChoiceOfFlip(self, value): self.choiceOfFlip = value def setSameFlipForAll(self, bool): self.sameFlipForAll = bool # # ShapeAnalysisModuleLogic # class ShapeAnalysisModuleLogic(LogicMixin): """ Uses ScriptedLoadableModuleLogic base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def __init__(self): 
LogicMixin.__init__(self, "ShapeAnalysisModule") self.parameters = ShapeAnalysisModuleParameters() def ShapeAnalysisCases(self): # No cases if not len(self.InputCases) > 0: inputDirectory = self.parameters.inputDirectory self.ErrorMessage = "No cases found in " + inputDirectory self.Node.SetStatus(self.Node.CompletedWithErrors) return -1 # Create pipelines else: logging.info('%d case(s) found', len(self.InputCases)) # Init for i in range(len(self.InputCases)): self.completed[i] = False self.pipeline[i] = ShapeAnalysisModulePipeline(i, self.InputCases[i], self.parameters) self.addObserver(self.pipeline[i].Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent, self.onPipelineModified) # Logic ready self.Node.SetStatus(self.Node.Running) # Launch Workflow self.startPipeline(0) return 0 # Empty the output folders if the overwrite option is checked def cleanOutputFolders(self): outputDirectory = self.parameters.outputDirectory if self.parameters.OverwriteSegPostProcess: PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" if os.path.exists(PostProcessOutputDirectory): for filename in os.listdir(PostProcessOutputDirectory): os.remove(os.path.join(PostProcessOutputDirectory, filename)) if self.parameters.OverwriteGenParaMesh: GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" if os.path.exists(GenParaMeshOutputDirectory): for filename in os.listdir(GenParaMeshOutputDirectory): os.remove(os.path.join(GenParaMeshOutputDirectory, filename)) if self.parameters.OverwriteParaToSPHARMMesh: SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" if os.path.exists(SPHARMMeshOutputDirectory): for filename in os.listdir(SPHARMMeshOutputDirectory): os.remove(os.path.join(SPHARMMeshOutputDirectory, filename)) # Function to create a CSV file containing all the SPHARM mesh output files # that the user wants to display in ShapePopultaionViewer def creationCSVFileForSPV(self, table, filepathCSV): # Creation of a CSV file with a 
header 'VTK Files' file = open(filepathCSV, 'w') cw = csv.writer(file, delimiter=',') cw.writerow(['VTK Files']) # Add the filepath of the vtk file checked in the table outputDirectory = self.parameters.outputDirectory SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/" # Add the path of the vtk files if the users selected it for row in range(0, table.rowCount): # check the checkBox widget = table.cellWidget(row, 1) tuple = widget.children() checkBox = tuple[1] if checkBox.isChecked(): # Recovery of the vtk filename qlabel = table.cellWidget(row, 0) vtkRootname = qlabel.text VTKfilepath = SPHARMMeshOutputDirectory + vtkRootname + ".vtk" if os.path.exists(VTKfilepath): cw.writerow([VTKfilepath]) file.close() # # ShapeAnalysisModulePipeline # class ShapeAnalysisModulePipeline(PipelineMixin): def __init__(self, pipelineID, CaseInput, interface): PipelineMixin.__init__(self, pipelineID, CaseInput, interface) self.interface = interface def setupSkipCLIs(self): self.skip_meshToLabelMap = False self.skip_segPostProcess = False self.skip_genParaMesh = False self.skip_paraToSPHARMMesh = False outputDirectory = self.interface.outputDirectory # Skip MeshToLabelMap? if not self.inputExtension == "vtk" and not self.inputExtension == "vtp": self.skip_meshToLabelMap = True else: MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap" MeshToLabelMapOutputFilepath = MeshToLabelMapOutputDirectory + "/" + self.inputRootname + ".nrrd" if os.path.exists(MeshToLabelMapOutputFilepath): self.inputExtension = "nrrd" self.skip_meshToLabelMap = True # If MeshToLabelMap is not skipped, do not skip the next CLIs: SegPostProcess, GenParaMesh and ParaToSPHARMMesh if self.skip_meshToLabelMap == False: return # Skip SegPostProcess ? 
if not self.interface.OverwriteSegPostProcess: PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" PostProcessOutputFilepath = PostProcessOutputDirectory + "/" + self.inputRootname + "_pp.nrrd" if os.path.exists(PostProcessOutputFilepath): self.skip_segPostProcess = True # If SegPostProcess is not skip, do not skip the next CLIs: GenParaMesh and ParaToSPHARMMesh if self.skip_segPostProcess == False: return # Skip GenParaMesh ? if not self.interface.OverwriteGenParaMesh: GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" ParaOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_para.vtk" SurfOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_surf.vtk" if os.path.exists(ParaOutputFilepath) and os.path.exists(SurfOutputFilepath): self.skip_genParaMesh = True # If GenParaMesh is not skipped, do not skip the next CLI: ParaToSPHARMMesh if self.skip_genParaMesh == False: return # Skip ParaToSPHARMMesh ? if not self.interface.OverwriteParaToSPHARMMesh: SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" SPHARMMeshRootname = self.inputRootname + "_pp_surf" if os.path.exists(SPHARMMeshOutputDirectory): for file in os.listdir(SPHARMMeshOutputDirectory): if not file.find(SPHARMMeshRootname) == -1: self.skip_paraToSPHARMMesh = True def setup(self): # Initialization of global variables self.setupGlobalVariables() self.setupSkipCLIs() inputDirectory = self.interface.inputDirectory outputDirectory = self.interface.outputDirectory ## Mesh To Label Map: Transform model in label map cli_nodes = list() # list of the nodes used in the Mesh to Label Map step cli_dirnames = list() # list of the directory pathes where the nodes used in the Mesh to Label Map step are stored MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap" MeshToLabelMapOutputFilename = self.inputRootname + ".nrrd" MeshToLabelMapOutputFilepath = os.path.join(MeshToLabelMapOutputDirectory, 
MeshToLabelMapOutputFilename) if not self.skip_meshToLabelMap: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} model_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'ModelFile') cli_parameters["mesh"] = model_input_node meshtolabelmap_output_node = MRMLUtility.createNewMRMLNode(self.inputRootname, slicer.vtkMRMLLabelMapVolumeNode()) cli_parameters["labelMap"] = meshtolabelmap_output_node cli_parameters["spacingVec"] = "0.1,0.1,0.1" self.inputExtension = "nrrd" self.setupModule(slicer.modules.meshtolabelmap, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : LabelMap if not os.path.exists(MeshToLabelMapOutputDirectory): os.makedirs(MeshToLabelMapOutputDirectory) cli_nodes.append(model_input_node) cli_nodes.append(meshtolabelmap_output_node) cli_dirnames.append(inputDirectory) cli_dirnames.append(MeshToLabelMapOutputDirectory) self.setupNode(0, cli_nodes, cli_dirnames, [False, True], [True, True]) else: if os.path.exists(MeshToLabelMapOutputFilepath): # Setup of the nodes which will be used by the next CLI meshtolabelmap_output_node = MRMLUtility.loadMRMLNode(self.inputRootname, MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename, 'LabelMap') cli_nodes.append(meshtolabelmap_output_node) cli_dirnames.append(MeshToLabelMapOutputDirectory) self.setupNode(0, cli_nodes, cli_dirnames, [False], [True]) ## Post Processed Segmentation cli_nodes = list() # list of the nodes used in the Post Processed Segmentation step cli_dirnames = list() # list of the directory pathes where the nodes used in the Post Processed Segmentation step are stored PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess" PostProcessOutputRootname = self.inputRootname + "_pp" PostProcessOutputFilename = self.inputRootname + "_pp.nrrd" if not self.skip_segPostProcess: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} # IF Mesh To Label Map has been 
skipped AND the input given was already a label map if self.skip_meshToLabelMap and not os.path.exists(MeshToLabelMapOutputFilepath): PossProcessInputDirectory = inputDirectory labelmap_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'LabelMap') # ELSE the input given was a model which has been transformed by MeshToLabelMap and store in the folder LabelMap else: labelmap_input_node = meshtolabelmap_output_node PossProcessInputDirectory = MeshToLabelMapOutputDirectory cli_parameters["fileName"] = labelmap_input_node pp_output_node = MRMLUtility.createNewMRMLNode(PostProcessOutputRootname, slicer.vtkMRMLLabelMapVolumeNode()) cli_parameters["outfileName"] = pp_output_node.GetID() if self.interface.RescaleSegPostProcess: cli_parameters["scaleOn"] = True cli_parameters["spacing_vect"] = str(self.interface.sx) + "," + str(self.interface.sy) + "," + str(self.interface.sz) cli_parameters["label"] = self.interface.labelNumber if self.interface.debug: cli_parameters["debug"] = True # Advanced parameters if self.interface.GaussianFiltering: cli_parameters["gaussianOn"] = True cli_parameters["variance_vect"] = str(self.interface.VarianceX) + "," + str(self.interface.VarianceY) + "," + str(self.interface.VarianceZ) self.setupModule(slicer.modules.segpostprocessclp, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : Step1_SegPostProcess if not os.path.exists(PostProcessOutputDirectory): os.makedirs(PostProcessOutputDirectory) cli_nodes.append(labelmap_input_node) cli_nodes.append(pp_output_node) cli_dirnames.append(PossProcessInputDirectory) cli_dirnames.append(PostProcessOutputDirectory) self.setupNode(1, cli_nodes, cli_dirnames, [False,True], [True,True]) else: # Setup of the nodes which will be used by the next CLI pp_output_node = MRMLUtility.loadMRMLNode(PostProcessOutputRootname, PostProcessOutputDirectory, PostProcessOutputFilename, 'LabelMap') cli_nodes.append(pp_output_node) 
cli_dirnames.append(PostProcessOutputDirectory) self.setupNode(1, cli_nodes, cli_dirnames, [False], [True]) ## Generate Mesh Parameters cli_nodes = list() # list of the nodes used in the Generate Mesh Parameters step cli_dirnames = list() # list of the directory pathes where the nodes used in the Generate Mesh Parameters step are stored GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh" GenParaMeshOutputParaRootname = PostProcessOutputRootname + "_para" GenParaMeshOutputSurfRootname = PostProcessOutputRootname + "_surf" GenParaMeshOutputParaFilename = PostProcessOutputRootname + "_para.vtk" GenParaMeshOutputSurfFilename = PostProcessOutputRootname + "_surf.vtk" if not self.skip_genParaMesh: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} cli_parameters["infile"] = pp_output_node para_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputParaRootname, slicer.vtkMRMLModelNode()) cli_parameters["outParaName"] = para_output_model surfmesh_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputSurfRootname, slicer.vtkMRMLModelNode()) cli_parameters["outSurfName"] = surfmesh_output_model cli_parameters["numIterations"] = self.interface.NumberofIterations if self.interface.debug: cli_parameters["debug"] = True self.setupModule(slicer.modules.genparameshclp, cli_parameters) # Setup of the nodes created by the CLI # Creation of a folder in the output folder : Step2_GenParaMesh if not os.path.exists(GenParaMeshOutputDirectory): os.makedirs(GenParaMeshOutputDirectory) cli_nodes.append(para_output_model) cli_nodes.append(surfmesh_output_model) cli_dirnames.append(GenParaMeshOutputDirectory) cli_dirnames.append(GenParaMeshOutputDirectory) self.setupNode(2, cli_nodes, cli_dirnames, [True,True], [True,True]) else: # Setup of the nodes which will be used by the next CLI para_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputParaRootname, GenParaMeshOutputDirectory, GenParaMeshOutputParaFilename, 'ModelFile') 
surfmesh_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputSurfRootname, GenParaMeshOutputDirectory, GenParaMeshOutputSurfFilename, 'ModelFile') cli_nodes.append(para_output_model) cli_nodes.append(surfmesh_output_model) cli_dirnames.append(GenParaMeshOutputDirectory) cli_dirnames.append(GenParaMeshOutputDirectory) self.setupNode(2, cli_nodes, cli_dirnames, [False, False], [True, True]) ## Parameters to SPHARM Mesh cli_nodes = list() # list of the nodes used in the Parameters To SPHARM Mesh step cli_dirnames = list() # list of the directory pathes where the nodes used in the Parameters To SPHARM Mesh step are stored SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh" if not self.skip_paraToSPHARMMesh: # Search of the flip to apply: # 1 = flip along axes of x &amp; y, # 2 = flip along y &amp; z, # 3 = flip along x &amp; z # 4 = flip along x, # 5 = flip along y, # 6 = flip along x &amp; y &amp; z, # 7 = flip along z where y is the smallest, x is the second smallest and z is the long axis of the ellipsoid # 8 = All the flips if not self.interface.sameFlipForAll: # Recovery of the flip chosen by the user row = self.pipelineID widget = self.interface.tableWidget_ChoiceOfFlip.cellWidget(row, 1) tuple = widget.children() comboBox = qt.QComboBox() comboBox = tuple[1] flipIndexToApply = comboBox.currentIndex pass else: flipIndexToApply = self.interface.choiceOfFlip # Only one flip to apply if flipIndexToApply < 8: L = [1] # All the flips to apply else: L = range(1,8) for i in L: # Setup of the parameters of the CLI self.ID += 1 cli_parameters = {} cli_parameters["inParaFile"] = para_output_model cli_parameters["inSurfFile"] = surfmesh_output_model # Creation of a folder in the output folder : Step3_ParaToSPHARMMesh if not os.path.exists(SPHARMMeshOutputDirectory): os.makedirs(SPHARMMeshOutputDirectory) if flipIndexToApply < 8: SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + GenParaMeshOutputSurfRootname cli_parameters["outbase"] = 
SPHARMMeshRootname # For each flip creation of an output filename else: flipName = ['AlongXY', 'AlongYZ', 'AlongXZ', 'AlongX', 'AlongY', 'AlongXYZ', 'AlongZ'] SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + self.inputRootname + "_flip" + flipName[i - 1] + "_pp_surf" cli_parameters["outbase"] = SPHARMMeshRootname cli_parameters["subdivLevel"] = self.interface.SubdivLevelValue cli_parameters["spharmDegree"] = self.interface.SPHARMDegreeValue cli_parameters["thetaIteration"] = self.interface.thetaIterationValue cli_parameters["phiIteration"] = self.interface.phiIterationValue if self.interface.medialMesh: cli_parameters["medialMesh"] = True if self.interface.debug: cli_parameters["debug"] = True # Advanced parameters if self.interface.useRegTemplate: cli_parameters["regTemplateFileOn"] = True regtemplate_filepath = self.interface.regTemplate regtemplate_dir = os.path.split(regtemplate_filepath)[0] regtemplate_rootname = os.path.split(regtemplate_filepath)[1].split(".")[0] regtemplate_filename = os.path.split(regtemplate_filepath)[1] regtemplate_model = MRMLUtility.loadMRMLNode(regtemplate_rootname, regtemplate_dir, regtemplate_filename, 'ModelFile') cli_parameters["regTemplateFile"] = regtemplate_model cli_nodes.append(regtemplate_model) cli_dirnames.append(regtemplate_filepath) self.setupNode(i + 2, cli_nodes, cli_dirnames, [False], [True]) if self.interface.useFlipTemplate: cli_parameters["flipTemplateFileOn"] = True cli_parameters["flipTemplateFile"] = self.interface.flipTemplate if flipIndexToApply < 8 : cli_parameters["finalFlipIndex"] = flipIndexToApply else: cli_parameters["finalFlipIndex"] = i self.setupModule(slicer.modules.paratospharmmeshclp, cli_parameters) class ShapeAnalysisModuleWrapper: """ This class should be called from an external python script to run SPHARM-PDM method on multiple cases thanks to SlicerSALT or 3DSlicer. 
External python script (ex: SPHARM-PDM.py) should do the following: from ShapeAnalasisModule import ShapeAnalysisModuleWrapper from ConfigParser import SafeConfigParser parser = SafeConfigParser() parser.read(sys.argv[1]) #argv[1]: 'path/to/SPHARM-PDM-parameters.ini' inputDirectoryPath = parser.get('section', 'input-directory-path') [...] ShapeAnalysisModuleInstance = ShapeAnalysisModuleWrapper(inputDirectoryPath, outputDirectoryPath, [...]) ShapeAnalysisModuleInstance.startProcessing() The external python script can be run non-interactively using this command: ./SlicerSalt --no-main-window --python-script /path/to/SPHARM-PDM.py path/to/SPHARM-PDM-parameters.py """ def __init__(self, inputDirectoryPath, outputDirectoryPath, RescaleSegPostProcess, sx, sy, sz, labelNumber, GaussianFiltering, VarianceX, VarianceY, VarianceZ, numberofIterations, SubdivLevelValue, SPHARMDegreeValue, medialMesh, thetaIterationValue, phiIterationValue, useRegTemplate, regTemplate, useFlipTemplate, flipTemplate, choiceOfFlip): self.Logic = ShapeAnalysisModuleLogic() self.Logic.parameters.setWaitForCompletion(True) self.Logic.parameters.setInputDirectory(inputDirectoryPath) self.Logic.parameters.setOutputDirectory(outputDirectoryPath) self.Logic.parameters.setRescaleSegPostProcess(RescaleSegPostProcess) self.Logic.parameters.setSx(sx) self.Logic.parameters.setSy(sy) self.Logic.parameters.setSz(sz) self.Logic.parameters.setLabelNumber(labelNumber) self.Logic.parameters.setGaussianFiltering(GaussianFiltering) self.Logic.parameters.setVarianceX(VarianceX) self.Logic.parameters.setVarianceY(VarianceY) self.Logic.parameters.setVarianceZ(VarianceZ) self.Logic.parameters.setNumberofIterations(numberofIterations) self.Logic.parameters.setSubdivLevelValue(SubdivLevelValue) self.Logic.parameters.setSPHARMDegreeValue(SPHARMDegreeValue) self.Logic.parameters.setMedialMesh(medialMesh) self.Logic.parameters.setThetaIterationValue(thetaIterationValue) 
self.Logic.parameters.setPhiIterationValue(phiIterationValue) self.Logic.parameters.setUseRegTemplate(useRegTemplate) self.Logic.parameters.setRegTemplate(regTemplate) self.Logic.parameters.setUseFlipTemplate(useFlipTemplate) self.Logic.parameters.setFlipTemplate(flipTemplate) self.Logic.parameters.setChoiceOfFlip(choiceOfFlip) def startProcessing(self): # Setup the inputCases # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in os.listdir(self.Logic.parameters.inputDirectory): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) self.Logic.ShapeAnalysisCases() class ShapeAnalysisModuleTest(ScriptedLoadableModuleTest): """ This is the test case for your scripted module. Uses ScriptedLoadableModuleTest base class, available at: https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py """ def setUp(self): slicer.mrmlScene.Clear(0) self.inputRootnames = list() def runTest(self): self.setUp() self.delayDisplay('Starting the tests') self.test_ShapeAnalysisModule_completedWithoutErrors() def test_ShapeAnalysisModule_completedWithoutErrors(self): self.delayDisplay('Test 1: Run Shape Analysis Module') self.Logic = ShapeAnalysisModuleLogic() # Creation of input folder inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule' if not os.path.exists(inputDirectoryPath): os.makedirs(inputDirectoryPath) # Download the label map in the input folder input_downloads = ( ('https://data.kitware.com/api/v1/file/59945eb38d777f7d33e9c3c4/download', 'InputImage.gipl'), ) for i in range(len(input_downloads)): self.inputRootnames.append(input_downloads[i][1].split(".")[0]) self.download_files(inputDirectoryPath, input_downloads) # Creation of output folder outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' if not 
os.path.exists(outputDirectoryPath): os.makedirs(outputDirectoryPath) # Creation of a template folder templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule' if not os.path.exists(templateDirectoryPath): os.makedirs(templateDirectoryPath) else: for filename in os.listdir(templateDirectoryPath): os.remove(os.path.join(templateDirectoryPath, filename)) # Download the registration template in the template folder template_downloads = ( ('https://data.kitware.com/api/v1/file/599462f78d777f7d33e9c3e6/download', 'RegistrationTemplateForParaToSPHARMMesh.vtk'), ) self.download_files(templateDirectoryPath, template_downloads) # # Inputs of Shape Analysis Module # self.Logic.parameters.setWaitForCompletion(True) self.Logic.parameters.setInputDirectory(inputDirectoryPath) self.Logic.parameters.setOutputDirectory(outputDirectoryPath) self.Logic.parameters.setOverwriteSegPostProcess(True) self.Logic.parameters.setOverwriteGenParaMesh(True) self.Logic.parameters.setNumberofIterations(25) self.Logic.parameters.setOverwriteParaToSPHARMMesh(True) self.Logic.parameters.setMedialMesh(True) self.Logic.parameters.setUseRegTemplate(True) regTemplateFilePath = templateDirectoryPath + '/RegistrationTemplateForParaToSPHARMMesh.vtk' self.Logic.parameters.setChoiceOfFlip(3) self.Logic.parameters.setRegTemplate(regTemplateFilePath) # Setup the inputCases # Possible extensions exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"] # Search cases and add the filename to a list self.Logic.InputCases = [] for file in os.listdir(inputDirectoryPath): for ext in exts: if file.endswith(ext): self.Logic.InputCases.append(file) self.delayDisplay('Run Shape Analysis Module') self.Logic.ShapeAnalysisCases() self.assertTrue(self.comparisonOfOutputsSegPostProcess()) self.assertTrue(self.comparisonOfOutputsGenParaMesh()) self.assertTrue(self.comparisonOfOutputsParaToSPHARMMesh()) self.cleanSlicerTemporaryDirectory() 
self.delayDisplay('Tests Passed!') slicer.mrmlScene.Clear(0) def comparisonOfOutputsSegPostProcess(self): self.delayDisplay('Test 2: Comparison of the outputs generated by SegPostProcess CLI') # Checking the existence of the output directory Step1_SegPostProcess outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' SegPostProcessOutputDirectoryPath = outputDirectoryPath + '/Step1_SegPostProcess' if not os.path.exists(SegPostProcessOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59945ee08d777f7d33e9c3d3/download', 'OutputImageToCompareSegPostProcess.nrrd'), ) self.download_files(SegPostProcessOutputDirectoryPath, output_downloads) # Comparison of the Post Process Mesh Outputs self.delayDisplay('Comparison of the Post Process Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_filename = inputRootname + "_pp.nrrd" output_filenames.append(output_filename) for i in range(len(output_filenames)): volume2_filepath = os.path.join(SegPostProcessOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step1_SegPostProcess if not os.path.exists(volume2_filepath): return False # Loading the 2 volumes for comparison volume1_rootname = output_filenames[i].split(".")[0] volume2_rootname = output_downloads[i][1].split(".")[0] volume1 = MRMLUtility.loadMRMLNode(volume1_rootname, SegPostProcessOutputDirectoryPath, output_downloads[i][1], 'LabelMap') volume2 = MRMLUtility.loadMRMLNode(volume2_rootname, SegPostProcessOutputDirectoryPath, output_filenames[i], 'LabelMap') # Comparison if not self.volume_comparison(volume1, volume2): return False return True def comparisonOfOutputsGenParaMesh(self): self.delayDisplay('Test 3: Comparison of the outputs generated by GenParaMesh CLI') # Checking the existence of the output directory 
Step2_GenParaMesh outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' GenParaMeshOutputDirectoryPath = outputDirectoryPath + '/Step2_GenParaMesh' if not os.path.exists(GenParaMeshOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59af09588d777f7d33e9cf9d/download', 'OutputImageToCompareGenParaMesh_para.vtk'), ('https://data.kitware.com/api/v1/file/59945ece8d777f7d33e9c3c7/download', 'OutputImageToCompareGenParaMesh_surf.vtk'), ) self.download_files(GenParaMeshOutputDirectoryPath, output_downloads) # Comparison of the Parameters Mesh Outputs self.delayDisplay('Comparison of the Parameters Mesh Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_para_filename = inputRootname + "_pp_para.vtk" output_surf_filename = inputRootname + "_pp_surf.vtk" output_filenames.append(output_para_filename) output_filenames.append(output_surf_filename) for i in range(len(output_filenames)): model2_filepath = os.path.join(GenParaMeshOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step2_GenParaMesh if not os.path.exists(model2_filepath): return False # Loading the 2 models for comparison model1_rootname = output_downloads[i][1].split(".")[0] model2_rootname = output_filenames[i].split(".")[0] model1 = MRMLUtility.loadMRMLNode(model1_rootname, GenParaMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile') model2 = MRMLUtility.loadMRMLNode(model2_rootname, GenParaMeshOutputDirectoryPath,output_filenames[i], 'ModelFile') # Comparison if not self.polydata_comparison(model1, model2): return False return True def comparisonOfOutputsParaToSPHARMMesh(self): self.delayDisplay('Test 4: Comparison of the outputs generated by ParaToSPHARMMesh CLI') # Checking the existence of the output directory Step3_ParaToSPHARMMesh outputDirectoryPath 
= slicer.app.temporaryPath + '/OutputShapeAnalysisModule' ParaToSPHARMMeshOutputDirectoryPath = outputDirectoryPath + '/Step3_ParaToSPHARMMesh' if not os.path.exists(ParaToSPHARMMeshOutputDirectoryPath): return False # Downloading output data to compare with the ones generated by Shape Analysis Module during the tests output_downloads = ( ('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf9a/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM.vtk'), ('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf91/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_ellalign.vtk'), ('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf94/download', 'OutputImageToCompareParaToSPHARMMesh_MedialMesh.vtk'), ('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf97/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_procalign.vtk'), ) self.download_files(ParaToSPHARMMeshOutputDirectoryPath, output_downloads) # Comparison of the SPHARM Mesh Outputs self.delayDisplay('Comparison of the SPHARM Mesh Outputs') output_filenames = list() for inputRootname in self.inputRootnames: output_spharm_filename = inputRootname + "_pp_surf_SPHARM.vtk" output_ellalign_filename = inputRootname + "_pp_surf_SPHARM_ellalign.vtk" output_medialmesh_filename = inputRootname + "_pp_surf_SPHARMMedialMesh.vtk" output_procalign_filename = inputRootname + "_pp_surf_SPHARM_procalign.vtk" output_filenames.append(output_spharm_filename) output_filenames.append(output_ellalign_filename) output_filenames.append(output_medialmesh_filename) output_filenames.append(output_procalign_filename) for i in range(len(output_filenames)): model2_filepath = os.path.join(ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i]) # Checking the existence of the output files in the folder Step3_ParaToSPHARMMesh if not os.path.exists(model2_filepath): return False # Loading the 2 models for comparison model1_rootname = output_downloads[i][1].split(".")[0] model2_rootname = 
output_filenames[i].split(".")[0] model1 = MRMLUtility.loadMRMLNode(model1_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile') model2 = MRMLUtility.loadMRMLNode(model2_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i], 'ModelFile') # Comparison if not self.polydata_comparison(model1, model2): return False return True def volume_comparison(self, volume1, volume2): imageData1 = volume1.GetImageData() imageData2 = volume2.GetImageData() nbPoints1 = imageData1.GetNumberOfPoints() nbPoints2 = imageData2.GetNumberOfPoints() if not nbPoints1 == nbPoints2: return False dimension1 = imageData1.GetDimensions() dimension2 = imageData2.GetDimensions() if not dimension1 == dimension2: return False for i in range(dimension1[0]): for j in range(dimension1[1]): for k in range(dimension1[2]): if not imageData1.GetScalarComponentAsDouble(i,j,k,0) == imageData2.GetScalarComponentAsDouble(i,j,k,0): return False return True def polydata_comparison(self, model1, model2): polydata1 = model1.GetPolyData() polydata2 = model2.GetPolyData() # Number of points nbPoints1 = polydata1.GetNumberOfPoints() nbPoints2 = polydata2.GetNumberOfPoints() if not nbPoints1 == nbPoints2: return False # Polydata data1 = polydata1.GetPoints().GetData() data2 = polydata2.GetPoints().GetData() # Number of Components nbComponents1 = data1.GetNumberOfComponents() nbComponents2 = data2.GetNumberOfComponents() if not nbComponents1 == nbComponents2: return False # Points value for i in range(nbPoints1): for j in range(nbComponents1): if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]: return False # Area nbAreas1 = polydata1.GetPointData().GetNumberOfArrays() nbAreas2 = polydata2.GetPointData().GetNumberOfArrays() if not nbAreas1 == nbAreas2: return False for l in range(nbAreas1): area1 = polydata1.GetPointData().GetArray(l) area2 = polydata2.GetPointData().GetArray(l) # Name of the area nameArea1 = area1.GetName() nameArea2 = area2.GetName() if not nameArea1 == 
nameArea2: return False # Number of Components of the area nbComponents1 = area1.GetNumberOfComponents() nbComponents2 = area2.GetNumberOfComponents() if not nbComponents1 == nbComponents2: return False # Points value in the area for i in range(nbPoints1): for j in range(nbComponents1): if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]: return False return True def download_files(self, directoryPath, downloads): self.delayDisplay('Starting download') for url, name in downloads: filePath = os.path.join(directoryPath, name) if not os.path.exists(filePath) or os.stat(filePath).st_size == 0: print 'Requesting download %s from %s...\n' % (name, url) urllib.urlretrieve(url, filePath) self.delayDisplay('Finished with download') # Function to delete all the data needed for the tests def cleanSlicerTemporaryDirectory(self): # deletion of the SAM input folder inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule' if os.path.exists(inputDirectoryPath): shutil.rmtree(inputDirectoryPath) # deletion of the SAM output folder outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule' if os.path.exists(outputDirectoryPath): shutil.rmtree(outputDirectoryPath) # deletion of the SAM template folder templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule' if os.path.exists(templateDirectoryPath): shutil.rmtree(templateDirectoryPath)
bpaniagua/SPHARM-PDM
Modules/Scripted/ShapeAnalysisModule/ShapeAnalysisModule.py
Python
apache-2.0
88,012
"""Authentication for HTTP component.""" import base64 import logging from aiohttp import hdrs from aiohttp.web import middleware import jwt from homeassistant.auth.providers import legacy_api_password from homeassistant.auth.util import generate_secret from homeassistant.const import HTTP_HEADER_HA_AUTH from homeassistant.core import callback from homeassistant.util import dt as dt_util from .const import KEY_AUTHENTICATED, KEY_HASS_USER, KEY_REAL_IP _LOGGER = logging.getLogger(__name__) DATA_API_PASSWORD = "api_password" DATA_SIGN_SECRET = "http.auth.sign_secret" SIGN_QUERY_PARAM = "authSig" @callback def async_sign_path(hass, refresh_token_id, path, expiration): """Sign a path for temporary access without auth header.""" secret = hass.data.get(DATA_SIGN_SECRET) if secret is None: secret = hass.data[DATA_SIGN_SECRET] = generate_secret() now = dt_util.utcnow() return "{}?{}={}".format( path, SIGN_QUERY_PARAM, jwt.encode( { "iss": refresh_token_id, "path": path, "iat": now, "exp": now + expiration, }, secret, algorithm="HS256", ).decode(), ) @callback def setup_auth(hass, app): """Create auth middleware for the app.""" old_auth_warning = set() support_legacy = hass.auth.support_legacy if support_legacy: _LOGGER.warning("legacy_api_password support has been enabled.") trusted_networks = [] for prv in hass.auth.auth_providers: if prv.type == "trusted_networks": trusted_networks += prv.trusted_networks async def async_validate_auth_header(request): """ Test authorization header against access token. Basic auth_type is legacy code, should be removed with api_password. 
""" try: auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION).split(" ", 1) except ValueError: # If no space in authorization header return False if auth_type == "Bearer": refresh_token = await hass.auth.async_validate_access_token(auth_val) if refresh_token is None: return False request[KEY_HASS_USER] = refresh_token.user return True if auth_type == "Basic" and support_legacy: decoded = base64.b64decode(auth_val).decode("utf-8") try: username, password = decoded.split(":", 1) except ValueError: # If no ':' in decoded return False if username != "homeassistant": return False user = await legacy_api_password.async_validate_password(hass, password) if user is None: return False request[KEY_HASS_USER] = user _LOGGER.info( "Basic auth with api_password is going to deprecate," " please use a bearer token to access %s from %s", request.path, request[KEY_REAL_IP], ) old_auth_warning.add(request.path) return True return False async def async_validate_signed_request(request): """Validate a signed request.""" secret = hass.data.get(DATA_SIGN_SECRET) if secret is None: return False signature = request.query.get(SIGN_QUERY_PARAM) if signature is None: return False try: claims = jwt.decode( signature, secret, algorithms=["HS256"], options={"verify_iss": False} ) except jwt.InvalidTokenError: return False if claims["path"] != request.path: return False refresh_token = await hass.auth.async_get_refresh_token(claims["iss"]) if refresh_token is None: return False request[KEY_HASS_USER] = refresh_token.user return True async def async_validate_trusted_networks(request): """Test if request is from a trusted ip.""" ip_addr = request[KEY_REAL_IP] if not any(ip_addr in trusted_network for trusted_network in trusted_networks): return False user = await hass.auth.async_get_owner() if user is None: return False request[KEY_HASS_USER] = user return True async def async_validate_legacy_api_password(request, password): """Validate api_password.""" user = await 
legacy_api_password.async_validate_password(hass, password) if user is None: return False request[KEY_HASS_USER] = user return True @middleware async def auth_middleware(request, handler): """Authenticate as middleware.""" authenticated = False if HTTP_HEADER_HA_AUTH in request.headers or DATA_API_PASSWORD in request.query: if request.path not in old_auth_warning: _LOGGER.log( logging.INFO if support_legacy else logging.WARNING, "api_password is going to deprecate. You need to use a" " bearer token to access %s from %s", request.path, request[KEY_REAL_IP], ) old_auth_warning.add(request.path) if hdrs.AUTHORIZATION in request.headers and await async_validate_auth_header( request ): # it included both use_auth and api_password Basic auth authenticated = True # We first start with a string check to avoid parsing query params # for every request. elif ( request.method == "GET" and SIGN_QUERY_PARAM in request.query and await async_validate_signed_request(request) ): authenticated = True elif trusted_networks and await async_validate_trusted_networks(request): if request.path not in old_auth_warning: # When removing this, don't forget to remove the print logic # in http/view.py request["deprecate_warning_message"] = ( "Access from trusted networks without auth token is " "going to be removed in Home Assistant 0.96. 
Configure " "the trusted networks auth provider or use long-lived " "access tokens to access {} from {}".format( request.path, request[KEY_REAL_IP] ) ) old_auth_warning.add(request.path) authenticated = True elif ( support_legacy and HTTP_HEADER_HA_AUTH in request.headers and await async_validate_legacy_api_password( request, request.headers[HTTP_HEADER_HA_AUTH] ) ): authenticated = True elif ( support_legacy and DATA_API_PASSWORD in request.query and await async_validate_legacy_api_password( request, request.query[DATA_API_PASSWORD] ) ): authenticated = True request[KEY_AUTHENTICATED] = authenticated return await handler(request) app.middlewares.append(auth_middleware)
fbradyirl/home-assistant
homeassistant/components/http/auth.py
Python
apache-2.0
7,407
import android class SMSPoolMember: def __init__(self, query): self.droid = android.Android() self.query = str(query).lstrip().rstrip() def wifiConnected(self): none = "<unknown ssid>" return not self.droid.wifiGetConnectionInfo().result["ssid"] == none def dataConnected(self): return self.droid.getCellLocation().result["cid"] > -1 def sendResponse(self): if self.query == "connection": return "pool:" + str(self.wifiConnected() or self.dataConnected()) else: return "pool: None"
wallarelvo/eneza-server
smsserver/poolmember.py
Python
apache-2.0
584
import re from django.db import models, transaction from django.utils import timezone from django.utils.six import iteritems, with_metaclass from django.utils.translation import ugettext_lazy as _ from django.core.cache import cache from django.core.exceptions import SuspiciousOperation from django.contrib.contenttypes.models import ContentType from djcelery.models import PeriodicTask, CrontabSchedule import base64 import copy try: # noinspection PyPep8Naming import cPickle as pickle except ImportError: import pickle from vms.utils import PickleDict, RWMethods from gui.models import User from que.utils import owner_id_from_task_id from que.user_tasks import UserTasks class _DummyModelBase(type): def __new__(mcs, name, bases, attrs): meta = attrs.pop('Meta', None) new_class = type.__new__(mcs, name, bases, attrs) if meta: meta.model_name = name.lower() meta.concrete_model = new_class setattr(new_class, '_meta', meta) else: raise AttributeError('Class %s has no "class Meta" definition' % name) return new_class class _DummyModel(with_metaclass(_DummyModelBase)): """ Dummy model simulating some properties of django models """ _pk_key = NotImplemented class Meta: pass class _DummyDataModel(with_metaclass(_DummyModelBase)): """ Dummy model simulating some properties of django models + serialization of internal data dictionary. 
""" class Meta: pass def __new__(cls, *args, **kwargs): # noinspection PyArgumentList obj = super(_DummyDataModel, cls).__new__(cls, *args, **kwargs) obj._data = {} return obj def __init__(self, data=None): if data: self._data.update(data) def __getattr__(self, key): if key.startswith('_'): # noinspection PyUnresolvedReferences return super(_DummyDataModel, self).__getattr__(key) else: return self._data[key] def __setattr__(self, key, value): if key.startswith('_'): return super(_DummyDataModel, self).__setattr__(key, value) else: self._data[key] = value def __delattr__(self, key): if key.startswith('_'): return super(_DummyDataModel, self).__delattr__(key) else: return self._data.__delitem__(key) def __getstate__(self): return self._data.copy() def __setstate__(self, state): self._data = state def __iter__(self): return iteritems(self._data) def __getitem__(self, item): return self._data.__getitem__(item) def get(self, key, default=None): return self._data.get(key, default) def update(self, data): self._data.update(data) class _PickleModel(models.Model): """ Abstract class with pickle encoding and decoding of field attributes. """ class Meta: app_label = 'vms' abstract = True @staticmethod def _decode(xdata): """Unpickle data from DB and return a PickleDict object""" data = pickle.loads(base64.decodestring(xdata)) if not isinstance(data, PickleDict): data = PickleDict(data) return data @staticmethod def _encode(xdata): """Pickle a dict object and return data, which can be saved in DB""" if not isinstance(xdata, dict): raise ValueError('json is not a dict') return base64.encodestring(pickle.dumps(copy.copy(dict(xdata)))) class _JsonPickleModel(_PickleModel): """ Abstract _PickleModel with json attributes stored in enc_json field. 
""" EMPTY = 'KGRwMQou\n' class Meta: app_label = 'vms' abstract = True # don't access 'encoded_data' directly, use the 'data' property instead, etc # default is an empty dict enc_json = models.TextField(blank=False, editable=False, default=EMPTY) # Access the json property to load/save/manipulate the dict object # json is the dict which will be used for creating/updating the VM on SmartOS @property def json(self): return self._decode(self.enc_json) @json.setter def json(self, data): self.enc_json = self._encode(data) def save_item(self, key, value, save=True, metadata=None, **kwargs): """Set one item in json""" _json = self.json if metadata: if metadata not in _json: _json[metadata] = {} _json[metadata][key] = value else: _json[key] = value self.json = _json if save: return self.save(**kwargs) else: return True def save_items(self, save=True, metadata=None, **key_value): """Save multiple items in json""" _json = self.json if metadata: if metadata not in _json: _json[metadata] = {} _json[metadata].update(key_value) else: _json.update(key_value) self.json = _json if save: return self.save() else: return True def delete_item(self, key, save=True, metadata=None, **kwargs): """Set one item in json""" _json = self.json try: if metadata: if metadata in _json: del _json[metadata][key] else: del _json[key] except KeyError: pass self.json = _json if save: return self.save(**kwargs) else: return True class _StatusModel(models.Model): """ Abstract model class with basic status attributes. Also tracks changes of status property and updates the status_change attribute if changed. Status information is cached - the key is PK:status. """ _lock = False # Cannot be saved when True _cache_status = False # Should we cache the status into redis after calling save()? 
_orig_status = None # Original value of status _update_changed = True # When True, the changed field will be updated at each save() # status = models.SmallIntegerField(_('Status')) # You need this in descendant status_change = models.DateTimeField(_('Last status change'), default=None, null=True, editable=False) created = models.DateTimeField(_('Created'), editable=False) changed = models.DateTimeField(_('Last changed'), editable=False) class Meta: app_label = 'vms' abstract = True def __init__(self, *args, **kwargs): super(_StatusModel, self).__init__(*args, **kwargs) self._orig_status = self.status @staticmethod def status_key(pk): # public helper for accessing the cache key return str(pk) + ':status' @staticmethod def status_change_key(pk): # public helper for accessing the cache key return str(pk) + ':status-change' @property # just a helper, so we have one method to construct the cache key def obj_status_key(self): return _StatusModel.status_key(self.pk) @property # just a helper, so we have one method to construct the cache key def obj_status_change_key(self): return _StatusModel.status_change_key(self.pk) def lock(self): self._lock = True def unlock(self): self._lock = False def save(self, *args, **kwargs): """Update status_change and cache when needed""" if self._lock: # Used for active json display in vm_define when the json is overwritten by active json raise SuspiciousOperation('%s object "%s" is locked!' 
% (self.__class__.__name__, self)) now = timezone.now() status_change_time = kwargs.pop('status_change_time', now) update_fields = kwargs.get('update_fields', None) if self._update_changed: self.changed = now if not self.created: self.created = now if self.status != self._orig_status: self.status_change = status_change_time res = super(_StatusModel, self).save(*args, **kwargs) # update cache if status changed if self.status != self._orig_status and (update_fields is None or 'status' in update_fields): if self._cache_status: cache.set(self.obj_status_key, self.status) cache.set(self.obj_status_change_key, self.status_change) self._orig_status = self.status return res def save_status(self, new_status=None, **kwargs): """Just update the status field (and other related fields)""" if new_status is not None: self.status = new_status return self.save(update_fields=('status', 'status_change'), **kwargs) # noinspection PyUnusedLocal @staticmethod def post_delete_status(sender, instance, **kwargs): """Clean cache after removing the object - call from signal""" # noinspection PyProtectedMember if instance._cache_status: # remove the cache entries cache.delete(instance.obj_status_key) cache.delete(instance.obj_status_change_key) class _OSType(models.Model): """ Abstract class used for children to inherit OS type attributes and field. """ LINUX = 1 SUNOS = 2 BSD = 3 WINDOWS = 4 SUNOS_ZONE = 5 LINUX_ZONE = 6 OSTYPE = ( (LINUX, _('Linux VM')), (SUNOS, _('SunOS VM')), (BSD, _('BSD VM')), (WINDOWS, _('Windows VM')), (SUNOS_ZONE, _('SunOS Zone')), (LINUX_ZONE, _('Linux Zone')), ) # KVM = frozenset([LINUX, SUNOS, BSD, WINDOWS]) # to HVM HVM_OSTYPES = frozenset([LINUX, SUNOS, BSD, WINDOWS]) ZONE_OSTYPES = frozenset([SUNOS_ZONE, LINUX_ZONE]) class Meta: app_label = 'vms' abstract = True # ostype = models.SmallIntegerField(_('Guest OS type'), choices=OSTYPE) class _HVMType(models.Model): """ Abstract class used for children to inherit hvm_type attributes and field. 
""" Hypervisor_KVM = 1 Hypervisor_BHYVE = 2 Hypervisor_NONE = 3 # for zones HVM_TYPE = ( (Hypervisor_KVM, _('KVM hypervisor')), (Hypervisor_BHYVE, _('BHYVE hypervisor')), (Hypervisor_NONE, _('NO hypervisor')), ) # used on VM create or when editing HVM VM HVM_TYPE_GUI = ( (Hypervisor_KVM, _('KVM')), (Hypervisor_BHYVE, _('BHYVE')), ) # used in VM modal when editing already created zone HVM_TYPE_GUI_NO_HYPERVISOR = ( (Hypervisor_NONE, _('NO hypervisor')), ) HVM = frozenset([Hypervisor_KVM, Hypervisor_BHYVE]) class Meta: app_label = 'vms' abstract = True class _VirtModel(models.Model): """ Abstract class used by any virtualization object/bucket, like Image and Network. All this objects should have this common attributes for unified access and naming strategy. Example: Image access strategy ------------------------------ Public: Customers can purchase this image Disabled: Shown to customers, but they are not able to purchase it Private: Internal images shown only to image owners Deleted: Do not show to anybody """ PUBLIC = 1 DISABLED = 2 PRIVATE = 3 DELETED = 4 INTERNAL = 9 ACCESS = ( (PUBLIC, _('Public')), (DISABLED, _('Disabled')), (PRIVATE, _('Private')), (DELETED, _('Deleted')), (INTERNAL, _('Internal')), ) INVISIBLE = (DELETED, INTERNAL) UNUSABLE = (DISABLED, DELETED, INTERNAL) name = models.CharField(_('Name'), max_length=64, unique=True) alias = models.CharField(_('Alias'), max_length=32) desc = models.CharField(_('Description'), max_length=128, blank=True) owner = models.ForeignKey(User, verbose_name=_('Owner'), on_delete=models.PROTECT) access = models.SmallIntegerField(_('Access'), choices=ACCESS, default=PRIVATE) created = models.DateTimeField(_('Created'), auto_now_add=True, editable=False) changed = models.DateTimeField(_('Last changed'), auto_now=True, editable=False) class Meta: app_label = 'vms' abstract = True # unique_together = (('alias', 'owner'),) # ^^^^ This is very important and should be placed in the descendant model. 
    def __unicode__(self):
        # Human-readable representation used throughout the GUI (Python 2 style).
        return '%s' % self.name


class _UserTasksModel(object):
    """
    Model for working (listing, adding, removing) with user tasks related to this object.

    WARNING: this implementation depends on the owner attribute.
    Object owner must _not_ change when a pending task exists!
    """
    owner = None  # This class is only useful in models that have a owner attribute
    pk = NotImplemented  # Should always exist in any django model
    _pk_key = NotImplemented  # Set in descendant class
    _log_name_attr = 'name'  # Name of object's attribute which will be used for the object_name field in TaskLogEntry

    class Meta:
        app_label = 'vms'
        abstract = True

    @staticmethod
    def _add_task(user_id, task_id, info):
        # Register a pending task in the per-user task cache.
        return UserTasks(user_id).add(task_id, info)

    @staticmethod
    def _pop_task(user_id, task_id):
        # Remove and return a pending task from the per-user task cache.
        return UserTasks(user_id).pop(task_id)

    @staticmethod
    def _get_tasks(user_id):
        # All pending tasks of one user (task_id -> info dict).
        return UserTasks(user_id).tasks

    # noinspection PyMethodMayBeStatic
    def default_apiview(self):
        """Return dict with object related attributes which are always available in apiview"""
        return {}

    def get_tasks(self, match_dict=None):
        """Return pending tasks for this VM as a dict with task_id as keys.
        If match_dict is specified then try to match key/values to current tasks
        and if task is found return only the one task else return {}."""
        res = {}

        # Filter the owner's pending tasks down to the ones targeting this
        # object (matched via the descendant-defined primary-key key).
        for tid, task in iteritems(self._get_tasks(self.owner.id)):
            if task.get(self._pk_key, None) == self.pk:
                res[tid] = task.get('apiview', {})

        if match_dict:
            subtasks = {}

            # Keep only tasks whose apiview matches every key/value pair.
            for tid, task in iteritems(res):
                match_found = all(task.get(k, None) == v for k, v in iteritems(match_dict))

                if match_found:
                    subtasks[tid] = task

            return subtasks

        return res

    @property  # property to get_tasks() method
    def tasks(self):
        return self.get_tasks()

    @property
    def tasks_rw(self):
        # Pending tasks created by write (non-GET) API methods.
        return self.get_tasks(match_dict={'method': RWMethods})

    def tasks_ro(self):
        # NOTE(review): unlike tasks/tasks_rw this is a plain method, not a
        # @property - callers invoke obj.tasks_ro(); adding the decorator now
        # would break them, so the inconsistency is only flagged here.
        return self.get_tasks(match_dict={'method': 'GET'})

    @classmethod
    def _create_task_info(cls, pk, apiview, msg, additional_apiview=None):
        """Prepare task info dict (will be stored in UserTasks cache)"""
        if apiview is None:
            apiview = {}

        if additional_apiview:
            apiview.update(additional_apiview)

        # Stamp the task creation time unless the caller already provided one.
        if 'time' not in apiview:
            apiview['time'] = timezone.now().isoformat()

        return {cls._pk_key: pk, 'msg': msg, 'apiview': apiview}

    @classmethod
    def _tasks_add(cls, pk, task_id, apiview, msg='', **additional_apiview):
        """Add task to pending tasks dict in cache."""
        info = cls._create_task_info(pk, apiview, msg, additional_apiview=additional_apiview)

        # The owner id is encoded inside the task_id itself.
        return cls._add_task(owner_id_from_task_id(task_id), task_id, info)

    def tasks_add(self, task_id, apiview, msg='', **additional_apiview):
        """Add task to pending tasks dict in cache."""
        return self._tasks_add(self.pk, task_id, apiview, msg=msg, **additional_apiview)

    @classmethod
    def _tasks_del(cls, task_id, apiview=None, **additional_apiview):
        """Delete task from pending tasks dict in cache."""
        # If the caller did not pop the task already, pop it here and reuse its
        # stored apiview for the socket.io notification below.
        if apiview is None:
            info = cls._pop_task(owner_id_from_task_id(task_id), task_id)
            apiview = info.get('apiview', {})

        if additional_apiview:
            apiview.update(additional_apiview)

        # Store task info for socket.io que monitor
        cache.set('sio-' + task_id, apiview, 60)

        return apiview

    def tasks_del(self, task_id, **additional_apiview):
        """Delete task from pending tasks dict in cache."""
        # Pop first so the object-specific default_apiview() attributes can be
        # merged in before _tasks_del() publishes the notification.
        info = self._pop_task(owner_id_from_task_id(task_id), task_id)
        apiview = info.get('apiview', {})
        apiview.update(self.default_apiview())

        return self._tasks_del(task_id, apiview=apiview, **additional_apiview)

    @classmethod
    def get_log_name_lookup_kwargs(cls, log_name_value):
        """Return lookup_key=value DB pairs which can be used for retrieving objects by log_name value"""
        return {cls._log_name_attr: log_name_value}

    @classmethod
    def get_content_type(cls):
        # Warning: get_content_type will be deprecated soon. New models should implement get_object_type()
        return ContentType.objects.get_for_model(cls)

    @classmethod
    def get_object_type(cls, content_type=None):
        # Returns the lowercase model name, optionally from a pre-fetched
        # ContentType to save a DB/cache lookup.
        if content_type:
            return content_type.model

        return cls.get_content_type().model

    @classmethod
    def get_object_by_pk(cls, pk):
        # noinspection PyUnresolvedReferences
        return cls.objects.get(pk=pk)

    @classmethod
    def get_pk_key(cls):
        return cls._pk_key

    @property
    def log_name(self):
        # Value of the attribute configured via _log_name_attr.
        return getattr(self, self._log_name_attr)

    @property
    def log_alias(self):
        # noinspection PyUnresolvedReferences
        return self.alias

    @property
    def log_list(self):
        # Tuple consumed by the task log machinery.
        return self.log_name, self.log_alias, self.pk, self.__class__


class _VmDiskModel(models.Model):
    """
    Abstract class with Virtual Machine disk_id and array_disk_id fields.
""" _vm_disk_map = None # Cached real_disk_id -> disk_id mapping _vm_disks = None # Cache vm.json_active_get_disks() vm = None # declare in descendant class disk_id = models.SmallIntegerField(_('Disk ID')) # Always store real_disk_id class Meta: app_label = 'vms' abstract = True def get_disk_map(self): """Return real_disk_id -> disk_id mapping""" self._vm_disks = self.vm.json_active_get_disks() return self.vm.get_disk_map(self._vm_disks) @property # Fake disk_id def array_disk_id(self): if self._vm_disk_map is None: self._vm_disk_map = self.get_disk_map() return self._vm_disk_map[self.disk_id] + 1 @property def disk_size(self): disk_id = self.array_disk_id - 1 return self._vm_disks[disk_id]['size'] @property def zfs_filesystem(self): disk_id = self.array_disk_id - 1 return self._vm_disks[disk_id]['zfs_filesystem'] @staticmethod def get_real_disk_id(disk_or_path): """Return integer disk suffix from json.disks.*.path attribute""" if isinstance(disk_or_path, dict): disk_path = disk_or_path['path'] else: disk_path = disk_or_path return int(re.split('-|/', disk_path)[-1].lstrip('disk')) @classmethod def get_disk_id(cls, vm, array_disk_id): """Return real_disk_id from vm's active_json""" disk = vm.json_active_get_disks()[array_disk_id - 1] return cls.get_real_disk_id(disk) class _ScheduleModel(models.Model): """ Abstract class with relation to PeriodicTask and lazy cron schedule and active properties. """ PT = PeriodicTask _active = None # cached enabled attribute _schedule = None # cached crontab entry periodic_task = models.ForeignKey(PT, null=True, blank=True) class Meta: app_label = 'vms' abstract = True # noinspection PyMethodMayBeStatic def _new_periodic_task(self): """Return instance of PeriodicTask. 
Define in descendant class""" return NotImplemented # return self.PT(name=, task=, args=, kwargs=, expires=) def _save_crontab(self, c): """Save crontab instance""" c.minute, c.hour, c.day_of_month, c.month_of_year, c.day_of_week = self.schedule.split() c.save() return c @staticmethod def crontab_to_schedule(c): """Return string representation of CrontabSchedule model""" def s(f): return f and str(f).replace(' ', '') or '*' return '%s %s %s %s %s' % (s(c.minute), s(c.hour), s(c.day_of_month), s(c.month_of_year), s(c.day_of_week)) @property def active(self): """Return enabled boolean from periodic task""" if self._active is None: # init if self.periodic_task: self._active = self.periodic_task.enabled else: self._active = True # default return self._active @active.setter def active(self, value): """Cache active attribute - will be updated/created later in save()""" self._active = value @property def schedule(self): """Return cron entry from periodic task""" if self._schedule is None and self.periodic_task and self.periodic_task.crontab: # init self._schedule = self.crontab_to_schedule(self.periodic_task.crontab) return self._schedule @schedule.setter def schedule(self, value): """Cache cron entry - will be updated/created later in save()""" self._schedule = value @transaction.atomic def save(self, *args, **kwargs): """Create or update periodic task and cron schedule in DB""" # Save first, because the periodic_task needs this object's ID super(_ScheduleModel, self).save(*args, **kwargs) do_save = False pt = self.periodic_task if not pt: pt = self._new_periodic_task() if not pt.crontab: pt.crontab = self._save_crontab(CrontabSchedule()) do_save = True elif self.schedule != self.crontab_to_schedule(pt.crontab): self._save_crontab(pt.crontab) do_save = True # Need to update PeriodicTask, because it will signal the Scheduler to reload if self.active != pt.enabled: pt.enabled = self.active do_save = True if not pt.pk: pt.save() # New periodic task object self.periodic_task 
= pt self.save(update_fields=('periodic_task',)) # Update this object elif do_save: pt.save(update_fields=('enabled', 'crontab', 'date_changed')) # Update periodic task # noinspection PyUnusedLocal @staticmethod def post_delete_schedule(sender, instance, **kwargs): """Cascade delete - call from signal""" if instance.periodic_task: if instance.periodic_task.crontab: instance.periodic_task.crontab.delete() else: instance.periodic_task.delete()
erigones/esdc-ce
vms/models/base.py
Python
apache-2.0
22,882