import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
def load_data():
'''Preprocess the raw CSVs and return train/validation/test splits, the label list and the feature column names'''
# Data files
acc_x_card = 'data/tj_02_acc_x_card.csv'
acc_transc = 'data/tj_02_account_transaction.csv'
cred_transc = 'data/tj_02_creditcard_transaction.csv'
train_file = 'data/tj_02_training.csv'
test_file = 'data/tj_02_test.csv'
df_acc_x = pd.read_csv(acc_x_card)
df_acc_trancs = pd.read_csv(acc_transc)
df_cred = pd.read_csv(cred_transc)
df_train = pd.read_csv(train_file, header=None)
df_test = pd.read_csv(test_file, header=None)
df_train.columns = ['account_no', 'label']
df_test.columns = ['account_no']
# Preprocess account transaction data
dummy_type = pd.get_dummies(df_acc_trancs['txn_type'])
df = pd.concat([df_acc_trancs, dummy_type], axis=1)
df = df.drop(['txn_type'], axis=1)
df['hour'] = df['txn_hour']//3
df_hour = pd.get_dummies(df['hour'])
df_hour.columns = ['h0', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7']
df = pd.concat([df, df_hour], axis=1)
df = df.drop(['txn_hour', 'hour'], axis=1)
df['date'] = pd.to_datetime(df['txn_dt'])
df['day'] = df['date'].dt.dayofweek
df['weekend'] = df['day'] >= 5
df['weekday'] = df['day'] < 5
df['weekday'] = df['weekday'].astype(int)
holidays = pd.read_csv('../../Holiday2016.csv', header=None)
df['holidays'] = df['txn_dt'].isin(holidays[0].tolist())
df['holidays'] = df['holidays'] | df['weekend']
df['holidays'] = df['holidays'].astype(int)
df['weekend'] = df['weekend'].astype(int)
df = df.drop(['txn_dt', 'day'], axis=1)
df = df[df['txn_amount'] < df_acc_trancs.quantile(0.995)['txn_amount']]
txn_am_mean = df.groupby(['account_no'])['txn_amount'].mean()
txn_am_std = df.groupby(['account_no'])['txn_amount'].std()
txn_am_max = df.groupby(['account_no'])['txn_amount'].max()
txn_am_min = df.groupby(['account_no'])['txn_amount'].min()
txn_am_median = df.groupby(['account_no'])['txn_amount'].median()
txn_am_count = df.groupby(['account_no'])['txn_amount'].count()
txn_am_sum = df.groupby(['account_no'])['txn_amount'].sum()
df_txn = pd.concat([txn_am_mean, txn_am_std, txn_am_max, txn_am_min, txn_am_median, txn_am_count, txn_am_sum], axis=1, join='inner')
df_txn.columns = ['txn_mean', 'txn_std', 'txn_max', 'txn_min', 'txn_median', 'txn_count', 'txn_sum']
df_txn = df_txn.fillna(0)
cols = ['CR', 'DR', 'h0', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'weekend', 'weekday', 'holidays']
df_sum = list()
for col in cols:
if not len(df_sum) == 0:
df_sum = pd.concat([df_sum, df.groupby(['account_no'])[col].sum()], axis=1, join='inner')
else:
df_sum = df.groupby(['account_no'])[col].sum()
df_sum.columns = cols
df_date = df.groupby(['account_no'])['date'].apply(lambda x: x.sort_values().drop_duplicates().diff().fillna(0).median())
df_date = df_date.dt.days
df_txn = pd.concat([df_txn, df_sum, df_date], axis=1, join='inner')
# Preprocess Credit transaction
whole_sale = {2741, 2791, 2842, 5013, 5021, 5039, 5044, 5045, 5046, 5047, 5051, 5065, 5072, 5074, 5085, 5094,
5099, 5111, 5122, 5131, 5137, 5139, 5169, 5172, 5192, 5193, 5198, 5199, 7375, 7379, 7829, 8734}
contract_services = {742,763,780,1520,1711,1731,1740,1750,1761,1771,1799}
airlines = set(range(3000,3300)) | {4511}
rental_car = set(range(3351, 3442)) | {7512}
hotel = set(range(3501, 3787)) | {7011}
transport = set(range(4011, 4790))
utilities = set(range(4812, 4817)) | {4821} | set(range(4899, 4901))
retail = set(range(5200,5500))
automobile = set(range(5511, 5600))
clothing = set(range(5611, 5700))
misc = set(range(5712, 6000))
quasi_cash = {4829,6050,6051,6529,6530,6534}
service_providers = {6010,6011,6012,6531,6532,6533,6211,6300,6381,6399,7011,7012,7032,7033}
personal = set(range(7210, 7300))
business = set(range(7311, 7524))
repair = set(range(7531, 7700))
entertain = set(range(7829, 8000))
professional = set(range(8011, 9000))
government = set(range(9211, 9951))
mcc = {
'whole_sale': whole_sale,
'contract_services': contract_services,
'airlines': airlines,
'rental_car': rental_car,
'hotel': hotel,
'transport': transport,
'utilities':utilities,
'retail': retail,
'automobile': automobile,
'clothing': clothing,
'misc': misc,
'quasi_cash': quasi_cash,
'service_providers': service_providers,
'personal': personal,
'business': business,
'repair': repair,
'entertain': entertain,
'professional': professional,
'government': government,
}
for k, v in mcc.items():
df_cred[k] = df_cred['mer_cat_code'].isin(v).astype(int)
txn_cr_mean = df_cred.groupby(['card_no'])['txn_amount'].mean()
txn_cr_std = df_cred.groupby(['card_no'])['txn_amount'].std()
txn_cr_max = df_cred.groupby(['card_no'])['txn_amount'].max()
txn_cr_min = df_cred.groupby(['card_no'])['txn_amount'].min()
txn_cr_median = df_cred.groupby(['card_no'])['txn_amount'].median()
txn_cr_count = df_cred.groupby(['card_no'])['txn_amount'].count()
txn_cr_sum = df_cred.groupby(['card_no'])['txn_amount'].sum()
df_txn_cr = pd.concat([txn_cr_mean, txn_cr_std, txn_cr_max, txn_cr_min, txn_cr_median, txn_cr_count, txn_cr_sum], axis=1, join='inner')
df_txn_cr.columns = ['txn_cr_mean', 'txn_cr_std', 'txn_cr_max', 'txn_cr_min', 'txn_cr_median', 'txn_cr_count', 'txn_cr_sum']
df_txn_cr = df_txn_cr.fillna(0)
df_cr_hr = df_cred.groupby(['card_no'])['txn_hour'].median()
df_mer_cat = df_cred.drop(['txn_date', 'txn_hour', 'txn_amount', 'mer_cat_code', 'mer_id'], axis=1)
df_mer_cat = df_mer_cat.groupby(['card_no']).sum()
df_cr = pd.concat([df_txn_cr, df_cr_hr, df_mer_cat], axis=1, join='inner')
# Merge account transaction and credit transaction data
df_txn = df_txn.reset_index(drop=False)
df_result = pd.merge(df_acc_x, df_txn, on='account_no', how='left')
df_cr = df_cr.reset_index(drop=False)
df_result = pd.merge(df_result, df_cr, on='card_no', how='left')
df_result = df_result.fillna(0)
drop_cols = ['quasi_cash', 'rental_car', 'contract_services', 'repair', 'personal', 'service_providers',
'hotel', 'whole_sale', 'government', 'txn_cr_median', 'airlines', 'professional', 'utilities',
'clothing', 'transport', 'retail', 'txn_cr_mean','txn_hour', 'txn_cr_std', 'h1', 'txn_cr_min',
'txn_cr_max']
df_result = df_result.drop(drop_cols, axis=1)
# Merge with Train and test data
X = pd.merge(df_train, df_result, on='account_no')
X = X.drop(['account_no', 'label', 'card_no'], axis=1)
assert len(X) == len(df_train)
col_name = X.columns
y = df_train['label']
assert len(y) == len(df_train)
X_test = pd.merge(df_test, df_result, on='account_no')
X_test = X_test.drop(['account_no', 'card_no'], axis=1)
assert len(X_test) == len(df_test)
X_all = pd.concat([X, X_test])
X_all = StandardScaler().fit_transform(X_all.values)
X = X_all[0:len(df_train)]
X_test = X_all[len(df_train):]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=42)
label_list = [0, 1]
return X_train, y_train, X_val, y_val, X_test, label_list, col_name
def get_data():
X_train_file = 'data/X_train.csv'
y_train_file = 'data/y_train.csv'
X_val_file = 'data/X_val.csv'
y_val_file = 'data/y_val.csv'
X_test_file = 'data/X_test.csv'
label_file = 'data/label_list.csv'
col_file = 'data/col_name.csv'
if not os.path.exists(X_train_file):
X_train, y_train, X_val, y_val, X_test, label_list, col_name = load_data()
# Generate file from array
gen_file(X_train, X_train_file)
gen_file(y_train, y_train_file)
gen_file(X_val, X_val_file)
gen_file(y_val, y_val_file)
gen_file(X_test, X_test_file)
gen_file(label_list, label_file)
gen_file(col_name, col_file)
else:
# Read array from file
X_train = get_array(X_train_file)
y_train = get_array(y_train_file)
y_train = y_train.flatten()
X_val = get_array(X_val_file)
y_val = get_array(y_val_file)
y_val = y_val.flatten()
X_test = get_array(X_test_file)
label_list = get_array(label_file)
label_list = label_list.transpose()
label_list = list(label_list[0])
return X_train, X_val, y_train, y_val, X_test, label_list
def gen_file(array, filename):
df = pd.DataFrame(array)
df.to_csv(filename, header=None, index=False)
def get_array(filename):
df = pd.read_csv(filename, header=None)
return np.array(df)
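if __name__ == '__main__':
    # Usage sketch (not part of the original script): load the cached splits,
    # optionally rebalance the training set with SMOTE (imported above but
    # otherwise unused here), and fit a simple baseline classifier. Assumes the
    # data/ CSV files referenced in load_data() exist; older imbalanced-learn
    # releases expose fit_sample() instead of fit_resample().
    from sklearn.linear_model import LogisticRegression
    X_train, X_val, y_train, y_val, X_test, label_list = get_data()
    X_res, y_res = SMOTE(random_state=42).fit_resample(X_train, y_train)
    clf = LogisticRegression(max_iter=1000)
    clf.fit(X_res, y_res)
    print('validation accuracy: %.3f' % clf.score(X_val, y_val))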
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
bzr vcs support.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import email.utils # For email parsing
import dateutil.parser # Date string parsing
# first try python3, then python2
try:
from urllib.request import url2pathname
except ImportError:
from urllib2 import url2pathname
from vcstools.vcs_base import VcsClientBase, VcsError
from vcstools.common import sanitized, normalized_rel_path, \
run_shell_command, ensure_dir_notexists
def _get_bzr_version():
"""Looks up bzr version by calling bzr --version.
:raises: VcsError if bzr is not installed"""
try:
value, output, _ = run_shell_command('bzr --version',
shell=True,
us_env=True)
if value == 0 and output is not None and len(output.splitlines()) > 0:
version = output.splitlines()[0]
else:
raise VcsError("bzr --version returned %s," +
" maybe bzr is not installed" %
value)
except VcsError as e:
raise VcsError("Coud not determine whether bzr is installed: %s" % e)
return version
class BzrClient(VcsClientBase):
def __init__(self, path):
"""
:raises: VcsError if bzr not detected
"""
VcsClientBase.__init__(self, 'bzr', path)
_get_bzr_version()
@staticmethod
def get_environment_metadata():
metadict = {}
try:
metadict["version"] = _get_bzr_version()
except:
metadict["version"] = "no bzr installed"
return metadict
def get_url(self):
"""
:returns: BZR URL of the branch (output of bzr info command),
or None if it cannot be determined
"""
result = None
if self.detect_presence():
cmd = 'bzr info %s' % self._path
_, output, _ = run_shell_command(cmd, shell=True, us_env=True)
matches = [l for l in output.splitlines() if l.startswith(' parent branch: ')]
if matches:
ppath = url2pathname(matches[0][len(' parent branch: '):])
# bzr may report the parent branch as a relative path; convert it back to an absolute path
if (ppath is not None and os.path.isdir(ppath) and not os.path.isabs(ppath)):
result = os.path.abspath(os.path.join(os.getcwd(), ppath))
else:
result = ppath
return result
def url_matches(self, url, url_or_shortcut):
if super(BzrClient, self).url_matches(url, url_or_shortcut):
return True
# if we got a shortcut (e.g. launchpad url), we compare using
# bzr info and return that one if result matches.
result = False
if url_or_shortcut is not None:
cmd = 'bzr info %s' % url_or_shortcut
value, output, _ = run_shell_command(cmd, shell=True, us_env=True)
if value == 0:
for line in output.splitlines():
sline = line.strip()
for prefix in ['shared repository: ',
'repository branch: ',
'branch root: ']:
if sline.startswith(prefix):
if super(BzrClient, self).url_matches(url, sline[len(prefix):]):
result = True
break
return result
def detect_presence(self):
return self.path_exists() and os.path.isdir(os.path.join(self._path, '.bzr'))
def checkout(self, url, version=None, verbose=False,
shallow=False, timeout=None):
if url is None or url.strip() == '':
raise ValueError('Invalid empty url : "%s"' % url)
# bzr 2.5.1 fails if empty directory exists
if not ensure_dir_notexists(self.get_path()):
self.logger.error("Can't remove %s" % self.get_path())
return False
cmd = 'bzr branch'
if version:
cmd += ' -r %s' % version
cmd += ' %s %s' % (url, self._path)
value, _, msg = run_shell_command(cmd,
shell=True,
show_stdout=verbose,
verbose=verbose)
if value != 0:
if msg:
self.logger.error('%s' % msg)
return False
return True
def update(self, version='', verbose=False, timeout=None):
if not self.detect_presence():
return False
value, _, _ = run_shell_command("bzr pull",
cwd=self._path,
shell=True,
show_stdout=True,
verbose=verbose)
if value != 0:
return False
# Ignore verbose param, bzr is pretty verbose on update anyway
if version is not None and version != '':
cmd = "bzr update -r %s" % (version)
else:
cmd = "bzr update"
value, _, _ = run_shell_command(cmd,
cwd=self._path,
shell=True,
show_stdout=True,
verbose=verbose)
if value == 0:
return True
return False
def get_version(self, spec=None):
"""
:param spec: (optional) revisionspec of desired version. May
be any revisionspec as returned by 'bzr help revisionspec',
e.g. a tagname or 'revno:<number>'
:returns: the current revision number of the repository. Or if
spec is provided, the number of a revision specified by some
token.
"""
if self.detect_presence():
if spec is not None:
command = ['bzr log -r %s .' % sanitized(spec)]
_, output, _ = run_shell_command(command,
shell=True,
cwd=self._path,
us_env=True)
if output is None or output.strip() == '' or output.startswith("bzr:"):
return None
else:
matches = [l for l in output.split('\n') if l.startswith('revno: ')]
if len(matches) == 1:
return matches[0].split()[1]
else:
_, output, _ = run_shell_command('bzr revno --tree',
shell=True,
cwd=self._path,
us_env=True)
return output.strip()
def get_diff(self, basepath=None):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = sanitized(normalized_rel_path(self._path, basepath))
command = "bzr diff %s" % rel_path
command += " -p1 --prefix %s/:%s/" % (rel_path, rel_path)
_, response, _ = run_shell_command(command, shell=True, cwd=basepath)
return response
def get_log(self, relpath=None, limit=None):
response = []
if relpath is None:
relpath = ''
# Compile regexes
id_regex = re.compile('^revno: ([0-9]+)$', flags=re.MULTILINE)
committer_regex = re.compile('^committer: (.+)$', flags=re.MULTILINE)
timestamp_regex = re.compile('^timestamp: (.+)$', flags=re.MULTILINE)
message_regex = re.compile('^ (.+)$', flags=re.MULTILINE)
if self.path_exists() and os.path.exists(os.path.join(self._path, relpath)):
# Get the log
limit_cmd = (("--limit=%d" % (int(limit))) if limit else "")
command = "bzr log %s %s" % (sanitized(relpath), limit_cmd)
return_code, text_response, stderr = run_shell_command(command, shell=True, cwd=self._path)
if return_code == 0:
revno_match = id_regex.findall(text_response)
committer_match = committer_regex.findall(text_response)
timestamp_match = timestamp_regex.findall(text_response)
message_match = message_regex.findall(text_response)
# Extract the entries
for revno, committer, timestamp, message in zip(revno_match,
committer_match,
timestamp_match,
message_match):
author, email_address = email.utils.parseaddr(committer)
date = dateutil.parser.parse(timestamp)
log_data = {'id': revno,
'author': author,
'email': email_address,
'message': message,
'date': date}
response.append(log_data)
return response
def get_status(self, basepath=None, untracked=False):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = normalized_rel_path(self._path, basepath)
command = "bzr status %s -S" % sanitized(rel_path)
if not untracked:
command += " -V"
_, response, _ = run_shell_command(command, shell=True, cwd=basepath)
response_processed = ""
for line in response.split('\n'):
if len(line.strip()) > 0:
response_processed += line[0:4] + rel_path + '/'
response_processed += line[4:] + '\n'
response = response_processed
return response
def export_repository(self, version, basepath):
# execute the bzr export cmd
cmd = 'bzr export --format=tgz {0} '.format(basepath + '.tar.gz')
cmd += '{0}'.format(version)
result, _, _ = run_shell_command(cmd, shell=True, cwd=self._path)
if result:
return False
return True
BZRClient = BzrClient
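if __name__ == '__main__':
    # Usage sketch (not part of vcstools): branch a repository into a local
    # directory and print its current revision and parent URL. The URL and
    # path below are illustrative placeholders.
    client = BzrClient('/tmp/example-bzr-checkout')
    if client.checkout('lp:example-project', verbose=True):
        print(client.get_version())
        print(client.get_url())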
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an AWS Virtual Machine object.
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import base64
import collections
import json
import logging
import uuid
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_virtual_machine
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
HVM = 'HVM'
PV = 'PV'
NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2']
PLACEMENT_GROUP_PREFIXES = frozenset(
['c3', 'c4', 'cc2', 'cg1', 'g2', 'cr1', 'r3', 'hi1', 'i2'])
NUM_LOCAL_VOLUMES = {
'c1.medium': 1, 'c1.xlarge': 4,
'c3.large': 2, 'c3.xlarge': 2, 'c3.2xlarge': 2, 'c3.4xlarge': 2,
'c3.8xlarge': 2, 'cc2.8xlarge': 4,
'cg1.4xlarge': 2, 'cr1.8xlarge': 2, 'g2.2xlarge': 1,
'hi1.4xlarge': 2, 'hs1.8xlarge': 24,
'i2.xlarge': 1, 'i2.2xlarge': 2, 'i2.4xlarge': 4, 'i2.8xlarge': 8,
'm1.small': 1, 'm1.medium': 1, 'm1.large': 2, 'm1.xlarge': 4,
'm2.xlarge': 1, 'm2.2xlarge': 1, 'm2.4xlarge': 2,
'm3.medium': 1, 'm3.large': 1, 'm3.xlarge': 2, 'm3.2xlarge': 2,
'r3.large': 1, 'r3.xlarge': 1, 'r3.2xlarge': 1, 'r3.4xlarge': 1,
'r3.8xlarge': 2, 'd2.xlarge': 3, 'd2.2xlarge': 6, 'd2.4xlarge': 12,
'd2.8xlarge': 24, 'x1.32xlarge': 2,
}
DRIVE_START_LETTER = 'b'
INSTANCE_EXISTS_STATUSES = frozenset(
['pending', 'running', 'stopping', 'stopped'])
INSTANCE_DELETED_STATUSES = frozenset(['shutting-down', 'terminated'])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
HOST_EXISTS_STATES = frozenset(
['available', 'under-assessment', 'permanent-failure'])
HOST_RELEASED_STATES = frozenset(['released', 'released-permanent-failure'])
KNOWN_HOST_STATES = HOST_EXISTS_STATES | HOST_RELEASED_STATES
def GetRootBlockDeviceSpecForImage(image_id):
""" Queries the CLI and returns the root block device specification as a dict.
Args:
image_id: The EC2 image id to query
Returns:
The root block device specification as returned by the AWS cli,
as a Python dict. If the image is not found, or if the response
is malformed, an exception will be raised.
"""
command = util.AWS_PREFIX + [
'ec2',
'describe-images',
'--image-ids=%s' % image_id,
'--query', 'Images[]']
stdout, _ = util.IssueRetryableCommand(command)
images = json.loads(stdout)
assert images
assert len(images) == 1, \
'Expected to receive only one image description for %s' % image_id
image_spec = images[0]
root_device_name = image_spec['RootDeviceName']
block_device_mappings = image_spec['BlockDeviceMappings']
root_block_device_dict = next((x for x in block_device_mappings if
x['DeviceName'] == root_device_name))
return root_block_device_dict
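# Illustrative shape of the returned dict (values here are hypothetical; the
# real content comes from `aws ec2 describe-images`):
#   {'DeviceName': '/dev/sda1',
#    'Ebs': {'SnapshotId': 'snap-...', 'VolumeSize': 8, 'VolumeType': 'gp2',
#            'DeleteOnTermination': True, 'Encrypted': False}}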
def GetBlockDeviceMap(machine_type, root_volume_size_gb=None, image_id=None):
"""Returns the block device map to expose all devices for a given machine.
Args:
machine_type: The machine type to create a block device map for.
root_volume_size_gb: The desired size of the root volume, in GiB,
or None to use the default provided by AWS.
image_id: The image id (AMI) used to look up the default root device
specs. This is only required if root_volume_size_gb is specified.
Returns:
The json representation of the block device map for a machine compatible
with the AWS CLI, or if the machine type has no local disks, it will
return None. If root_volume_size_gb and image_id are provided, the block
device map will include the specification for the root volume.
"""
mappings = []
if root_volume_size_gb is not None:
if image_id is None:
raise ValueError(
"image_id must be provided if root_volume_size_gb is specified")
root_block_device = GetRootBlockDeviceSpecForImage(image_id)
root_block_device['Ebs']['VolumeSize'] = root_volume_size_gb
# The 'Encrypted' key must be removed or the CLI will complain
root_block_device['Ebs'].pop('Encrypted')
mappings.append(root_block_device)
if machine_type in NUM_LOCAL_VOLUMES:
for i in xrange(NUM_LOCAL_VOLUMES[machine_type]):
mappings.append({
'VirtualName': 'ephemeral%s' % i,
'DeviceName': '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)})
if len(mappings):
return json.dumps(mappings)
return None
def IsPlacementGroupCompatible(machine_type):
"""Returns True if VMs of 'machine_type' can be put in a placement group."""
prefix = machine_type.split('.')[0]
return prefix in PLACEMENT_GROUP_PREFIXES
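# Worked example (derived from the tables above, not from the original code):
#   GetBlockDeviceMap('i2.2xlarge') has two local volumes, so it returns the
#   JSON string
#   '[{"VirtualName": "ephemeral0", "DeviceName": "/dev/xvdb"},
#     {"VirtualName": "ephemeral1", "DeviceName": "/dev/xvdc"}]'
#   (key order may differ), while IsPlacementGroupCompatible('c4.large') is
#   True because the 'c4' prefix appears in PLACEMENT_GROUP_PREFIXES.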
class AwsDedicatedHost(resource.BaseResource):
"""Object representing an AWS host.
Attributes:
region: The AWS region of the host.
zone: The AWS availability zone of the host.
machine_type: The machine type of VMs that may be created on the host.
client_token: A uuid that makes the creation request idempotent.
id: The host_id of the host.
"""
def __init__(self, machine_type, zone):
super(AwsDedicatedHost, self).__init__()
self.machine_type = machine_type
self.zone = zone
self.region = util.GetRegionFromZone(self.zone)
self.client_token = str(uuid.uuid4())
self.id = None
def _Create(self):
create_cmd = util.AWS_PREFIX + [
'ec2',
'allocate-hosts',
'--region=%s' % self.region,
'--client-token=%s' % self.client_token,
'--instance-type=%s' % self.machine_type,
'--availability-zone=%s' % self.zone,
'--auto-placement=off',
'--quantity=1']
vm_util.IssueCommand(create_cmd)
def _Delete(self):
if self.id:
delete_cmd = util.AWS_PREFIX + [
'ec2',
'release-hosts',
'--region=%s' % self.region,
'--host-ids=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
@vm_util.Retry()
def _Exists(self):
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-hosts',
'--region=%s' % self.region,
'--filter=Name=client-token,Values=%s' % self.client_token]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
response = json.loads(stdout)
hosts = response['Hosts']
assert len(hosts) < 2, 'Too many hosts.'
if not hosts:
return False
host = hosts[0]
self.id = host['HostId']
state = host['State']
assert state in KNOWN_HOST_STATES, state
return state in HOST_EXISTS_STATES
class AwsVmSpec(virtual_machine.BaseVmSpec):
"""Object containing the information needed to create an AwsVirtualMachine.
Attributes:
use_dedicated_host: bool. Whether to create this VM on a dedicated host.
"""
CLOUD = providers.AWS
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Can be overridden by derived classes to add support for specific flags.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(AwsVmSpec, cls)._ApplyFlags(config_values, flag_values)
if flag_values['aws_dedicated_hosts'].present:
config_values['use_dedicated_host'] = flag_values.aws_dedicated_hosts
if flag_values['aws_boot_disk_size'].present:
config_values['boot_disk_size'] = flag_values.aws_boot_disk_size
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(AwsVmSpec, cls)._GetOptionDecoderConstructions()
result.update({
'use_dedicated_host': (option_decoders.BooleanDecoder,
{'default': False}),
'boot_disk_size': (option_decoders.IntDecoder, {'default': None})})
return result
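# Example (based on _ApplyFlags above): running PerfKit Benchmarker with
# `--aws_dedicated_hosts --aws_boot_disk_size=100` overrides the decoded
# defaults, yielding use_dedicated_host=True and boot_disk_size=100 on the
# resulting AwsVmSpec; without those flags the defaults above (False / None)
# apply.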
class AwsVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AWS Virtual Machine."""
CLOUD = providers.AWS
IMAGE_NAME_FILTER = None
DEFAULT_ROOT_DISK_TYPE = 'gp2'
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
deleted_hosts = set()
host_map = collections.defaultdict(list)
def __init__(self, vm_spec):
"""Initialize a AWS virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(AwsVirtualMachine, self).__init__(vm_spec)
self.region = util.GetRegionFromZone(self.zone)
self.user_name = FLAGS.aws_user_name
if self.machine_type in NUM_LOCAL_VOLUMES:
self.max_local_disks = NUM_LOCAL_VOLUMES[self.machine_type]
self.user_data = None
self.network = aws_network.AwsNetwork.GetNetwork(self)
self.firewall = aws_network.AwsFirewall.GetFirewall()
self.use_dedicated_host = vm_spec.use_dedicated_host
self.boot_disk_size = vm_spec.boot_disk_size
self.client_token = str(uuid.uuid4())
self.host = None
self.id = None
if self.use_dedicated_host and util.IsRegion(self.zone):
raise ValueError(
'In order to use dedicated hosts, you must specify an availability '
'zone, not a region ("zone" was %s).' % self.zone)
@property
def host_list(self):
"""Returns the list of hosts that are compatible with this VM."""
return self.host_map[(self.machine_type, self.zone)]
@property
def group_id(self):
"""Returns the security group ID of this VM."""
return self.network.regional_network.vpc.default_security_group_id
@classmethod
def _GetDefaultImage(cls, machine_type, region):
"""Returns the default image given the machine type and region.
If no default is configured, this will return None.
"""
if cls.IMAGE_NAME_FILTER is None:
return None
prefix = machine_type.split('.')[0]
virt_type = 'paravirtual' if prefix in NON_HVM_PREFIXES else 'hvm'
describe_cmd = util.AWS_PREFIX + [
'--region=%s' % region,
'ec2',
'describe-images',
'--query', 'Images[*].{Name:Name,ImageId:ImageId}',
'--filters',
'Name=name,Values=%s' % cls.IMAGE_NAME_FILTER,
'Name=block-device-mapping.volume-type,Values=%s' %
cls.DEFAULT_ROOT_DISK_TYPE,
'Name=virtualization-type,Values=%s' % virt_type]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
if not stdout:
return None
images = json.loads(stdout)
# We want to return the latest version of the image, and since the wildcard
# portion of the image name is the image's creation date, we can just take
# the image with the 'largest' name.
return max(images, key=lambda image: image['Name'])['ImageId']
def ImportKeyfile(self):
"""Imports the public keyfile to AWS."""
with self._lock:
if self.region in self.imported_keyfile_set:
return
cat_cmd = ['cat',
vm_util.GetPublicKeyPath()]
keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd)
import_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'import-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri,
'--public-key-material=%s' % keyfile]
util.IssueRetryableCommand(import_cmd)
self.imported_keyfile_set.add(self.region)
if self.region in self.deleted_keyfile_set:
self.deleted_keyfile_set.remove(self.region)
def DeleteKeyfile(self):
"""Deletes the imported keyfile for a region."""
with self._lock:
if self.region in self.deleted_keyfile_set:
return
delete_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'delete-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
util.IssueRetryableCommand(delete_cmd)
self.deleted_keyfile_set.add(self.region)
if self.region in self.imported_keyfile_set:
self.imported_keyfile_set.remove(self.region)
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Reservations'][0]['Instances'][0]
self.ip_address = instance['PublicIpAddress']
self.internal_ip = instance['PrivateIpAddress']
if util.IsRegion(self.zone):
self.zone = str(instance['Placement']['AvailabilityZone'])
util.AddDefaultTags(self.id, self.region)
assert self.group_id == instance['SecurityGroups'][0]['GroupId'], (
self.group_id, instance['SecurityGroups'][0]['GroupId'])
def _CreateDependencies(self):
"""Create VM dependencies."""
self.ImportKeyfile()
# _GetDefaultImage calls the AWS CLI.
self.image = self.image or self._GetDefaultImage(self.machine_type,
self.region)
self.AllowRemoteAccessPorts()
if self.use_dedicated_host:
with self._lock:
if not self.host_list:
host = AwsDedicatedHost(self.machine_type, self.zone)
self.host_list.append(host)
host.Create()
self.host = self.host_list[-1]
def _DeleteDependencies(self):
"""Delete VM dependencies."""
self.DeleteKeyfile()
if self.host:
with self._lock:
if self.host in self.host_list:
self.host_list.remove(self.host)
if self.host not in self.deleted_hosts:
self.host.Delete()
self.deleted_hosts.add(self.host)
def _Create(self):
"""Create a VM instance."""
placement = []
if not util.IsRegion(self.zone):
placement.append('AvailabilityZone=%s' % self.zone)
if self.use_dedicated_host:
placement.append('Tenancy=host,HostId=%s' % self.host.id)
num_hosts = len(self.host_list)
elif IsPlacementGroupCompatible(self.machine_type):
placement.append('GroupName=%s' % self.network.placement_group.name)
placement = ','.join(placement)
block_device_map = GetBlockDeviceMap(self.machine_type,
self.boot_disk_size,
self.image)
create_cmd = util.AWS_PREFIX + [
'ec2',
'run-instances',
'--region=%s' % self.region,
'--subnet-id=%s' % self.network.subnet.id,
'--associate-public-ip-address',
'--client-token=%s' % self.client_token,
'--image-id=%s' % self.image,
'--instance-type=%s' % self.machine_type,
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
if block_device_map:
create_cmd.append('--block-device-mappings=%s' % block_device_map)
if placement:
create_cmd.append('--placement=%s' % placement)
if self.user_data:
create_cmd.append('--user-data=%s' % self.user_data)
_, stderr, _ = vm_util.IssueCommand(create_cmd)
if self.use_dedicated_host and 'InsufficientCapacityOnHost' in stderr:
logging.warning(
'Creation failed due to insufficient host capacity. A new host will '
'be created and instance creation will be retried.')
with self._lock:
if num_hosts == len(self.host_list):
host = AwsDedicatedHost(self.machine_type, self.zone)
self.host_list.append(host)
host.Create()
self.host = self.host_list[-1]
self.client_token = str(uuid.uuid4())
raise errors.Resource.RetryableCreationError()
def _Delete(self):
"""Delete a VM instance."""
if self.id:
delete_cmd = util.AWS_PREFIX + [
'ec2',
'terminate-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--filter=Name=client-token,Values=%s' % self.client_token]
stdout, _ = util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
reservations = response['Reservations']
assert len(reservations) < 2, 'Too many reservations.'
if not reservations:
return False
instances = reservations[0]['Instances']
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['State']['Name']
self.id = instances[0]['InstanceId']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
# Instantiate the disk(s) that we want to create.
disks = []
for _ in range(disk_spec.num_striped_disks):
data_disk = aws_disk.AwsDisk(disk_spec, self.zone, self.machine_type)
if disk_spec.disk_type == disk.LOCAL:
data_disk.device_letter = chr(ord(DRIVE_START_LETTER) +
self.local_disk_counter)
# Local disk numbers start at 1 (0 is the system disk).
data_disk.disk_number = self.local_disk_counter + 1
self.local_disk_counter += 1
if self.local_disk_counter > self.max_local_disks:
raise errors.Error('Not enough local disks.')
else:
# Remote disk numbers start at 1 + max_local_disks (0 is the system disk
# and local disks occupy [1, max_local_disks]).
data_disk.disk_number = (self.remote_disk_counter +
1 + self.max_local_disks)
self.remote_disk_counter += 1
disks.append(data_disk)
self._CreateScratchDiskFromDisks(disk_spec, disks)
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, self.region, **kwargs)
def GetMachineTypeDict(self):
"""Returns a dict containing properties that specify the machine type.
Returns:
dict mapping string property key to value.
"""
result = super(AwsVirtualMachine, self).GetMachineTypeDict()
result['dedicated_host'] = self.use_dedicated_host
return result
class DebianBasedAwsVirtualMachine(AwsVirtualMachine,
linux_virtual_machine.DebianMixin):
IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-trusty-14.04-amd64-*'
class JujuBasedAwsVirtualMachine(AwsVirtualMachine,
linux_virtual_machine.JujuMixin):
IMAGE_NAME_FILTER = 'ubuntu/images/*/ubuntu-trusty-14.04-amd64-*'
class RhelBasedAwsVirtualMachine(AwsVirtualMachine,
linux_virtual_machine.RhelMixin):
IMAGE_NAME_FILTER = 'amzn-ami-*-x86_64-*'
def __init__(self, vm_spec):
super(RhelBasedAwsVirtualMachine, self).__init__(vm_spec)
self.user_name = 'ec2-user'
class WindowsAwsVirtualMachine(AwsVirtualMachine,
windows_virtual_machine.WindowsMixin):
IMAGE_NAME_FILTER = 'Windows_Server-2012-R2_RTM-English-64Bit-Core-*'
def __init__(self, vm_spec):
super(WindowsAwsVirtualMachine, self).__init__(vm_spec)
self.user_name = 'Administrator'
self.user_data = ('<powershell>%s</powershell>' %
windows_virtual_machine.STARTUP_SCRIPT)
@vm_util.Retry()
def _GetDecodedPasswordData(self):
# Retrieve a base64 encoded, encrypted password for the VM.
get_password_cmd = util.AWS_PREFIX + [
'ec2',
'get-password-data',
'--region=%s' % self.region,
'--instance-id=%s' % self.id]
stdout, _ = util.IssueRetryableCommand(get_password_cmd)
response = json.loads(stdout)
password_data = response['PasswordData']
# AWS may not populate the password data until some time after
# the VM shows as running. Simply retry until the data shows up.
if not password_data:
raise ValueError('No PasswordData in response.')
# Decode the password data.
return base64.b64decode(password_data)
def _PostCreate(self):
"""Retrieve generic VM info and then retrieve the VM's password."""
super(WindowsAwsVirtualMachine, self)._PostCreate()
# Get the decoded password data.
decoded_password_data = self._GetDecodedPasswordData()
# Write the encrypted data to a file, and use openssl to
# decrypt the password.
with vm_util.NamedTemporaryFile() as tf:
tf.write(decoded_password_data)
tf.close()
decrypt_cmd = ['openssl',
'rsautl',
'-decrypt',
'-in',
tf.name,
'-inkey',
vm_util.GetPrivateKeyPath()]
password, _ = vm_util.IssueRetryableCommand(decrypt_cmd)
self.password = password
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import Markup
from flask import current_app
from flask import request
from flask import url_for
_bs = '<li class="previous"><a href="{0}">{1}</a></li>'
_bs4 = '<li class="page-item">\
<a class="page-link" href="{0}" aria-label="Previous">\
<span aria-hidden="true">{1}</span>\
<span class="sr-only">Previous</span></a></li>'
PREV_PAGES = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation='<li class="arrow"><a href="{0}">{1}</a></li>',
)
_bs = '<li class="next"><a href="{0}">{1}</a></li>'
_bs4 = '<li class="page-item">\
<a class="page-link" href="{0}" aria-label="Next">\
<span aria-hidden="true">{1}</span>\
<span class="sr-only">Next</span></a></li>'
NEXT_PAGES = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation='<li class="arrow"><a href="{0}">{1}</a></li>',
)
_bs = '<li class="active"><a>{0}</a></li>'
_bs4 = '<li class="page-item active"><a class="page-link">{0} \
<span class="sr-only">(current)</span></a></li>'
CURRENT_PAGES = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation='<li class="current"><a>{0}</a></li>',
)
LINK = '<li><a href="{0}">{1}</a></li>'
BS4_LINK = '<li class="page-item"><a class="page-link" href="{0}">{1}</a></li>'
_bs = '<li class="disabled"><a>...</a></li>'
_bs4 = '<li class="page-item disabled"><span class="page-link">...</span></li>'
_fa = '<li class="unavailable"><a>...</a></li>'
GAP_MARKERS = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation=_fa,
)
_bs = '<li class="previous disabled unavailable"><a> {0} </a></li>'
_bs4 = '<li class="page-item disabled"><span class="page-link"> {0} \
</span></li>'
_fa = '<li class="unavailable"><a>{0}</a></li>'
PREV_DISABLED_PAGES = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation=_fa,
)
_bs = '<li class="next disabled"><a> {0} </a></li>'
_bs4 = '<li class="page-item disabled"><span class="page-link"> {0} \
</span></li>'
_fa = '<li class="unavailable"><a>{0}</a></li>'
NEXT_DISABLED_PAGES = dict(bootstrap=_bs,
bootstrap2=_bs,
bootstrap3=_bs,
bootstrap4=_bs4,
foundation=_fa,
)
PREV_LABEL = 'previous'
NEXT_LABEL = 'next'
RECORD_NAME = 'data'
DISPLAY_MSG = '''displaying <b>{start} - {end}</b> {record_name} in
total <b>{total}</b>'''
SEARCH_MSG = '''found <b>{found}</b> {record_name},
displaying <b>{start} - {end}</b>'''
_bs4 = '<nav aria-label="..."><ul class="pagination{0}{1}">'
CSS_LINKS = dict(bootstrap='<div class="pagination{0}{1}"><ul>',
bootstrap2='<div class="pagination{0}{1}"><ul>',
bootstrap3='<ul class="pagination{0}{1}">',
bootstrap4=_bs4,
foundation='<ul class="pagination{0}{1}">',
)
CSS_LINKS_END = dict(bootstrap='</ul></div>',
bootstrap2='</ul></div>',
bootstrap3='</ul>',
bootstrap4='</ul></nav>',
foundation='</ul>',
)
# foundation alignment
F_ALIGNMENT = '<div class="pagination-{0}">'
def get_parameter(param=None, args=None, default='page'):
if not args:
args = request.args.copy()
args.update(request.view_args.copy())
if not param:
pk = 'page_parameter' if default == 'page' else 'per_page_parameter'
param = args.get(pk)
if not param:
param = current_app.config.get(pk.upper())
return param or default
def get_page_parameter(param=None, args=None):
return get_parameter(param, args, 'page')
def get_per_page_parameter(param=None, args=None):
return get_parameter(param, args, 'per_page')
def get_page_args(page_parameter=None, per_page_parameter=None,
for_test=False):
'''Parameter lookup order: 1. the passed parameter, 2. request.args,
3. the app config value. If for_test is True, return only the resolved
page and per-page parameter names.'''
args = request.args.copy()
args.update(request.view_args.copy())
page_name = get_page_parameter(page_parameter, args)
per_page_name = get_per_page_parameter(per_page_parameter, args)
if for_test:
return page_name, per_page_name
page = int(args.get(page_name, 1))
per_page = args.get(per_page_name)
if not per_page:
per_page = current_app.config.get(per_page_name.upper(), 10)
else:
per_page = int(per_page)
offset = (page - 1) * per_page
return page, per_page, offset
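# Usage sketch (illustrative; assumes a Flask request context and an `items`
# sequence defined elsewhere):
#
#     page, per_page, offset = get_page_args()
#     page_items = items[offset:offset + per_page]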
class Pagination(object):
"""A simple pagination extension for flask."""
def __init__(self, found=0, **kwargs):
self.found = found
page_parameter = kwargs.get('page_parameter')
if not page_parameter:
page_parameter = get_page_parameter()
self.page_parameter = page_parameter
self.page = kwargs.get(self.page_parameter, 1)
per_page_param = kwargs.get('per_page_parameter')
if not per_page_param:
per_page_param = get_per_page_parameter()
self.per_page_parameter = per_page_param
self.per_page = kwargs.get(per_page_param, 10)
self.inner_window = kwargs.get('inner_window', 2)
self.outer_window = kwargs.get('outer_window', 1)
self.prev_label = kwargs.get('prev_label') or PREV_LABEL
self.next_label = kwargs.get('next_label') or NEXT_LABEL
self.search = kwargs.get('search', False)
self.total = kwargs.get('total', 0)
self.format_total = kwargs.get('format_total', False)
self.format_number = kwargs.get('format_number', False)
self.display_msg = kwargs.get('display_msg') or DISPLAY_MSG
self.search_msg = kwargs.get('search_msg') or SEARCH_MSG
self.record_name = kwargs.get('record_name') or RECORD_NAME
self.css_framework = kwargs.get('css_framework', 'bootstrap').lower()
if self.css_framework not in CURRENT_PAGES:
self.css_framework = 'bootstrap'
self.bs_version = kwargs.get('bs_version') or 2
if self.css_framework.startswith('bootstrap'):
if self.bs_version in (3, '3'):
self.css_framework = 'bootstrap3'
elif self.bs_version in (4, '4'):
self.css_framework = 'bootstrap4'
self.link_size = kwargs.get('link_size', '')
if self.link_size:
if self.css_framework == 'foundation':
self.link_size = ''
else:
self.link_size = ' pagination-{0}'.format(self.link_size)
self.alignment = kwargs.get('alignment', '')
if self.alignment and self.css_framework.startswith('bootstrap'):
if self.css_framework == 'bootstrap4':
if self.alignment == 'center':
self.alignment = ' justify-content-center'
elif self.alignment in ('right', 'end'):
self.alignment = ' justify-content-end'
else:
self.alignment = ' pagination-{0}'.format(self.alignment)
self.href = kwargs.get('href', None)
self.anchor = kwargs.get('anchor', None)
self.show_single_page = kwargs.get('show_single_page', True)
self.link = LINK
if self.css_framework == 'bootstrap4':
self.link = BS4_LINK
self.current_page_fmt = CURRENT_PAGES[self.css_framework]
self.link_css_fmt = CSS_LINKS[self.css_framework]
self.gap_marker_fmt = GAP_MARKERS[self.css_framework]
self.prev_disabled_page_fmt = PREV_DISABLED_PAGES[self.css_framework]
self.next_disabled_page_fmt = NEXT_DISABLED_PAGES[self.css_framework]
self.prev_page_fmt = PREV_PAGES[self.css_framework]
self.next_page_fmt = NEXT_PAGES[self.css_framework]
self.css_end_fmt = CSS_LINKS_END[self.css_framework]
self.init_values()
def page_href(self, page):
if self.href:
url = self.href.format(page or 1)
else:
self.args[self.page_parameter] = page
if self.anchor:
url = url_for(self.endpoint, _anchor=self.anchor, **self.args)
else:
url = url_for(self.endpoint, **self.args)
return url
def init_values(self):
current_total = self.found if self.search else self.total
pages = divmod(current_total, self.per_page)
self.total_pages = pages[0] + 1 if pages[1] else pages[0]
self.has_prev = self.page > 1
self.has_next = self.page < self.total_pages
args = request.args.copy()
args.update(request.view_args.copy())
self.args = {}
for k, v in args.lists():
if len(v) == 1:
self.args[k] = v[0]
else:
self.args[k] = v
self.endpoint = request.endpoint
@property
def prev_page(self):
if self.has_prev:
page = self.page - 1 if self.page > 2 else None
url = self.page_href(page)
return self.prev_page_fmt.format(url, self.prev_label)
return self.prev_disabled_page_fmt.format(self.prev_label)
@property
def next_page(self):
if self.has_next:
url = self.page_href(self.page + 1)
return self.next_page_fmt.format(url, self.next_label)
return self.next_disabled_page_fmt.format(self.next_label)
@property
def first_page(self):
# current page is first page
if self.has_prev:
return self.link.format(self.page_href(None), 1)
return self.current_page_fmt.format(1)
@property
def last_page(self):
if self.has_next:
url = self.page_href(self.total_pages)
return self.link.format(url, self.total_pages)
return self.current_page_fmt.format(self.page)
@property
def pages(self):
if self.total_pages < self.inner_window * 2 - 1:
return range(1, self.total_pages + 1)
pages = []
win_from = self.page - self.inner_window
win_to = self.page + self.inner_window
if win_to > self.total_pages:
win_from -= win_to - self.total_pages
win_to = self.total_pages
if win_from < 1:
win_to = win_to + 1 - win_from
win_from = 1
if win_to > self.total_pages:
win_to = self.total_pages
if win_from > self.inner_window:
pages.extend(range(1, self.outer_window + 1 + 1))
pages.append(None)
else:
pages.extend(range(1, win_to + 1))
if win_to < self.total_pages - self.inner_window + 1:
if win_from > self.inner_window:
pages.extend(range(win_from, win_to + 1))
pages.append(None)
if self.outer_window == 0:
pages.extend(range(self.total_pages, self.total_pages + 1))
else:
pages.extend(range(self.total_pages - 1, self.total_pages + 1))
elif win_from > self.inner_window:
pages.extend(range(win_from, self.total_pages + 1))
else:
pages.extend(range(win_to + 1, self.total_pages + 1))
return pages
def single_page(self, page):
if page == self.page:
return self.current_page_fmt.format(page)
if page == 1:
return self.first_page
if page == self.total_pages:
return self.last_page
return self.link.format(self.page_href(page), page)
def _get_single_page_link(self):
s = [self.link_css_fmt.format(self.link_size, self.alignment)]
s.append(self.prev_page)
s.append(self.single_page(1))
s.append(self.next_page)
s.append(self.css_end_fmt)
if self.css_framework == 'foundation' and self.alignment:
s.insert(0, F_ALIGNMENT.format(self.alignment))
s.append('</div>')
return Markup(''.join(s))
@property
def links(self):
"""Get all the pagination links."""
if self.total_pages <= 1:
if self.show_single_page:
return self._get_single_page_link()
return ''
s = [self.link_css_fmt.format(self.link_size, self.alignment)]
s.append(self.prev_page)
for page in self.pages:
s.append(self.single_page(page) if page else self.gap_marker_fmt)
s.append(self.next_page)
s.append(self.css_end_fmt)
if self.css_framework == 'foundation' and self.alignment:
s.insert(0, F_ALIGNMENT.format(self.alignment))
s.append('</div>')
return Markup(''.join(s))
@property
def info(self):
"""Get the pagination information."""
start = 1 + (self.page - 1) * self.per_page
end = start + self.per_page - 1
if end > self.total:
end = self.total if not self.search else self.found
if start > self.total:
start = self.total if not self.search else self.found
s = ['<div class="pagination-page-info">']
page_msg = self.search_msg if self.search else self.display_msg
if self.format_total:
total_text = '{0:,}'.format(self.total)
else:
total_text = '{0}'.format(self.total)
if self.format_number:
start_text = '{0:,}'.format(start)
end_text = '{0:,}'.format(end)
else:
start_text = start
end_text = end
s.append(page_msg.format(found=self.found,
total=total_text,
start=start_text,
end=end_text,
record_name=self.record_name,
)
)
s.append('</div>')
return Markup(''.join(s))
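# Usage sketch (illustrative; assumes a Flask app, a `User` query object and
# a `users.html` template that renders `pagination.info` and
# `pagination.links`):
#
#     @app.route('/users/')
#     def users_view():
#         page, per_page, offset = get_page_args()
#         total = User.query.count()
#         users = User.query.offset(offset).limit(per_page)
#         pagination = Pagination(page=page, per_page=per_page, total=total,
#                                 css_framework='bootstrap4',
#                                 record_name='users')
#         return render_template('users.html', users=users,
#                                pagination=pagination)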
"""
The rechunk module defines:
intersect_chunks: a function for
converting chunks to new dimensions
rechunk: a function to convert the blocks
of an existing dask array to new chunks or blockshape
"""
from __future__ import absolute_import, division, print_function
import heapq
from itertools import product, chain, count
from operator import getitem, add, mul, itemgetter
import numpy as np
from toolz import merge, accumulate, reduce
from ..base import tokenize
from .core import concatenate3, Array, normalize_chunks
def cumdims_label(chunks, const):
""" Internal utility for cumulative sum with label.
>>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE
[(('n', 0), ('n', 5), ('n', 8), ('n', 11)),
(('n', 0), ('n', 2), ('n', 4), ('n', 5))]
"""
return [tuple(zip((const,) * (1 + len(bds)),
accumulate(add, (0,) + bds)))
for bds in chunks]
def _breakpoints(cumold, cumnew):
"""
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _breakpoints(new[0], old[0])
(('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))
>>> _breakpoints(new[1], old[1])
(('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))
"""
return tuple(sorted(cumold + cumnew, key=itemgetter(1)))
def _intersect_1d(breaks):
"""
Internal utility to intersect chunks for 1d after preprocessing.
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(1, slice(0, 2, None)), (2, slice(0, 1, None))]]
>>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(0, slice(2, 4, None))],
[(0, slice(4, 5, None))]]
Parameters
----------
breaks: list of tuples
Each tuple is ('o', 8) or ('n', 8)
These are pairs of 'o' old or new 'n'
indicator with a corresponding cumulative sum.
Uses 'o' and 'n' to make new tuples of slices for
the new block crosswalk to old blocks.
"""
start = 0
last_end = 0
old_idx = 0
ret = []
ret_next = []
for idx in range(1, len(breaks)):
label, br = breaks[idx]
last_label, last_br = breaks[idx - 1]
if last_label == 'n':
if ret_next:
ret.append(ret_next)
ret_next = []
if last_label == 'o':
start = 0
else:
start = last_end
end = br - last_br + start
last_end = end
if br == last_br:
continue
ret_next.append((old_idx, slice(start, end)))
if label == 'o':
old_idx += 1
start = 0
if ret_next:
ret.append(ret_next)
return ret
def intersect_chunks(old_chunks, new_chunks):
"""
Make dask.array slices as intersection of old and new chunks.
>>> intersections = intersect_chunks(((4, 4), (2,)),
... ((8,), (1, 1)))
>>> list(intersections) # doctest: +NORMALIZE_WHITESPACE
[(((0, slice(0, 4, None)), (0, slice(0, 1, None))),
((1, slice(0, 4, None)), (0, slice(0, 1, None)))),
(((0, slice(0, 4, None)), (0, slice(1, 2, None))),
((1, slice(0, 4, None)), (0, slice(1, 2, None))))]
Parameters
----------
old_chunks : iterable of tuples
block sizes along each dimension (convert from old_chunks)
new_chunks: iterable of tuples
block sizes along each dimension (converts to new_chunks)
"""
cmo = cumdims_label(old_chunks, 'o')
cmn = cumdims_label(new_chunks, 'n')
sums = [sum(o) for o in old_chunks]
sums2 = [sum(n) for n in new_chunks]
if sums != sums2:
raise ValueError('Cannot change dimensions from %r to %r' % (sums, sums2))
old_to_new = [_intersect_1d(_breakpoints(cm[0], cm[1]))
for cm in zip(cmo, cmn)]
cross1 = product(*old_to_new)
cross = chain(tuple(product(*cr)) for cr in cross1)
return cross
def blockdims_dict_to_tuple(old, new):
"""
>>> blockdims_dict_to_tuple((4, 5, 6), {1: 10})
(4, 10, 6)
"""
newlist = list(old)
for k, v in new.items():
newlist[k] = v
return tuple(newlist)
def blockshape_dict_to_tuple(old_chunks, d):
"""
>>> blockshape_dict_to_tuple(((4, 4), (5, 5)), {1: 3})
((4, 4), (3, 3, 3, 1))
"""
shape = tuple(map(sum, old_chunks))
new_chunks = list(old_chunks)
for k, v in d.items():
div = shape[k] // v
mod = shape[k] % v
new_chunks[k] = (v,) * div + ((mod,) if mod else ())
return tuple(new_chunks)
DEFAULT_THRESHOLD = 4
DEFAULT_BLOCK_SIZE_LIMIT = 1e8
def rechunk(x, chunks, threshold=DEFAULT_THRESHOLD,
block_size_limit=DEFAULT_BLOCK_SIZE_LIMIT):
"""
Convert blocks in dask array x for new chunks.
>>> import dask.array as da
>>> a = np.random.uniform(0, 1, 7**4).reshape((7,) * 4)
>>> x = da.from_array(a, chunks=((2, 3, 2),)*4)
>>> x.chunks
((2, 3, 2), (2, 3, 2), (2, 3, 2), (2, 3, 2))
>>> y = rechunk(x, chunks=((2, 4, 1), (4, 2, 1), (4, 3), (7,)))
>>> y.chunks
((2, 4, 1), (4, 2, 1), (4, 3), (7,))
chunks also accept dict arguments mapping axis to blockshape
>>> y = rechunk(x, chunks={1: 2}) # rechunk axis 1 with blockshape 2
Parameters
----------
x: dask array
chunks: tuple
The new block dimensions to create
threshold: int
The graph growth factor under which we don't bother
introducing an intermediate step
block_size_limit: int
The maximum block size (in bytes) we want to produce during an
intermediate step
"""
threshold = threshold or DEFAULT_THRESHOLD
block_size_limit = block_size_limit or DEFAULT_BLOCK_SIZE_LIMIT
if isinstance(chunks, dict):
if not chunks or isinstance(next(iter(chunks.values())), int):
chunks = blockshape_dict_to_tuple(x.chunks, chunks)
else:
chunks = blockdims_dict_to_tuple(x.chunks, chunks)
if isinstance(chunks, (tuple, list)):
chunks = tuple(lc if lc is not None else rc
for lc, rc in zip(chunks, x.chunks))
chunks = normalize_chunks(chunks, x.shape)
if chunks == x.chunks:
return x
ndim = x.ndim
if not len(chunks) == ndim or tuple(map(sum, chunks)) != x.shape:
raise ValueError("Provided chunks are not consistent with shape")
steps = plan_rechunk(x.chunks, chunks, x.dtype.itemsize,
threshold, block_size_limit)
for c in steps:
x = _compute_rechunk(x, c)
return x
def _number_of_blocks(chunks):
return reduce(mul, map(len, chunks))
def _largest_block_size(chunks):
return reduce(mul, map(max, chunks))
def estimate_graph_size(old_chunks, new_chunks):
""" Estimate the graph size during a rechunk computation.
"""
# Estimate the number of intermediate blocks that will be produced
# (we don't use intersect_chunks() which is much more expensive)
crossed_size = reduce(mul, (len(oc) + len(nc)
for oc, nc in zip(old_chunks, new_chunks)))
return crossed_size
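# Worked example: for old_chunks=((4, 4), (2,)) and new_chunks=((8,), (1, 1))
# the estimate is (2 + 1) * (1 + 2) = 9 intermediate blocks.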
def divide_to_width(desired_chunks, max_width):
""" Minimally divide the given chunks so as to make the largest chunk
width less or equal than *max_width*.
"""
chunks = []
for c in desired_chunks:
nb_divides = int(np.ceil(c / max_width))
for i in range(nb_divides):
n = c // (nb_divides - i)
chunks.append(n)
c -= n
assert c == 0
return tuple(chunks)
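# Worked example: divide_to_width((10, 3), 4) splits the 10 into (3, 3, 4)
# and leaves the 3 untouched, returning (3, 3, 4, 3); every chunk is <= 4.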
def merge_to_number(desired_chunks, max_number):
""" Minimally merge the given chunks so as to drop the number of
chunks below *max_number*, while minimizing the largest width.
"""
if len(desired_chunks) <= max_number:
return desired_chunks
distinct = set(desired_chunks)
if len(distinct) == 1:
# Fast path for homogeneous target, also ensuring a regular result
w = distinct.pop()
n = len(desired_chunks)
total = n * w
desired_width = total // max_number
width = w * (desired_width // w)
adjust = (total - max_number * width) // w
return (width + w,) * adjust + (width,) * (max_number - adjust)
desired_width = sum(desired_chunks) // max_number
nmerges = len(desired_chunks) - max_number
heap = [(desired_chunks[i] + desired_chunks[i + 1], i, i + 1)
for i in range(len(desired_chunks) - 1)]
heapq.heapify(heap)
chunks = list(desired_chunks)
while nmerges > 0:
# Find smallest interval to merge
width, i, j = heapq.heappop(heap)
# If interval was made invalid by another merge, recompute
# it, re-insert it and retry.
if chunks[j] == 0:
j += 1
while chunks[j] == 0:
j += 1
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
elif chunks[i] + chunks[j] != width:
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
# Merge
assert chunks[i] != 0
chunks[i] = 0 # mark deleted
chunks[j] = width
nmerges -= 1
return tuple(filter(None, chunks))
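# Worked example (homogeneous fast path): merge_to_number((1, 1, 1, 1, 1), 3)
# targets a width of 5 // 3 = 1, then widens two chunks to absorb the excess,
# returning (2, 2, 1).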
def find_merge_rechunk(old_chunks, new_chunks, block_size_limit):
"""
Find an intermediate rechunk that would merge some adjacent blocks
together in order to get us nearer the *new_chunks* target, without
violating the *block_size_limit* (in number of elements).
"""
ndim = len(old_chunks)
old_largest_width = [max(c) for c in old_chunks]
new_largest_width = [max(c) for c in new_chunks]
graph_size_effect = {
dim: len(nc) / len(oc)
for dim, (oc, nc) in enumerate(zip(old_chunks, new_chunks))
}
block_size_effect = {
dim: new_largest_width[dim] / old_largest_width[dim]
for dim in range(ndim)
}
# Our goal is to reduce the number of nodes in the rechunk graph
# by merging some adjacent chunks, so consider dimensions where we can
# reduce the # of chunks
merge_candidates = [dim for dim in range(ndim)
if graph_size_effect[dim] <= 1.0]
# Merging along each dimension reduces the graph size by a certain factor
# and increases memory largest block size by a certain factor.
# We want to optimize the graph size while staying below the given
# block_size_limit. This is in effect a knapsack problem, except with
# multiplicative values and weights. Just use a greedy algorithm
# by trying dimensions in decreasing value / weight order.
def key(k):
gse = graph_size_effect[k]
bse = block_size_effect[k]
if bse == 1:
bse = 1 + 1e-9
return np.log(gse) / np.log(bse)
sorted_candidates = sorted(merge_candidates, key=key)
largest_block_size = reduce(mul, old_largest_width)
chunks = list(old_chunks)
memory_limit_hit = False
for dim in sorted_candidates:
# Examine this dimension for possible graph reduction
new_largest_block_size = (
largest_block_size * new_largest_width[dim] // old_largest_width[dim])
if new_largest_block_size <= block_size_limit:
# Full replacement by new chunks is possible
chunks[dim] = new_chunks[dim]
largest_block_size = new_largest_block_size
else:
# Try a partial rechunk, dividing the new chunks into
# smaller pieces
largest_width = old_largest_width[dim]
chunk_limit = int(block_size_limit * largest_width / largest_block_size)
c = divide_to_width(new_chunks[dim], chunk_limit)
if len(c) <= len(old_chunks[dim]):
                # We managed to reduce the number of blocks, so do it
chunks[dim] = c
largest_block_size = largest_block_size * max(c) // largest_width
memory_limit_hit = True
assert largest_block_size == _largest_block_size(chunks)
assert largest_block_size <= block_size_limit
return tuple(chunks), memory_limit_hit
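# Illustrative note (not from the original source): the greedy key above is
# log(graph_size_effect) / log(block_size_effect).  A dimension that halves the
# chunk count while doubling the largest block width scores log(0.5) / log(2) == -1;
# one that quarters the chunk count for the same doubling scores -2, sorts
# first, and is therefore merged first.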
def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):
"""
Find an intermediate rechunk that would split some chunks to
get us nearer *new_chunks*, without violating the *graph_size_limit*.
"""
ndim = len(old_chunks)
chunks = list(old_chunks)
for dim in range(ndim):
graph_size = estimate_graph_size(chunks, new_chunks)
if graph_size > graph_size_limit:
break
if len(old_chunks[dim]) > len(new_chunks[dim]):
# It's not interesting to split
continue
# Merge the new chunks so as to stay within the graph size budget
max_number = int(len(old_chunks[dim]) * graph_size_limit / graph_size)
c = merge_to_number(new_chunks[dim], max_number)
assert len(c) <= max_number
        # Consider the merge successful if its result has at least as many
        # chunks and no larger a maximum width than the old chunks
if len(c) >= len(old_chunks[dim]) and max(c) <= max(old_chunks[dim]):
chunks[dim] = c
return tuple(chunks)
def plan_rechunk(old_chunks, new_chunks, itemsize,
threshold=DEFAULT_THRESHOLD,
block_size_limit=DEFAULT_BLOCK_SIZE_LIMIT):
""" Plan an iterative rechunking from *old_chunks* to *new_chunks*.
The plan aims to minimize the rechunk graph size.
Parameters
----------
itemsize: int
The item size of the array
threshold: int
The graph growth factor under which we don't bother
introducing an intermediate step
block_size_limit: int
The maximum block size (in bytes) we want to produce during an
intermediate step
"""
ndim = len(new_chunks)
steps = []
if ndim <= 1 or not all(new_chunks):
# Trivial array => no need for an intermediate
return steps + [new_chunks]
    # Convert the limit to a number of elements
block_size_limit /= itemsize
# Fix block_size_limit if too small for either old_chunks or new_chunks
largest_old_block = _largest_block_size(old_chunks)
largest_new_block = _largest_block_size(new_chunks)
block_size_limit = max([block_size_limit,
largest_old_block,
largest_new_block,
])
# The graph size above which to optimize
graph_size_threshold = threshold * (_number_of_blocks(old_chunks) +
_number_of_blocks(new_chunks))
current_chunks = old_chunks
first_pass = True
while True:
graph_size = estimate_graph_size(current_chunks, new_chunks)
if graph_size < graph_size_threshold:
break
if first_pass:
chunks = current_chunks
else:
# We hit the block_size_limit in a previous merge pass =>
# accept a significant increase in graph size in exchange for
# 1) getting nearer the goal 2) reducing the largest block size
            # to make room for the following merge.
# To see this pass in action, make the block_size_limit very small.
chunks = find_split_rechunk(current_chunks, new_chunks,
graph_size * threshold)
chunks, memory_limit_hit = find_merge_rechunk(chunks, new_chunks,
block_size_limit)
if chunks == current_chunks or chunks == new_chunks:
break
steps.append(chunks)
current_chunks = chunks
if not memory_limit_hit:
break
first_pass = False
return steps + [new_chunks]
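# Hedged usage sketch (illustrative only; the intermediate steps chosen depend
# on the threshold and block size limit):
#
#     plan = plan_rechunk(((1,) * 1000, (1000,)),   # old: thin rows
#                         ((1000,), (1,) * 1000),   # new: thin columns
#                         itemsize=8)
#
# The returned list always ends with the target chunks; any earlier entries are
# intermediate chunkings chosen to keep the task graph small.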
def _compute_rechunk(x, chunks):
""" Compute the rechunk of *x* to the given *chunks*.
"""
ndim = x.ndim
crossed = intersect_chunks(x.chunks, chunks)
x2 = dict()
intermediates = dict()
token = tokenize(x, chunks)
merge_temp_name = 'rechunk-merge-' + token
split_temp_name = 'rechunk-split-' + token
split_name_suffixes = count()
# Pre-allocate old block references, to allow re-use and reduce the
# graph's memory footprint a bit.
old_blocks = np.empty([len(c) for c in x.chunks], dtype='O')
for index in np.ndindex(old_blocks.shape):
old_blocks[index] = (x.name,) + index
# Iterate over all new blocks
new_index = product(*(range(len(c)) for c in chunks))
for new_idx, cross1 in zip(new_index, crossed):
key = (merge_temp_name,) + new_idx
old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]
subdims1 = [len(set(old_block_indices[i]))
for i in range(ndim)]
rec_cat_arg = np.empty(subdims1, dtype='O')
rec_cat_arg_flat = rec_cat_arg.flat
# Iterate over the old blocks required to build the new block
for rec_cat_index, ind_slices in enumerate(cross1):
old_block_index, slices = zip(*ind_slices)
name = (split_temp_name, next(split_name_suffixes))
intermediates[name] = (getitem, old_blocks[old_block_index], slices)
rec_cat_arg_flat[rec_cat_index] = name
assert rec_cat_index == rec_cat_arg.size - 1
# New block is formed by concatenation of sliced old blocks
x2[key] = (concatenate3, rec_cat_arg.tolist())
assert new_idx == tuple(len(c) - 1 for c in chunks)
del old_blocks, new_index
x2 = merge(x.dask, x2, intermediates)
return Array(x2, merge_temp_name, chunks, dtype=x.dtype)
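# Illustrative note (not from the original source): for every new block the
# graph gains one 'rechunk-split-*' task per overlapping old block (a getitem
# slice of that block) plus a single 'rechunk-merge-*' task that reassembles
# the slices with concatenate3.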
class _PrettyBlocks(object):
def __init__(self, blocks):
self.blocks = blocks
def __str__(self):
runs = []
run = []
repeats = 0
for c in self.blocks:
if run and run[-1] == c:
if repeats == 0 and len(run) > 1:
runs.append((None, run[:-1]))
run = run[-1:]
repeats += 1
else:
if repeats > 0:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
run = []
repeats = 0
run.append(c)
if run:
if repeats == 0:
runs.append((None, run))
else:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
parts = []
for repeats, run in runs:
if repeats is None:
parts.append(str(run))
else:
parts.append("%d*[%s]" % (repeats, run))
return " | ".join(parts)
__repr__ = __str__
def format_blocks(blocks):
"""
Pretty-format *blocks*.
>>> format_blocks((10, 10, 10))
3*[10]
>>> format_blocks((2, 3, 4))
[2, 3, 4]
>>> format_blocks((10, 10, 5, 6, 2, 2, 2, 7))
2*[10] | [5, 6] | 3*[2] | [7]
"""
assert (isinstance(blocks, tuple) and
all(isinstance(x, int) for x in blocks))
return _PrettyBlocks(blocks)
def format_chunks(chunks):
"""
>>> format_chunks((10 * (3,), 3 * (10,)))
(10*[3], 3*[10])
"""
assert isinstance(chunks, tuple)
return tuple(format_blocks(c) for c in chunks)
def format_plan(plan):
"""
>>> format_plan([((10, 10, 10), (15, 15)), ((30,), (10, 10, 10))])
[(3*[10], 2*[15]), ([30], 3*[10])]
"""
return [format_chunks(c) for c in plan]
|
|
"""This module contains analysis module helpers to solve path constraints."""
from functools import lru_cache
from typing import Dict, Tuple, Union
from z3 import sat, unknown, FuncInterp
import z3
from mythril.analysis.analysis_args import analysis_args
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.state.constraints import Constraints
from mythril.laser.ethereum.transaction import BaseTransaction
from mythril.laser.smt import UGE, Optimize, symbol_factory
from mythril.laser.ethereum.time_handler import time_handler
from mythril.exceptions import UnsatError
from mythril.laser.ethereum.transaction.transaction_models import (
ContractCreationTransaction,
)
import logging
log = logging.getLogger(__name__)
# LRU cache works great when used in powers of 2
@lru_cache(maxsize=2 ** 23)
def get_model(constraints, minimize=(), maximize=(), enforce_execution_time=True):
"""
:param constraints:
:param minimize:
:param maximize:
    :param enforce_execution_time: Bool which, when True, also caps the solver timeout by the time remaining from --execution-timeout
:return:
"""
s = Optimize()
timeout = analysis_args.solver_timeout
if enforce_execution_time:
timeout = min(timeout, time_handler.time_remaining() - 500)
if timeout <= 0:
raise UnsatError
s.set_timeout(timeout)
for constraint in constraints:
if type(constraint) == bool and not constraint:
raise UnsatError
constraints = [constraint for constraint in constraints if type(constraint) != bool]
for constraint in constraints:
s.add(constraint)
for e in minimize:
s.minimize(e)
for e in maximize:
s.maximize(e)
result = s.check()
if result == sat:
return s.model()
    elif result == unknown:
        log.debug("Timeout encountered while solving expression using z3")
    # Both unsat and unknown results end up here: no usable model was found
    raise UnsatError
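# Hedged usage sketch (illustrative only; `path_constraints` stands in for a
# hashable sequence of SMT expressions, as required by the lru_cache above):
#
#     try:
#         model = get_model(tuple(path_constraints))
#     except UnsatError:
#         pass  # the path is unsatisfiable or the solver timed out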
def pretty_print_model(model):
""" Pretty prints a z3 model
:param model:
:return:
"""
ret = ""
for d in model.decls():
if type(model[d]) == FuncInterp:
condition = model[d].as_list()
ret += "%s: %s\n" % (d.name(), condition)
continue
try:
condition = "0x%x" % model[d].as_long()
except:
condition = str(z3.simplify(model[d]))
ret += "%s: %s\n" % (d.name(), condition)
return ret
def get_transaction_sequence(
global_state: GlobalState, constraints: Constraints
) -> Dict:
"""Generate concrete transaction sequence.
:param global_state: GlobalState to generate transaction sequence for
:param constraints: list of constraints used to generate transaction sequence
"""
transaction_sequence = global_state.world_state.transaction_sequence
concrete_transactions = []
tx_constraints, minimize = _set_minimisation_constraints(
transaction_sequence, constraints.copy(), [], 5000, global_state.world_state
)
try:
model = get_model(tx_constraints, minimize=minimize)
except UnsatError:
raise UnsatError
# Include creation account in initial state
# Note: This contains the code, which should not exist until after the first tx
initial_world_state = transaction_sequence[0].world_state
initial_accounts = initial_world_state.accounts
for transaction in transaction_sequence:
concrete_transaction = _get_concrete_transaction(model, transaction)
concrete_transactions.append(concrete_transaction)
min_price_dict = {} # type: Dict[str, int]
for address in initial_accounts.keys():
min_price_dict[address] = model.eval(
initial_world_state.starting_balances[
symbol_factory.BitVecVal(address, 256)
].raw,
model_completion=True,
).as_long()
concrete_initial_state = _get_concrete_state(initial_accounts, min_price_dict)
steps = {"initialState": concrete_initial_state, "steps": concrete_transactions}
return steps
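# Hedged sketch of the returned structure (field names taken from the helper
# functions below; the concrete values are illustrative):
#
#     {
#         "initialState": {"accounts": {"0x...": {"nonce": 0, "code": "0x...",
#                                                 "storage": "{}", "balance": "0x0"}}},
#         "steps": [{"input": "0x...", "value": "0x0",
#                    "origin": "0x...", "address": "0x..."}],
#     }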
def _get_concrete_state(initial_accounts: Dict, min_price_dict: Dict[str, int]):
""" Gets a concrete state """
accounts = {}
for address, account in initial_accounts.items():
# Skip empty default account
data = dict() # type: Dict[str, Union[int, str]]
data["nonce"] = account.nonce
data["code"] = account.code.bytecode
data["storage"] = str(account.storage)
data["balance"] = hex(min_price_dict.get(address, 0))
accounts[hex(address)] = data
return {"accounts": accounts}
def _get_concrete_transaction(model: z3.Model, transaction: BaseTransaction):
""" Gets a concrete transaction from a transaction and z3 model"""
# Get concrete values from transaction
address = hex(transaction.callee_account.address.value)
value = model.eval(transaction.call_value.raw, model_completion=True).as_long()
caller = "0x" + (
"%x" % model.eval(transaction.caller.raw, model_completion=True).as_long()
).zfill(40)
input_ = ""
if isinstance(transaction, ContractCreationTransaction):
address = ""
input_ += transaction.code.bytecode
input_ += "".join(
[
hex(b)[2:] if len(hex(b)) % 2 == 0 else "0" + hex(b)[2:]
for b in transaction.call_data.concrete(model)
]
)
# Create concrete transaction dict
concrete_transaction = dict() # type: Dict[str, str]
concrete_transaction["input"] = "0x" + input_
concrete_transaction["value"] = "0x%x" % value
# Fixme: base origin assignment on origin symbol
concrete_transaction["origin"] = caller
concrete_transaction["address"] = "%s" % address
return concrete_transaction
def _set_minimisation_constraints(
transaction_sequence, constraints, minimize, max_size, world_state
) -> Tuple[Constraints, tuple]:
""" Set constraints that minimise key transaction values
Constraints generated:
- Upper bound on calldata size
    - Minimisation of call values and calldata sizes
    :param transaction_sequence: Transactions for which the constraints should be applied
    :param constraints: The constraints array to which the generated constraints are added
    :param minimize: The minimisation array to which variables that should be minimised are added
    :param max_size: The max size of the calldata array
    :param world_state: The world state whose starting balances are bounded
    :return: updated constraints, minimize
"""
for transaction in transaction_sequence:
# Set upper bound on calldata size
max_calldata_size = symbol_factory.BitVecVal(max_size, 256)
constraints.append(UGE(max_calldata_size, transaction.call_data.calldatasize))
# Minimize
minimize.append(transaction.call_data.calldatasize)
minimize.append(transaction.call_value)
constraints.append(
UGE(
symbol_factory.BitVecVal(1000000000000000000000, 256),
world_state.starting_balances[transaction.caller],
)
)
for account in world_state.accounts.values():
# Lazy way to prevent overflows and to ensure "reasonable" balances
# Each account starts with less than 100 ETH
constraints.append(
UGE(
symbol_factory.BitVecVal(100000000000000000000, 256),
world_state.starting_balances[account.address],
)
)
return constraints, tuple(minimize)
|
|
from biicode.common.exception import NotInStoreException, ForbiddenException,\
NotFoundException
from biicode.server.model.payment.user_subscription import CURRENT_PLANS
class PlanUpgradeNeeded(ForbiddenException):
    '''Exception raised when trying to add a new contributor or private
    block that the current plan does not allow'''
pass
class PlanDowngradeNotAllowed(ForbiddenException):
    '''Exception raised when a plan downgrade is not allowed because the
    current contributors or private blocks exceed the destination plan's limits'''
pass
class SubscriptionError(ForbiddenException):
    '''No valid current subscription'''
pass
class Security(object):
'''define authorized methods'''
def __init__(self, auth_user, store):
"""Auth user is the user doing the action, the user authenticated"""
self.auth_user = auth_user
self._store = store
# -------------- GENERIC CHECKS -------------#
def check_act_as_admin_of(self, brl_user):
if self.auth_user != brl_user and self._can_act_as_admin_of(brl_user) is not True:
raise ForbiddenException("Permission denied: Updating user %s" % brl_user)
return
check_read_user_subscription = check_act_as_admin_of
check_update_card = check_act_as_admin_of
check_handle_coupons = check_act_as_admin_of
def check_are_own_user(self, brl_user):
"""Auth user can read brl_user subscription??"""
if brl_user != self.auth_user:
raise ForbiddenException("Permission denied")
return
def check_can_change_current_subscription(self, brl_user, plan_id):
self.check_act_as_admin_of(brl_user)
        # If it's a downgrade, check that the destination plan
        # fits our current contributors and private blocks
# Check destination plan is active
dest_plan = CURRENT_PLANS[plan_id]
if not dest_plan["active"]:
raise ForbiddenException("Plan is no longer available")
users, num_private_blocks = self._get_subscription_utilisation_status(brl_user)
if dest_plan["num_users"] != -1: # No unlimited
if len(users) > dest_plan["num_users"]:
raise PlanDowngradeNotAllowed("You are currently using %d users, "
"reduce to %d before plan downgrade" %
(len(users), dest_plan["num_users"]))
if dest_plan["num_private_blocks"] != -1: # No unlimited
if num_private_blocks > dest_plan["num_private_blocks"]:
raise PlanDowngradeNotAllowed("You have %d private blocks, "
"reduce it to %d before plan downgrade" %
(num_private_blocks, dest_plan["num_private_blocks"]))
def check_subscription_limit_reached(self, brl_user, brl_new_user=None):
contributors, num_private_blocks = self._get_subscription_utilisation_status(brl_user)
user_subscription = self._store.read_user_subscription(brl_user)
print user_subscription.max_users
if brl_new_user:
contributors.add(brl_new_user)
# Check contributors
if len(contributors) > user_subscription.max_users:
more = " more" if user_subscription.max_users > 0 else ""
raise PlanUpgradeNeeded("Upgrade plan to get%s users" % more)
# Check num private blocks
        if user_subscription.max_private_blocks != -1:  # -1 means unlimited
if num_private_blocks > user_subscription.max_private_blocks:
self.raise_upgrade_blocks(user_subscription)
def raise_upgrade_blocks(self, user_subscription):
more = " more" if user_subscription.max_private_blocks > 0 else ""
raise PlanUpgradeNeeded("Upgrade plan to get%s private blocks" % more)
# -------------- USER METHODS -------------- #
check_update_user = check_act_as_admin_of
check_change_password = check_act_as_admin_of
check_read_user_permissions = check_act_as_admin_of
def check_grant_administrator_for(self, brl_user, brl_new_user):
self.check_act_as_admin_of(brl_user)
self.check_subscription_limit_reached(brl_user, brl_new_user)
def check_revoke_administrator_for(self, brl_user):
return self.check_act_as_admin_of(brl_user)
def check_handle_block_permissions(self, brl_block):
self.check_act_as_admin_of(brl_block.owner)
# Read if block is public
block_access = self._store.read_block_permissions(brl_block)
if block_access.is_private:
            # The block is private: check the owner's subscription limits before handling permissions
self.check_subscription_limit_reached(brl_block.owner)
def check_grant_read_or_write_permissions_to(self, brl_new_user, brl_block):
self.check_act_as_admin_of(brl_block.owner)
# Read if block is public
block_access = self._store.read_block_permissions(brl_block)
if block_access.is_private:
# Check if can add one more contributor (no more private blocks)
self.check_subscription_limit_reached(brl_block.owner, brl_new_user)
def check_revoke_read_or_write_permissions_to(self, brl_block):
self.check_act_as_admin_of(brl_block.owner)
# -------------- BLOCK METHODS -------------- #
def check_create_block(self, brl_user, private=False):
        # Avoid Mocks returning Mock, so compare explicitly with True
self.check_act_as_admin_of(brl_user)
if private:
# Check if can add one more private block (no more contributors)
_, num_private_blocks = self._get_subscription_utilisation_status(brl_user)
user_subscription = self._store.read_user_subscription(brl_user)
            if user_subscription.max_private_blocks != -1:  # -1 means unlimited
if num_private_blocks + 1 > user_subscription.max_private_blocks:
self.raise_upgrade_blocks(user_subscription)
def check_make_private_a_block(self, brl_user):
self.check_create_block(brl_user, True)
check_make_public_a_block = check_act_as_admin_of
def check_read_block(self, brl_block):
block_access = self._store.read_block_permissions(brl_block)
if block_access.is_private:
if not self._read_granted(brl_block, block_access):
raise ForbiddenException("Permission denied: Reading block '%s'" % (brl_block))
def check_write_block(self, brl_block):
try:
block_access = self._store.read_block_permissions(brl_block)
except NotFoundException:
return self.check_create_block(brl_block.owner, False)
# If block is private, check auth_user can read it and owner is paying
if block_access.is_private:
            # Check limits are ok; if the plan is exceeded this will raise
self.check_subscription_limit_reached(brl_block.owner)
# Check if auth_user has write permissions
if self._can_act_as_admin_of(brl_block.owner) is not True:
if not block_access.write.is_granted(self.auth_user):
raise ForbiddenException("Permission denied: Writing block '%s'" % brl_block)
# Check Publish
def check_publish_block(self, brl_block, publish_request):
self.check_write_block(brl_block)
user = self._store.read_user(publish_request.parent.block.owner)
if user.blocks_bytes + publish_request.bytes > user.max_workspace_size:
raise ForbiddenException("Workspace max size reached please contact us")
# Delete if auth_user is an admin
def check_delete_block(self, brl_block):
self.check_act_as_admin_of(brl_block.owner)
def check_read_blocks_permissions(self, brl_block):
self.check_act_as_admin_of(brl_block.owner)
# ############ AUX METHODS #################
def is_private(self, brl_block):
"""Block is private?"""
return self._store.read_block_permissions(brl_block).is_private
def _can_act_as_admin_of(self, brl_user_owner):
"""Check if auth_user can act as block owner"""
if not self.auth_user:
return False # Anonymous
if brl_user_owner == self.auth_user:
return True
try:
admins = self._store.read_user(brl_user_owner).administrators
except NotInStoreException:
return False
return admins.is_granted(self.auth_user)
def _get_subscription_utilisation_status(self, brl_user_owner):
"""Returns a tuple with a set with users, and num_private_blocks"""
# Reads subscription and check limits
admins = set(self._store.read_user(brl_user_owner).administrators)
# Iterate user blocks and read the distinct users granted for read and write
users_granted = set([])
user = self._store.read_user(brl_user_owner)
num_private_blocks = 0
for brl_block in user.blocks.iterkeys():
perms = self._store.read_block_permissions(brl_block)
if perms.is_private: # Only compute for private blocks
num_private_blocks += 1
users_granted |= perms.write | perms.read
return admins.union(users_granted), num_private_blocks
def _read_granted(self, brl_block, block_access):
""" self.auth_user is granted to read brl_block?"""
return (block_access.write.is_granted(self.auth_user) or
block_access.read.is_granted(self.auth_user) or
self._can_act_as_admin_of(brl_block.owner))
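# Hedged usage sketch (illustrative only; `store`, `auth_user` and `brl_block`
# stand in for real biicode server objects):
#
#     security = Security(auth_user, store)
#     security.check_read_block(brl_block)   # ForbiddenException if private and not granted
#     security.check_create_block(auth_user, private=True)   # PlanUpgradeNeeded if over the plan limit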
|
|
"""Test config entries API."""
import pytest
from homeassistant.auth.providers import homeassistant as prov_ha
from homeassistant.components.config import (
auth_provider_homeassistant as auth_ha)
from tests.common import MockUser, register_auth_provider
@pytest.fixture(autouse=True)
def setup_config(hass):
"""Fixture that sets up the auth provider homeassistant module."""
hass.loop.run_until_complete(register_auth_provider(hass, {
'type': 'homeassistant'
}))
hass.loop.run_until_complete(auth_ha.async_setup(hass))
async def test_create_auth_system_generated_user(hass, hass_access_token,
hass_ws_client):
"""Test we can't add auth to system generated users."""
system_user = MockUser(system_generated=True).add_to_hass(hass)
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_CREATE,
'user_id': system_user.id,
'username': 'test-user',
'password': 'test-pass',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'system_generated'
async def test_create_auth_user_already_credentials():
"""Test we can't create auth for user with pre-existing credentials."""
# assert False
async def test_create_auth_unknown_user(hass_ws_client, hass,
hass_access_token):
"""Test create pointing at unknown user."""
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_CREATE,
'user_id': 'test-id',
'username': 'test-user',
'password': 'test-pass',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'not_found'
async def test_create_auth_requires_admin(hass, hass_ws_client,
hass_read_only_access_token):
"""Test create requires admin to call API."""
client = await hass_ws_client(hass, hass_read_only_access_token)
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_CREATE,
'user_id': 'test-id',
'username': 'test-user',
'password': 'test-pass',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'unauthorized'
async def test_create_auth(hass, hass_ws_client, hass_access_token,
hass_storage):
"""Test create auth command works."""
client = await hass_ws_client(hass, hass_access_token)
user = MockUser().add_to_hass(hass)
assert len(user.credentials) == 0
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_CREATE,
'user_id': user.id,
'username': 'test-user',
'password': 'test-pass',
})
result = await client.receive_json()
assert result['success'], result
assert len(user.credentials) == 1
creds = user.credentials[0]
assert creds.auth_provider_type == 'homeassistant'
assert creds.auth_provider_id is None
assert creds.data == {
'username': 'test-user'
}
assert prov_ha.STORAGE_KEY in hass_storage
entry = hass_storage[prov_ha.STORAGE_KEY]['data']['users'][0]
assert entry['username'] == 'test-user'
async def test_create_auth_duplicate_username(hass, hass_ws_client,
hass_access_token, hass_storage):
"""Test we can't create auth with a duplicate username."""
client = await hass_ws_client(hass, hass_access_token)
user = MockUser().add_to_hass(hass)
hass_storage[prov_ha.STORAGE_KEY] = {
'version': 1,
'data': {
'users': [{
'username': 'test-user'
}]
}
}
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_CREATE,
'user_id': user.id,
'username': 'test-user',
'password': 'test-pass',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'username_exists'
async def test_delete_removes_just_auth(hass_ws_client, hass, hass_storage,
hass_access_token):
"""Test deleting an auth without being connected to a user."""
client = await hass_ws_client(hass, hass_access_token)
hass_storage[prov_ha.STORAGE_KEY] = {
'version': 1,
'data': {
'users': [{
'username': 'test-user'
}]
}
}
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_DELETE,
'username': 'test-user',
})
result = await client.receive_json()
assert result['success'], result
assert len(hass_storage[prov_ha.STORAGE_KEY]['data']['users']) == 0
async def test_delete_removes_credential(hass, hass_ws_client,
hass_access_token, hass_storage):
"""Test deleting auth that is connected to a user."""
client = await hass_ws_client(hass, hass_access_token)
user = MockUser().add_to_hass(hass)
hass_storage[prov_ha.STORAGE_KEY] = {
'version': 1,
'data': {
'users': [{
'username': 'test-user'
}]
}
}
user.credentials.append(
await hass.auth.auth_providers[0].async_get_or_create_credentials({
'username': 'test-user'}))
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_DELETE,
'username': 'test-user',
})
result = await client.receive_json()
assert result['success'], result
assert len(hass_storage[prov_ha.STORAGE_KEY]['data']['users']) == 0
async def test_delete_requires_admin(hass, hass_ws_client,
hass_read_only_access_token):
"""Test delete requires admin."""
client = await hass_ws_client(hass, hass_read_only_access_token)
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_DELETE,
'username': 'test-user',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'unauthorized'
async def test_delete_unknown_auth(hass, hass_ws_client, hass_access_token):
"""Test trying to delete an unknown auth username."""
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 5,
'type': auth_ha.WS_TYPE_DELETE,
'username': 'test-user',
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'auth_not_found'
async def test_change_password(hass, hass_ws_client, hass_access_token):
"""Test that change password succeeds with valid password."""
provider = hass.auth.auth_providers[0]
await provider.async_initialize()
await hass.async_add_executor_job(
provider.data.add_auth, 'test-user', 'test-pass')
credentials = await provider.async_get_or_create_credentials({
'username': 'test-user'
})
refresh_token = await hass.auth.async_validate_access_token(
hass_access_token)
user = refresh_token.user
await hass.auth.async_link_user(user, credentials)
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 6,
'type': auth_ha.WS_TYPE_CHANGE_PASSWORD,
'current_password': 'test-pass',
'new_password': 'new-pass'
})
result = await client.receive_json()
assert result['success'], result
await provider.async_validate_login('test-user', 'new-pass')
async def test_change_password_wrong_pw(hass, hass_ws_client,
hass_access_token):
"""Test that change password fails with invalid password."""
provider = hass.auth.auth_providers[0]
await provider.async_initialize()
await hass.async_add_executor_job(
provider.data.add_auth, 'test-user', 'test-pass')
credentials = await provider.async_get_or_create_credentials({
'username': 'test-user'
})
refresh_token = await hass.auth.async_validate_access_token(
hass_access_token)
user = refresh_token.user
await hass.auth.async_link_user(user, credentials)
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 6,
'type': auth_ha.WS_TYPE_CHANGE_PASSWORD,
'current_password': 'wrong-pass',
'new_password': 'new-pass'
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'invalid_password'
with pytest.raises(prov_ha.InvalidAuth):
await provider.async_validate_login('test-user', 'new-pass')
async def test_change_password_no_creds(hass, hass_ws_client,
hass_access_token):
"""Test that change password fails with no credentials."""
client = await hass_ws_client(hass, hass_access_token)
await client.send_json({
'id': 6,
'type': auth_ha.WS_TYPE_CHANGE_PASSWORD,
'current_password': 'test-pass',
'new_password': 'new-pass'
})
result = await client.receive_json()
assert not result['success'], result
assert result['error']['code'] == 'credentials_not_found'
|
|
#
# Copyright 2014 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import FFTW, byte_align, empty_aligned, forget_wisdom
import pyfftw
import numpy
from timeit import Timer
import time
import unittest
from .test_pyfftw_base import FFTWBaseTest, run_test_suites
# We make this 1D case not inherit from FFTWBaseTest.
# It needs to be combined with FFTWBaseTest to work.
# This allows us to separate out tests that are used
# in multiple locations.
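# For example, the concrete test case further down combines them as
#     class Complex64FFTWTest(Complex64FFTW1DTest, FFTWBaseTest): ...
# so these shared 1D tests run against each dtype-specific configuration.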
class Complex64FFTW1DTest(object):
def test_time(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes)
self.timer_routine(fft.execute,
lambda: self.np_fft_comparison(a))
self.assertTrue(True)
def test_invalid_args_raise(self):
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
# Note "thread" is incorrect, it should be "threads"
self.assertRaises(TypeError, FFTW, a, b, axes, thread=4)
def test_1d(self):
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes)
def test_multiple_1d(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes)
def test_default_args(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
a, b = self.create_test_arrays(in_shape, out_shape)
fft = FFTW(a,b)
fft.execute()
ref_b = self.reference_fftn(a, axes=(-1,))
self.assertTrue(numpy.allclose(b, ref_b, rtol=1e-2, atol=1e-3))
def test_time_with_array_update(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes)
def fftw_callable():
fft.update_arrays(a,b)
fft.execute()
self.timer_routine(fftw_callable,
lambda: self.np_fft_comparison(a))
self.assertTrue(True)
def test_planning_time_limit(self):
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
# run this a few times
runs = 10
t1 = time.time()
for n in range(runs):
forget_wisdom()
fft = FFTW(a, b, axes=axes)
unlimited_time = (time.time() - t1)/runs
time_limit = (unlimited_time)/8
# Now do it again but with an upper limit on the time
t1 = time.time()
for n in range(runs):
forget_wisdom()
fft = FFTW(a, b, axes=axes, planning_timelimit=time_limit)
limited_time = (time.time() - t1)/runs
import sys
if sys.platform == 'win32':
# Give a 4x margin on windows. The timers are low
# precision and FFTW seems to take longer anyway
self.assertTrue(limited_time < time_limit*4)
else:
# Otherwise have a 2x margin
self.assertTrue(limited_time < time_limit*2)
def test_invalid_planning_time_limit(self):
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.assertRaisesRegex(TypeError, 'Invalid planning timelimit',
FFTW, *(a,b, axes), **{'planning_timelimit': 'foo'})
def test_planner_flags(self):
'''Test all the planner flags on a small array
'''
in_shape = self.input_shapes['small_1d']
out_shape = self.output_shapes['small_1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
for each_flag in pyfftw.pyfftw._flag_dict:
fft, ifft = self.run_validate_fft(a, b, axes,
flags=(each_flag,))
self.assertTrue(each_flag in fft.flags)
self.assertTrue(each_flag in ifft.flags)
# also, test no flags (which should still work)
fft, ifft = self.run_validate_fft(a, b, axes,
flags=())
def test_destroy_input(self):
'''Test the destroy input flag
'''
# We can't really test it actually destroys the input, as it might
# not (plus it's not exactly something we want).
# It's enough just to check it runs ok with that flag.
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes,
flags=('FFTW_ESTIMATE','FFTW_DESTROY_INPUT'))
def test_invalid_flag_fail(self):
'''Test passing a garbage flag fails
'''
in_shape = self.input_shapes['1d']
out_shape = self.output_shapes['1d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.assertRaisesRegex(ValueError, 'Invalid flag',
self.run_validate_fft, *(a, b, axes),
**{'flags':('garbage',)})
def test_alignment(self):
'''Test to see if the alignment is returned correctly
'''
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
input_dtype_alignment = self.get_input_dtype_alignment()
output_dtype_alignment = self.get_output_dtype_alignment()
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
a = byte_align(a, n=16)
b = byte_align(b, n=16)
fft, ifft = self.run_validate_fft(a, b, axes,
force_unaligned_data=True)
a, b = self.create_test_arrays(in_shape, out_shape)
a = byte_align(a, n=16)
b = byte_align(b, n=16)
a_orig = a.copy()
b_orig = b.copy()
# Offset from 16 byte aligned to guarantee it's not
# 16 byte aligned
a__ = empty_aligned(
numpy.prod(in_shape)*a.itemsize + input_dtype_alignment,
dtype='int8', n=16)
a_ = (a__[input_dtype_alignment:]
.view(dtype=self.input_dtype).reshape(*in_shape))
a_[:] = a
b__ = empty_aligned(
numpy.prod(out_shape)*b.itemsize + input_dtype_alignment,
dtype='int8', n=16)
b_ = (b__[input_dtype_alignment:]
.view(dtype=self.output_dtype).reshape(*out_shape))
b_[:] = b
a[:] = a_orig
fft, ifft = self.run_validate_fft(a, b, axes,
create_array_copies=False)
self.assertTrue(fft.input_alignment == 16)
self.assertTrue(fft.output_alignment == 16)
a[:] = a_orig
fft, ifft = self.run_validate_fft(a, b_, axes,
create_array_copies=False)
self.assertTrue(fft.input_alignment == input_dtype_alignment)
self.assertTrue(fft.output_alignment == output_dtype_alignment)
a_[:] = a_orig
fft, ifft = self.run_validate_fft(a_, b, axes,
create_array_copies=False)
self.assertTrue(fft.input_alignment == input_dtype_alignment)
self.assertTrue(fft.output_alignment == output_dtype_alignment)
a_[:] = a_orig
fft, ifft = self.run_validate_fft(a_, b_, axes,
create_array_copies=False)
self.assertTrue(fft.input_alignment == input_dtype_alignment)
self.assertTrue(fft.output_alignment == output_dtype_alignment)
a[:] = a_orig
fft, ifft = self.run_validate_fft(a, b, axes,
create_array_copies=False, force_unaligned_data=True)
self.assertTrue(fft.input_alignment == input_dtype_alignment)
self.assertTrue(fft.output_alignment == output_dtype_alignment)
def test_incorrect_byte_alignment_fails(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
input_dtype_alignment = self.get_input_dtype_alignment()
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
a = byte_align(a, n=16)
b = byte_align(b, n=16)
fft, ifft = self.run_validate_fft(a, b, axes,
force_unaligned_data=True)
a, b = self.create_test_arrays(in_shape, out_shape)
# Offset from 16 byte aligned to guarantee it's not
# 16 byte aligned
a__ = empty_aligned(
numpy.prod(in_shape)*a.itemsize + 1,
dtype='int8', n=16)
a_ = a__[1:].view(dtype=self.input_dtype).reshape(*in_shape)
a_[:] = a
b__ = empty_aligned(
numpy.prod(out_shape)*b.itemsize + 1,
dtype='int8', n=16)
b_ = b__[1:].view(dtype=self.output_dtype).reshape(*out_shape)
b_[:] = b
self.assertRaisesRegex(ValueError, 'Invalid output alignment',
FFTW, *(a, b_))
self.assertRaisesRegex(ValueError, 'Invalid input alignment',
FFTW, *(a_, b))
self.assertRaisesRegex(ValueError, 'Invalid input alignment',
FFTW, *(a_, b_))
def test_zero_length_fft_axis_fail(self):
in_shape = (1024, 0)
out_shape = in_shape
axes = (-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
self.assertRaisesRegex(ValueError, 'Zero length array',
self.run_validate_fft, *(a,b, axes))
def test_missized_fail(self):
in_shape = self.input_shapes['2d']
_out_shape = self.output_shapes['2d']
out_shape = (_out_shape[0]+1, _out_shape[1])
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(ValueError, 'Invalid shapes'):
FFTW(a, b, axes, direction=self.direction)
def test_missized_nonfft_axes_fail(self):
in_shape = self.input_shapes['3d']
_out_shape = self.output_shapes['3d']
out_shape = (_out_shape[0], _out_shape[1]+1, _out_shape[2])
axes=(2,)
a, b = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(ValueError, 'Invalid shapes'):
FFTW(a, b, direction=self.direction)
def test_extra_dimension_fail(self):
in_shape = self.input_shapes['2d']
_out_shape = self.output_shapes['2d']
out_shape = (2, _out_shape[0], _out_shape[1])
axes=(1,)
a, b = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(ValueError, 'Invalid shapes'):
FFTW(a, b, direction=self.direction)
def test_f_contiguous_1d(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(0,)
a, b = self.create_test_arrays(in_shape, out_shape)
# Taking the transpose just makes the array F contiguous
a = a.transpose()
b = b.transpose()
self.run_validate_fft(a, b, axes, create_array_copies=False)
def test_different_dtypes_fail(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
a_ = numpy.complex64(a)
b_ = numpy.complex128(b)
self.assertRaisesRegex(ValueError, 'Invalid scheme',
FFTW, *(a_,b_))
a_ = numpy.complex128(a)
b_ = numpy.complex64(b)
self.assertRaisesRegex(ValueError, 'Invalid scheme',
FFTW, *(a_,b_))
def test_update_data(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes, fft=fft, ifft=ifft)
def test_with_not_ndarray_error(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
a, b = self.create_test_arrays(in_shape, out_shape)
self.assertRaisesRegex(ValueError, 'Invalid output array',
FFTW, *(a,10))
self.assertRaisesRegex(ValueError, 'Invalid input array',
FFTW, *(10,b))
def test_update_data_with_not_ndarray_error(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes,
create_array_copies=False)
self.assertRaisesRegex(ValueError, 'Invalid output array',
fft.update_arrays, *(a,10))
self.assertRaisesRegex(ValueError, 'Invalid input array',
fft.update_arrays, *(10,b))
def test_update_data_with_stride_error(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes,
create_array_copies=False)
# We offset by 16 to make sure the byte alignment is still correct.
in_shape = (in_shape[0]+16, in_shape[1]+16)
out_shape = (out_shape[0]+16, out_shape[1]+16)
a_, b_ = self.create_test_arrays(in_shape, out_shape)
a_ = a_[16:,16:]
b_ = b_[16:,16:]
with self.assertRaisesRegex(ValueError, 'Invalid input striding'):
self.run_validate_fft(a_, b, axes,
fft=fft, ifft=ifft, create_array_copies=False)
with self.assertRaisesRegex(ValueError, 'Invalid output striding'):
self.run_validate_fft(a, b_, axes,
fft=fft, ifft=ifft, create_array_copies=False)
def test_update_data_with_shape_error(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
fft, ifft = self.run_validate_fft(a, b, axes)
in_shape = (in_shape[0]-10, in_shape[1])
out_shape = (out_shape[0], out_shape[1]+5)
a_, b_ = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(ValueError, 'Invalid input shape'):
self.run_validate_fft(a_, b, axes,
fft=fft, ifft=ifft, create_array_copies=False)
with self.assertRaisesRegex(ValueError, 'Invalid output shape'):
self.run_validate_fft(a, b_, axes,
fft=fft, ifft=ifft, create_array_copies=False)
def test_update_unaligned_data_with_FFTW_UNALIGNED(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
input_dtype_alignment = self.get_input_dtype_alignment()
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
a = byte_align(a, n=16)
b = byte_align(b, n=16)
fft, ifft = self.run_validate_fft(a, b, axes,
force_unaligned_data=True)
a, b = self.create_test_arrays(in_shape, out_shape)
# Offset from 16 byte aligned to guarantee it's not
# 16 byte aligned
a__ = empty_aligned(
numpy.prod(in_shape)*a.itemsize + input_dtype_alignment,
dtype='int8', n=16)
a_ = (a__[input_dtype_alignment:]
.view(dtype=self.input_dtype).reshape(*in_shape))
a_[:] = a
b__ = empty_aligned(
numpy.prod(out_shape)*b.itemsize + input_dtype_alignment,
dtype='int8', n=16)
b_ = (b__[input_dtype_alignment:]
.view(dtype=self.output_dtype).reshape(*out_shape))
b_[:] = b
self.run_validate_fft(a, b_, axes, fft=fft, ifft=ifft)
self.run_validate_fft(a_, b, axes, fft=fft, ifft=ifft)
self.run_validate_fft(a_, b_, axes, fft=fft, ifft=ifft)
def test_update_data_with_unaligned_original(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
input_dtype_alignment = self.get_input_dtype_alignment()
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
# Offset from 16 byte aligned to guarantee it's not
# 16 byte aligned
a__ = empty_aligned(
numpy.prod(in_shape)*a.itemsize + input_dtype_alignment,
dtype='int8', n=16)
a_ = a__[input_dtype_alignment:].view(dtype=self.input_dtype).reshape(*in_shape)
a_[:] = a
b__ = empty_aligned(
numpy.prod(out_shape)*b.itemsize + input_dtype_alignment,
dtype='int8', n=16)
b_ = b__[input_dtype_alignment:].view(dtype=self.output_dtype).reshape(*out_shape)
b_[:] = b
fft, ifft = self.run_validate_fft(a_, b_, axes,
force_unaligned_data=True)
self.run_validate_fft(a, b_, axes, fft=fft, ifft=ifft)
self.run_validate_fft(a_, b, axes, fft=fft, ifft=ifft)
self.run_validate_fft(a_, b_, axes, fft=fft, ifft=ifft)
def test_update_data_with_alignment_error(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
byte_error = 1
axes=(-1,)
a, b = self.create_test_arrays(in_shape, out_shape)
a = byte_align(a, n=16)
b = byte_align(b, n=16)
fft, ifft = self.run_validate_fft(a, b, axes)
a, b = self.create_test_arrays(in_shape, out_shape)
# Offset from 16 byte aligned to guarantee it's not
# 16 byte aligned
a__ = empty_aligned(
numpy.prod(in_shape)*a.itemsize+byte_error,
dtype='int8', n=16)
a_ = (a__[byte_error:]
.view(dtype=self.input_dtype).reshape(*in_shape))
a_[:] = a
b__ = empty_aligned(
numpy.prod(out_shape)*b.itemsize+byte_error,
dtype='int8', n=16)
b_ = (b__[byte_error:]
.view(dtype=self.output_dtype).reshape(*out_shape))
b_[:] = b
with self.assertRaisesRegex(ValueError, 'Invalid output alignment'):
self.run_validate_fft(a, b_, axes, fft=fft, ifft=ifft,
create_array_copies=False)
with self.assertRaisesRegex(ValueError, 'Invalid input alignment'):
self.run_validate_fft(a_, b, axes, fft=fft, ifft=ifft,
create_array_copies=False)
# Should also be true for the unaligned case
fft, ifft = self.run_validate_fft(a, b, axes,
force_unaligned_data=True)
with self.assertRaisesRegex(ValueError, 'Invalid output alignment'):
self.run_validate_fft(a, b_, axes, fft=fft, ifft=ifft,
create_array_copies=False)
with self.assertRaisesRegex(ValueError, 'Invalid input alignment'):
self.run_validate_fft(a_, b, axes, fft=fft, ifft=ifft,
create_array_copies=False)
def test_invalid_axes(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-3,)
a, b = self.create_test_arrays(in_shape, out_shape)
with self.assertRaisesRegex(IndexError, 'Invalid axes'):
FFTW(a, b, axes, direction=self.direction)
axes=(10,)
with self.assertRaisesRegex(IndexError, 'Invalid axes'):
FFTW(a, b, axes, direction=self.direction)
class Complex64FFTWTest(Complex64FFTW1DTest, FFTWBaseTest):
def test_2d(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-2,-1)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes, create_array_copies=False)
def test_multiple_2d(self):
in_shape = self.input_shapes['3d']
out_shape = self.output_shapes['3d']
axes=(-2,-1)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes, create_array_copies=False)
def test_3d(self):
in_shape = self.input_shapes['3d']
out_shape = self.output_shapes['3d']
axes=(0, 1, 2)
a, b = self.create_test_arrays(in_shape, out_shape)
self.run_validate_fft(a, b, axes, create_array_copies=False)
def test_non_monotonic_increasing_axes(self):
'''Test the case where the axes arg does not monotonically increase.
'''
axes=(1, 0)
# We still need the shapes to work!
in_shape = numpy.asarray(self.input_shapes['2d'])[list(axes)]
out_shape = numpy.asarray(self.output_shapes['2d'])[list(axes)]
a, b = self.create_test_arrays(in_shape, out_shape, axes=axes)
self.run_validate_fft(a, b, axes, create_array_copies=False)
def test_non_contiguous_2d(self):
in_shape = self.input_shapes['2d']
out_shape = self.output_shapes['2d']
axes=(-2,-1)
a, b = self.create_test_arrays(in_shape, out_shape)
# Some arbitrary and crazy slicing
a_sliced = a[12:200:3, 300:2041:9]
# b needs to be the same size
b_sliced = b[20:146:2, 100:1458:7]
self.run_validate_fft(a_sliced, b_sliced, axes, create_array_copies=False)
def test_non_contiguous_2d_in_3d(self):
in_shape = (256, 4, 2048)
out_shape = in_shape
axes=(0,2)
a, b = self.create_test_arrays(in_shape, out_shape)
# Some arbitrary and crazy slicing
a_sliced = a[12:200:3, :, 300:2041:9]
# b needs to be the same size
b_sliced = b[20:146:2, :, 100:1458:7]
self.run_validate_fft(a_sliced, b_sliced, axes, create_array_copies=False)
class Complex128FFTWTest(Complex64FFTWTest):
def setUp(self):
self.input_dtype = numpy.complex128
self.output_dtype = numpy.complex128
self.np_fft_comparison = numpy.fft.fft
self.direction = 'FFTW_FORWARD'
return
class ComplexLongDoubleFFTWTest(Complex64FFTWTest):
def setUp(self):
self.input_dtype = numpy.clongdouble
self.output_dtype = numpy.clongdouble
self.np_fft_comparison = self.reference_fftn
self.direction = 'FFTW_FORWARD'
return
def reference_fftn(self, a, axes):
# numpy.fft.fftn doesn't support complex256 type,
# so we need to compare to a lower precision type.
a = numpy.complex128(a)
return numpy.fft.fftn(a, axes=axes)
@unittest.skip('numpy.fft has issues with this dtype.')
def test_time(self):
pass
@unittest.skip('numpy.fft has issues with this dtype.')
def test_time_with_array_update(self):
pass
test_cases = (
Complex64FFTWTest,
Complex128FFTWTest,
ComplexLongDoubleFFTWTest,)
test_set = None
#test_set = {'all':['test_alignment', 'test_incorrect_byte_alignment_fails']}
if __name__ == '__main__':
run_test_suites(test_cases, test_set)
|
|
from . import _get_clients, _setup
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, settings
from uuid import uuid4
# Yes, yes it talks to all the things
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment():
ec2_client, iam_client, ecs_client, _, batch_client = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="MANAGED",
state="ENABLED",
computeResources={
"type": "EC2",
"minvCpus": 5,
"maxvCpus": 10,
"desiredvCpus": 5,
"instanceTypes": ["t2.small", "t2.medium"],
"imageId": "some_image_id",
"subnets": [subnet_id],
"securityGroupIds": [sg_id],
"ec2KeyPair": "string",
"instanceRole": iam_arn.replace("role", "instance-profile"),
"tags": {"string": "string"},
"bidPercentage": 123,
"spotIamFleetRole": "string",
},
serviceRole=iam_arn,
)
resp.should.contain("computeEnvironmentArn")
resp["computeEnvironmentName"].should.equal(compute_name)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
    # A t2.medium has 2 vCPUs and a t2.small has 1, so 2 mediums and 1 small should be created to satisfy the 5 desired vCPUs
if not settings.TEST_SERVER_MODE:
# Can't verify this in ServerMode, as other tests may have created instances
resp = ec2_client.describe_instances()
resp.should.contain("Reservations")
len(resp["Reservations"]).should.equal(3)
# Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment_with_instance_family():
"""
The InstanceType parameter can have multiple values:
instance_type t2.small
instance_family t2 <-- What we're testing here
'optimal'
unknown value
"""
ec2_client, iam_client, _, _, batch_client = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="MANAGED",
state="ENABLED",
computeResources={
"type": "EC2",
"minvCpus": 5,
"maxvCpus": 10,
"desiredvCpus": 5,
"instanceTypes": ["t2"],
"imageId": "some_image_id",
"subnets": [subnet_id],
"securityGroupIds": [sg_id],
"ec2KeyPair": "string",
"instanceRole": iam_arn.replace("role", "instance-profile"),
"tags": {"string": "string"},
"bidPercentage": 123,
"spotIamFleetRole": "string",
},
serviceRole=iam_arn,
)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
our_env["computeResources"]["instanceTypes"].should.equal(["t2"])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment_with_unknown_instance_type():
"""
The InstanceType parameter can have multiple values:
instance_type t2.small
instance_family t2
'optimal'
unknown value <-- What we're testing here
"""
ec2_client, iam_client, _, _, batch_client = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
with pytest.raises(ClientError) as exc:
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="MANAGED",
state="ENABLED",
computeResources={
"type": "EC2",
"minvCpus": 5,
"maxvCpus": 10,
"desiredvCpus": 5,
"instanceTypes": ["unknown"],
"imageId": "some_image_id",
"subnets": [subnet_id],
"securityGroupIds": [sg_id],
"ec2KeyPair": "string",
"instanceRole": iam_arn.replace("role", "instance-profile"),
"tags": {"string": "string"},
"bidPercentage": 123,
"spotIamFleetRole": "string",
},
serviceRole=iam_arn,
)
err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValue")
err["Message"].should.equal("Instance type unknown does not exist")
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, _, batch_client = _get_clients()
_, _, _, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="UNMANAGED",
state="ENABLED",
serviceRole=iam_arn,
)
resp.should.contain("computeEnvironmentArn")
resp["computeEnvironmentName"].should.equal(compute_name)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
our_env.should.have.key("ecsClusterArn")
    # It's unmanaged, so no instances should be created
if not settings.TEST_SERVER_MODE:
# Can't verify this in ServerMode, as other tests may have created instances
resp = ec2_client.describe_instances()
resp.should.contain("Reservations")
len(resp["Reservations"]).should.equal(0)
# Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"])
# TODO create 1000s of tests to test complex option combinations of create environment
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_compute_environment():
ec2_client, iam_client, _, _, batch_client = _get_clients()
_, _, _, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
compute_arn = (
f"arn:aws:batch:eu-central-1:123456789012:compute-environment/{compute_name}"
)
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="UNMANAGED",
state="ENABLED",
serviceRole=iam_arn,
)
all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name]
our_envs.should.have.length_of(1)
our_envs[0]["computeEnvironmentName"].should.equal(compute_name)
our_envs[0]["computeEnvironmentArn"].should.equal(compute_arn)
our_envs[0].should.have.key("ecsClusterArn")
our_envs[0].should.have.key("state").equal("ENABLED")
our_envs[0].should.have.key("status").equal("VALID")
# Test filtering
resp = batch_client.describe_compute_environments(computeEnvironments=["test1"])
len(resp["computeEnvironments"]).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, _, batch_client = _get_clients()
_, _, _, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="UNMANAGED",
state="ENABLED",
serviceRole=iam_arn,
)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
batch_client.delete_compute_environment(computeEnvironment=compute_name)
all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
all_names = [e["computeEnvironmentName"] for e in all_envs]
all_names.shouldnt.contain(compute_name)
all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.shouldnt.contain(our_env["ecsClusterArn"])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_managed_compute_environment():
ec2_client, iam_client, ecs_client, _, batch_client = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="MANAGED",
state="ENABLED",
computeResources={
"type": "EC2",
"minvCpus": 5,
"maxvCpus": 10,
"desiredvCpus": 5,
"instanceTypes": ["t2.small", "t2.medium"],
"imageId": "some_image_id",
"subnets": [subnet_id],
"securityGroupIds": [sg_id],
"ec2KeyPair": "string",
"instanceRole": iam_arn.replace("role", "instance-profile"),
"tags": {"string": "string"},
"bidPercentage": 123,
"spotIamFleetRole": "string",
},
serviceRole=iam_arn,
)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
batch_client.delete_compute_environment(computeEnvironment=compute_name)
all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
all_names = [e["computeEnvironmentName"] for e in all_envs]
all_names.shouldnt.contain(compute_name)
if not settings.TEST_SERVER_MODE:
# Too many instances to know which one is ours in ServerMode
resp = ec2_client.describe_instances()
resp.should.contain("Reservations")
len(resp["Reservations"]).should.equal(3)
for reservation in resp["Reservations"]:
reservation["Instances"][0]["State"]["Name"].should.equal("terminated")
all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.shouldnt.contain(our_env["ecsClusterArn"])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_unmanaged_compute_environment_state():
ec2_client, iam_client, _, _, batch_client = _get_clients()
_, _, _, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="UNMANAGED",
state="ENABLED",
serviceRole=iam_arn,
)
batch_client.update_compute_environment(
computeEnvironment=compute_name, state="DISABLED"
)
all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name]
our_envs.should.have.length_of(1)
our_envs[0]["state"].should.equal("DISABLED")
@pytest.mark.parametrize("compute_env_type", ["FARGATE", "FARGATE_SPOT"])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_fargate_managed_compute_environment(compute_env_type):
ec2_client, iam_client, ecs_client, _, batch_client = _get_clients()
_, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = str(uuid4())
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type="MANAGED",
state="ENABLED",
computeResources={
"type": compute_env_type,
"maxvCpus": 10,
"subnets": [subnet_id],
"securityGroupIds": [sg_id],
},
serviceRole=iam_arn,
)
resp.should.contain("computeEnvironmentArn")
resp["computeEnvironmentName"].should.equal(compute_name)
our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name]
)["computeEnvironments"][0]
our_env["computeResources"]["type"].should.equal(compute_env_type)
# Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"])
|
|
from pykechain.enums import NotificationEvent, NotificationChannels, NotificationStatus
from pykechain.exceptions import NotFoundError, MultipleFoundError, APIError, IllegalArgumentError
from pykechain.models import User, Team
from pykechain.models.notification import Notification
from tests.classes import TestBetamax
class _TestNotification(TestBetamax):
SUBJECT = "_TEST_SUBJECT"
MESSAGE = "_TEST_MESSAGE"
USER_ID = 1
def setUp(self) -> None:
super().setUp()
self.USER = self.client.user(pk=self.USER_ID)
self.TEAM = self.client.teams()[0]
for old_notification in self.client.notifications(subject=self.SUBJECT):
old_notification.delete()
class TestNotificationCreation(_TestNotification):
def setUp(self):
super().setUp()
self.bucket = list()
def tearDown(self):
for obj in self.bucket:
try:
obj.delete()
except APIError:
pass
super().tearDown()
def test_create(self):
# setUp
notification = self.client.create_notification(subject=self.SUBJECT, message=self.MESSAGE)
self.bucket.append(notification)
# testing
self.assertIsInstance(notification, Notification)
self.assertEqual(self.SUBJECT, notification.subject)
self.assertEqual(self.MESSAGE, notification.message)
def test_create_with_inputs(self):
notification = self.client.create_notification(
subject=self.SUBJECT,
message=self.MESSAGE,
status=NotificationStatus.READY,
recipients=[self.USER_ID],
team=self.TEAM,
from_user=self.USER_ID,
event=NotificationEvent.EXPORT_ACTIVITY_ASYNC,
channel=NotificationChannels.EMAIL,
)
self.bucket.append(notification)
self.assertIsInstance(notification, Notification)
self.assertEqual(NotificationStatus.READY, notification.status)
self.assertEqual(self.USER_ID, notification.recipient_user_ids[0])
self.assertEqual(self.TEAM, notification.get_team())
self.assertEqual(self.USER, notification.get_from_user())
self.assertEqual(NotificationEvent.EXPORT_ACTIVITY_ASYNC, notification.event)
self.assertEqual(NotificationChannels.EMAIL, notification.channels[0])
def test_create_invalid_inputs(self):
kwargs = dict(subject=self.SUBJECT, message=self.MESSAGE)
with self.assertRaises(IllegalArgumentError):
self.bucket.append(
self.client.create_notification(subject=False, message=self.MESSAGE)
)
with self.assertRaises(IllegalArgumentError):
self.bucket.append(
self.client.create_notification(subject=self.SUBJECT, message=[self.MESSAGE])
)
with self.assertRaises(IllegalArgumentError):
self.bucket.append(self.client.create_notification(status="sending", **kwargs))
with self.assertRaises(IllegalArgumentError):
self.bucket.append(self.client.create_notification(recipients="not a list", **kwargs))
with self.assertRaises(IllegalArgumentError):
self.bucket.append(
self.client.create_notification(recipients=["not a user id"], **kwargs)
)
with self.assertRaises(IllegalArgumentError):
self.bucket.append(self.client.create_notification(team=0, **kwargs))
with self.assertRaises(IllegalArgumentError):
self.bucket.append(self.client.create_notification(from_user="Myself", **kwargs))
with self.assertRaises(IllegalArgumentError):
self.bucket.append(self.client.create_notification(event="Update", **kwargs))
with self.assertRaises(IllegalArgumentError):
self.bucket.append(
self.client.create_notification(channel=[NotificationChannels.EMAIL], **kwargs)
)
def test_delete_notification_from_client(self):
# setUp
notification = self.client.create_notification(message=self.MESSAGE, subject=self.SUBJECT)
self.client.delete_notification(notification=notification)
# testing
with self.assertRaises(NotFoundError):
self.client.notification(message=self.MESSAGE, subject=self.SUBJECT)
def test_delete_notification(self):
# setUp
notification = self.client.create_notification(message=self.MESSAGE, subject=self.SUBJECT)
notification.delete()
# testing
with self.assertRaises(NotFoundError):
self.client.notification(message=self.MESSAGE, subject=self.SUBJECT)
class TestNotifications(_TestNotification):
def setUp(self):
super().setUp()
self.notification = self.client.create_notification(
subject=self.SUBJECT,
message=self.MESSAGE,
recipients=[self.USER_ID],
team=self.TEAM,
)
def tearDown(self):
self.notification.delete()
super().tearDown()
def test_all_notifications_retrieval(self):
# setUp
notifications = self.client.notifications()
number_of_notification = len(notifications)
dummy_notification = self.client.create_notification(
subject="Dummy subject", message="Dummy message"
)
notifications_retrieved_again = self.client.notifications()
# testing
self.assertTrue(len(notifications_retrieved_again) == number_of_notification + 1)
# tearDown
self.client.delete_notification(notification=dummy_notification)
def test_retrieve_notification(self):
# testing
retrieved_notification = self.client.notification(
message=self.MESSAGE, subject=self.SUBJECT
)
self.assertIsInstance(retrieved_notification, Notification)
self.assertEqual(self.notification, retrieved_notification)
def test_retrieve_notification_raise_not_found(self):
with self.assertRaises(NotFoundError):
self.client.notification(message="Doesn't exist")
def test_retrieve_notification_raise_multiple_found(self):
# setUp
clone_testing_notification = self.client.create_notification(
subject=self.SUBJECT, message=self.MESSAGE
)
# testing
with self.assertRaises(MultipleFoundError):
self.client.notification(message=self.MESSAGE, subject=self.SUBJECT)
# tearDown
clone_testing_notification.delete()
def test_get_recipient_users(self):
recipients = self.notification.get_recipient_users()
# testing
self.assertIsInstance(recipients, list)
self.assertTrue(recipients)
first_recipient = recipients[0]
self.assertIsInstance(first_recipient, User)
self.assertEqual("superuser", first_recipient.username)
def test_get_from_user(self):
from_user = self.notification.get_from_user()
self.assertTrue(from_user)
self.assertIsInstance(from_user, User)
self.assertEqual("pykechain_user", from_user.username)
def test_get_team(self):
team = self.notification.get_team()
self.assertIsInstance(team, Team)
self.assertEqual(self.TEAM, team)
def test_edit(self):
subject = "NEW SUBJECT"
message = "NEW MESSAGE"
status = NotificationStatus.ARCHIVED
recipients = [4]
from_user = 4
event = NotificationEvent.EXPORT_ACTIVITY_ASYNC
channel = NotificationChannels.APP
self.notification.edit(
subject=subject,
message=message,
status=status,
recipients=recipients,
team=self.TEAM,
from_user=from_user,
event=event,
channel=channel,
)
self.assertEqual(subject, self.notification.subject)
self.assertEqual(message, self.notification.message)
self.assertEqual(status, self.notification.status)
self.assertListEqual(recipients, self.notification.recipient_user_ids)
self.assertEqual(self.TEAM.id, self.notification.team_id)
self.assertEqual(from_user, self.notification.from_user_id)
self.assertEqual(event, self.notification.event)
self.assertEqual(channel, self.notification.channels[0])
def test_edit_incorrect_inputs(self):
with self.assertRaises(IllegalArgumentError):
self.notification.edit(subject=["Not a string"])
with self.assertRaises(IllegalArgumentError):
self.notification.edit(message=False)
with self.assertRaises(IllegalArgumentError):
self.notification.edit(status="Deleting")
with self.assertRaises(IllegalArgumentError):
self.notification.edit(recipients=True)
with self.assertRaises(IllegalArgumentError):
self.notification.edit(recipients=["Not a user ID"])
with self.assertRaises(IllegalArgumentError):
self.notification.edit(team=5)
with self.assertRaises(IllegalArgumentError):
self.notification.edit(from_user="self")
with self.assertRaises(IllegalArgumentError):
self.notification.edit(from_user=5.3)
with self.assertRaises(IllegalArgumentError):
self.notification.edit(event="Update")
with self.assertRaises(IllegalArgumentError):
self.notification.edit(channel=[NotificationChannels.APP])
# test added due to #847 - providing no inputs overwrites values
def test_edit_notification_clear_values(self):
# setup
initial_subject = "NEW SUBJECT"
initial_message = "NEW MESSAGE"
initial_status = NotificationStatus.ARCHIVED
initial_recipients = [4]
initial_from_user = 4
initial_event = NotificationEvent.EXPORT_ACTIVITY_ASYNC
initial_channel = NotificationChannels.APP
self.notification.edit(
subject=initial_subject,
message=initial_message,
status=initial_status,
recipients=initial_recipients,
team=self.TEAM,
from_user=initial_from_user,
event=initial_event,
channel=initial_channel,
)
        # Edit without mentioning values; everything should stay the same
new_subject = "AWESOME SUBJECT NEW"
self.notification.edit(subject=new_subject)
# testing
self.assertEqual(self.notification.subject, new_subject)
self.assertEqual(self.notification.message, initial_message)
self.assertEqual(self.notification.status, initial_status)
self.assertEqual(self.notification.recipient_user_ids, initial_recipients)
self.assertEqual(self.notification.from_user_id, initial_from_user)
self.assertEqual(self.notification.event, initial_event)
self.assertEqual(self.notification.channels, [initial_channel])
        # Edit while clearing the values; subject, message and status cannot be cleared
self.notification.edit(
subject=None,
message=None,
status=None,
recipients=None,
team=None,
from_user=None,
event=None,
channel=None,
)
self.assertEqual(self.notification.subject, new_subject)
self.assertEqual(self.notification.message, initial_message)
self.assertEqual(self.notification.status, initial_status)
self.assertEqual(self.notification.recipient_user_ids, list())
self.assertEqual(self.notification.from_user_id, None)
self.assertEqual(self.notification.event, initial_event)
self.assertEqual(self.notification.channels, [initial_channel])
|
|
from datetime import datetime
from django.utils import timezone
from bulk_update.helper import bulk_update
from pontoon.base.models import (
Entity,
Locale,
Translation,
)
from pontoon.base.utils import match_attr
class ChangeSet(object):
"""
Stores a set of changes to be made to the database and the
    translations stored in VCS. Once all the necessary changes have been
    stored, execute() applies them all at once efficiently.
"""
def __init__(self, db_project, vcs_project, now):
"""
:param now:
Datetime to use for marking when approvals happened.
"""
self.db_project = db_project
self.vcs_project = vcs_project
self.now = now
self.executed = False
self.changes = {
'update_vcs': [],
'update_db': [],
'obsolete_db': [],
'create_db': []
}
self.entities_to_update = []
self.translations_to_update = []
self.translations_to_create = []
self.commit_authors_per_locale = {}
def update_vcs_entity(self, locale_code, db_entity, vcs_entity):
"""
Replace the translations in VCS with the translations from the
database.
"""
self.changes['update_vcs'].append((locale_code, db_entity, vcs_entity))
def create_db_entity(self, vcs_entity):
"""Create a new entity in the database."""
self.changes['create_db'].append(vcs_entity)
def update_db_entity(self, locale_code, db_entity, vcs_entity):
"""Update the database with translations from VCS."""
self.changes['update_db'].append((locale_code, db_entity, vcs_entity))
def obsolete_db_entity(self, db_entity):
"""Mark the given entity as obsolete."""
self.changes['obsolete_db'].append(db_entity.pk)
def execute(self):
"""
Execute the changes stored in this changeset. Execute can only
be called once per changeset; subsequent calls raise a
RuntimeError, even if the changes failed.
"""
if self.executed:
raise RuntimeError('execute() can only be called once per changeset.')
else:
self.executed = True
# Store locales and resources for FK relationships.
self.locales = {l.code: l for l in Locale.objects.all()}
self.resources = {r.path: r for r in self.db_project.resources.all()}
# Perform the changes and fill the lists for bulk creation and
# updating.
self.execute_update_vcs()
self.execute_create_db()
self.execute_update_db()
self.execute_obsolete_db()
# Apply the built-up changes to the DB
if len(self.entities_to_update) > 0:
bulk_update(self.entities_to_update, update_fields=[
'resource',
'string',
'string_plural',
'key',
'comment',
'order',
'source'
])
Translation.objects.bulk_create(self.translations_to_create)
if len(self.translations_to_update) > 0:
bulk_update(self.translations_to_update, update_fields=[
'entity',
'locale',
'string',
'plural_form',
'approved',
'approved_user_id',
'approved_date',
'fuzzy',
'extra'
])
def execute_update_vcs(self):
resources = self.vcs_project.resources
changed_resources = set()
for locale_code, db_entity, vcs_entity in self.changes['update_vcs']:
changed_resources.add(resources[db_entity.resource.path])
vcs_translation = vcs_entity.translations[locale_code]
db_translations = (db_entity.translation_set
.filter(approved=True, locale__code=locale_code))
            # The VCS translation is fuzzy if any of the approved DB
            # translations are fuzzy; otherwise it is not.
vcs_translation.fuzzy = any(t for t in db_translations if t.fuzzy)
if len(db_translations) > 0:
last_translation = max(
db_translations,
key=lambda t: t.date or timezone.make_aware(datetime.min)
)
vcs_translation.last_updated = last_translation.date
vcs_translation.last_translator = last_translation.user
# Replace existing translations with ones from the database.
vcs_translation.strings = {
db.plural_form: db.string for db in db_translations
}
# Track which translators were involved.
self.commit_authors_per_locale[locale_code] = [t.user for t in db_translations if t.user]
for resource in changed_resources:
resource.save()
def get_entity_updates(self, vcs_entity):
"""
Return a dict of the properties and values necessary to create
or update a database entity from a VCS entity.
"""
return {
'resource': self.resources[vcs_entity.resource.path],
'string': vcs_entity.string,
'string_plural': vcs_entity.string_plural,
'key': vcs_entity.key,
'comment': '\n'.join(vcs_entity.comments),
'order': vcs_entity.order,
'source': vcs_entity.source
}
def execute_create_db(self):
for vcs_entity in self.changes['create_db']:
entity = Entity(**self.get_entity_updates(vcs_entity))
entity.save() # We can't use bulk_create since we need a PK
for locale_code, vcs_translation in vcs_entity.translations.items():
for plural_form, string in vcs_translation.strings.items():
self.translations_to_create.append(Translation(
entity=entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=self.now if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy
))
def execute_update_db(self):
for locale_code, db_entity, vcs_entity in self.changes['update_db']:
for field, value in self.get_entity_updates(vcs_entity).items():
setattr(db_entity, field, value)
if db_entity.is_dirty(check_relationship=True):
self.entities_to_update.append(db_entity)
# Update translations for the entity.
vcs_translation = vcs_entity.translations[locale_code]
db_translations = db_entity.translation_set.filter(
locale__code=locale_code,
)
approved_translations = []
for plural_form, string in vcs_translation.strings.items():
# Check if we need to modify an existing translation or
# create a new one.
db_translation = match_attr(db_translations,
plural_form=plural_form,
string=string)
if db_translation:
if not db_translation.approved:
db_translation.approved = True
db_translation.approved_date = self.now
db_translation.fuzzy = vcs_translation.fuzzy
db_translation.extra = vcs_translation.extra
if db_translation.is_dirty():
self.translations_to_update.append(db_translation)
if not db_translation.fuzzy:
approved_translations.append(db_translation)
else:
self.translations_to_create.append(Translation(
entity=db_entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=self.now if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy,
extra=vcs_translation.extra
))
            # Any previously approved translations that were not re-approved above get unapproved.
for translation in db_translations.filter(approved_date__lte=self.now):
if translation not in approved_translations:
translation.approved = False
translation.approved_user = None
translation.approved_date = None
if translation.is_dirty():
self.translations_to_update.append(translation)
def execute_obsolete_db(self):
(Entity.objects
.filter(pk__in=self.changes['obsolete_db'])
.update(obsolete=True))
|
|
"""
Simplex optimization.
"""
from __future__ import absolute_import
from __future__ import division
import copy
import collections
import logging
import logging.config
import numpy as np
import re
import sqlite3
import textwrap
import calculate
import compare
import constants as co
import datatypes
import opt
import parameters
logger = logging.getLogger(__name__)
class Simplex(opt.Optimizer):
"""
Optimizes force field parameters using an in-house version of the simplex
method. See `Optimizer` for repeated documentation.
Attributes
----------
_max_cycles_wo_change : int
End the simplex optimization early if there have
been this many consecutive simplex steps without
improvement in the objective function.
do_massive_contraction : bool
If True, allows massive contractions to be
performed, contracting all parameters at once.
do_weighted_reflection : bool
If True, weights parameter sets based on their
objective function score when determining the
reflection point.
max_cycles : int
Maximum number of simplex cycles.
max_params : int
Maximum number of parameters used in a single simplex cycle.
"""
def __init__(self,
direc=None,
ff=None,
ff_lines=None,
args_ff=None,
args_ref=None):
super(Simplex, self).__init__(
direc, ff, ff_lines, args_ff, args_ref)
self._max_cycles_wo_change = None
self.do_massive_contraction = True
self.do_weighted_reflection = True
self.max_cycles = 100
self.max_params = 3
@property
def best_ff(self):
# Typically, self.new_ffs would include the original FF, self.ff,
# but this can be changed by massive contractions.
if self.new_ffs:
self.new_ffs = sorted(self.new_ffs, key=lambda x: x.score)
# I think this is necessary after massive contraction.
            # Massive contraction can potentially make everything worse.
# No, it can't!!! The best FF is always retained! /Per-Ola
# Yep, he's right. /Eric
if self.new_ffs[0].score < self.ff.score:
best_ff = self.new_ffs[0]
best_ff = restore_simp_ff(best_ff, self.ff)
return best_ff
else:
return self.ff
else:
return self.ff
@opt.catch_run_errors
def run(self, r_data=None):
"""
Once all attributes are setup as you so desire, run this method to
optimize the parameters.
Returns
-------
`datatypes.FF` (or subclass)
Contains the best parameters.
"""
if r_data is None:
r_data = opt.return_ref_data(self.args_ref)
if self.ff.score is None:
logger.log(20, '~~ CALCULATING INITIAL FF SCORE ~~'.rjust(79, '~'))
self.ff.export_ff()
# Could store data on self.ff.data if we wanted. Not necessary for
# simplex. If simplex yielded no improvements, it would return this
            # FF, and then we might want the data such that we don't have to
# recalculate it in gradient. Let's hope simplex generally yields
# improvements.
data = calculate.main(self.args_ff)
#deprecated
#self.ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
self.ff.score = compare.compare_data(r_dict, c_dict)
else:
logger.log(20, ' -- Reused existing score and data for initial FF.')
logger.log(20, '~~ SIMPLEX OPTIMIZATION ~~'.rjust(79, '~'))
logger.log(20, 'INIT FF SCORE: {}'.format(self.ff.score))
opt.pretty_ff_results(self.ff, level=20)
# Here's what we do if there are too many parameters.
if self.max_params and len(self.ff.params) > self.max_params:
logger.log(20, ' -- More parameters than the maximum allowed.')
logger.log(5, 'CURRENT PARAMS: {}'.format(len(self.ff.params)))
logger.log(5, 'MAX PARAMS: {}'.format(self.max_params))
# Here we select the parameters that have the lowest 2nd
# derivatives.
# Could fail when simplex finds improvements but restores other
# parameters.
# if self.ff.params[0].d1:
if None in [x.d1 for x in self.ff.params]:
logger.log(15, ' -- Calculating new parameter derivatives.')
# Do central differentiation so we can calculate derivatives.
# Another option would be to write code to determine
# derivatives only from forward differentiation.
ffs = opt.differentiate_ff(self.ff, central=True)
# We have to score to get the derivatives.
for ff in ffs:
ff.export_ff(path=self.ff.path, lines=self.ff_lines)
logger.log(20, ' -- Calculating {}.'.format(ff))
data = calculate.main(self.args_ff)
#deprecated
#ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
ff.score = compare.compare_data(r_dict, c_dict)
opt.pretty_ff_results(ff)
# Add the derivatives to your original FF.
opt.param_derivs(self.ff, ffs)
# Only keep the forward differentiated FFs.
ffs = opt.extract_forward(ffs)
logger.log(5, ' -- Keeping {} forward differentiated '
'FFs.'.format(len(ffs)))
else:
logger.log(15, ' -- Reusing existing parameter derivatives.')
# Differentiate all parameters forward. Yes, I know this is
                # counter-intuitive because we are only going to use a subset of
# the forward differentiated FFs. However, this is very
# computationally inexpensive because we're not scoring them
# now. We will remove the forward differentiated FFs we don't
# want before scoring.
ffs = opt.differentiate_ff(self.ff, central=False)
# This sorts the parameters based upon their 2nd derivative.
# It keeps the ones with lowest 2nd derivatives.
# SCHEDULED FOR CHANGES. NOT A GOOD SORTING CRITERION.
params = select_simp_params_on_derivs(
self.ff.params, max_params=self.max_params)
# From the entire list of forward differentiated FFs, pick
# out the ones that have the lowest 2nd derivatives.
self.new_ffs = opt.extract_ff_by_params(ffs, params)
logger.log(1, '>>> len(self.new_ffs): {}'.format(len(self.new_ffs)))
# Reduce number of parameters.
# Will need an option that's not MM3* specific in the future.
ff_rows = [x.mm3_row for x in params]
ff_cols = [x.mm3_col for x in params]
for ff in self.new_ffs:
new_params = []
for param in ff.params:
if param.mm3_row in ff_rows and param.mm3_col in ff_cols:
new_params.append(param)
ff.params = new_params
            # Make a copy of your original FF that has fewer parameters.
ff_copy = copy.deepcopy(self.ff)
new_params = []
for param in ff.params:
if param.mm3_row in ff_rows and param.mm3_col in ff_cols:
new_params.append(param)
ff_copy.params = new_params
else:
# In this case it's simple. Just forward differentiate each
# parameter.
self.new_ffs = opt.differentiate_ff(self.ff, central=False)
logger.log(1, '>>> len(self.new_ffs): {}'.format(len(self.new_ffs)))
# Still make that FF copy.
ff_copy = copy.deepcopy(self.ff)
# Double check and make sure they're all scored.
for ff in self.new_ffs:
if ff.score is None:
ff.export_ff(path=self.ff.path, lines=self.ff_lines)
logger.log(20, ' -- Calculating {}.'.format(ff))
data = calculate.main(self.args_ff)
#deprecated
#ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
ff.score = compare.compare_data(r_dict, c_dict)
opt.pretty_ff_results(ff)
        # Add your copy of the original FF to the forward differentiated FFs.
self.new_ffs = sorted(self.new_ffs + [ff_copy], key=lambda x: x.score)
# Allow 3 cycles w/o change for each parameter present. Remember that
# the initial FF was added here, hence the minus one.
self._max_cycles_wo_change = 3 * (len(self.new_ffs) - 1)
wrapper = textwrap.TextWrapper(width=79)
# Shows all FFs parameters.
opt.pretty_ff_params(self.new_ffs)
# Start the simplex cycles.
current_cycle = 0
cycles_wo_change = 0
while current_cycle < self.max_cycles \
and cycles_wo_change < self._max_cycles_wo_change:
current_cycle += 1
# Save the last best in case some accidental sort goes on.
            # Plus it makes reading the code a little easier.
last_best_ff = copy.deepcopy(self.new_ffs[0])
logger.log(20, '~~ START SIMPLEX CYCLE {} ~~'.format(
current_cycle).rjust(79, '~'))
logger.log(20, 'ORDERED FF SCORES:')
logger.log(20, wrapper.fill('{}'.format(
' '.join('{:15.4f}'.format(x.score) for x in self.new_ffs))))
inv_ff = self.ff.__class__()
if self.do_weighted_reflection:
inv_ff.method = 'WEIGHTED INVERSION'
else:
inv_ff.method = 'INVERSION'
inv_ff.params = copy.deepcopy(last_best_ff.params)
ref_ff = self.ff.__class__()
ref_ff.method = 'REFLECTION'
ref_ff.params = copy.deepcopy(last_best_ff.params)
# Need score difference sum for weighted inversion.
# Calculate this value before going into loop.
if self.do_weighted_reflection:
# If zero, should break.
score_diff_sum = sum([x.score - self.new_ffs[-1].score
for x in self.new_ffs[:-1]])
if score_diff_sum == 0.:
logger.warning(
'No difference between force field scores. '
'Exiting simplex.')
# We want to raise opt.OptError such that
# opt.catch_run_errors will write the best FF obtained thus
# far.
raise opt.OptError(
'No difference between force field scores. '
'Exiting simplex.')
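                    # Illustrative arithmetic (made-up scores): with three FFs
                    # scoring [10.0, 12.0, 20.0] (worst last), the weights are
                    # 10-20 = -10 and 12-20 = -8, so score_diff_sum = -18 and
                    # inv_val below becomes a weighted centroid that favours
                    # the better-scoring FFs; the reflection point is then
                    # 2 * inv_val minus the worst FF's parameter value.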
for i in range(0, len(last_best_ff.params)):
if self.do_weighted_reflection:
inv_val = (
sum([x.params[i].value *
(x.score - self.new_ffs[-1].score)
for x in self.new_ffs[:-1]])
/ score_diff_sum)
else:
inv_val = (
sum([x.params[i].value for x in self.new_ffs[:-1]])
/
len(self.new_ffs[:-1]))
inv_ff.params[i].value = inv_val
ref_ff.params[i].value = (
2 * inv_val - self.new_ffs[-1].params[i].value)
# The inversion point does not need to be scored.
# Calculate score for reflected parameters.
ref_ff.export_ff(path=self.ff.path, lines=self.ff.lines)
data = calculate.main(self.args_ff)
#deprecated
#ref_ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
ref_ff.score = compare.compare_data(r_dict, c_dict)
opt.pretty_ff_results(ref_ff)
if ref_ff.score < last_best_ff.score:
logger.log(20, '~~ ATTEMPTING EXPANSION ~~'.rjust(79, '~'))
exp_ff = self.ff.__class__()
exp_ff.method = 'EXPANSION'
exp_ff.params = copy.deepcopy(last_best_ff.params)
for i in range(0, len(last_best_ff.params)):
exp_ff.params[i].value = (
3 * inv_ff.params[i].value -
2 * self.new_ffs[-1].params[i].value)
exp_ff.export_ff(path=self.ff.path, lines=self.ff.lines)
data = calculate.main(self.args_ff)
#deprecated
#exp_ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
exp_ff.score = compare.compare_data(r_dict, c_dict)
opt.pretty_ff_results(exp_ff)
if exp_ff.score < ref_ff.score:
self.new_ffs[-1] = exp_ff
logger.log(
20, ' -- Expansion succeeded. Keeping expanded '
'parameters.')
else:
self.new_ffs[-1] = ref_ff
logger.log(
20, ' -- Expansion failed. Keeping reflected parameters.')
elif ref_ff.score < self.new_ffs[-2].score:
logger.log(20, ' -- Keeping reflected parameters.')
self.new_ffs[-1] = ref_ff
else:
logger.log(20, '~~ ATTEMPTING CONTRACTION ~~'.rjust(79, '~'))
con_ff = self.ff.__class__()
con_ff.method = 'CONTRACTION'
con_ff.params = copy.deepcopy(last_best_ff.params)
for i in range(0, len(last_best_ff.params)):
if ref_ff.score > self.new_ffs[-1].score:
con_val = (
(inv_ff.params[i].value +
self.new_ffs[-1].params[i].value) / 2)
else:
con_val = (
(3 * inv_ff.params[i].value -
self.new_ffs[-1].params[i].value) / 2)
con_ff.params[i].value = con_val
self.ff.export_ff(params=con_ff.params)
data = calculate.main(self.args_ff)
#deprecated
#con_ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
con_ff.score = compare.compare_data(r_dict, c_dict)
opt.pretty_ff_results(con_ff)
# This change was made to reflect the 1998 Q2MM publication.
# if con_ff.score < self.new_ffs[-1].score:
if con_ff.score < self.new_ffs[-2].score:
logger.log(20, ' -- Contraction succeeded.')
self.new_ffs[-1] = con_ff
elif self.do_massive_contraction:
logger.log(
20, '~~ DOING MASSIVE CONTRACTION ~~'.rjust(79, '~'))
for ff_num, ff in enumerate(self.new_ffs[1:]):
for i in range(0, len(last_best_ff.params)):
ff.params[i].value = (
(ff.params[i].value +
self.new_ffs[0].params[i].value) / 2)
self.ff.export_ff(params=ff.params)
data = calculate.main(self.args_ff)
#deprecated
#ff.score = compare.compare_data(r_data, data)
r_dict = compare.data_by_type(r_data)
c_dict = compare.data_by_type(data)
r_dict, c_dict = compare.trim_data(r_dict,c_dict)
ff.score = compare.compare_data(r_dict, c_dict)
ff.method += ' MC'
opt.pretty_ff_results(ff)
else:
logger.log(
                        20, ' -- Contraction failed. Keeping parameters '
'anyway.')
self.new_ffs[-1] = con_ff
self.new_ffs = sorted(self.new_ffs, key=lambda x: x.score)
# Keep track of the number of cycles without change. If there's
# improvement, reset the counter.
if self.new_ffs[0].score < last_best_ff.score:
cycles_wo_change = 0
else:
cycles_wo_change += 1
logger.log(20, ' -- {} cycles without improvement out of {} '
'allowed.'.format(
cycles_wo_change, self._max_cycles_wo_change))
logger.log(20, 'BEST:')
opt.pretty_ff_results(self.new_ffs[0], level=20)
logger.log(20, '~~ END SIMPLEX CYCLE {} ~~'.format(
current_cycle).rjust(79, '~'))
# This sort is likely unnecessary because it should be done at the end
# of the last loop cycle, but I put it here just in case.
self.new_ffs = sorted(self.new_ffs, key=lambda x: x.score)
best_ff = self.new_ffs[0]
if best_ff.score < self.ff.score:
logger.log(20, '~~ SIMPLEX FINISHED WITH IMPROVEMENTS ~~'.rjust(
79, '~'))
best_ff = restore_simp_ff(best_ff, self.ff)
else:
logger.log(20, '~~ SIMPLEX FINISHED WITHOUT IMPROVEMENTS ~~'.rjust(
79, '~'))
            # This restores the initial parameters, so no need to use
# restore_simp_ff here.
best_ff = self.ff
opt.pretty_ff_results(self.ff, level=20)
opt.pretty_ff_results(best_ff, level=20)
logger.log(20, ' -- Writing best force field from simplex.')
best_ff.export_ff(best_ff.path)
return best_ff
def calc_simp_var(params):
"""
Simplex variable is calculated: (2nd der.) / (1st der.)**2
"""
logger.log(1, '>>> params: {}'.format(params))
logger.log(1, '>>> 1st ders.: {}'.format([x.d1 for x in params]))
logger.log(1, '>>> 2nd ders.: {}'.format([x.d2 for x in params]))
for param in params:
param.simp_var = param.d2 / param.d1**2.
# Sorting based upon the 2nd derivative isn't such a good criterion. This should
# be updated soon.
def select_simp_params_on_derivs(params, max_params=10):
"""
Sorts parameter sets from lowest to highest second
derivatives of their score in the objective function.
Parameters
----------
params : list of subclasses of `datatypes.Param`
"""
calc_simp_var(params)
keep = sorted(params, key=lambda x: x.simp_var)
logger.log(1, '>>> x.simp_var: {}'.format([x.simp_var for x in keep]))
# Eliminate all where simp_var is greater than 1. This means that the
# correct value is bracketed by the differentiation, so gradient
# optimization should work.
# keep = [x for x in keep if x.simp_var < 1.]
# Old sorting method.
# keep = sorted(params, key=lambda x: x.d2)
keep = keep[:max_params]
logger.log(1, '>>> x.simp_var: {}'.format([x.simp_var for x in keep]))
logger.log(20, 'KEEPING PARAMS FOR SIMPLEX:\n{}'.format(
' '.join([str(x) for x in keep])))
return keep
def restore_simp_ff(new_ff, old_ff):
"""
The old FF has properties that we need to copy to the new FF. We also need
to grab all the extra parameters included in old FF and add them to the new
FF.
"""
old_ff.copy_attributes(new_ff)
if len(old_ff.params) > len(new_ff.params):
logger.log(15, ' -- Restoring {} parameters to new FF.'.format(
len(old_ff.params) - len(new_ff.params)))
logger.log(1, '>>> old_ff.params:')
logger.log(1, old_ff.params)
logger.log(1, [x.d1 for x in old_ff.params])
logger.log(1, [x.d2 for x in old_ff.params])
opt.pretty_derivs(old_ff.params, level=1)
logger.log(1, '>>> new_ff.params:')
logger.log(1, new_ff.params)
logger.log(1, [x.d1 for x in new_ff.params])
logger.log(1, [x.d2 for x in new_ff.params])
opt.pretty_derivs(new_ff.params, level=1)
# Backup new parameters.
new_params = copy.deepcopy(new_ff.params)
# Copy over all old parameters.
new_ff.params = copy.deepcopy(old_ff.params)
# Replace the old with the new.
for i, param_o in enumerate(old_ff.params):
for param_n in new_params:
# Should replace this with a general index scheme.
if param_o.mm3_row == param_n.mm3_row and \
param_o.mm3_col == param_n.mm3_col:
new_ff.params[i] = copy.deepcopy(param_n)
return new_ff
|
|
# tempfile.py unit tests.
import tempfile
import os
import signal
import sys
import re
import warnings
import unittest
from test import support
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
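# Hedged aside (not part of the original test): on Unix the per-process limit
# can also be inspected programmatically, e.g.
#
#   import resource
#   soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)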
if sys.platform.startswith('openbsd'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing that requires
# threads is not done here.
# Common functionality.
class BaseTestCase(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
class TestExports(BaseTestCase):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
class TestRandomNameSequence(BaseTestCase):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
for s in r:
i += 1
if i == 20:
break
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
                # bypass the normal exit handlers - leave those to
# the parent.
os._exit(0)
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except EnvironmentError:
pass
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
class TestCandidateTempdirList(BaseTestCase):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
# We test _get_default_tempdir by testing gettempdir.
class TestGetCandidateNames(BaseTestCase):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
class TestMkstempInner(BaseTestCase):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
file = self.mkstemped(dir, pre, suf, bin)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if not has_spawnl:
return # ugh, can't use SkipTest.
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
# _mkstemp_inner can create files in text mode
if not has_textmode:
return # ugh, can't use SkipTest.
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
class TestGetTempPrefix(BaseTestCase):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
fd = os.open(p, os.O_RDWR | os.O_CREAT)
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
class TestGetTempDir(BaseTestCase):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
class TestMkstemp(BaseTestCase):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
        # mkstemp can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
class TestMkdtemp(BaseTestCase):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
class TestMktemp(BaseTestCase):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
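    # Hedged sketch of the race-free alternative this warning points at:
    # mkstemp() atomically creates and opens the file instead of merely
    # returning a name that another process could claim first.
    #
    #   fd, name = tempfile.mkstemp(dir=some_private_dir)  # placeholder dir
    #   try:
    #       os.write(fd, b"data")
    #   finally:
    #       os.close(fd)
    #       os.unlink(name)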
def setUp(self):
self.dir = tempfile.mkdtemp()
super().setUp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
super().tearDown()
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
file = self.mktemped(self.dir, pre, suf)
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class TestNamedTemporaryFile(BaseTestCase):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
class TestSpooledTemporaryFile(BaseTestCase):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
        # A SpooledTemporaryFile written to beyond max_size (here after a
        # sparse seek) rolls over when that write occurs
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
f.close()
f.close()
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
f.close()
f.close()
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_truncate_with_size_parameter(self):
# A SpooledTemporaryFile can be truncated to zero size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.seek(0)
f.truncate()
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'')
# A SpooledTemporaryFile can be truncated to a specific size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(4)
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'abcd')
# A SpooledTemporaryFile rolls over if truncated to large size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(20)
self.assertTrue(f._rolled)
if has_stat:
self.assertEqual(os.fstat(f.fileno()).st_size, 20)
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
class TestTemporaryFile(BaseTestCase):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
tempfile.TemporaryFile()
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
# cleanup
f.close()
os.rmdir(dir)
raise
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
# Helper for test_del_on_shutdown
class NulledModules:
def __init__(self, *modules):
self.refs = [mod.__dict__ for mod in modules]
self.contents = [ref.copy() for ref in self.refs]
def __enter__(self):
for d in self.refs:
for key in d:
d[key] = None
def __exit__(self, *exc_info):
for d, c in zip(self.refs, self.contents):
d.clear()
d.update(c)
class TestTemporaryDirectory(BaseTestCase):
"""Test TemporaryDirectory()."""
def do_create(self, dir=None, pre="", suf="", recurse=1):
if dir is None:
dir = tempfile.gettempdir()
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
self.nameCheck(tmp.name, dir, pre, suf)
# Create a subdirectory and some files
if recurse:
self.do_create(tmp.name, pre, suf, recurse-1)
with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
f.write(b"Hello world!")
return tmp
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
# (noted as part of Issue #10188)
with tempfile.TemporaryDirectory() as nonexistent:
pass
with self.assertRaises(os.error):
tempfile.TemporaryDirectory(dir=nonexistent)
def test_explicit_cleanup(self):
# A TemporaryDirectory is deleted when cleaned up
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
self.assertTrue(os.path.exists(d.name),
"TemporaryDirectory %s does not exist" % d.name)
d.cleanup()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
finally:
os.rmdir(dir)
@support.skip_unless_symlink
def test_cleanup_with_symlink_to_a_directory(self):
# cleanup() should not follow symlinks to directories (issue #12464)
d1 = self.do_create()
d2 = self.do_create()
# Symlink d1/foo -> d2
os.symlink(d2.name, os.path.join(d1.name, "foo"))
# This call to cleanup() should not follow the "foo" symlink
d1.cleanup()
self.assertFalse(os.path.exists(d1.name),
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
self.assertEqual(os.listdir(d2.name), ['test.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
name = d.name
del d # Rely on refcounting to invoke __del__
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
finally:
os.rmdir(dir)
@unittest.expectedFailure # See issue #10188
def test_del_on_shutdown(self):
# A TemporaryDirectory may be cleaned up during shutdown
# Make sure it works with the relevant modules nulled out
with self.do_create() as dir:
d = self.do_create(dir=dir)
# Mimic the nulling out of modules that
# occurs during system shutdown
modules = [os, os.path]
if has_stat:
modules.append(stat)
# Currently broken, so suppress the warning
# that is otherwise emitted on stdout
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
# Currently broken, so stop spurious exception by
# indicating the object has already been closed
d._closed = True
# And this assert will fail, as expected by the
# unittest decorator...
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
def test_warnings_on_cleanup(self):
# Two kinds of warning on shutdown
# Issue 10888: may write to stderr if modules are nulled out
# ResourceWarning will be triggered by __del__
with self.do_create() as dir:
if os.sep != '\\':
# Embed a backslash in order to make sure string escaping
# in the displayed error message is dealt with correctly
suffix = '\\check_backslash_handling'
else:
suffix = ''
d = self.do_create(dir=dir, suf=suffix)
# Check for the Issue 10888 message
modules = [os, os.path]
if has_stat:
modules.append(stat)
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
message = err.getvalue().replace('\\\\', '\\')
self.assertIn("while cleaning up", message)
self.assertIn(d.name, message)
# Check for the resource warning
with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
warnings.filterwarnings("always", category=ResourceWarning)
d.__del__()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after __del__" % d.name)
def test_multiple_close(self):
# Can be cleaned-up many times without error
d = self.do_create()
d.cleanup()
d.cleanup()
d.cleanup()
def test_context_manager(self):
# Can be used as a context manager
d = self.do_create()
with d as name:
self.assertTrue(os.path.exists(name))
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
import argparse
from collections import OrderedDict
import os
import re
import sys
import types
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
__version__ = "0.9.3"
ACTION_TYPES_THAT_DONT_NEED_A_VALUE = {argparse._StoreTrueAction,
argparse._StoreFalseAction, argparse._CountAction,
argparse._StoreConstAction, argparse._AppendConstAction}
# global ArgumentParser instances
_parsers = {}
def initArgumentParser(name=None, **kwargs):
"""Creates a global ArgumentParser instance with the given name,
passing any args other than "name" to the ArgumentParser constructor.
This instance can then be retrieved using getArgumentParser(..)
"""
if name is None:
name = "default"
if name in _parsers:
raise ValueError(("kwargs besides 'name' can only be passed in the"
" first time. '%s' ArgumentParser already exists: %s") % (
name, _parsers[name]))
kwargs.setdefault('formatter_class', argparse.ArgumentDefaultsHelpFormatter)
kwargs.setdefault('conflict_handler', 'resolve')
_parsers[name] = ArgumentParser(**kwargs)
def getArgumentParser(name=None, **kwargs):
"""Returns the global ArgumentParser instance with the given name. The 1st
time this function is called, a new ArgumentParser instance will be created
for the given name, and any args other than "name" will be passed on to the
ArgumentParser constructor.
"""
if name is None:
name = "default"
if len(kwargs) > 0 or name not in _parsers:
initArgumentParser(name, **kwargs)
return _parsers[name]
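# Usage sketch (not part of the original module; the option name and env var
# below are hypothetical). The global registry lets separate modules share a
# single parser instance by name:
#
#   p = getArgumentParser()                # creates the "default" parser
#   p.add_argument("--port", env_var="PORT", type=int, default=8080)
#   options = p.parse_args()
#   same_p = getArgumentParser()           # later calls return the same instance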
class ArgumentDefaultsRawHelpFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter,
argparse.RawDescriptionHelpFormatter):
"""HelpFormatter that adds default values AND doesn't do line-wrapping"""
pass
class ArgumentParser(argparse.ArgumentParser):
"""Drop-in replacement for argparse.ArgumentParser that adds support for
environment variables and .ini or .yaml-style config files.
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=argparse.HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
add_config_file_help=True,
add_env_var_help=True,
default_config_files=[],
allow_unknown_config_file_keys=False,
args_for_setting_config_path=[],
config_arg_is_required=False,
config_arg_help_message="config file path",
):
"""Supports all the same args as the argparse.ArgumentParser
constructor, as well as the following additional args.
Additional Args:
add_config_file_help: Whether to add a description of config file
syntax to the help message.
add_env_var_help: Whether to add something to the help message for
args that can be set through environment variables.
default_config_files: When specified, this list of config files will
be parsed in order, with the values from each config file
taking precedence over previous ones. This allows an application
to look for config files in multiple standard locations such as
the install directory, home directory, and current directory:
["<install dir>/app_config.ini",
"~/.my_app_config.ini",
"./app_config.txt"]
allow_unknown_config_file_keys: Whether unknown config file keys
should be ignored or whether it should be an error.
args_for_setting_config_path: A list of one or more command line
args that would allow a user to provide a config file path
(eg. ["-c", "--config-file"]). Default: []
config_arg_is_required: when args_for_setting_config_path is set,
set this to True to always require users to provide a config path.
config_arg_help_message: when args_for_setting_config_path is set,
this will be the help message for the config_file_args.
"""
self._add_config_file_help = add_config_file_help
self._add_env_var_help = add_env_var_help
# extract kwargs that can be passed to the super constructor
kwargs_for_super = {k: v for k, v in locals().items() if k in [
"prog", "usage", "description", "epilog", "version", "parents",
"formatter_class", "prefix_chars", "fromfile_prefix_chars",
"argument_default", "conflict_handler", "add_help" ]}
if sys.version_info >= (3, 3) and "version" in kwargs_for_super:
del kwargs_for_super["version"] # version arg deprecated in v3.3
argparse.ArgumentParser.__init__(self, **kwargs_for_super)
# parse the additional args
self._default_config_files = default_config_files
self._allow_unknown_config_file_keys = allow_unknown_config_file_keys
if args_for_setting_config_path:
self.add_argument(*args_for_setting_config_path, dest="config_file",
required=config_arg_is_required, help=config_arg_help_message,
is_config_file=True)
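# Constructor sketch (hypothetical paths and option names): look for settings
# in two default config files and also accept an explicit config path flag.
#
#   parser = ArgumentParser(
#       default_config_files=["/etc/my_app.conf", "~/.my_app.conf"],
#       args_for_setting_config_path=["-c", "--config-file"])
#   parser.add_argument("--verbose", action="store_true",
#                       env_var="MY_APP_VERBOSE")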
def parse_args(self, args = None, namespace = None,
config_file_contents = None, env_vars = os.environ):
"""Supports all the same args as the ArgumentParser.parse_args(..),
as well as the following additional args.
Additional Args:
args: a list of args as in argparse, or a string (eg. "-x -y bla")
config_file_contents: String. Used for testing.
env_vars: Dictionary. Used for testing.
"""
args, argv = self.parse_known_args(args = args,
namespace = namespace,
config_file_contents = config_file_contents,
env_vars = env_vars)
if argv:
self.error('unrecognized arguments: %s' % ' '.join(argv))
return args
def parse_known_args(self, args = None, namespace = None,
config_file_contents = None, env_vars = os.environ):
"""Supports all the same args as the ArgumentParser.parse_args(..),
as well as the following additional args.
Additional Args:
args: a list of args as in argparse, or a string (eg. "-x -y bla")
config_file_contents: String. Used for testing.
env_vars: Dictionary. Used for testing.
"""
if args is None:
args = sys.argv[1:]
elif type(args) == str:
args = args.split()
else:
args = list(args)
# maps string describing the source (eg. env var) to a settings dict
# to keep track of where values came from (used by print_values())
self._source_to_settings = OrderedDict()
self._command_line_args_string = ' '.join(args)
if args:
self._source_to_settings["Command Line Args: "] = {
'': self._command_line_args_string}
# add env var settings to the command line that aren't there already
env_var_args = []
actions_with_env_var_values = [a for a in self._actions
if a.option_strings and a.env_var
and a.env_var in env_vars
and not any(opt in args for opt in a.option_strings)]
for a in actions_with_env_var_values:
key = a.env_var
value = env_vars[key]
env_var_args += self.convert_setting_to_command_line_arg(
a, key, value)
args = env_var_args + args
if env_var_args:
self._source_to_settings["Environment Variables:\n"] = OrderedDict(
[(a.env_var, env_vars[a.env_var])
for a in actions_with_env_var_values])
# read settings from config file(s)
if config_file_contents:
stream = StringIO(config_file_contents)
stream.name = "method arg"
config_streams = [stream]
else:
config_streams = self._open_config_files(args)
# add config file settings to the command line that aren't there already
# for each action, add its possible config keys to a dict
possible_config_keys = {config_key: action for action in self._actions
for config_key in self.get_possible_config_keys(action)}
# parse each config file
for stream in config_streams[::-1]:
try:
config_settings = self.parse_config_file(stream)
finally:
if hasattr(stream, "close"):
stream.close()
# make sure config file doesn't use any unknown keys
if not self._allow_unknown_config_file_keys:
invalid_keys = list(
set(config_settings.keys()) - set(possible_config_keys.keys()))
if invalid_keys:
self.error(("%s contains unknown config key(s): %s") % (
stream.name, ", ".join(invalid_keys)))
# add config settings to the command line if they aren't there already
config_args = []
for key, value in list(config_settings.items()):
if key in possible_config_keys:
action = possible_config_keys[key]
already_on_command_line = any(
arg in args for arg in action.option_strings)
if already_on_command_line:
del config_settings[key]
else:
config_args += self.convert_setting_to_command_line_arg(
action, key, value)
args = config_args + args
if config_args:
self._source_to_settings[
"Config File (%s):\n" %stream.name]=config_settings
# save default settings for use by print_values()
default_settings = OrderedDict()
for a in self._actions:
already_on_command_line = any(arg in args for arg in a.option_strings)
cares_about_default = a.option_strings or a.nargs in [
argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if (already_on_command_line or not cares_about_default or
a.default == None or a.default == argparse.SUPPRESS or
type(a) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE):
continue
else:
key = a.option_strings[-1] if a.option_strings else a.dest
default_settings[key] = str(a.default)
if default_settings:
self._source_to_settings["Defaults:\n"] = default_settings
# parse all args (including command-line, config file, and env var)
return argparse.ArgumentParser.parse_known_args(
self, args=args, namespace=namespace)
def parse_config_file(self, stream):
"""Parses a config file and return a dictionary of settings"""
settings = OrderedDict()
for i, line in enumerate(stream):
line = line.strip()
if not line or line[0] in ["#", ";", "["] or line.startswith("---"):
continue
white_space = r"\s*"
key = r"(?P<key>[^:=;#\s]+?)"
value1 = white_space + r"[:=]" + white_space + r"(?P<value>[^;#]+?)"
value2 = white_space + r"[\s]" + white_space + r"(?P<value>[^;#\s]+?)"
comment = white_space + r"(?P<comment>\s[;#].*)?"
key_only_match = re.match("^" + key + comment +"$", line)
if key_only_match:
key = key_only_match.group("key")
settings[key] = "true"
continue
key_value_match = re.match("^"+key+value1+comment+"$", line) or \
re.match("^"+key+value2+comment+"$", line)
if key_value_match:
key = key_value_match.group("key")
value = key_value_match.group("value")
settings[key] = value
continue
self.error("Unexpected line %s in %s: %s" % (i, stream.name, line))
return settings
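# Example of the .ini/.yaml-style lines parse_config_file() accepts (keys here
# are hypothetical): "key = value", "key: value" and bare flag keys are
# recognised, while "#"/";" comments, "[section]" headers and "---" lines are
# skipped.
#
#   # server settings
#   port = 8080
#   name: my-app
#   verbose            ; bare key, stored as "true"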
def convert_setting_to_command_line_arg(self, action, key, value):
"""Converts a config file or env var key/value to a list of
command line args to append to the command line.
Args:
action: The action corresponding to this setting
key: The config file key or env var name (used for error messages)
value: The raw value string from the config file or env var
"""
assert type(value) == str
args = []
if value.lower() == "true":
if type(action) not in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
self.error("%s set to 'True' rather than a value" % key)
args.append( action.option_strings[-1] )
elif value.startswith("[") and value.endswith("]"):
if type(action) != argparse._AppendAction:
self.error(("%s can't be set to a list '%s' unless its action "
"type is changed to 'append'") % (key, value))
for list_elem in value[1:-1].split(","):
args.append( action.option_strings[-1] )
args.append( list_elem.strip() )
else:
if type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
self.error("%s is a flag but is being set to '%s'" % (key,
value))
args.append( action.option_strings[-1] )
args.append( value )
return args
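# Resulting command-line fragments, sketched with hypothetical option names:
#   plain value          "port" = "8080"     ->  ["--port", "8080"]
#   boolean flag         "verbose" = "true"  ->  ["--verbose"]
#   list (append action) "tag" = "[a, b]"    ->  ["--tag", "a", "--tag", "b"]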
def get_possible_config_keys(self, action):
"""This method decides which actions can be set in a config file and
what their keys will be. It return a list of 0 or more config keys that
can be used to set the given action's value in a config file.
"""
keys = []
for arg in action.option_strings:
if arg.startswith(2*self.prefix_chars[0]):
keys += [arg[2:], arg] # eg. for '--bla' return ['bla', '--bla']
return keys
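# For example, an action added with option strings ["-c", "--config-file"]
# yields the config keys ["config-file", "--config-file"]; short options like
# "-c" on their own produce no config key.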
def _open_config_files(self, command_line_args):
"""Tries to parse config file path(s) from within command_line_args.
Returns a list of opened config files, including files specified on the
command line as well as any default_config_files specified in the
constructor that are present on disk.
Args:
command_line_args: List of all args (already split on spaces)
"""
# open any default config files
config_files = [open(f) for f in map(
os.path.expanduser, self._default_config_files) if os.path.isfile(f)]
if not command_line_args:
return config_files
# list actions which had is_config_file=True set. It's possible there is
# more than one such arg (perhaps to have multiple aliases for the file)
user_config_file_arg_actions = [
a for a in self._actions if getattr(a, "is_config_file", False)]
if not user_config_file_arg_actions:
return config_files
for action in user_config_file_arg_actions:
# try to parse out the config file path by using a clean new
# ArgumentParser that only knows this one arg/action.
arg_parser = argparse.ArgumentParser(
prefix_chars=self.prefix_chars,
add_help=False)
arg_parser._add_action(action)
# make parser not exit on error by replacing its error method.
# Otherwise it sys.exits(..) if, for example, config file
# is_required=True and user doesn't provide it.
def error_method(self, message):
pass
arg_parser.error = types.MethodType(error_method, arg_parser)
# check whether the user provided a value
parsed_arg = arg_parser.parse_known_args(args=command_line_args)
if not parsed_arg:
continue
namespace, _ = parsed_arg
user_config_file = getattr(namespace, action.dest, None)
if not user_config_file:
continue
# validate the user-provided config file path
user_config_file = os.path.expanduser(user_config_file)
if not os.path.isfile(user_config_file):
self.error('File not found: %s' % user_config_file)
config_files += [open(user_config_file)]
return config_files
def format_values(self):
"""Returns a string with all args and settings and where they came from
(eg. command line, config file, environment variable or default)
"""
r = StringIO()
for source, settings in self._source_to_settings.items():
r.write(source)
for key, value in settings.items():
if key:
r.write(" %-19s%s\n" % (key+":", value))
else:
r.write(" %s\n" % value)
return r.getvalue()
def print_values(self, file = sys.stdout):
"""Prints the format_values() string (to sys.stdout or another file)."""
file.write(self.format_values())
def format_help(self):
msg = ""
added_config_file_help = False
added_env_var_help = False
if self._add_config_file_help:
default_config_files = self._default_config_files
cc = 2*self.prefix_chars[0] # eg. --
config_keys = [(arg, a) for a in self._actions for arg in
a.option_strings if arg.startswith(cc) and a.dest != "help"]
config_path_actions = [a for a in
self._actions if getattr(a, "is_config_file", False)]
if (default_config_files or config_path_actions) and config_keys:
self._add_config_file_help = False # prevent duplication
added_config_file_help = True
msg += ("Args that start with '%s' (eg. %s) can also be set in "
"a config file") % (cc, config_keys[0][0])
config_arg_string = " or ".join(a.option_strings[0]
for a in config_path_actions if a.option_strings)
if config_arg_string:
config_arg_string = "specified via " + config_arg_string
if default_config_files or config_arg_string:
msg += " (%s)" % " or ".join(default_config_files +
[config_arg_string])
msg += " by using .ini or .yaml-style syntax "
examples = []
key_value_args = [arg for arg, a in config_keys
if type(a) not in ACTION_TYPES_THAT_DONT_NEED_A_VALUE]
if key_value_args:
examples += ["%s=value" % key_value_args[0].strip(cc)]
flag_args = [arg for arg, a in config_keys
if type(a) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE]
if flag_args:
examples += ["%s=TRUE" % flag_args[0].strip(cc)]
if examples:
msg += "(eg. %s)." % " or ".join(examples)
if self._add_env_var_help:
env_var_actions = [(a.env_var, a) for a in self._actions
if getattr(a, "env_var", None)]
for env_var, a in env_var_actions:
env_var_help_string = " [env var: %s]" % env_var
if not a.help:
a.help = ""
if env_var_help_string not in a.help:
a.help += env_var_help_string
added_env_var_help = True
self._add_env_var_help = False # prevent duplication
if added_env_var_help or added_config_file_help:
value_sources = ["defaults"]
if added_config_file_help:
value_sources = ["config file values"] + value_sources
if added_env_var_help:
value_sources = ["environment variables"] + value_sources
msg += (" If an arg is specified in more than one place, then "
"command-line values override %s.") % (
" which override ".join(value_sources))
if msg:
self.description = (self.description or "") + " " + msg
return argparse.ArgumentParser.format_help(self)
def add_argument(self, *args, **kwargs):
"""
This method supports the same args as ArgumentParser.add_argument(..)
as well as the additional args below.
Additional Args:
env_var: The name of the environment variable to check.
is_config_file: If True, this arg is treated as a config file path
This provides an alternative way to specify config files in place of
the ArgumentParser(fromfile_prefix_chars=..) mechanism.
Default: False
"""
env_var = kwargs.pop("env_var", None)
is_config_file = kwargs.pop("is_config_file", None)
action = self.original_add_argument_method(*args, **kwargs)
is_positional_arg = not action.option_strings
if is_positional_arg and env_var:
raise ValueError("env_var can't be set for a positional arg.")
if is_config_file and type(action) != argparse._StoreAction:
raise ValueError("arg with is_config_file=True must have action='store'")
action.env_var = env_var
action.is_config_file = is_config_file
return action
# wrap ArgumentParser's add_argument(..) method with the one above
argparse._ActionsContainer.original_add_argument_method = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add_argument = add_argument
# add all public classes in argparse module's namespace to this namespace so
# that the 2 modules are truly interchangeable
HelpFormatter = argparse.HelpFormatter
RawDescriptionHelpFormatter = argparse.RawDescriptionHelpFormatter
RawTextHelpFormatter = argparse.RawTextHelpFormatter
ArgumentDefaultsHelpFormatter = argparse.ArgumentDefaultsHelpFormatter
ArgumentError = argparse.ArgumentError
ArgumentTypeError = argparse.ArgumentTypeError
Action = argparse.Action
FileType = argparse.FileType
Namespace = argparse.Namespace
# create shorter aliases for the key methods and class names
getArgParser = getArgumentParser
getParser = getArgumentParser
ArgParser = ArgumentParser
Parser = ArgumentParser
argparse._ActionsContainer.add_arg = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add = argparse._ActionsContainer.add_argument
ArgumentParser.parse = ArgumentParser.parse_args
ArgumentParser.parse_known = ArgumentParser.parse_known_args
RawFormatter = RawDescriptionHelpFormatter
DefaultsFormatter = ArgumentDefaultsHelpFormatter
DefaultsRawFormatter = ArgumentDefaultsRawHelpFormatter
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from os import path
from os import SEEK_END
from os import system
from os import unlink
from psutil import Process
import click
import curses
import importlib
import psutil
import sys
try:
import gunicorn
assert gunicorn
except ImportError:
raise ValueError('Poort::cli requires Gunicorn to run!')
_config = None
def config():
global _config
if _config is None:
try:
sys.path.append('.')
_config = importlib.import_module('config')
except ImportError:
raise ValueError('Could not import `config.py`.')
return _config
@click.group()
@click.option('--debug/--no-debug', default=False)
def main(debug):
if debug:
click.echo('proc_name = %s' % config().proc_name)
click.echo('bind = %s' % config().bind)
click.echo('pidfile = %s' % config().pidfile)
def get_process():
if not path.exists(config().pidfile):
return False
try:
with open(config().pidfile) as stream:
return Process(int(stream.read().strip()))
except psutil.NoSuchProcess:
unlink(config().pidfile)
return False
@main.command('start')
@click.option('--package', default='app')
@click.option('--runnable', default='application')
@click.option('--environ', default='development')
@click.option('--check/--no-check', default=True)
@click.option('--reload', is_flag=True, default=False)
def start(package, runnable, environ, check, reload):
"""Start a server for your app.
:param package: Package/module containing your app.
:param runnable: Entrypoint of the server for requests.
:param environ: Which environment to start
(production, staging, development [default]).
:param check: Check if the package is importable and the entrypoint is
runnable.
:type package: str
:type runnable: str
:type environ: str
:type check: bool
.. code-block:: bash
poort start --environ production
poort status
"""
if get_process() is not False:
click.secho('Application is already running.', err=True, fg='red')
if reload:
click.secho('Restarting application.', fg='green')
system('kill -HUP `cat %s`' % config().pidfile)
else:
click.echo('Provide --reload to reload when already started.')
raise click.Abort
if check:
cmd = '%s -c \'import %s; exit(0 if hasattr(%s, "%s") else 1)\''
cmd = cmd % (sys.executable, package, package, runnable)
msg = 'Failed to import %s:%s.' % (package, runnable)
assert system(cmd) == 0, msg
click.secho('Starting your application.', fg='green')
system('ENVIRON=%s gunicorn -c config.py %s:%s' % (
environ, package, runnable))
@main.command('stop')
@click.option('--graceful/--quick', default=True)
def stop(graceful):
"""Stop the server.
:param graceful: Graceful or forceful (--quick).
:type graceful: bool
.. code-block:: bash
poort stop
"""
if get_process() is False:
click.secho('Application is not running.', err=True, fg='red')
raise click.Abort
if graceful:
click.secho('Stopping your application.', fg='green')
system('kill -TERM `cat %s`' % config().pidfile)
else:
click.secho('Stopping your application (force-quit).', fg='magenta')
system('kill -QUIT `cat %s`' % config().pidfile)
@main.command('reload')
def reload():
"""Reload the server.
.. code-block:: bash
poort reload
"""
if get_process() is False:
click.secho('Application is not running.', err=True, fg='red')
raise click.Abort
click.secho('Restarting application.', fg='green')
system('kill -HUP `cat %s`' % config().pidfile)
@main.command('scale')
@click.argument('way', type=click.Choice(['up', 'down']))
@click.argument('amount', default=1)
def scale(way, amount):
"""Scale the workers of server up or down.
:param way: Which way to scale (--way up | --way down)
:param amount: The amount of workers to scale.
:type way: str
:type amount: int
.. code-block:: bash
poort scale --way up --amount 2
"""
if get_process() is False:
click.secho('Application is not running.', err=True, fg='red')
raise click.Abort
if amount == 0:
click.secho('Cannot scale 0.', err=True, fg='red')
raise click.Abort
if way == 'down':
click.secho('Scaling application %d down.' % amount, fg='green')
for i in range(amount):
system('kill -TTOU `cat %s`' % config().pidfile)
elif way == 'up':
click.secho('Scaling application %d up.' % amount, fg='green')
for i in range(amount):
system('kill -TTIN `cat %s`' % config().pidfile)
@main.command('status')
@click.option('--delay', default=1)
def status(delay):
"""Show a status screen (refreshing) of the server.
The status screen shows information about the workers and gives you
some shortcut keys to handle the server (quit, upscale, downscale, reload).
It also shows the last couple of lines from the server error log.
:param delay: Refresh delay in seconds between screen updates.
:type delay: int
.. code-block:: bash
poort status
Output example::
test-poort-two
Running with 2 workers (default is 2)
Name PID CPU Mem
----------------------------------------
Master 27309 0.0% 8.1M
Worker 1 27316 0.3% 36.8M
Worker 2 27319 0.3% 36.6M
Waiting... -- (q)uit (u)pscale (d)ownscale (r)eload
[2016-06-30 13:54:13] [26806] [INFO] Worker exiting (pid: 26806)
[2016-06-30 13:54:13] [26805] [INFO] Worker exiting (pid: 26805)
[2016-06-30 13:54:13] [26831] [INFO] Booting worker with pid: 26831
[2016-06-30 13:54:13] [26832] [INFO] Booting worker with pid: 26832
.. warning::
This is a very hacky program, do not mash a key!
.. warning::
This needs refactoring into some nicer, more maintainable code.
"""
if get_process() is False:
click.secho('Application is not running.', err=True, fg='red')
raise click.Abort
process = get_process()
if not process:
click.secho('Application is not running.', err=True, fg='red')
raise click.Abort
screen = curses.initscr()
curses.halfdelay(delay * 10)
curses.noecho()
def line(name, proc, y, x=2):
cpu_percentage = proc.cpu_percent(None)
memory = proc.memory_info()
screen.addstr(y, x, '%-10s %5.d %4.1f%% %5.1fM' % (
name, proc.pid,
cpu_percentage, memory.rss / 1024 / 1024))
status = 'Waiting...'
def display(status, running):
if not process.is_running():
raise psutil.NoSuchProcess(process.pid)
children = process.children()
workers = len(children)
screen.erase()
screen.addstr(1, 2, config().proc_name or 'Unnamed')
screen.addstr(4, 2, 'Running with %d workers (default is %d)' % (
workers, config().workers))
screen.addstr(6, 2, '%-10s %5s %5s %6s' % (
'Name', 'PID', 'CPU', 'Mem'))
screen.addstr(7, 2, '-' * 40)
line('Master', process, 8)
for cx, child in enumerate(children):
line('Worker %d' % (cx + 1), child, 9 + cx)
usage = '(q)uit (u)pscale (d)ownscale (r)eload'
screen.addstr(9 + workers + 2, 2, '%-20s -- %s' % (
status, usage))
y, x = screen.getmaxyx()
height = y - 1
top = 9 + workers + 5
max_lines = height - top
with open(config().errorlog) as stream:
contents = tail(stream, max_lines)
for lx, content in enumerate(contents):
screen.addstr(top + lx, 2, content[:x - 4])
char = screen.getch()
if char != curses.ERR:
key = chr(char)
if key == 'q':
status = 'Quit'
running = False
elif key == 'u':
status = 'Scaling up'
system('kill -TTIN `cat %s`' % config().pidfile)
elif key == 'd':
status = 'Scaling down'
system('kill -TTOU `cat %s`' % config().pidfile)
elif key == 'r':
status = 'Restarting'
system('kill -HUP `cat %s`' % config().pidfile)
else:
status = 'Unknown key'
else:
status = 'Waiting...'
return status, running
running = True
try:
while running:
try:
status, running = display(status, running)
except KeyboardInterrupt:
running = False
except psutil.Error:
curses.endwin()
with open(config().errorlog) as stream:
click.echo(''.join(tail(stream, 24)))
click.secho('Application has stopped responding.', err=True, fg='red')
except:
curses.endwin()
raise
else:
curses.endwin()
def tail(f, lines=1, _buffer=4098):
"""Tail a file and get X lines from the end"""
# placeholder for the lines found
lines_found = []
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
f.seek(block_counter * _buffer, SEEK_END)
except IOError: # too small or too many lines requested
f.seek(0)
lines_found = f.readlines()
break
lines_found = f.readlines()
if len(lines_found) > lines:
break
block_counter -= 1
return lines_found[-lines:]
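# Usage sketch (hypothetical path): fetch the last 10 lines of a log file.
#
#   with open("/var/log/my_app/error.log") as fh:
#       last_lines = tail(fh, lines=10)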
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
tilecompare.py - compare two sets of tiles.
"""
import sys
import os
import re
import dbutil
from osgeo import gdal
import numpy as np
from dbcompare import ComparisonWrapper
from EOtools.execute import execute
# #
# # TileComparisonPair class
# #
#
# Constants
#
IGNORE_CLASS_ID = [2]
MOSAIC_CLASS_ID = [4]
class TileComparisonPair(object):
"""A pair of databases from which tiles are to be compared.
Analogous to the ComparisonPair class for comparing databases, the
TileComparisonPair class provides for comparison of tile stores from two
databases. The first database pertains to a benchmark tile store, and the
second database relates to the tile store arising from the latest ingest
code we are seeking to verify.
"""
# pylint:disable=too-many-instance-attributes
def __init__(self, db1, db2, schema1, schema2):
"""
Positional Arguments:
db1, db2: Connections to the databases to be compared.
Keyword Arguments:
schema1: The schema to be used for the first database (db1)
schema2: The schema to be used for the second database (db2)
"""
# Set autocommit mode on the connections; retain the old settings.
self.old_autocommit = (db1.autocommit, db2.autocommit)
db1.autocommit = True
db2.autocommit = True
# Sanitise the schema names, just in case.
self.schema1 = dbutil.safe_name(schema1)
self.schema2 = dbutil.safe_name(schema2)
# Wrap the connections to gain access to database structure queries.
self.db1 = ComparisonWrapper(db1, self.schema1)
self.db2 = ComparisonWrapper(db2, self.schema2)
# Get the database names...
self.db1_name = self.db1.database_name()
self.db2_name = self.db2.database_name()
# and qualify with the schema names if they are not 'public'
if self.schema1 != 'public':
self.db1_name = self.schema1 + '.' + self.db1_name
if self.schema2 != 'public':
self.db2_name = self.schema2 + '.' + self.db2_name
# Set input, expected and output directories
# Not used yet
module = "tilecompare"
suite = "TileCompare"
self.input_dir = dbutil.input_directory(module, suite)
self.output_dir = dbutil.output_directory(module, suite)
self.expected_dir = dbutil.expected_directory(module, suite)
# tile_root could be different to database?
def restore_autocommit(self):
"""Restore the autocommit status of the underlying connections.
The comparison pair should not be used after calling this, in
case the connections have been reset to autocommit=False. The
method sets the database attributes to None to enforce this."""
self.db1.conn.autocommit = self.old_autocommit[0]
self.db2.conn.autocommit = self.old_autocommit[1]
self.db1 = None
self.db2 = None
def compare_tile_stores(db1, db2, schema1='public', schema2='public',
output=sys.stdout):
"""Compares the tile stores from two databases.
Database Connection db1 is assumed to represent the production tile store,
against which we wish to verify the tile store resulting from a Fresh
Ingest, which has taken place onto the previously-empty Database Connection
db2.
This function runs in three stages:
1. Gather the Fresh Ingest information on Database Connection db2 into a
table and copy this across to Database Connection db1, the production
database.
2. On Database Connection db1, merge the table from Step 1 to find the
corresponding production tiles.
3. For those Fresh Ingest tiles where a production tile can be found,
compare the two tiles and report if there is a difference. It can happen
that the tile exists on Fresh Ingest but not on production tile store.
This can happen for one of several reasons:
a) The old ingest used PQA to determine the existence of lower-level
data. By contrast, the Fresh Ingest process looks at the tile
directly to evaluate the existence of data.
b) Mosaic tiles used to be created on user-request by the stacker class
of the API. By contrast, The Fresh Ingest process does this
automatically.
c) The coverage method of the Fresh Ingest process will, very
occasionally, pick up some new tiles.
Such anomalies are reported in the output stream with a "WARNING" prefix
Preconditions: db1 and db2 are open database connections. These are
assumed to be psycopg2 connections to PostgreSQL databases. Tables
that are not being explicitly ignored are expected to have primary keys.
Positional Arguments:
db1, db2: Connections to the databases to be compared.
Keyword Arguments:
schema1: The schema to be used for the first database (db1), defaults
to 'public'.
schema2: The schema to be used for the second database (db2), defaults
to 'public'.
output: Where the output goes. This is assumed to be a file object.
Defaults to sys.stdout.
Return Value: Returns a list (path1, path2) of those corresponding tile
pairs where the contents differ.
"""
pair = TileComparisonPair(db1, db2, schema1, schema2)
#TEMPORARY delete some tiles:
_temp_delete_some_tiles(pair)
# Create a random 9-digit string to append to tables"
random_suffix = dbutil.random_name("_")
# Name of table to which information from fresh ingest will be written.
test_ingest_table = 'test_ingest%s' %random_suffix
# Create the table pertaining to the fresh ingest and copy it to the
# production database.
_copy_fresh_ingest_info(pair, test_ingest_table)
# Create tuple (list_both, list_db1_not_db2, list_db2_not_db1), where each
# list is a list of tuples:
# (level, tile_class_id1, tile_class_id2, path1, path2).
(list_both, list_db1_not_db2, list_db2_not_db1) = \
_get_comparison_pairs(pair, test_ingest_table)
# Output information for the edge cases of tiles being in only one database
tile_list = [p[3] for p in list_db1_not_db2]
_log_missing_tile_info(tile_list, pair.db1_name, pair.db2_name,
output)
tile_list = [p[4] for p in list_db2_not_db1]
_log_missing_tile_info(tile_list, pair.db2_name, pair.db1_name,
output)
output.writelines('There might be further mosaic tiles that are missing\n')
# Compare the tiles if they both exist
difference_pairs = _compare_tile_contents(list_both, output)
return difference_pairs
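# Usage sketch (connection details hypothetical): both arguments are open
# psycopg2 connections, a report is written to the output stream and the
# differing (path1, path2) tile pairs are returned.
#
#   import psycopg2
#   bench_conn = psycopg2.connect(dbname="tiles_benchmark")
#   fresh_conn = psycopg2.connect(dbname="tiles_fresh_ingest")
#   diffs = compare_tile_stores(bench_conn, fresh_conn, output=sys.stdout)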
def _temp_delete_some_tiles(comparison_pair):
"""Temporarily delete some files."""
#TEMPORARY delete some tiles from tile table to test whether
#we can detect that they are present on DB1 but not on DB2.
sql = ("DELETE FROM tile WHERE x_index=116")
with comparison_pair.db2.cursor() as cur:
cur.execute(sql, {})
def _copy_fresh_ingest_info(comparison_pair, test_ingest_info_table):
"""Given this database connection, collate the acquisition information
for each tile into a table. Copy this table to the production database."""
sql = ("CREATE TABLE " + test_ingest_info_table + " AS" + "\n" +
"SELECT tile_id, x_index, y_index, a.acquisition_id," + "\n" +
"a.end_datetime - a.start_datetime as aq_len," + "\n" +
"tile_class_id, tile_pathname, level_id, satellite_id," + "\n" +
"sensor_id, a.start_datetime, a.end_datetime FROM tile t\n"
"INNER JOIN dataset d on d.dataset_id=t.dataset_id" + "\n" +
"INNER JOIN acquisition a on d.acquisition_id=a.acquisition_id\n")
with comparison_pair.db2.cursor() as cur:
cur.execute(sql, {})
dbutil.TESTSERVER.copy_table_between_databases(comparison_pair.db2_name,
comparison_pair.db1_name,
test_ingest_info_table)
def _get_comparison_pairs(db_pair, test_ingest_info):
"""Given Database 2's information in test_ingest_info table, generate pairs
of corresponding tiles from Database 1 and Database 2.
Returns: 3 lists as follows:
1. production_and_test: those corresponding pairs which exist on Database 1
and Database 2.
2. production_not_test: the tiles found only on Database 1.
3. test_not_production: the tiles found only on Database 2.
Each element of the above lists is a 5-tuple:
(level_name, tile_class_id on Database 1, tile_class_id on Database 2,
tile_pathname on Database 1, tile_pathname on Database 2)."""
fuzzy_match_percentage = 15
# Strip the random suffix from the test_ingest_info table and use it
# for other tables.
random_suffix = re.match(r'.+(_\d+)', test_ingest_info).group(1)
# Match the datasets from Database 2 to those in Database 1
sql = (
"CREATE TEMPORARY TABLE datasets_join_info AS SELECT DISTINCT\n" +
"a.acquisition_id AS acquisition_id1, ti.acquisition_id AS\n" +
"acquisition_id2, level_id FROM acquisition a\n" +
"INNER JOIN " + test_ingest_info + " ti ON " +
" a.satellite_id=ti.satellite_id AND " +
" a.sensor_id=ti.sensor_id AND " +
" a.start_datetime BETWEEN " +
" ti.start_datetime - " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" ti.start_datetime + " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" a.end_datetime BETWEEN " +
" ti.end_datetime - " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len AND\n" +
" ti.end_datetime + " + str(fuzzy_match_percentage/100.) +
" *ti.aq_len;"
)
# Find all tiles from Database 1 which appear in the datasets
sqltemp = (
"CREATE TEMPORARY TABLE tiles1 AS SELECT\n" +
"acquisition_id1, acquisition_id2, dji.level_id,\n" +
"tile_class_id AS tile_class_id1, tile_pathname AS path1,\n" +
"x_index, y_index FROM datasets_join_info dji\n" +
"INNER JOIN acquisition a ON a.acquisition_id=dji.acquisition_id1\n" +
"INNER JOIN dataset d on d.acquisition_id=a.acquisition_id AND\n" +
" d.level_id=dji.level_id\n" +
"INNER JOIN tile t ON t.dataset_id=d.dataset_id\n" +
"WHERE t.tile_class_id<>2;"
)
sql = sql + sqltemp
# Find all tiles from test ingestion
sqltemp = (
"CREATE TEMPORARY TABLE tiles2 AS SELECT\n" +
"acquisition_id1, acquisition_id2, dji.level_id,\n" +
"tile_class_id AS tile_class_id2, tile_pathname AS path2,\n" +
"x_index, y_index FROM datasets_join_info dji\n" +
"INNER JOIN " + test_ingest_info + " ti ON \n" +
" ti.acquisition_id=dji.acquisition_id2 AND\n" +
" ti.level_id=dji.level_id;"
)
sql = sql + sqltemp
# For each Database 1 tile found in the test ingest datasets, find the
# corresponding Database 2 tile if it exists.
production_all_tiles = 'tiles1_all%s' %random_suffix
test_ingest_all_tiles = 'tiles2_all%s' %random_suffix
sqltemp = (
"CREATE TABLE " + production_all_tiles + " AS SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, path1, path2\n" +
"FROM tiles1 t1 LEFT OUTER JOIN tiles2 t2 ON\n" +
"t1.acquisition_id1=t2.acquisition_id1 AND\n" +
"t1.level_id=t2.level_id AND\n" +
"t1.x_index=t2.x_index AND t1.y_index=t2.y_index\n" +
"INNER JOIN processing_level p on p.level_id=t1.level_id;"
)
sql = sql + sqltemp
# For each Database 2 tile found in the test ingest datasets, find the
# corresponding Database 1 tile if it exists.
sqltemp = (
"CREATE TABLE " + test_ingest_all_tiles + " AS SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, path1, path2\n" +
"FROM tiles2 t2 LEFT OUTER JOIN tiles1 t1 ON\n" +
"t1.acquisition_id1=t2.acquisition_id1 AND\n" +
"t1.level_id=t2.level_id AND\n" +
"t1.x_index=t2.x_index AND t1.y_index=t2.y_index\n" +
"INNER JOIN processing_level p on p.level_id=t2.level_id; "
)
sql = sql+sqltemp
# Generate list of tiles found in Database 1 and Database 2
sql_fetch_both = ("SELECT\n" +
"t1.level_name, t1.tile_class_id1, t2.tile_class_id2, \n" +
"t1.path1, t2.path2 FROM\n" +
production_all_tiles + " t1 INNER JOIN " +
test_ingest_all_tiles + " t2 ON\n" +
"t1.path1=t2.path1 AND t1.path2=t2.path2;")
# Generate list of tiles found in Database 1 but not Database 2
sql_fetch_production_not_test = ("SELECT\n" +
"level_name, tile_class_id1, tile_class_id2, \n" +
"path1, path2 FROM\n" +
production_all_tiles + " WHERE path2 is NULL;")
# Generate list of tiles found in Database 2 but not Database 1
sql_fetch_test_not_production = ("SELECT\n" +
"level_name, tile_class_id1, tile_class_id2,\n" +
"path1, path2 FROM\n" +
test_ingest_all_tiles + " WHERE path1 is NULL;")
with db_pair.db1.cursor() as cur:
cur.execute(sql, {})
cur.execute(sql_fetch_both, {})
production_and_test = cur.fetchall()
cur.execute(sql_fetch_production_not_test, {})
production_not_test = cur.fetchall()
cur.execute(sql_fetch_test_not_production, {})
test_not_production = cur.fetchall()
db_pair.db1.drop_table(test_ingest_info)
db_pair.db1.drop_table(production_all_tiles)
db_pair.db1.drop_table(test_ingest_all_tiles)
return (production_and_test, production_not_test, test_not_production)
def _log_missing_tile_info(tile_list, dbname_present, dbname_missing, output):
"""Log information from the edge case of tiles present on dbname_present,
but missing on dbname_missing."""
if tile_list:
if len(tile_list) == 1:
number_str = "is %d tile" % len(tile_list)
else:
number_str = "are %d tiles" % len(tile_list)
output.writelines('Given the datasets from the Test Ingest process, ' \
'there %s in the %s tile ' \
'store but not in the %s tile store:\n'\
%(number_str, dbname_present, dbname_missing))
for tile in tile_list:
output.writelines('WARNING: Only in %s tile store: ' \
'%s\n'%(dbname_present, tile))
def _compare_tile_contents(list_both, output):
"""Compare the tile pairs contained in list_both. Additionally, report
those tiles that are only in Database 1, or only in Database 2.
Positional arguments: 3 lists as follows:
1. production_and_test: those corresponding pairs which exist on Database 1
and Database 2.
2. production_not_test: the tiles found only on Database 1.
3. test_not_production: the tiles found only on Database 2.
Each element of the above lists is a 5-tuple:
(level_name, tile_class_id on Database 1, tile_class_id on Database 2,
tile_pathname on Database 1, tile_pathname on Database 2).
Returns:
List of tile-path pairs (path1, path2) for which a difference has been
detected."""
#pylint:disable=too-many-locals
# Define a list of tuples (path1, path2) where the tile contents differ
rec_num = 0
difference_pairs = []
for tile_pair in list_both:
rec_num += 1
is_mosaic_vrt = False
level, tile_class_id1, tile_class_id2, path1, path2 = tile_pair
output.writelines('RECORD NUMBER %d tile_class_id2=%d level=%s\n'
%(rec_num, tile_class_id2, level))
# For a mosaic tile, the tile entry may not be on the database, so
# look in mosaic_cache:
if tile_class_id2 in MOSAIC_CLASS_ID:
path1 = os.path.join(os.path.dirname(path1), 'mosaic_cache',
os.path.basename(path1))
# For non-PQA tiles, the benchmark mosaic will be .vrt extension
if level in ['NBAR', 'ORTHO']:
path1 = re.match(r'(.+)\.tif$', path1).groups(1)[0] + '.vrt'
is_mosaic_vrt = True
# Check the Geotransform, Projection and shape (unless it is a vrt)
if is_mosaic_vrt:
data1, data2, msg = (None, None, "")
else:
# Skip checking of metadata for a vrt mosaic since we will check
# with system diff command in _compare_data
data1, data2, msg = _check_tile_metadata(path1, path2)
if msg:
output.writelines(msg)
# Compare the tile contents
are_different, msg = _compare_data(level,
tile_class_id1, tile_class_id2,
path1, path2, data1, data2)
if are_different:
difference_pairs.append((path1, path2))
if msg:
sys.stdout.writelines(msg)
output.writelines(msg)
return difference_pairs
def _check_tile_metadata(path1, path2):
"""Given two tile paths, check that the projections, geotransforms and
dimensions agree. Returns a message in string msg which, if empty,
indicates agreement on the metadata."""
# pylint:disable=too-many-branches
# pylint:disable=too-many-statements
gdal.UseExceptions()
msg = ""
data1 = None
data2 = None
# Open the tile files
try:
dset1 = gdal.Open(path1)
data1 = dset1.ReadAsArray()
except RuntimeError:
msg += "ERROR:\tBenchmark tile %s does not exist\n" %path1
dset1 = None
data1 = None
try:
dset2 = gdal.Open(path2)
data2 = dset2.ReadAsArray()
except RuntimeError:
msg += "ERROR:\tTest Ingest tile %s does not exist\n" %path2
dset2 = None
data2 = None
# Check geotransforms present
try:
geotransform1 = dset1.GetGeoTransform()
except RuntimeError:
if dset1:
# file exists but geotransform not present
msg += "\tError:\tGeotransform for %s not present\n" %path1
geotransform1 = None
try:
geotransform2 = dset2.GetGeoTransform()
except RuntimeError:
if dset2:
# file exists but geotransform not present
msg += "\tError:\tGeotransform for %s not present\n" %path2
geotransform2 = None
# Check geotransforms equal
if geotransform1 and geotransform2:
if geotransform1 != geotransform2:
msg += "\tError:\tGeotransforms disagree for %s and %s\n"\
%(path1, path2)
# Check projections present
try:
projection1 = dset1.GetProjection()
except RuntimeError:
if dset1:
# file exists but projections not present
msg += "\tError:\tProjection for %s not present\n" %path1
projection1 = None
try:
projection2 = dset2.GetProjection()
except RuntimeError:
if dset2:
# file exists but projection not present
msg += "\tError:\tProjection for %s not present\n" %path2
projection2 = None
# Check projections equal
if projection1 and projection2:
if projection1 != projection2:
msg += "\tError:\tProjections disagree for %s and %s\n"\
%(path1, path2)
# Check the dimensions of the arrays
if dset1 and dset2:
if data1.shape != data2.shape:
msg += "\tError:\tDimensions of arrays disagree for %s and %s\n" \
%(path1, path2)
if dset1 and data1 is None:
msg += "\tError:\tArray data for %s not present\n" %path1
if dset2 and data2 is None:
msg += "\tError:\tArray data for %s not present\n" %path2
return (data1, data2, msg)
def _compare_data(level, tile_class_id1, tile_class_id2, path1, path2,
data1, data2):
"""Given two arrays and the level name, check that the data arrays agree.
If the level is 'PQA' and the tile is a mosaic, then only compare mosaics
at pixels where the contiguity bit is set in both versions of the mosaic
tile. Returns a message in string msg which, if empty, indicates agreement
on the tile data."""
# pylint:disable=too-many-arguments
# pylint:disable=too-many-locals
# pylint:disable=unused-argument
different = False
msg = ""
if tile_class_id2 not in MOSAIC_CLASS_ID:
if (data1 != data2).any():
msg += "Difference in Tile data: %s and %s\n" \
%(path1, path2)
else:
# mosaic tile
if level == 'PQA':
ind = (data1 == data2)
# Check that differences are due to differing treatment
# of contiguity bit.
data1_diff = data1[~ind].ravel()
data2_diff = data2[~ind].ravel()
contiguity_diff = \
np.logical_or(
np.bitwise_and(data1_diff, 1 << 8) == 0,
np.bitwise_and(data2_diff, 1 << 8) == 0)
if not contiguity_diff.all():
msg += "On %d pixels, mosaiced tile benchmark %s differs"\
"from Fresh Ingest %s\n"\
%(np.count_nonzero(~contiguity_diff), path1, path2)
different = True
else:
diff_cmd = ["diff",
"-I",
"[Ff]ilename",
"%s" %path1,
"%s" %path2
]
result = execute(diff_cmd, shell=False)
if result['stdout'] != '':
msg += "Difference between mosaic vrt files:\n" + \
result['stdout']
different = True
if result['stderr'] != '':
msg += "Error in system diff command:\n" + result['stderr']
return (different, msg)
|
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MIP utility functions."""
import copy
import dataclasses
import enum
import math
from typing import Any, List
from absl import logging
import numpy as np
from neural_lns import sampling
class MPSolverResponseStatus(enum.Enum):
"""Enum of solver statuses."""
OPTIMAL = 0
FEASIBLE = 1
NOT_SOLVED = 2
INFEASIBLE = 3
UNBOUNDED = 4
INFEASIBLE_OR_UNBOUNDED = 5
STOPPED = 6
UNKNOWN = 7
FAILED = 8
BESTSOLLIMIT = 9
@dataclasses.dataclass
class MPVariable:
"""MPVariable contains all the information related to a single variable."""
# Lower and upper bounds; lower_bound must be <= upper_bound.
lower_bound: float = -math.inf
upper_bound: float = math.inf
# The coefficient of the variable in the objective. Must be finite.
objective_coefficient: float = 0.0
# True if the variable is constrained to be integer.
is_integer: bool = True
# The name of the variable.
name: str = ""
@dataclasses.dataclass
class MPConstraint:
"""MPConstraint contains all the information related to a single constraint."""
# var_index[i] is the variable index (w.r.t. the "variable" field of
# MPModel) of the i-th linear term involved in this constraint, and
# coefficient[i] is its coefficient. Only the terms with non-zero
# coefficients need to appear. var_index may not contain duplicates.
var_index: List[int] = dataclasses.field(default_factory=list)
coefficient: List[float] = dataclasses.field(default_factory=list)
# lower_bound must be <= upper_bound.
lower_bound: float = -math.inf
upper_bound: float = math.inf
# The name of the constraint.
name: str = ""
@dataclasses.dataclass
class MPModel:
"""MPModel fully encodes a Mixed-Integer Linear Programming model."""
# All the variables appearing in the model.
variable: List[MPVariable] = dataclasses.field(default_factory=list)
# All the constraints appearing in the model.
constraint: List[MPConstraint] = dataclasses.field(default_factory=list)
# True if the problem is a maximization problem. Minimize by default.
maximize: bool = False
# Offset for the objective function. Must be finite.
objective_offset: float = 0.0
# Name of the model.
name: str = ""
@dataclasses.dataclass
class MPSolutionResponse:
"""Class for solution response from the solver."""
# Objective value corresponding to the "variable_value" below, taking into
# account the source "objective_offset" and "objective_coefficient".
objective_value: float
# Variable values in the same order as the MPModel.variable field.
# This is a dense representation. These are set iff 'status' is OPTIMAL or
# FEASIBLE.
variable_value: List[float]
# Human-readable status string.
status_str: str
# Result of the optimization.
status: MPSolverResponseStatus = MPSolverResponseStatus.UNKNOWN
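# Illustrative sketch (hypothetical model, not part of the original module):
# maximise x + 2y subject to x + y <= 1 with x and y binary.
#
#   x = MPVariable(lower_bound=0, upper_bound=1,
#                  objective_coefficient=1.0, name="x")
#   y = MPVariable(lower_bound=0, upper_bound=1,
#                  objective_coefficient=2.0, name="y")
#   cap = MPConstraint(var_index=[0, 1], coefficient=[1.0, 1.0],
#                      upper_bound=1.0, name="cap")
#   model = MPModel(variable=[x, y], constraint=[cap], maximize=True,
#                   name="tiny")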
def tighten_variable_bounds(mip: Any,
names: List[str],
lbs: List[float],
ubs: List[float]):
"""Tightens variables of the given MIP in-place.
Args:
mip: Input MIP.
names: List of variable names to tighten.
lbs: List of lower bounds, in same order as names.
ubs: List of upper bounds, in same order as names.
"""
if len(names) != len(lbs) or len(lbs) != len(ubs):
raise ValueError(
"Names, lower and upper bounds should have the same length")
name_to_bounds = {}
for name, lb, ub in zip(names, lbs, ubs):
name = name.decode() if isinstance(name, bytes) else name
name_to_bounds[name] = (lb, ub)
c = 0
for v in mip.variable:
name = v.name.decode() if isinstance(v.name, bytes) else v.name
if name in name_to_bounds:
lb, ub = name_to_bounds[name]
v.lower_bound = max(lb, v.lower_bound)
v.upper_bound = min(ub, v.upper_bound)
c += 1
logging.info("Tightened %s vars", c)
def is_var_binary(variable: Any) -> bool:
"""Checks whether a given variable is binary."""
lb_is_zero = np.isclose(variable.lower_bound, 0)
ub_is_one = np.isclose(variable.upper_bound, 1)
return variable.is_integer and lb_is_zero and ub_is_one
def add_binary_invalid_cut(mip: Any,
names: List[str],
values: List[int],
weights: List[float],
depth: float):
"""Adds a weighted binary invalid cut to the given MIP in-place.
Given a binary assignment for all or some of the binary variables, adds
a constraint in the form:
sum_{i in zeros} w_i * x_i + sum_{j in ones} w_j * (1-x_j) <= d
The first summation is over variables predicted to be zeros, the second
summation is over variables predicted to be ones. d is the maximum distance
allowed for a solution to be away from predicted assignment.
Args:
mip: Input MIP.
names: Binary variable names.
values: Predicted values of binary variables.
    weights: Weights associated with cost incurred by reversing prediction.
depth: The amount of cost allowed to be incurred by flipping
assignments.
"""
assert len(names) == len(values) == len(weights)
name_to_idx = {}
for i, v in enumerate(mip.variable):
name = v.name.decode() if isinstance(v.name, bytes) else v.name
name_to_idx[name] = i
ub = depth
var_index = []
coeffs = []
for name, val, w in zip(names, values, weights):
name = name.decode() if isinstance(name, bytes) else name
assert is_var_binary(mip.variable[name_to_idx[name]])
var_index.append(name_to_idx[name])
if val == 1:
ub -= w
coeffs.append(-w)
else:
coeffs.append(w)
constraint = mip.constraint.add()
constraint.var_index.extend(var_index)
constraint.coefficient.extend(coeffs)
constraint.upper_bound = ub
constraint.name = "weighted_invalid_cut"
def make_sub_mip(mip: Any, assignment: sampling.Assignment):
"""Creates a sub-MIP by tightening variables and applying cut."""
sub_mip = copy.deepcopy(mip)
tighten_variable_bounds(sub_mip, assignment.names,
assignment.lower_bounds, assignment.upper_bounds)
return sub_mip
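# Usage sketch (illustrative addition, not part of the original module): build a
# tiny MPModel from the dataclasses above and tighten one binary variable.
# All names below ("x", "y", "toy") are invented for this example.
def _example_tighten_toy_model() -> MPModel:
  """Builds a two-variable toy model and fixes "x" to 1 via bound tightening."""
  x = MPVariable(lower_bound=0.0, upper_bound=1.0, is_integer=True, name="x")
  y = MPVariable(lower_bound=0.0, upper_bound=1.0, is_integer=True, name="y")
  toy = MPModel(variable=[x, y], name="toy")
  assert is_var_binary(x) and is_var_binary(y)
  # Tighten in-place: "x" becomes fixed to 1, "y" keeps its original [0, 1] bounds.
  tighten_variable_bounds(toy, names=["x"], lbs=[1.0], ubs=[1.0])
  return toy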
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is the main executable of HTPC Manager. It parses the
command line arguments, sets globals variables and calls the
start function to start the server.
"""
import os
import sys
import htpc
import webbrowser
import locale
import logging
def parse_arguments():
""" Get variables from commandline """
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datadir', default=None,
help='Set the datadirectory')
parser.add_argument('--db', default=None,
help='Use a custom database')
parser.add_argument('--host', default=None,
help='Use a specific host/IP')
parser.add_argument('--port', type=int,
help='Use a specific port')
parser.add_argument('--shell', action='store_true', default=False,
help='WARNING! DO NOT USE UNLESS YOU KNOW WHAT .POPEN CAN BE USED FOR (LIKE WIPING YOUR HARDDRIVE).')
parser.add_argument('--daemon', action='store_true', default=False,
help='Daemonize process')
parser.add_argument('--pid', default=False,
help='Generate PID file at location')
parser.add_argument('--debug', action='store_true', default=False,
help='This parameter has been deprecated')
parser.add_argument('--dev', action='store_true', default=False,
                        help='Development mode: prints uncensored debug messages, enables autoreload, etc.')
parser.add_argument('--openbrowser', action='store_true', default=False,
help='Open the browser on server start')
parser.add_argument('--webdir', default=None,
help='Use a custom webdir')
parser.add_argument('--resetauth', action='store_true', default=False,
help='Resets the username and password to HTPC Manager')
parser.add_argument('--loglevel',
help='Set a loglevel. Allowed values: debug, info, warning, error, critical')
parser.add_argument('--nocolor', action='store_true', default=False,
help='Disable colored terminal text')
return parser.parse_args()
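# Example invocation (illustrative; the entry-script filename below is assumed,
# the flags are the ones defined in parse_arguments() above):
#
#     python Htpc.py --port 8085 --openbrowser --loglevel info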
def load_modules():
""" Import the system modules """
from htpc.root import Root
htpc.ROOT = Root()
from htpc.settings import Settings
htpc.ROOT.settings = Settings()
from htpc.log import Log
htpc.ROOT.log = Log()
from htpc.updater import Updater
htpc.ROOT.update = Updater()
# Import all modules.
from modules.kodi import Kodi
htpc.ROOT.kodi = Kodi()
from modules.sabnzbd import Sabnzbd
htpc.ROOT.sabnzbd = Sabnzbd()
from modules.couchpotato import Couchpotato
htpc.ROOT.couchpotato = Couchpotato()
from modules.sickbeard import Sickbeard
htpc.ROOT.sickbeard = Sickbeard()
from modules.transmission import Transmission
htpc.ROOT.transmission = Transmission()
from modules.deluge import Deluge
htpc.ROOT.deluge = Deluge()
from modules.squeezebox import Squeezebox
htpc.ROOT.squeezebox = Squeezebox()
from modules.newznab import Newznab
htpc.ROOT.newznab = Newznab()
from modules.utorrent import UTorrent
htpc.ROOT.utorrent = UTorrent()
from modules.nzbget import NZBGet
htpc.ROOT.nzbget = NZBGet()
from modules.qbittorrent import Qbittorrent
htpc.ROOT.qbittorrent = Qbittorrent()
from modules.stats import Stats
htpc.ROOT.stats = Stats()
from modules.tvheadend import TVHeadend
htpc.ROOT.tvheadend = TVHeadend()
from modules.torrentsearch import Torrentsearch
htpc.ROOT.torrentsearch = Torrentsearch()
from modules.plex import Plex
htpc.ROOT.plex = Plex()
from modules.users import Users
htpc.ROOT.users = Users()
from modules.sonarr import Sonarr
htpc.ROOT.sonarr = Sonarr()
from modules.sickrage import Sickrage
htpc.ROOT.sickrage = Sickrage()
from modules.samsungtv import Samsungtv
htpc.ROOT.samsungtv = Samsungtv()
from modules.vnstat import Vnstat
htpc.ROOT.vnstat = Vnstat()
from modules.headphones import Headphones
htpc.ROOT.headphones = Headphones()
from modules.mylar import Mylar
htpc.ROOT.mylar = Mylar()
def init_sched():
from apscheduler.schedulers.background import BackgroundScheduler
htpc.SCHED = BackgroundScheduler()
htpc.SCHED.start()
def main():
"""
Main function is called at startup.
"""
# Parse runtime arguments
args = parse_arguments()
    # Set root and insert bundled libraries into path
htpc.RUNDIR = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.insert(0, os.path.join(htpc.RUNDIR, 'libs'))
try:
locale.setlocale(locale.LC_ALL, "")
htpc.SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
    # For poorly configured OSes, just force UTF-8
if not htpc.SYS_ENCODING or htpc.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
htpc.SYS_ENCODING = 'UTF-8'
if not hasattr(sys, "setdefaultencoding"):
reload(sys)
    # Python 2.7.9+ verifies certificates by default; disable that here
if sys.version_info >= (2, 7, 9):
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# Set datadir, create if it doesn't exist and exit if it isn't writable.
htpc.DATADIR = os.path.join(htpc.RUNDIR, 'userdata/')
if args.datadir:
htpc.DATADIR = args.datadir
if not os.path.isdir(htpc.DATADIR):
os.makedirs(htpc.DATADIR)
if not os.access(htpc.DATADIR, os.W_OK):
sys.exit("No write access to userdata folder")
from mako.lookup import TemplateLookup
# Enable dev mode if needed
htpc.DEV = args.dev
# Set default database and overwrite if supplied through commandline
htpc.DB = os.path.join(htpc.DATADIR, 'database.db')
if args.db:
htpc.DB = args.db
# Load settings from database
from htpc.settings import Settings
htpc.settings = Settings()
# Set default loglevel
htpc.LOGLEVEL = htpc.settings.get('app_loglevel', 'info')
if args.loglevel:
htpc.LOGLEVEL = args.loglevel.lower()
htpc.settings.set('app_loglevel', args.loglevel.lower())
# Check for SSL
htpc.USE_SSL = htpc.settings.get('app_use_ssl')
htpc.SSLCERT = htpc.settings.get('app_ssl_cert')
htpc.SSLKEY = htpc.settings.get('app_ssl_key')
htpc.WEBDIR = htpc.settings.get('app_webdir', '/')
if args.webdir:
htpc.WEBDIR = args.webdir
if not htpc.WEBDIR.startswith('/'):
htpc.WEBDIR = '/' + htpc.WEBDIR
if not htpc.WEBDIR.endswith('/'):
htpc.WEBDIR += '/'
htpc.TEMPLATE = os.path.join(htpc.RUNDIR, 'interfaces/',
htpc.settings.get('app_template', 'default'))
htpc.LOOKUP = TemplateLookup(directories=[os.path.join(htpc.TEMPLATE, 'html/')])
# Overwrite host setting if supplied through commandline
htpc.HOST = htpc.settings.get('app_host', '0.0.0.0')
if args.host:
htpc.HOST = args.host
# Overwrite port setting if supplied through commandline
htpc.PORT = int(htpc.settings.get('app_port', 8085))
if args.port:
htpc.PORT = args.port
htpc.USERNAME = htpc.settings.get('app_username')
htpc.PASSWORD = htpc.settings.get('app_password')
    # Used for hiding the logout entry in the menu
if htpc.USERNAME and htpc.PASSWORD:
htpc.AUTH = True
else:
htpc.AUTH = False
    # Reset the HTPC Manager username and password
if args.resetauth:
htpc.USERNAME = htpc.settings.set('app_username', '')
htpc.PASSWORD = htpc.settings.set('app_password', '')
htpc.NOCOLOR = args.nocolor
# Open webbrowser
if args.openbrowser or htpc.settings.get('openbrowser') and not htpc.DEV:
browser_ssl = 's' if htpc.SSLCERT and htpc.SSLKEY and htpc.settings.get('app_use_ssl') else ''
if htpc.settings.get('app_host') == '0.0.0.0':
browser_host = 'localhost'
else:
browser_host = htpc.settings.get('app_host', 'localhost')
openbrowser = 'http%s://%s:%s%s' % (browser_ssl, str(browser_host), htpc.PORT, htpc.WEBDIR[:-1])
webbrowser.open(openbrowser, new=2, autoraise=True)
# Select if you want to control processes and open from HTPC Manager
htpc.SHELL = args.shell
# Select whether to run as daemon
htpc.DAEMON = args.daemon
# Set Application PID
htpc.PID = args.pid
# Initialize Scheduler
init_sched()
    # Initialize root and settings page
load_modules()
if args.debug:
logger = logging.getLogger('root')
        logger.warning('Commandline parameter --debug has been deprecated')
htpc.ARGS = sys.argv
# Start the server
from htpc.server import start
start()
if __name__ == '__main__':
main()
|
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import pxe_utils
from ironic.conductor import task_manager
from ironic.dhcp import neutron
from ironic.drivers.modules import ssh
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as object_utils
class TestNeutron(db_base.DbTestCase):
def setUp(self):
super(TestNeutron, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake')
self.config(
cleaning_network_uuid='00000000-0000-0000-0000-000000000000',
group='neutron')
self.config(enabled_drivers=['fake'])
self.config(dhcp_provider='neutron',
group='dhcp')
self.config(url='test-url',
url_timeout=30,
retries=2,
group='neutron')
self.config(insecure=False,
certfile='test-file',
admin_user='test-admin-user',
admin_tenant_name='test-admin-tenant',
admin_password='test-admin-password',
auth_uri='test-auth-uri',
group='keystone_authtoken')
self.node = object_utils.create_test_node(self.context)
self.ports = [
object_utils.create_test_port(
self.context, node_id=self.node.id, id=2,
uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782',
address='52:54:00:cf:2d:32')]
# Very simple neutron port representation
self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00',
'mac_address': '52:54:00:cf:2d:32'}
dhcp_factory.DHCPFactory._dhcp_provider = None
@mock.patch.object(client.Client, "__init__")
def test__build_client_with_token(self, mock_client_init):
token = 'test-token-123'
expected = {'timeout': 30,
'retries': 2,
'insecure': False,
'ca_cert': 'test-file',
'token': token,
'endpoint_url': 'test-url',
'username': 'test-admin-user',
'tenant_name': 'test-admin-tenant',
'password': 'test-admin-password',
'auth_url': 'test-auth-uri'}
mock_client_init.return_value = None
neutron._build_client(token=token)
mock_client_init.assert_called_once_with(**expected)
@mock.patch.object(client.Client, "__init__")
def test__build_client_without_token(self, mock_client_init):
expected = {'timeout': 30,
'retries': 2,
'insecure': False,
'ca_cert': 'test-file',
'token': None,
'endpoint_url': 'test-url',
'username': 'test-admin-user',
'tenant_name': 'test-admin-tenant',
'password': 'test-admin-password',
'auth_url': 'test-auth-uri'}
mock_client_init.return_value = None
neutron._build_client(token=None)
mock_client_init.assert_called_once_with(**expected)
@mock.patch.object(client.Client, "__init__")
def test__build_client_with_region(self, mock_client_init):
expected = {'timeout': 30,
'retries': 2,
'insecure': False,
'ca_cert': 'test-file',
'token': None,
'endpoint_url': 'test-url',
'username': 'test-admin-user',
'tenant_name': 'test-admin-tenant',
'password': 'test-admin-password',
'auth_url': 'test-auth-uri',
'region_name': 'test-region'}
self.config(region_name='test-region',
group='keystone')
mock_client_init.return_value = None
neutron._build_client(token=None)
mock_client_init.assert_called_once_with(**expected)
@mock.patch.object(client.Client, "__init__")
def test__build_client_noauth(self, mock_client_init):
self.config(auth_strategy='noauth', group='neutron')
expected = {'ca_cert': 'test-file',
'insecure': False,
'endpoint_url': 'test-url',
'timeout': 30,
'retries': 2,
'auth_strategy': 'noauth'}
mock_client_init.return_value = None
neutron._build_client(token=None)
mock_client_init.assert_called_once_with(**expected)
@mock.patch.object(client.Client, 'update_port')
@mock.patch.object(client.Client, "__init__")
def test_update_port_dhcp_opts(self, mock_client_init, mock_update_port):
opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '1.1.1.1'},
{'opt_name': 'server-ip-address',
'opt_value': '1.1.1.1'}]
port_id = 'fake-port-id'
expected = {'port': {'extra_dhcp_opts': opts}}
mock_client_init.return_value = None
api = dhcp_factory.DHCPFactory()
api.provider.update_port_dhcp_opts(port_id, opts)
mock_update_port.assert_called_once_with(port_id, expected)
@mock.patch.object(client.Client, 'update_port')
@mock.patch.object(client.Client, "__init__")
def test_update_port_dhcp_opts_with_exception(self, mock_client_init,
mock_update_port):
opts = [{}]
port_id = 'fake-port-id'
mock_client_init.return_value = None
mock_update_port.side_effect = (
neutron_client_exc.NeutronClientException())
api = dhcp_factory.DHCPFactory()
self.assertRaises(
exception.FailedToUpdateDHCPOptOnPort,
api.provider.update_port_dhcp_opts,
port_id, opts)
@mock.patch.object(client.Client, 'update_port')
@mock.patch.object(client.Client, '__init__')
def test_update_port_address(self, mock_client_init, mock_update_port):
address = 'fe:54:00:77:07:d9'
port_id = 'fake-port-id'
expected = {'port': {'mac_address': address}}
mock_client_init.return_value = None
api = dhcp_factory.DHCPFactory()
api.provider.update_port_address(port_id, address)
mock_update_port.assert_called_once_with(port_id, expected)
@mock.patch.object(client.Client, 'update_port')
@mock.patch.object(client.Client, '__init__')
def test_update_port_address_with_exception(self, mock_client_init,
mock_update_port):
address = 'fe:54:00:77:07:d9'
port_id = 'fake-port-id'
mock_client_init.return_value = None
api = dhcp_factory.DHCPFactory()
mock_update_port.side_effect = (
neutron_client_exc.NeutronClientException())
self.assertRaises(exception.FailedToUpdateMacOnPort,
api.provider.update_port_address,
port_id, address)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
@mock.patch('ironic.common.network.get_node_vif_ids')
def test_update_dhcp(self, mock_gnvi, mock_updo):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
mock_updo.assert_called_once_with('vif-uuid', opts,
token=self.context.auth_token)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
@mock.patch('ironic.common.network.get_node_vif_ids')
def test_update_dhcp_no_vif_data(self, mock_gnvi, mock_updo):
mock_gnvi.return_value = {'portgroups': {}, 'ports': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
api.update_dhcp, task, self.node)
self.assertFalse(mock_updo.called)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
@mock.patch('ironic.common.network.get_node_vif_ids')
def test_update_dhcp_some_failures(self, mock_gnvi, mock_updo):
# confirm update is called twice, one fails, but no exception raised
mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
'portgroups': {}}
exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
mock_updo.side_effect = [None, exc]
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, self.node)
mock_gnvi.assert_called_once_with(task)
self.assertEqual(2, mock_updo.call_count)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
@mock.patch('ironic.common.network.get_node_vif_ids')
def test_update_dhcp_fails(self, mock_gnvi, mock_updo):
# confirm update is called twice, both fail, and exception is raised
mock_gnvi.return_value = {'ports': {'p1': 'v1', 'p2': 'v2'},
'portgroups': {}}
exc = exception.FailedToUpdateDHCPOptOnPort('fake exception')
mock_updo.side_effect = [exc, exc]
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory()
self.assertRaises(exception.FailedToUpdateDHCPOptOnPort,
api.update_dhcp,
task, self.node)
mock_gnvi.assert_called_once_with(task)
self.assertEqual(2, mock_updo.call_count)
@mock.patch('time.sleep', autospec=True)
@mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_set_sleep_and_ssh(self, mock_gnvi, mock_updo,
mock_ts):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
self.config(port_setup_delay=30, group='neutron')
with task_manager.acquire(self.context,
self.node.uuid) as task:
task.driver.power = ssh.SSHPower()
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
mock_ts.assert_called_with(30)
mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts,
token=self.context.auth_token)
@mock.patch.object(neutron, 'LOG', autospec=True)
@mock.patch('time.sleep', autospec=True)
@mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_unset_sleep_and_ssh(self, mock_gnvi, mock_updo,
mock_ts, mock_log):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
task.driver.power = ssh.SSHPower()
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
self.assertTrue(mock_log.warning.called)
self.assertIn('Setting the port delay to 15 for SSH',
mock_log.warning.call_args[0][0])
mock_ts.assert_called_with(15)
mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts,
token=self.context.auth_token)
@mock.patch.object(neutron, 'LOG', autospec=True)
@mock.patch('time.sleep', autospec=True)
@mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_set_sleep_and_fake(self, mock_gnvi, mock_updo,
mock_ts, mock_log):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
self.config(port_setup_delay=30, group='neutron')
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
mock_log.debug.assert_called_once_with(
"Waiting %d seconds for Neutron.", 30)
mock_log.warning.assert_not_called()
mock_ts.assert_called_with(30)
mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts,
token=self.context.auth_token)
@mock.patch.object(neutron, 'LOG', autospec=True)
@mock.patch.object(neutron.NeutronDHCPApi, 'update_port_dhcp_opts',
autospec=True)
@mock.patch('ironic.common.network.get_node_vif_ids', autospec=True)
def test_update_dhcp_unset_sleep_and_fake(self, mock_gnvi, mock_updo,
mock_log):
mock_gnvi.return_value = {'ports': {'port-uuid': 'vif-uuid'},
'portgroups': {}}
with task_manager.acquire(self.context,
self.node.uuid) as task:
opts = pxe_utils.dhcp_options_for_instance(task)
api = dhcp_factory.DHCPFactory()
api.update_dhcp(task, opts)
mock_log.debug.assert_not_called()
mock_log.warning.assert_not_called()
mock_updo.assert_called_once_with(mock.ANY, 'vif-uuid', opts,
token=self.context.auth_token)
def test__get_fixed_ip_address(self):
port_id = 'fake-port-id'
expected = "192.168.1.3"
api = dhcp_factory.DHCPFactory().provider
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
result = api._get_fixed_ip_address(port_id, fake_client)
self.assertEqual(expected, result)
fake_client.show_port.assert_called_once_with(port_id)
def test__get_fixed_ip_address_invalid_ip(self):
port_id = 'fake-port-id'
api = dhcp_factory.DHCPFactory().provider
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "invalid.ip",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
self.assertRaises(exception.InvalidIPv4Address,
api._get_fixed_ip_address,
port_id, fake_client)
fake_client.show_port.assert_called_once_with(port_id)
def test__get_fixed_ip_address_with_exception(self):
port_id = 'fake-port-id'
api = dhcp_factory.DHCPFactory().provider
fake_client = mock.Mock()
fake_client.show_port.side_effect = (
neutron_client_exc.NeutronClientException())
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_fixed_ip_address, port_id, fake_client)
fake_client.show_port.assert_called_once_with(port_id)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_port_ip_address(self, mock_gfia):
expected = "192.168.1.3"
port = object_utils.create_test_port(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id':
'test-vif-A'},
driver='fake')
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_port_ip_address(task, port,
mock.sentinel.client)
self.assertEqual(expected, result)
mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_port_ip_address_for_portgroup(self, mock_gfia):
expected = "192.168.1.3"
pg = object_utils.create_test_portgroup(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id':
'test-vif-A'},
driver='fake')
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_port_ip_address(task, pg,
mock.sentinel.client)
self.assertEqual(expected, result)
mock_gfia.assert_called_once_with('test-vif-A', mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_port_ip_address_with_exception(self, mock_gfia):
expected = "192.168.1.3"
port = object_utils.create_test_port(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
driver='fake')
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_port_ip_address, task, port,
mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_port_ip_address_for_portgroup_with_exception(
self, mock_gfia):
expected = "192.168.1.3"
pg = object_utils.create_test_portgroup(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
driver='fake')
mock_gfia.return_value = expected
with task_manager.acquire(self.context,
self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_port_ip_address, task, pg,
mock.sentinel.client)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_ip_addresses_ports(self, mock_gfia):
ip_address = '10.10.0.1'
expected = [ip_address]
port = object_utils.create_test_port(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id':
'test-vif-A'},
driver='fake')
mock_gfia.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_ip_addresses(task, [port],
mock.sentinel.client)
self.assertEqual(expected, result)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_fixed_ip_address')
def test__get_ip_addresses_portgroup(self, mock_gfia):
ip_address = '10.10.0.1'
expected = [ip_address]
pg = object_utils.create_test_portgroup(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id':
'test-vif-A'},
driver='fake')
mock_gfia.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api._get_ip_addresses(task, [pg], mock.sentinel.client)
self.assertEqual(expected, result)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address')
def test_get_ip_addresses(self, get_ip_mock):
ip_address = '10.10.0.1'
expected = [ip_address]
get_ip_mock.return_value = ip_address
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
result = api.get_ip_addresses(task)
get_ip_mock.assert_called_once_with(task, task.ports[0],
mock.ANY)
self.assertEqual(expected, result)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi._get_port_ip_address')
def test_get_ip_addresses_for_port_and_portgroup(self, get_ip_mock):
object_utils.create_test_portgroup(self.context,
node_id=self.node.id,
address='aa:bb:cc:dd:ee:ff',
uuid=uuidutils.generate_uuid(),
extra={'vif_port_id':
'test-vif-A'},
driver='fake')
with task_manager.acquire(self.context, self.node.uuid) as task:
api = dhcp_factory.DHCPFactory().provider
api.get_ip_addresses(task)
get_ip_mock.assert_has_calls(
[mock.call(task, task.ports[0], mock.ANY),
mock.call(task, task.portgroups[0], mock.ANY)])
@mock.patch.object(client.Client, 'create_port')
def test_create_cleaning_ports(self, create_mock):
        # Ensure we can create cleaning ports for in-band cleaning
create_mock.return_value = {'port': self.neutron_port}
expected = {self.ports[0].uuid: self.neutron_port['id']}
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
ports = api.create_cleaning_ports(task)
self.assertEqual(expected, ports)
create_mock.assert_called_once_with({'port': {
'network_id': '00000000-0000-0000-0000-000000000000',
'admin_state_up': True, 'mac_address': self.ports[0].address}})
@mock.patch.object(neutron.NeutronDHCPApi, '_rollback_cleaning_ports')
@mock.patch.object(client.Client, 'create_port')
def test_create_cleaning_ports_fail(self, create_mock, rollback_mock):
# Check that if creating a port fails, the ports are cleaned up
create_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeCleaningFailure,
api.create_cleaning_ports,
task)
create_mock.assert_called_once_with({'port': {
'network_id': '00000000-0000-0000-0000-000000000000',
'admin_state_up': True, 'mac_address': self.ports[0].address}})
rollback_mock.assert_called_once_with(task)
@mock.patch.object(neutron.NeutronDHCPApi, '_rollback_cleaning_ports')
@mock.patch.object(client.Client, 'create_port')
def test_create_cleaning_ports_fail_delayed(self, create_mock,
rollback_mock):
"""Check ports are cleaned up on failure to create them
This test checks that the port clean-up occurs
when the port create call was successful,
but the port in fact was not created.
"""
# NOTE(pas-ha) this is trying to emulate the complex port object
# with both methods and dictionary access with methods on elements
mockport = mock.MagicMock()
create_mock.return_value = mockport
# fail only on second 'or' branch to fool lazy eval
# and actually execute both expressions to assert on both mocks
mockport.get.return_value = True
mockitem = mock.Mock()
mockport.__getitem__.return_value = mockitem
mockitem.get.return_value = None
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeCleaningFailure,
api.create_cleaning_ports,
task)
create_mock.assert_called_once_with({'port': {
'network_id': '00000000-0000-0000-0000-000000000000',
'admin_state_up': True, 'mac_address': self.ports[0].address}})
rollback_mock.assert_called_once_with(task)
mockport.get.assert_called_once_with('port')
mockitem.get.assert_called_once_with('id')
mockport.__getitem__.assert_called_once_with('port')
@mock.patch.object(client.Client, 'create_port')
def test_create_cleaning_ports_bad_config(self, create_mock):
# Check an error is raised if the cleaning network is not set
self.config(cleaning_network_uuid=None, group='neutron')
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
api.create_cleaning_ports, task)
@mock.patch.object(client.Client, 'delete_port')
@mock.patch.object(client.Client, 'list_ports')
def test_delete_cleaning_ports(self, list_mock, delete_mock):
# Ensure that we can delete cleaning ports, and that ports with
# different macs don't get deleted
other_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f01',
'mac_address': 'aa:bb:cc:dd:ee:ff'}
list_mock.return_value = {'ports': [self.neutron_port, other_port]}
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
api.delete_cleaning_ports(task)
list_mock.assert_called_once_with(
network_id='00000000-0000-0000-0000-000000000000')
delete_mock.assert_called_once_with(self.neutron_port['id'])
@mock.patch.object(client.Client, 'list_ports')
def test_delete_cleaning_ports_list_fail(self, list_mock):
# Check that if listing ports fails, the node goes to cleanfail
list_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeCleaningFailure,
api.delete_cleaning_ports,
task)
list_mock.assert_called_once_with(
network_id='00000000-0000-0000-0000-000000000000')
@mock.patch.object(client.Client, 'delete_port')
@mock.patch.object(client.Client, 'list_ports')
def test_delete_cleaning_ports_delete_fail(self, list_mock, delete_mock):
# Check that if deleting ports fails, the node goes to cleanfail
list_mock.return_value = {'ports': [self.neutron_port]}
delete_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeCleaningFailure,
api.delete_cleaning_ports,
task)
list_mock.assert_called_once_with(
network_id='00000000-0000-0000-0000-000000000000')
delete_mock.assert_called_once_with(self.neutron_port['id'])
def test_out_range_auth_strategy(self):
self.assertRaises(ValueError, cfg.CONF.set_override,
'auth_strategy', 'fake', 'neutron',
enforce_type=True)
|
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
# Django REST Framework
from rest_framework.exceptions import MethodNotAllowed, PermissionDenied
from rest_framework import permissions
# AWX
from awx.main.access import * # noqa
from awx.main.models import * # noqa
from awx.main.utils import get_object_or_400
logger = logging.getLogger('awx.api.permissions')
__all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
'TaskPermission', 'ProjectUpdatePermission', 'InventoryInventorySourcesUpdatePermission',
'UserPermission', 'IsSuperUser']
class ModelAccessPermission(permissions.BasePermission):
'''
Default permissions class to check user access based on the model and
request method, optionally verifying the request data.
'''
def check_options_permissions(self, request, view, obj=None):
return self.check_get_permissions(request, view, obj)
def check_head_permissions(self, request, view, obj=None):
return self.check_get_permissions(request, view, obj)
def check_get_permissions(self, request, view, obj=None):
if hasattr(view, 'parent_model'):
parent_obj = view.get_parent_object()
if not check_user_access(request.user, view.parent_model, 'read',
parent_obj):
return False
if not obj:
return True
return check_user_access(request.user, view.model, 'read', obj)
def check_post_permissions(self, request, view, obj=None):
if hasattr(view, 'parent_model'):
parent_obj = view.get_parent_object()
if not check_user_access(request.user, view.parent_model, 'read',
parent_obj):
return False
if hasattr(view, 'parent_key'):
if not check_user_access(request.user, view.model, 'add', {view.parent_key: parent_obj}):
return False
return True
elif getattr(view, 'is_job_start', False):
if not obj:
return True
return check_user_access(request.user, view.model, 'start', obj)
elif getattr(view, 'is_job_cancel', False):
if not obj:
return True
return check_user_access(request.user, view.model, 'cancel', obj)
else:
if obj:
return True
return check_user_access(request.user, view.model, 'add', request.data)
def check_put_permissions(self, request, view, obj=None):
if not obj:
# FIXME: For some reason this needs to return True
# because it is first called with obj=None?
return True
if getattr(view, 'is_variable_data', False):
return check_user_access(request.user, view.model, 'change', obj,
dict(variables=request.data))
else:
return check_user_access(request.user, view.model, 'change', obj,
request.data)
def check_patch_permissions(self, request, view, obj=None):
return self.check_put_permissions(request, view, obj)
def check_delete_permissions(self, request, view, obj=None):
if not obj:
# FIXME: For some reason this needs to return True
# because it is first called with obj=None?
return True
return check_user_access(request.user, view.model, 'delete', obj)
def check_permissions(self, request, view, obj=None):
'''
Perform basic permissions checking before delegating to the appropriate
method based on the request method.
'''
# Don't allow anonymous users. 401, not 403, hence no raised exception.
if not request.user or request.user.is_anonymous():
return False
# Always allow superusers
if getattr(view, 'always_allow_superuser', True) and request.user.is_superuser:
return True
# Check if view supports the request method before checking permission
# based on request method.
if request.method.upper() not in view.allowed_methods:
raise MethodNotAllowed(request.method)
# Check permissions for the given view and object, based on the request
# method used.
check_method = getattr(self, 'check_%s_permissions' % request.method.lower(), None)
result = check_method and check_method(request, view, obj)
if not result:
raise PermissionDenied()
return result
def has_permission(self, request, view, obj=None):
logger.debug('has_permission(user=%s method=%s data=%r, %s, %r)',
request.user, request.method, request.data,
view.__class__.__name__, obj)
try:
response = self.check_permissions(request, view, obj)
except Exception as e:
logger.debug('has_permission raised %r', e, exc_info=True)
raise
else:
logger.debug('has_permission returned %r', response)
return response
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view, obj)
class JobTemplateCallbackPermission(ModelAccessPermission):
'''
Permission check used by job template callback view for requests from
    ephemeral hosts.
'''
def has_permission(self, request, view, obj=None):
# If another authentication method was used and it's not a POST, return
# True to fall through to the next permission class.
if (request.user or request.auth) and request.method.lower() != 'post':
return super(JobTemplateCallbackPermission, self).has_permission(request, view, obj)
# Require method to be POST, host_config_key to be specified and match
# the requested job template, and require the job template to be
# active in order to proceed.
host_config_key = request.data.get('host_config_key', '')
if request.method.lower() != 'post':
raise PermissionDenied()
elif not host_config_key:
raise PermissionDenied()
elif obj and obj.host_config_key != host_config_key:
raise PermissionDenied()
else:
return True
class TaskPermission(ModelAccessPermission):
'''
Permission checks used for API callbacks from running a task.
'''
def has_permission(self, request, view, obj=None):
# If another authentication method was used other than the one for
# callbacks, default to the superclass permissions checking.
if request.user or not request.auth:
return super(TaskPermission, self).has_permission(request, view, obj)
# Verify that the ID present in the auth token is for a valid, active
# unified job.
try:
unified_job = UnifiedJob.objects.get(status='running',
pk=int(request.auth.split('-')[0]))
except (UnifiedJob.DoesNotExist, TypeError):
return False
# Verify that the request method is one of those allowed for the given
# view, also that the job or inventory being accessed matches the auth
# token.
if view.model == Inventory and request.method.lower() in ('head', 'get'):
return bool(not obj or obj.pk == unified_job.inventory_id)
else:
return False
class ProjectUpdatePermission(ModelAccessPermission):
'''
Permission check used by ProjectUpdateView to determine who can update projects
'''
def check_get_permissions(self, request, view, obj=None):
project = get_object_or_400(view.model, pk=view.kwargs['pk'])
return check_user_access(request.user, view.model, 'read', project)
def check_post_permissions(self, request, view, obj=None):
project = get_object_or_400(view.model, pk=view.kwargs['pk'])
return check_user_access(request.user, view.model, 'start', project)
class InventoryInventorySourcesUpdatePermission(ModelAccessPermission):
def check_post_permissions(self, request, view, obj=None):
inventory = get_object_or_400(view.model, pk=view.kwargs['pk'])
return check_user_access(request.user, view.model, 'update', inventory)
class UserPermission(ModelAccessPermission):
def check_post_permissions(self, request, view, obj=None):
if not request.data:
return request.user.admin_of_organizations.exists()
elif request.user.is_superuser:
return True
raise PermissionDenied()
class IsSuperUser(permissions.BasePermission):
"""
    Allows access only to superusers.
"""
def has_permission(self, request, view):
return request.user and request.user.is_superuser
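# Usage sketch (hypothetical, not part of this module): a DRF view opts in to
# these checks by listing a permission class in `permission_classes`. The view
# and model names below are illustrative only.
#
#     from rest_framework import generics
#
#     class ExampleResourceDetail(generics.RetrieveAPIView):
#         model = ExampleResource
#         permission_classes = (ModelAccessPermission,)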
|
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Create Station-model plots."""
from enum import Enum
import numpy as np
from .wx_symbols import (current_weather, high_clouds, low_clouds, mid_clouds,
pressure_tendency, sky_cover, wx_symbol_font)
from ..package_tools import Exporter
exporter = Exporter(globals())
@exporter.export
class StationPlot:
"""Make a standard meteorological station plot.
Plots values, symbols, or text spaced around a central location. Can also plot wind
    barbs at the center of the location.
"""
location_names = {'C': (0, 0), 'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1),
'N2': (0, 2), 'NNE': (1, 2), 'ENE': (2, 1), 'E2': (2, 0),
'ESE': (2, -1), 'SSE': (1, -2), 'S2': (0, -2), 'SSW': (-1, -2),
'WSW': (-2, -1), 'W2': (-2, 0), 'WNW': (-2, 1), 'NNW': (-1, 2)}
def __init__(self, ax, x, y, fontsize=10, spacing=None, transform=None, **kwargs):
"""Initialize the StationPlot with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`~matplotlib.axes.Axes` for plotting
x : array_like
The x location of the stations in the plot
y : array_like
The y location of the stations in the plot
fontsize : int
The fontsize to use for drawing text
spacing : int
The spacing, in points, that corresponds to a single increment between
station plot elements.
transform : matplotlib.transforms.Transform (or compatible)
The default transform to apply to the x and y positions when plotting.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
These will be passed to all the plotting methods, and thus need to be valid
for all plot types, such as `clip_on`.
"""
self.ax = ax
self.x = np.atleast_1d(x)
self.y = np.atleast_1d(y)
self.fontsize = fontsize
self.spacing = fontsize if spacing is None else spacing
self.transform = transform
self.items = {}
self.barbs = None
self.arrows = None
self.default_kwargs = kwargs
def plot_symbol(self, location, codes, symbol_mapper, **kwargs):
"""At the specified location in the station model plot a set of symbols.
This specifies that at the offset `location`, the data in `codes` should be
converted to unicode characters (for our :data:`wx_symbol_font`) using `symbol_mapper`,
and plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
codes : array_like
The numeric values that should be converted to unicode characters for plotting.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
from metpy.plots import StationPlot
from metpy.plots.wx_symbols import current_weather, current_weather_auto
from metpy.plots.wx_symbols import low_clouds, mid_clouds, high_clouds
from metpy.plots.wx_symbols import sky_cover, pressure_tendency
def plot_symbols(mapper, name, nwrap=12, figsize=(10, 1.4)):
# Determine how many symbols there are and layout in rows of nwrap
# if there are more than nwrap symbols
num_symbols = len(mapper)
codes = np.arange(len(mapper))
ncols = nwrap
if num_symbols <= nwrap:
nrows = 1
x = np.linspace(0, 1, len(mapper))
y = np.ones_like(x)
ax_height = 0.8
else:
nrows = int(ceil(num_symbols / ncols))
x = np.tile(np.linspace(0, 1, ncols), nrows)[:num_symbols]
y = np.repeat(np.arange(nrows, 0, -1), ncols)[:num_symbols]
figsize = (10, 1 * nrows + 0.4)
ax_height = 0.8 + 0.018 * nrows
fig = plt.figure(figsize=figsize, dpi=300)
ax = fig.add_axes([0, 0, 1, ax_height])
ax.set_title(name, size=20)
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
# Plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_symbol('C', codes, mapper)
sp.plot_parameter((0, -1), codes, fontsize=18)
ax.set_ylim(-0.05, nrows + 0.5)
plt.show()
plot_symbols(current_weather, "Current Weather Symbols")
plot_symbols(current_weather_auto, "Current Weather Auto Reported Symbols")
plot_symbols(low_clouds, "Low Cloud Symbols")
plot_symbols(mid_clouds, "Mid Cloud Symbols")
plot_symbols(high_clouds, "High Cloud Symbols")
plot_symbols(sky_cover, "Sky Cover Symbols")
plot_symbols(pressure_tendency, "Pressure Tendency Symbols")
See Also
--------
plot_barb, plot_parameter, plot_text
"""
# Make sure we use our font for symbols
kwargs['fontproperties'] = wx_symbol_font.copy()
return self.plot_parameter(location, codes, symbol_mapper, **kwargs)
def plot_parameter(self, location, parameter, formatter='.0f', **kwargs):
"""At the specified location in the station model plot a set of values.
This specifies that at the offset `location`, the data in `parameter` should be
plotted. The conversion of the data values to a string is controlled by `formatter`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
parameter : array_like
The numeric values that should be plotted
formatter : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
plot_units: `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_symbol, plot_text
"""
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
parameter = self._scalar_plotting_units(parameter, plotting_units)
if hasattr(parameter, 'units'):
parameter = parameter.magnitude
text = self._to_string_list(parameter, formatter)
return self.plot_text(location, text, **kwargs)
def plot_text(self, location, text, **kwargs):
"""At the specified location in the station model plot a collection of text.
This specifies that at the offset `location`, the strings in `text` should be
plotted.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
If something has already been plotted at this location, it will be replaced.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this parameter. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions; increments
are multiplied by `spacing` to give offsets in x and y relative to the center.
text : list (or array) of strings
The strings that should be plotted
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
plot_barb, plot_parameter, plot_symbol
"""
location = self._handle_location(location)
kwargs = self._make_kwargs(kwargs)
text_collection = self.ax.scattertext(self.x, self.y, text, loc=location,
size=kwargs.pop('fontsize', self.fontsize),
**kwargs)
if location in self.items:
self.items[location].remove()
self.items[location] = text_collection
return text_collection
def plot_barb(self, u, v, **kwargs):
r"""At the center of the station model plot wind barbs.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the barbs.
v : array-like
The data to use for the v-component of the barbs.
plot_units: `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
plot_arrow, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._vector_plotting_units(u, v, plotting_units)
# Empirically determined
pivot = 0.51 * np.sqrt(self.fontsize)
length = 1.95 * np.sqrt(self.fontsize)
defaults = {'sizes': {'spacing': .15, 'height': 0.5, 'emptybarb': 0.35},
'length': length, 'pivot': pivot}
defaults.update(kwargs)
# Remove old barbs
if self.barbs:
self.barbs.remove()
self.barbs = self.ax.barbs(self.x, self.y, u, v, **defaults)
def plot_arrow(self, u, v, **kwargs):
r"""At the center of the station model plot wind arrows.
Additional keyword arguments given will be passed onto matplotlib's
:meth:`~matplotlib.axes.Axes.quiver` function; this is useful for specifying things
like color or line width.
Parameters
----------
u : array-like
The data to use for the u-component of the arrows.
v : array-like
The data to use for the v-component of the arrows.
plot_units: `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Additional keyword arguments to pass to matplotlib's
            :meth:`~matplotlib.axes.Axes.quiver` function.
See Also
--------
plot_barb, plot_parameter, plot_symbol, plot_text
"""
kwargs = self._make_kwargs(kwargs)
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
u, v = self._vector_plotting_units(u, v, plotting_units)
defaults = {'pivot': 'tail', 'scale': 20, 'scale_units': 'inches', 'width': 0.002}
defaults.update(kwargs)
# Remove old arrows
if self.arrows:
self.arrows.remove()
self.arrows = self.ax.quiver(self.x, self.y, u, v, **defaults)
@staticmethod
def _vector_plotting_units(u, v, plotting_units):
"""Handle conversion to plotting units for barbs and arrows."""
if plotting_units:
if hasattr(u, 'units') and hasattr(v, 'units'):
u = u.to(plotting_units)
v = v.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'u and v wind components.')
        # Strip units; CartoPy transforms don't handle them
u = np.array(u)
v = np.array(v)
return u, v
@staticmethod
def _scalar_plotting_units(scalar_value, plotting_units):
"""Handle conversion to plotting units for non-vector quantities."""
if plotting_units:
if hasattr(scalar_value, 'units'):
scalar_value = scalar_value.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'scalar value being converted.')
return scalar_value
def _make_kwargs(self, kwargs):
"""Assemble kwargs as necessary.
Inserts our defaults as well as ensures transform is present when appropriate.
"""
# Use default kwargs and update with additional ones
all_kw = self.default_kwargs.copy()
all_kw.update(kwargs)
# Pass transform if necessary
if 'transform' not in all_kw and self.transform:
all_kw['transform'] = self.transform
return all_kw
@staticmethod
def _to_string_list(vals, fmt):
"""Convert a sequence of values to a list of strings."""
if not callable(fmt):
def formatter(s):
"""Turn a format string into a callable."""
return format(s, fmt)
else:
formatter = fmt
return [formatter(v) if np.isfinite(v) else '' for v in vals]
def _handle_location(self, location):
"""Process locations to get a consistent set of tuples for location."""
if isinstance(location, str):
location = self.location_names[location]
xoff, yoff = location
return xoff * self.spacing, yoff * self.spacing
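# Usage sketch (illustrative, not part of the original module): plot one station
# with a temperature value, a sky-cover symbol, and a wind barb. Axes setup is
# assumed; `sky_cover` is already imported at the top of this file.
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     sp = StationPlot(ax, [0.5], [0.5], fontsize=12)
#     sp.plot_parameter('NW', [25.0], color='red')   # e.g. temperature in degC
#     sp.plot_symbol('C', [3], sky_cover)            # 3/8 sky cover
#     sp.plot_barb([5.0], [10.0])                    # u, v wind components
#     plt.show()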
@exporter.export
class StationPlotLayout(dict):
    r"""Make a layout to encapsulate plotting using :class:`StationPlot`.
This class keeps a collection of offsets, plot formats, etc. for a parameter based
on its name. This then allows a dictionary of data (or any object that allows looking
up of arrays based on a name) to be passed to :meth:`plot()` to plot the data all at once.
See Also
--------
StationPlot
"""
class PlotTypes(Enum):
r"""Different plotting types for the layout.
Controls how items are displayed (e.g. converting values to symbols).
"""
value = 1
symbol = 2
text = 3
barb = 4
def add_value(self, location, name, fmt='.0f', units=None, **kwargs):
r"""Add a numeric value to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted. The conversion of the data values to
a string is controlled by `fmt`. The units required for plotting can also
be passed in using `units`, which will cause the data to be converted before
plotting.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
fmt : str or callable, optional
How to format the data as a string for plotting. If a string, it should be
compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
units : pint-compatible unit, optional
The units to use for plotting. Data will be converted to this unit before
conversion to a string. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_text
"""
self[location] = (self.PlotTypes.value, name, (fmt, units, kwargs))
def add_symbol(self, location, name, symbol_mapper, **kwargs):
r"""Add a symbol to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
        container using the key `name` and plotted. Data values will be converted to glyphs
appropriate for MetPy's symbol font using the callable `symbol_mapper`.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple[float, float]
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
symbol_mapper : callable
Controls converting data values to unicode code points for the
:data:`wx_symbol_font` font. This should take a value and return a single unicode
character. See :mod:`metpy.plots.wx_symbols` for included mappers.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_text, add_value
"""
self[location] = (self.PlotTypes.symbol, name, (symbol_mapper, kwargs))
def add_text(self, location, name, **kwargs):
r"""Add a text field to the station layout.
This specifies that at the offset `location`, data should be pulled from the data
container using the key `name` and plotted directly as text with no conversion
applied.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or font properties.
Parameters
----------
location : str or tuple(float, float)
The offset (relative to center) to plot this value. If str, should be one of
'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
specifying the number of increments in the x and y directions.
name : str
The name of the parameter, which is used as a key to pull data out of the
data container passed to :meth:`plot`.
kwargs
Additional keyword arguments to use for matplotlib's plotting functions.
See Also
--------
add_barb, add_symbol, add_value
"""
self[location] = (self.PlotTypes.text, name, kwargs)
def add_barb(self, u_name, v_name, units=None, **kwargs):
r"""Add a wind barb to the center of the station layout.
This specifies that u- and v-component data should be pulled from the data
container using the keys `u_name` and `v_name`, respectively, and plotted as
a wind barb at the center of the station plot. If `units` are given, both
components will be converted to these units.
Additional keyword arguments given will be passed onto the actual plotting
code; this is useful for specifying things like color or line width.
Parameters
----------
u_name : str
The name of the parameter for the u-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
v_name : str
The name of the parameter for the v-component for `barbs`, which is used as
a key to pull data out of the data container passed to :meth:`plot`.
units : pint-compatible unit, optional
            The units to use for plotting. Both wind components will be converted to
            this unit before plotting. If not specified, no conversion is done.
kwargs
Additional keyword arguments to use for matplotlib's
:meth:`~matplotlib.axes.Axes.barbs` function.
See Also
--------
add_symbol, add_text, add_value
"""
# Not sure if putting the v_name as a plot-specific option is appropriate,
# but it seems simpler than making name code in plot handle tuples
self['barb'] = (self.PlotTypes.barb, (u_name, v_name), (units, kwargs))
def names(self):
"""Get the list of names used by the layout.
Returns
-------
list[str]
the list of names of variables used by the layout
"""
ret = []
for item in self.values():
if item[0] == self.PlotTypes.barb:
ret.extend(item[1])
else:
ret.append(item[1])
return ret
def plot(self, plotter, data_dict):
"""Plot a collection of data using this layout for a station plot.
This function iterates through the entire specified layout, pulling the fields named
in the layout from `data_dict` and plotting them using `plotter` as specified
in the layout. Fields present in the layout, but not in `data_dict`, are ignored.
Parameters
----------
plotter : StationPlot
:class:`StationPlot` to use to plot the data. This controls the axes,
spacing, station locations, etc.
data_dict : dict[str, array-like]
Data container that maps a name to an array of data. Data from this object
will be used to fill out the station plot.
"""
def coerce_data(dat, u):
try:
return dat.to(u).magnitude
except AttributeError:
return dat
for loc, info in self.items():
typ, name, args = info
if typ == self.PlotTypes.barb:
# Try getting the data
u_name, v_name = name
u_data = data_dict.get(u_name)
v_data = data_dict.get(v_name)
# Plot if we have the data
if not (v_data is None or u_data is None):
units, kwargs = args
plotter.plot_barb(coerce_data(u_data, units), coerce_data(v_data, units),
**kwargs)
else:
# Check that we have the data for this location
data = data_dict.get(name)
if data is not None:
# If we have it, hand it to the appropriate method
if typ == self.PlotTypes.value:
fmt, units, kwargs = args
plotter.plot_parameter(loc, coerce_data(data, units), fmt, **kwargs)
elif typ == self.PlotTypes.symbol:
mapper, kwargs = args
plotter.plot_symbol(loc, data, mapper, **kwargs)
elif typ == self.PlotTypes.text:
plotter.plot_text(loc, data, **args)
def __repr__(self):
"""Return string representation of layout."""
return ('{'
+ ', '.join('{0}: ({1[0].name}, {1[1]}, ...)'.format(loc, info)
for loc, info in sorted(self.items()))
+ '}')
with exporter:
#: :desc: Simple station plot layout
simple_layout = StationPlotLayout()
simple_layout.add_barb('eastward_wind', 'northward_wind', 'knots')
simple_layout.add_value('NW', 'air_temperature', units='degC')
simple_layout.add_value('SW', 'dew_point_temperature', units='degC')
simple_layout.add_value('NE', 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
simple_layout.add_symbol('C', 'cloud_coverage', sky_cover)
simple_layout.add_symbol('W', 'present_weather', current_weather)
#: Full NWS station plot `layout`__
#:
#: __ http://oceanservice.noaa.gov/education/yos/resource/JetStream/synoptic/wxmaps.htm
nws_layout = StationPlotLayout()
nws_layout.add_value((-1, 1), 'air_temperature', units='degF')
nws_layout.add_symbol((0, 2), 'high_cloud_type', high_clouds)
nws_layout.add_symbol((0, 1), 'medium_cloud_type', mid_clouds)
nws_layout.add_symbol((0, -1), 'low_cloud_type', low_clouds)
nws_layout.add_value((1, 1), 'air_pressure_at_sea_level', units='mbar',
fmt=lambda v: format(10 * v, '03.0f')[-3:])
nws_layout.add_value((-2, 0), 'visibility_in_air', fmt='.0f', units='miles')
nws_layout.add_symbol((-1, 0), 'present_weather', current_weather)
nws_layout.add_symbol((0, 0), 'cloud_coverage', sky_cover)
nws_layout.add_value((1, 0), 'tendency_of_air_pressure', units='mbar',
fmt=lambda v: ('-' if v < 0 else '') + format(10 * abs(v), '02.0f'))
nws_layout.add_symbol((2, 0), 'tendency_of_air_pressure_symbol', pressure_tendency)
nws_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
nws_layout.add_value((-1, -1), 'dew_point_temperature', units='degF')
# TODO: Fix once we have the past weather symbols converted
nws_layout.add_symbol((1, -1), 'past_weather', current_weather)
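# Editor's illustrative sketch (not part of the original module): a minimal
# custom layout built with the methods documented above. ``stationplot`` and
# ``data`` are hypothetical stand-ins for a StationPlot instance and a dict of
# arrays created elsewhere; the field names follow the CF-style names used by
# the layouts above.
def _example_custom_layout(stationplot, data):
    layout = StationPlotLayout()
    layout.add_barb('eastward_wind', 'northward_wind', units='knots')
    layout.add_value('NW', 'air_temperature', fmt='.1f', units='degC')
    layout.add_symbol('C', 'cloud_coverage', sky_cover)
    layout.add_text('SE', 'station_id', color='blue')
    # Fields named in the layout but missing from ``data`` are silently skipped.
    layout.plot(stationplot, data)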
|
|
""" Session related tags
Note: It's better to use tag names without underscores, since those need
to be escaped in MarkItUp CMS plugins.
"""
from anglicize import anglicize
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
### Constants
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('e', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
('c', 'Community sessions', (
'The community sessions are intended for Python communities such as '
'the Python Software Foundation (PSF) to use for members meetings.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
_check_talk_types(TYPE_NAMES)
### Helpers
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{}'.format(speaker.user.assopy_user.name()),
} for speaker in talk.get_all_speakers()]
def speaker_name(speaker):
name = '%s %s' % (
speaker.user.first_name,
speaker.user.last_name)
# Remove whitespace
return name.strip()
def speaker_list_key(entry):
speaker = entry[1]
name = '%s %s' % (
speaker.user.first_name,
speaker.user.last_name)
# Remove whitespace and use title case
return anglicize(name.strip().title())
###
EXAMPLE_ACCEPTEDSESSIONS = """
{% load sessions %}
{% acceptedsessions "ep2018" filter_types="t,r,p,i,h,m" as sessiondata %}
{% for category in sessiondata %}
<h3>{{ category.name }}</h3>
{% if category.description %}
<p>{{ category.description }}</p>
{% endif %}
<ul>
{% for session in category.sessions %}
<li><a href="{{ session.url }}">{{ session.title }}</a> by
{% for speaker in session.speakers %}
<a href="{{ speaker.url }}">{{ speaker.fullname }}</a>{% if not forloop.last %}, {% endif %}
{% endfor %}
</li>
{% endfor %}
</ul>
{% if not category.sessions %}
<ul><li>No sessions have been selected yet.</li></ul>
{% endif %}
{% endfor %}
"""
# Note: Django has problems parsing multiple templatetag arguments if the
# arguments contain underscores. It works fine with a single argument.
@register.simple_tag
def acceptedsessions(conference, filtertypes=None, filtercommunity=None,
filterdomain=None,
# For b/w compatibility
filter_types=None):
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
if filter_types is not None and filtertypes is None:
# For b/w compatibility
filtertypes = filter_types
if filtercommunity:
talks = talks.filter(
p3_talk__sub_community=filtercommunity.strip())
if filterdomain:
talks = talks.filter(
domain=filterdomain.strip())
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if admin_type == 'm':
# EPS sessions
type = 'e'
elif admin_type == 'k':
# Keynotes
type = 'k'
elif admin_type == 'p':
# Community sessions
type = 'c'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
elif 'reserved for' in talk.title.lower():
# Don't list reserved talk slots
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
if filtertypes is not None:
filtertypes = [x.strip() for x in filtertypes.split(',')]
types = [t
for t in TYPE_NAMES
if t[0] in filtertypes]
else:
types = TYPE_NAMES
output = []
for type, type_name, description in types:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output.append({
'type': type,
'name': type_name,
'description': description,
'sessions': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'session': talk,
} for talk in bag]
})
return output
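# Editor's illustrative note (not part of the original module): templates only
# see plain data from the tag above. A single category entry looks roughly
# like the structure returned below; the title and URLs are hypothetical
# placeholders, and each real session dict additionally carries the Talk
# instance under the 'session' key.
def _example_category_entry():
    return {
        'type': 't',
        'name': 'Talks',
        'description': '',
        'sessions': [{
            'title': 'A hypothetical talk',
            'url': '/ep2018/talk/a-hypothetical-talk/',
            'speakers': [{'url': '/p/jane-doe/', 'fullname': 'Jane Doe'}],
        }],
    }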
EXAMPLE_SPEAKERS = """
{% load sessions %}
{% speakers "ep2018" as speakerdata %}
{% for name, group in speakerdata.groups.items %}
<h3>{{ name }} ...</h3>
<ul>
{% for speaker in group %}
<li><a href="{{ speaker.url }}">{{ speaker.fullname }}</a></li>
{% endfor %}
</ul>
{% endfor %}
<p>{{ speakerdata.count }} speakers in total.</p>
"""
@register.simple_tag
def speakers(conference, filter_types=None):
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Find all speakers
speaker_dict = {}
for talk in talks:
for speaker in talk.get_all_speakers():
name = speaker_name(speaker)
if not name:
continue
if name.lower() in ('to be announced', 'tobey announced'):
# Skip place holder names
continue
speaker_dict[speaker_name(speaker)] = speaker
# Prepare list
speaker_list = list(speaker_dict.items())
speaker_list.sort(key=speaker_list_key)
data = {
'listing': speaker_list,
'count': len(speaker_list),
}
# Print list of speakers
groups = {}
group = ''
for entry in speaker_list:
name, speaker = entry
sort_name = speaker_list_key(entry)
if not group or group != sort_name[0]:
group = sort_name[0]
group_data = []
groups[group] = group_data
group_data.append({
'speaker': speaker,
'url': profile_url(speaker.user),
'fullname': name,
})
data['groups'] = groups
data['groups_list'] = sorted(groups.items())
return data
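# Editor's illustrative sketch (not part of the original module): the loop in
# speakers() buckets entries by the first letter of the anglicized, title-cased
# sort name. The helper below mirrors that grouping with plain strings; the
# names are hypothetical.
def _example_group_by_initial(names=('Ada Lovelace', 'Alan Turing', 'Grace Hopper')):
    groups = {}
    for name in sorted(names, key=lambda n: anglicize(n.title())):
        groups.setdefault(anglicize(name.title())[0], []).append(name)
    # e.g. {'A': ['Ada Lovelace', 'Alan Turing'], 'G': ['Grace Hopper']}
    return groups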
|
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any further
        #   Always return leaves
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
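# Editor's illustrative sketch (not part of the original module): the two
# helpers above are normally pointed at the stock fixer package. Assuming the
# standard "lib2to3.fixes" package is importable, the calls look like this.
def _example_fixer_discovery():
    # Short names with the "fix_" prefix removed, e.g. 'apply', 'print', ...
    short_names = get_all_fix_names("lib2to3.fixes")
    # Fully qualified module names, e.g. 'lib2to3.fixes.fix_apply', ...
    full_names = get_fixers_from_package("lib2to3.fixes")
    return short_names, full_names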
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
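# Editor's illustrative sketch (not part of the original module): the helper
# above only scans the leading docstring and __future__ imports of a source
# string, stopping at the first other statement.
def _example_detect_future_features():
    src = "from __future__ import print_function, division\nprint('x')\n"
    # Expected: frozenset({'print_function', 'division'})
    return _detect_future_features(src)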
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked as
                explicit (explicit fixers are otherwise skipped).
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() for
        # files processed even if they were not changed during refactoring,
        # provided the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
                                    if fxr not in match_set:
                                        match_set[fxr] = []
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import tempfile
import unittest
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io import avroio
from apache_beam.io import filebasedsource
from apache_beam.io import source_test_utils
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
# Import the following private classes for testing purposes.
from apache_beam.io.avroio import _AvroSource as AvroSource
from apache_beam.io.avroio import _AvroSink as AvroSink
import avro.datafile
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
import avro.schema
import hamcrest as hc
# Import snappy optionally; some tests will be skipped when import fails.
try:
import snappy # pylint: disable=import-error
except ImportError:
snappy = None # pylint: disable=invalid-name
logging.warning('snappy is not installed; some tests will be skipped.')
class TestAvro(unittest.TestCase):
_temp_files = []
def setUp(self):
    # Reduce the size of thread pools. Without this, test execution may fail
    # in environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def tearDown(self):
for path in self._temp_files:
if os.path.exists(path):
os.remove(path)
self._temp_files = []
RECORDS = [{'name': 'Thomas',
'favorite_number': 1,
'favorite_color': 'blue'}, {'name': 'Henry',
'favorite_number': 3,
'favorite_color': 'green'},
{'name': 'Toby',
'favorite_number': 7,
'favorite_color': 'brown'}, {'name': 'Gordon',
'favorite_number': 4,
'favorite_color': 'blue'},
{'name': 'Emily',
'favorite_number': -1,
'favorite_color': 'Red'}, {'name': 'Percy',
'favorite_number': 6,
'favorite_color': 'Green'}]
SCHEMA = avro.schema.parse('''
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
''')
def _write_data(self,
directory=None,
prefix=tempfile.template,
codec='null',
count=len(RECORDS)):
with tempfile.NamedTemporaryFile(
delete=False, dir=directory, prefix=prefix) as f:
writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
len_records = len(self.RECORDS)
for i in range(count):
writer.append(self.RECORDS[i % len_records])
writer.close()
self._temp_files.append(f.name)
return f.name
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp()
file_name = None
for _ in range(num_files):
file_name = self._write_data(directory=temp_dir, prefix='mytemp')
assert file_name
file_name_prefix = file_name[:file_name.rfind(os.path.sep)]
return file_name_prefix + os.path.sep + 'mytemp*'
def _run_avro_test(self, pattern, desired_bundle_size, perform_splitting,
expected_result):
source = AvroSource(pattern)
read_records = []
if perform_splitting:
assert desired_bundle_size
splits = [
split
for split in source.split(desired_bundle_size=desired_bundle_size)
]
if len(splits) < 2:
raise ValueError('Test is trivial. Please adjust it so that at least '
'two splits get generated')
sources_info = [
(split.source, split.start_position, split.stop_position)
for split in splits
]
source_test_utils.assertSourcesEqualReferenceSource((source, None, None),
sources_info)
else:
read_records = source_test_utils.readFromSource(source, None, None)
self.assertItemsEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_avro_source'
source = AvroSource(file_name, validate=False)
dd = DisplayData.create_from(source)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_avro_source'
read = avroio.ReadFromAvro(file_name, validate=False)
dd = DisplayData.create_from(read)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_avro_sink'
sink = AvroSink(file_name,
self.SCHEMA,
'null',
'.end',
0,
None,
'application/x-avro')
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher(
'schema',
str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher(
'codec',
'null'),
DisplayDataItemMatcher(
'compression',
'uncompressed')]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_avro_sink'
write = avroio.WriteToAvro(file_name,
self.SCHEMA)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher(
'schema',
str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher(
'codec',
'deflate'),
DisplayDataItemMatcher(
'compression',
'uncompressed')]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_reentrant_without_splitting(self):
file_name = self._write_data()
source = AvroSource(file_name)
source_test_utils.assertReentrantReadsSucceed((source, None, None))
  def test_read_reentrant_with_splitting(self):
file_name = self._write_data()
source = AvroSource(file_name)
splits = [
split for split in source.split(desired_bundle_size=100000)]
assert len(splits) == 1
source_test_utils.assertReentrantReadsSucceed(
(splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_read_without_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, 10000, True, expected_result)
def test_split_points(self):
file_name = self._write_data(count=12000)
source = AvroSource(file_name)
splits = [
split
for split in source.split(desired_bundle_size=float('inf'))
]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
# There are a total of three blocks. Each block has more than 10 records.
# When reading records of the first block, range_tracker.split_points()
# should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
self.assertEquals(
split_points_report[:10],
[(0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)] * 10)
# When reading records of last block, range_tracker.split_points() should
# return (2, 1)
self.assertEquals(split_points_report[-10:], [(2, 1)] * 10)
def test_read_without_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
@unittest.skipIf(snappy is None, 'snappy not installed.')
def test_read_without_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
@unittest.skipIf(snappy is None, 'snappy not installed.')
def test_read_with_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_read_without_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, None, False, expected_result)
def test_read_with_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, 100, True, expected_result)
def test_dynamic_work_rebalancing_exhaustive(self):
    # Adjusting block size so that we can perform an exhaustive dynamic
# work rebalancing test that completes within an acceptable amount of time.
old_sync_interval = avro.datafile.SYNC_INTERVAL
try:
avro.datafile.SYNC_INTERVAL = 2
file_name = self._write_data(count=5)
source = AvroSource(file_name)
splits = [split
for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
source_test_utils.assertSplitAtFractionExhaustive(splits[0].source)
finally:
avro.datafile.SYNC_INTERVAL = old_sync_interval
def test_corrupted_file(self):
file_name = self._write_data()
with open(file_name, 'rb') as f:
data = f.read()
# Corrupt the last character of the file which is also the last character of
# the last sync_marker.
last_char_index = len(data) - 1
corrupted_data = data[:last_char_index]
corrupted_data += 'A' if data[last_char_index] == 'B' else 'B'
with tempfile.NamedTemporaryFile(
delete=False, prefix=tempfile.template) as f:
f.write(corrupted_data)
corrupted_file_name = f.name
source = AvroSource(corrupted_file_name)
with self.assertRaises(ValueError) as exn:
source_test_utils.readFromSource(source, None, None)
self.assertEqual(0, exn.exception.message.find('Unexpected sync marker'))
def test_source_transform(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(p | avroio.ReadFromAvro(path), equal_to(self.RECORDS))
def test_sink_transform(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p | beam.Create(self.RECORDS) | avroio.WriteToAvro(path, self.SCHEMA)
with TestPipeline() as p:
# json used for stable sortability
readback = p | avroio.ReadFromAvro(path + '*') | beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(snappy is None, 'snappy not installed.')
def test_sink_transform_snappy(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
with TestPipeline() as p:
# pylint: disable=expression-not-assigned
p | beam.Create(self.RECORDS) | avroio.WriteToAvro(
path, self.SCHEMA, codec='snappy')
with TestPipeline() as p:
# json used for stable sortability
readback = p | avroio.ReadFromAvro(path + '*') | beam.Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
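# Editor's illustrative sketch (not part of the original test module): outside
# of tests the transforms exercised above are used in an ordinary pipeline.
# The file pattern, output prefix and schema arguments below are hypothetical
# placeholders supplied by the caller.
def _example_avro_round_trip(input_pattern, output_prefix, schema):
  with beam.Pipeline() as p:
    records = p | avroio.ReadFromAvro(input_pattern)
    # pylint: disable=expression-not-assigned
    records | avroio.WriteToAvro(output_prefix, schema)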
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
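# Editor's illustrative sketch (not part of the original file): _AddTest is how
# the parameterized cases generated at the bottom of this file get attached to
# the test classes. The toy class and factory below are hypothetical and are
# never invoked by this module.
def _ExampleAddTest():
  class _ToyTest(test.TestCase):
    pass
  def _MakeCase(value):
    def Test(self):
      self.assertEqual(value, value)
    return Test
  for v in (1, 2):
    _AddTest(_ToyTest, "Toy", "value_%d" % v, _MakeCase(v))
  return _ToyTest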
class SelfAdjointEigTest(test.TestCase):
def testWrongDimensions(self):
# The input to self_adjoint_eig should be a tensor of
# at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.self_adjoint_eig(vector)
def testConcurrentExecutesWithoutError(self):
all_ops = []
with self.session(use_gpu=True) as sess:
for compute_v_ in True, False:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
if compute_v_:
e1, v1 = linalg_ops.self_adjoint_eig(matrix1)
e2, v2 = linalg_ops.self_adjoint_eig(matrix2)
all_ops += [e1, v1, e2, v2]
else:
e1 = linalg_ops.self_adjoint_eigvals(matrix1)
e2 = linalg_ops.self_adjoint_eigvals(matrix2)
all_ops += [e1, e2]
val = sess.run(all_ops)
self.assertAllEqual(val[0], val[2])
# The algorithm is slightly different for compute_v being True and False,
# so require approximate equality only here.
self.assertAllClose(val[2], val[4])
self.assertAllEqual(val[4], val[5])
self.assertAllEqual(val[1], val[3])
def testMatrixThatFailsWhenFlushingDenormsToZero(self):
# Test a 32x32 matrix which is known to fail if denorm floats are flushed to
# zero.
matrix = np.genfromtxt(
test.test_src_dir_path(
"python/kernel_tests/testdata/"
"self_adjoint_eig_fail_if_denorms_flushed.txt")).astype(np.float32)
self.assertEqual(matrix.shape, (32, 32))
matrix_tensor = constant_op.constant(matrix)
with self.session(use_gpu=True) as sess:
(e, v) = sess.run(linalg_ops.self_adjoint_eig(matrix_tensor))
self.assertEqual(e.size, 32)
self.assertAllClose(
np.matmul(v, v.transpose()), np.eye(32, dtype=np.float32), atol=2e-3)
self.assertAllClose(matrix,
np.matmul(np.matmul(v, np.diag(e)), v.transpose()))
def SortEigenDecomposition(e, v):
if v.ndim < 2:
return e, v
else:
perm = np.argsort(e, -1)
return np.take(e, perm, -1), np.take(v, perm, -1)
def EquilibrateEigenVectorPhases(x, y):
"""Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`.
Eigenvectors are only unique up to an arbitrary phase. This function rotates x
  such that it matches y. Precondition: The columns of x and y differ by a
multiplicative complex phase factor only.
Args:
x: `np.ndarray` with Eigenvectors
y: `np.ndarray` with Eigenvectors
Returns:
`np.ndarray` containing an equilibrated version of x.
"""
phases = np.sum(np.conj(x) * y, -2, keepdims=True)
phases /= np.abs(phases)
return phases * x
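# Editor's illustrative sketch (not part of the original file): when the
# columns of y equal those of x up to per-column complex phases, equilibrating
# x against y recovers y up to rounding error.
def _ExampleEquilibratePhases():
  x = np.linalg.qr(np.random.randn(4, 4) + 1j * np.random.randn(4, 4))[0]
  y = x * np.exp(1j * np.array([0.1, 0.2, 0.3, 0.4]))
  np.testing.assert_allclose(EquilibrateEigenVectorPhases(x, y), y)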
def _GetSelfAdjointEigTest(dtype_, shape_, compute_v_):
def CompareEigenVectors(self, x, y, tol):
x = EquilibrateEigenVectorPhases(x, y)
self.assertAllClose(x, y, atol=tol)
def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
num_batches = int(np.prod(x_e.shape[:-1]))
n = x_e.shape[-1]
x_e = np.reshape(x_e, [num_batches] + [n])
x_v = np.reshape(x_v, [num_batches] + [n, n])
y_e = np.reshape(y_e, [num_batches] + [n])
y_v = np.reshape(y_v, [num_batches] + [n, n])
for i in range(num_batches):
x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
CompareEigenVectors(self, x_vi, y_vi, tol)
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
atol = 1e-4
else:
atol = 1e-12
np_e, np_v = np.linalg.eigh(a)
with self.session(use_gpu=True):
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(constant_op.constant(a))
# Check that V*diag(E)*V^T is close to A.
a_ev = math_ops.matmul(
math_ops.matmul(tf_v, array_ops.matrix_diag(tf_e)),
tf_v,
adjoint_b=True)
self.assertAllClose(a_ev.eval(), a, atol=atol)
# Compare to numpy.linalg.eigh.
CompareEigenDecompositions(self, np_e, np_v,
tf_e.eval(), tf_v.eval(), atol)
else:
tf_e = linalg_ops.self_adjoint_eigvals(constant_op.constant(a))
self.assertAllClose(
np.sort(np_e, -1), np.sort(tf_e.eval(), -1), atol=atol)
return Test
class SelfAdjointEigGradTest(test.TestCase):
pass # Filled in below
def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
def Test(self):
np.random.seed(1)
n = shape_[-1]
batch_shape = shape_[:-2]
np_dtype = dtype_.as_numpy_dtype
a = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
a += np.conj(a.T)
a = np.tile(a, batch_shape + (1, 1))
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(np_dtype).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
tol = 1e-2
else:
tol = 1e-7
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
if compute_v_:
tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
# (complex) Eigenvectors are only unique up to an arbitrary phase
# We normalize the vectors such that the first component has phase 0.
top_rows = tf_v[..., 0:1, :]
if tf_a.dtype.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
tf_v *= phase
outputs = [tf_e, tf_v]
else:
tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
outputs = [tf_e]
for b in outputs:
x_init = np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
if dtype_.is_complex:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
x_init += np.conj(x_init.T)
x_init = np.tile(x_init, batch_shape + (1, 1))
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == "__main__":
for compute_v in True, False:
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
dtypes_lib.complex128):
for size in 1, 2, 5, 10:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(size, size) < 10):
shape = batch_dims + (size, size)
name = "%s_%s_%s" % (dtype, "_".join(map(str, shape)), compute_v)
_AddTest(SelfAdjointEigTest, "SelfAdjointEig", name,
_GetSelfAdjointEigTest(dtype, shape, compute_v))
_AddTest(SelfAdjointEigGradTest, "SelfAdjointEigGrad", name,
_GetSelfAdjointEigGradTest(dtype, shape, compute_v))
test.main()
|
|
import functools
import os
import pkgutil
import sys
from collections import OrderedDict, defaultdict
from contextlib import suppress
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand, CommandError, CommandParser, handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload
from django.utils.encoding import force_text
def find_commands(management_dir):
"""
Given a path to a management directory, return a list of all the command
names that are available.
"""
command_dir = os.path.join(management_dir, 'commands')
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')]
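# Editor's illustrative sketch (not part of the original module): pointed at
# this package's own directory (exactly what get_commands() does below),
# find_commands() returns the built-in command names; the precise list depends
# on the installed Django version.
def _example_core_command_names():
    return find_commands(__path__[0])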
def load_command_class(app_name, name):
"""
Given a command name and an application name, return the Command
class instance. Allow all errors raised by the import process
(ImportError, AttributeError) to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@functools.lru_cache(maxsize=None)
def get_commands():
"""
Return a dictionary mapping command names to their callback applications.
Look for a management.commands package in django.core, and in each
installed application -- if a commands package exists, register all
commands in that package.
Core commands are always included. If a settings module has been
specified, also include user-defined commands.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: 'django.core' for name in find_commands(__path__[0])}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update({name: app_config.name for name in find_commands(path)})
return commands
def call_command(command_name, *args, **options):
"""
Call the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
`command_name` may be a string or a command object. Using a string is
preferred unless the command object is required for further processing or
testing.
Some examples:
call_command('migrate')
call_command('shell', plain=True)
call_command('sqlmigrate', 'myapp')
from django.core.management.commands import flush
cmd = flush.Command()
call_command(cmd, verbosity=0, interactive=False)
# Do something with cmd ...
"""
if isinstance(command_name, BaseCommand):
# Command object passed in.
command = command_name
command_name = command.__class__.__module__.split('.')[-1]
else:
# Load the command object by name.
try:
app_name = get_commands()[command_name]
except KeyError:
raise CommandError("Unknown command: %r" % command_name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, command_name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', command_name)
# Use the `dest` option name from the parser option
opt_mapping = {
min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest
for s_opt in parser._actions if s_opt.option_strings
}
arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
defaults = parser.parse_args(args=[force_text(a) for a in args])
defaults = dict(defaults._get_kwargs(), **arg_options)
# Raise an error if any unknown options were passed.
stealth_options = set(command.base_stealth_options + command.stealth_options)
dest_parameters = {action.dest for action in parser._actions}
valid_options = (dest_parameters | stealth_options).union(opt_mapping)
unknown_options = set(options) - valid_options
if unknown_options:
raise TypeError(
"Unknown option(s) for %s command: %s. "
"Valid options are: %s." % (
command_name,
', '.join(sorted(unknown_options)),
', '.join(sorted(valid_options)),
)
)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop('args', ())
if 'skip_checks' not in options:
defaults['skip_checks'] = True
return command.execute(*args, **defaults)
class ManagementUtility:
"""
Encapsulate the logic of the django-admin and manage.py utilities.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
if self.prog_name == '__main__.py':
self.prog_name = 'python -m django'
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""Return the script's main help text, as a string."""
if commands_only:
usage = sorted(get_commands())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = defaultdict(lambda: [])
for name, app in get_commands().items():
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Try to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get('DJANGO_SETTINGS_MODULE'):
# If `subcommand` is missing due to misconfigured settings, the
# following line will retrigger an ImproperlyConfigured exception
# (get_commands() swallows the original one) so the user is
# informed about it.
settings.INSTALLED_APPS
else:
sys.stderr.write("No Django settings specified.\n")
sys.stderr.write(
"Unknown command: %r\nType '%s help' for usage.\n"
% (subcommand, self.prog_name)
)
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMREPLY` variable and
treated as completion suggestions. `COMREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the CLI input. Please refer to the BASH
        man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', False)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: add the names of installed apps to options
if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
with suppress(ImportError):
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options.extend((app_config.label, 0) for app_config in app_configs)
parser = subcommand_cls.create_parser('', cwords[0])
options.extend(
(min(s_opt.option_strings), s_opt.nargs != 0)
for s_opt in parser._actions if s_opt.option_strings
)
# filter out previously specified options from available options
prev_opts = {x.split('=')[0] for x in cwords[1:cword - 1]}
options = (opt for opt in options if opt[0] not in prev_opts)
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for opt_label, require_arg in options:
# append '=' to options which require args
if require_arg:
opt_label += '='
print(opt_label)
# Exit code of the bash completion function is never passed back to
# the user, so it's safe to always exit with 0.
# For more details see #25420.
sys.exit(0)
def execute(self):
"""
Given the command-line arguments, figure out which subcommand is being
run, create a parser appropriate to that command, and run it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
with suppress(CommandError): # Ignore any option errors at this point.
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
if settings.configured:
# Start the auto-reloading dev server even if the code is broken.
# The hardcoded condition is a code smell but we can't rely on a
# flag on the command class because we haven't located it yet.
if subcommand == 'runserver' and '--noreload' not in self.argv:
try:
autoreload.check_errors(django.setup)()
except Exception:
# The exception will be raised later in the child process
# started by the autoreloader. Pretend it didn't happen by
# loading an empty list of applications.
apps.all_models = defaultdict(OrderedDict)
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.ready = True
# Remove options not compatible with the built-in runserver
# (e.g. options for the contrib.staticfiles' runserver).
# Changes here require manually testing as described in
# #27522.
_parser = self.fetch_command('runserver').create_parser('django', 'runserver')
_options, _args = _parser.parse_known_args(self.argv[2:])
for _arg in _args:
self.argv.remove(_arg)
# In all other cases, django.setup() is required to succeed.
else:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif len(options.args) < 1:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""Run a ManagementUtility."""
utility = ManagementUtility(argv)
utility.execute()
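# Illustrative usage (not part of the original module): a minimal manage.py
# typically delegates to execute_from_command_line; 'mysite.settings' is a
# placeholder for the project's settings module.
#
#     import os
#     import sys
#     from django.core.management import execute_from_command_line
#
#     if __name__ == '__main__':
#         os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
#         execute_from_command_line(sys.argv)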
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Identity service."""
from oslo_config import cfg
from oslo_log import log
from keystone.common import controller
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _, _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'resource_api')
class User(controller.V2Controller):
@controller.v2_deprecated
def get_user(self, context, user_id):
self.assert_admin(context)
ref = self.identity_api.get_user(user_id)
return {'user': self.v3_to_v2_user(ref)}
@controller.v2_deprecated
def get_users(self, context):
        # NOTE(termie): I can't imagine that this really wants all the data
# about every single user in the system...
if 'name' in context['query_string']:
return self.get_user_by_name(
context, context['query_string'].get('name'))
self.assert_admin(context)
user_list = self.identity_api.list_users(
CONF.identity.default_domain_id)
return {'users': self.v3_to_v2_user(user_list)}
@controller.v2_deprecated
def get_user_by_name(self, context, user_name):
self.assert_admin(context)
ref = self.identity_api.get_user_by_name(
user_name, CONF.identity.default_domain_id)
return {'user': self.v3_to_v2_user(ref)}
# CRUD extension
@controller.v2_deprecated
def create_user(self, context, user):
user = self._normalize_OSKSADM_password_on_request(user)
user = self.normalize_username_in_request(user)
user = self._normalize_dict(user)
self.assert_admin(context)
if 'name' not in user or not user['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
if 'enabled' in user and not isinstance(user['enabled'], bool):
msg = _('Enabled field must be a boolean')
raise exception.ValidationError(message=msg)
default_project_id = user.pop('tenantId', None)
if default_project_id is not None:
# Check to see if the project is valid before moving on.
self.resource_api.get_project(default_project_id)
user['default_project_id'] = default_project_id
# The manager layer will generate the unique ID for users
user_ref = self._normalize_domain_id(context, user.copy())
new_user_ref = self.v3_to_v2_user(
self.identity_api.create_user(user_ref))
if default_project_id is not None:
self.assignment_api.add_user_to_project(default_project_id,
new_user_ref['id'])
return {'user': new_user_ref}
@controller.v2_deprecated
def update_user(self, context, user_id, user):
# NOTE(termie): this is really more of a patch than a put
user = self.normalize_username_in_request(user)
self.assert_admin(context)
if 'enabled' in user and not isinstance(user['enabled'], bool):
msg = _('Enabled field should be a boolean')
raise exception.ValidationError(message=msg)
default_project_id = user.pop('tenantId', None)
if default_project_id is not None:
user['default_project_id'] = default_project_id
old_user_ref = self.v3_to_v2_user(
self.identity_api.get_user(user_id))
# Check whether a tenant is being added or changed for the user.
# Catch the case where the tenant is being changed for a user and also
# where a user previously had no tenant but a tenant is now being
# added for the user.
if (('tenantId' in old_user_ref and
old_user_ref['tenantId'] != default_project_id and
default_project_id is not None) or
('tenantId' not in old_user_ref and
default_project_id is not None)):
# Make sure the new project actually exists before we perform the
# user update.
self.resource_api.get_project(default_project_id)
user_ref = self.v3_to_v2_user(
self.identity_api.update_user(user_id, user))
# If 'tenantId' is in either ref, we might need to add or remove the
# user from a project.
if 'tenantId' in user_ref or 'tenantId' in old_user_ref:
if user_ref['tenantId'] != old_user_ref.get('tenantId'):
if old_user_ref.get('tenantId'):
try:
member_role_id = CONF.member_role_id
self.assignment_api.remove_role_from_user_and_project(
user_id, old_user_ref['tenantId'], member_role_id)
except exception.NotFound:
# NOTE(morganfainberg): This is not a critical error it
# just means that the user cannot be removed from the
# old tenant. This could occur if roles aren't found
# or if the project is invalid or if there are no roles
# for the user on that project.
msg = _LW('Unable to remove user %(user)s from '
'%(tenant)s.')
LOG.warning(msg, {'user': user_id,
'tenant': old_user_ref['tenantId']})
if user_ref['tenantId']:
try:
self.assignment_api.add_user_to_project(
user_ref['tenantId'], user_id)
except exception.Conflict:
# We are already a member of that tenant
pass
except exception.NotFound:
# NOTE(morganfainberg): Log this and move on. This is
# not the end of the world if we can't add the user to
# the appropriate tenant. Most of the time this means
                        # that the project is invalid or roles are somehow
# incorrect. This shouldn't prevent the return of the
# new ref.
msg = _LW('Unable to add user %(user)s to %(tenant)s.')
LOG.warning(msg, {'user': user_id,
'tenant': user_ref['tenantId']})
return {'user': user_ref}
@controller.v2_deprecated
def delete_user(self, context, user_id):
self.assert_admin(context)
self.identity_api.delete_user(user_id)
@controller.v2_deprecated
def set_user_enabled(self, context, user_id, user):
return self.update_user(context, user_id, user)
@controller.v2_deprecated
def set_user_password(self, context, user_id, user):
user = self._normalize_OSKSADM_password_on_request(user)
return self.update_user(context, user_id, user)
@staticmethod
def _normalize_OSKSADM_password_on_request(ref):
"""Sets the password from the OS-KSADM Admin Extension.
The OS-KSADM Admin Extension documentation says that
`OS-KSADM:password` can be used in place of `password`.
"""
if 'OS-KSADM:password' in ref:
ref['password'] = ref.pop('OS-KSADM:password')
return ref
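# Illustrative sketch (not part of the original module): the OS-KSADM
# normalization above only renames the extension key on the request body.
#
#     User._normalize_OSKSADM_password_on_request(
#         {'name': 'alice', 'OS-KSADM:password': 'secret'})
#     # -> {'name': 'alice', 'password': 'secret'}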
@dependency.requires('identity_api')
class UserV3(controller.V3Controller):
collection_name = 'users'
member_name = 'user'
def __init__(self):
super(UserV3, self).__init__()
self.get_member_from_driver = self.identity_api.get_user
def _check_user_and_group_protection(self, context, prep_info,
user_id, group_id):
ref = {}
ref['user'] = self.identity_api.get_user(user_id)
ref['group'] = self.identity_api.get_group(group_id)
self.check_protection(context, prep_info, ref)
@controller.protected()
def create_user(self, context, user):
self._require_attribute(user, 'name')
# The manager layer will generate the unique ID for users
ref = self._normalize_dict(user)
ref = self._normalize_domain_id(context, ref)
ref = self.identity_api.create_user(ref)
return UserV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name')
def list_users(self, context, filters):
hints = UserV3.build_driver_hints(context, filters)
refs = self.identity_api.list_users(
domain_scope=self._get_domain_id_for_list_request(context),
hints=hints)
return UserV3.wrap_collection(context, refs, hints=hints)
@controller.filterprotected('domain_id', 'enabled', 'name')
def list_users_in_group(self, context, filters, group_id):
hints = UserV3.build_driver_hints(context, filters)
refs = self.identity_api.list_users_in_group(group_id, hints=hints)
return UserV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_user(self, context, user_id):
ref = self.identity_api.get_user(user_id)
return UserV3.wrap_member(context, ref)
def _update_user(self, context, user_id, user):
self._require_matching_id(user_id, user)
self._require_matching_domain_id(
user_id, user, self.identity_api.get_user)
ref = self.identity_api.update_user(user_id, user)
return UserV3.wrap_member(context, ref)
@controller.protected()
def update_user(self, context, user_id, user):
return self._update_user(context, user_id, user)
@controller.protected(callback=_check_user_and_group_protection)
def add_user_to_group(self, context, user_id, group_id):
self.identity_api.add_user_to_group(user_id, group_id)
@controller.protected(callback=_check_user_and_group_protection)
def check_user_in_group(self, context, user_id, group_id):
return self.identity_api.check_user_in_group(user_id, group_id)
@controller.protected(callback=_check_user_and_group_protection)
def remove_user_from_group(self, context, user_id, group_id):
self.identity_api.remove_user_from_group(user_id, group_id)
@controller.protected()
def delete_user(self, context, user_id):
return self.identity_api.delete_user(user_id)
@controller.protected()
def change_password(self, context, user_id, user):
original_password = user.get('original_password')
if original_password is None:
raise exception.ValidationError(target='user',
attribute='original_password')
password = user.get('password')
if password is None:
raise exception.ValidationError(target='user',
attribute='password')
try:
self.identity_api.change_password(
context, user_id, original_password, password)
except AssertionError:
raise exception.Unauthorized()
@dependency.requires('identity_api')
class GroupV3(controller.V3Controller):
collection_name = 'groups'
member_name = 'group'
def __init__(self):
super(GroupV3, self).__init__()
self.get_member_from_driver = self.identity_api.get_group
@controller.protected()
def create_group(self, context, group):
self._require_attribute(group, 'name')
# The manager layer will generate the unique ID for groups
ref = self._normalize_dict(group)
ref = self._normalize_domain_id(context, ref)
ref = self.identity_api.create_group(ref)
return GroupV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'name')
def list_groups(self, context, filters):
hints = GroupV3.build_driver_hints(context, filters)
refs = self.identity_api.list_groups(
domain_scope=self._get_domain_id_for_list_request(context),
hints=hints)
return GroupV3.wrap_collection(context, refs, hints=hints)
@controller.filterprotected('name')
def list_groups_for_user(self, context, filters, user_id):
hints = GroupV3.build_driver_hints(context, filters)
refs = self.identity_api.list_groups_for_user(user_id, hints=hints)
return GroupV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_group(self, context, group_id):
ref = self.identity_api.get_group(group_id)
return GroupV3.wrap_member(context, ref)
@controller.protected()
def update_group(self, context, group_id, group):
self._require_matching_id(group_id, group)
self._require_matching_domain_id(
group_id, group, self.identity_api.get_group)
ref = self.identity_api.update_group(group_id, group)
return GroupV3.wrap_member(context, ref)
@controller.protected()
def delete_group(self, context, group_id):
self.identity_api.delete_group(group_id)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import weakref
import numpy as np
from .. import gloo
from .. import app
from .visuals import VisualNode
from ..visuals.transforms import TransformSystem
from ..color import Color
from ..util import logger, Frozen
from ..util.profiler import Profiler
from .subscene import SubScene
from .events import SceneMouseEvent
from .widgets import Widget
class SceneCanvas(app.Canvas, Frozen):
"""A Canvas that automatically draws the contents of a scene
Parameters
----------
title : str
The widget title
size : (width, height)
The size of the window.
position : (x, y)
The position of the window in screen coordinates.
show : bool
Whether to show the widget immediately. Default False.
autoswap : bool
Whether to swap the buffers automatically after a draw event.
Default True. If True, the ``swap_buffers`` Canvas method will
be called last (by default) by the ``canvas.draw`` event handler.
app : Application | str
Give vispy Application instance to use as a backend.
(vispy.app is used by default.) If str, then an application
using the chosen backend (e.g., 'pyglet') will be created.
Note the canvas application can be accessed at ``canvas.app``.
create_native : bool
Whether to create the widget immediately. Default True.
vsync : bool
Enable vertical synchronization.
resizable : bool
Allow the window to be resized.
decorate : bool
Decorate the window. Default True.
fullscreen : bool | int
If False, windowed mode is used (default). If True, the default
monitor is used. If int, the given monitor number is used.
config : dict
A dict with OpenGL configuration options, which is combined
with the default configuration options and used to initialize
the context. See ``canvas.context.config`` for possible
options.
shared : Canvas | GLContext | None
An existing canvas or context to share OpenGL objects with.
keys : str | dict | None
Default key mapping to use. If 'interactive', escape and F11 will
close the canvas and toggle full-screen mode, respectively.
If dict, maps keys to functions. If dict values are strings,
they are assumed to be ``Canvas`` methods, otherwise they should
be callable.
parent : widget-object
The parent widget if this makes sense for the used backend.
dpi : float | None
Resolution in dots-per-inch to use for the canvas. If dpi is None,
then the value will be determined by querying the global config first,
and then the operating system.
always_on_top : bool
If True, try to create the window in always-on-top mode.
px_scale : int > 0
A scale factor to apply between logical and physical pixels in addition
to the actual scale factor determined by the backend. This option
allows the scale factor to be adjusted for testing.
bgcolor : Color
The background color to use.
See also
--------
vispy.app.Canvas
Notes
-----
Receives the following events:
* initialize
* resize
* draw
* mouse_press
* mouse_release
* mouse_double_click
* mouse_move
* mouse_wheel
* key_press
* key_release
* stylus
* touch
* close
    The ordering of the mouse_double_click, mouse_press, and mouse_release
    events is not guaranteed to be consistent between backends. Only certain
backends natively support double-clicking (currently Qt and WX); on other
backends, they are detected manually with a fixed time delay.
This can cause problems with accessibility, as increasing the OS detection
time or using a dedicated double-click button will not be respected.
"""
def __init__(self, title='VisPy canvas', size=(800, 600), position=None,
show=False, autoswap=True, app=None, create_native=True,
vsync=False, resizable=True, decorate=True, fullscreen=False,
config=None, shared=None, keys=None, parent=None, dpi=None,
always_on_top=False, px_scale=1, bgcolor='black'):
self._scene = None
# A default widget that follows the shape of the canvas
self._central_widget = None
self._draw_order = weakref.WeakKeyDictionary()
self._drawing = False
self._update_pending = False
self._fb_stack = []
self._vp_stack = []
self._mouse_handler = None
self.transforms = TransformSystem(canvas=self)
self._bgcolor = Color(bgcolor).rgba
# Set to True to enable sending mouse events even when no button is
# pressed. Disabled by default because it is very expensive. Also
# private for now because this behavior / API needs more thought.
self._send_hover_events = False
super(SceneCanvas, self).__init__(
title, size, position, show, autoswap, app, create_native, vsync,
resizable, decorate, fullscreen, config, shared, keys, parent, dpi,
always_on_top, px_scale)
self.events.mouse_press.connect(self._process_mouse_event)
self.events.mouse_move.connect(self._process_mouse_event)
self.events.mouse_release.connect(self._process_mouse_event)
self.events.mouse_wheel.connect(self._process_mouse_event)
self.scene = SubScene()
self.freeze()
@property
def scene(self):
""" The SubScene object that represents the root node of the
scene graph to be displayed.
"""
return self._scene
@scene.setter
def scene(self, node):
oldscene = self._scene
self._scene = node
if oldscene is not None:
oldscene._set_canvas(None)
oldscene.events.children_change.disconnect(self._update_scenegraph)
if node is not None:
node._set_canvas(self)
node.events.children_change.connect(self._update_scenegraph)
@property
def central_widget(self):
""" Returns the default widget that occupies the entire area of the
canvas.
"""
if self._central_widget is None:
self._central_widget = Widget(size=self.size, parent=self.scene)
return self._central_widget
@property
def bgcolor(self):
return Color(self._bgcolor)
@bgcolor.setter
def bgcolor(self, color):
self._bgcolor = Color(color).rgba
if hasattr(self, '_backend'):
self.update()
def update(self, node=None):
"""Update the scene
Parameters
----------
node : instance of Node
Not used.
"""
# TODO: use node bounds to keep track of minimum drawable area
if self._drawing:
return
# Keep things civil in the node update system. Once an update
# has been scheduled, there is no need to flood the event queue
# of the backend with additional updates.
if not self._update_pending:
self._update_pending = True
super(SceneCanvas, self).update()
def on_draw(self, event):
"""Draw handler
Parameters
----------
event : instance of Event
The draw event.
"""
if self._scene is None:
return # Can happen on initialization
logger.debug('Canvas draw')
# Now that a draw event is going to be handled, open up the
# scheduling of further updates
self._update_pending = False
self._draw_scene()
def render(self, region=None, size=None, bgcolor=None, crop=None):
"""Render the scene to an offscreen buffer and return the image array.
Parameters
----------
region : tuple | None
Specifies the region of the canvas to render. Format is
(x, y, w, h). By default, the entire canvas is rendered.
size : tuple | None
Specifies the size of the image array to return. If no size is
given, then the size of the *region* is used, multiplied by the
pixel scaling factor of the canvas (see `pixel_scale`). This
argument allows the scene to be rendered at resolutions different
from the native canvas resolution.
bgcolor : instance of Color | None
The background color to use.
crop : array-like | None
If specified it determines the pixels read from the framebuffer.
In the format (x, y, w, h), relative to the region being rendered.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
"""
self.set_current()
# Set up a framebuffer to render to
offset = (0, 0) if region is None else region[:2]
csize = self.size if region is None else region[2:]
s = self.pixel_scale
size = tuple([x * s for x in csize]) if size is None else size
fbo = gloo.FrameBuffer(color=gloo.RenderBuffer(size[::-1]),
depth=gloo.RenderBuffer(size[::-1]))
self.push_fbo(fbo, offset, csize)
try:
self._draw_scene(bgcolor=bgcolor)
return fbo.read(crop=crop)
finally:
self.pop_fbo()
def _draw_scene(self, bgcolor=None):
if bgcolor is None:
bgcolor = self._bgcolor
self.context.clear(color=bgcolor, depth=True)
self.draw_visual(self.scene)
def draw_visual(self, visual, event=None):
""" Draw a visual and its children to the canvas or currently active
framebuffer.
Parameters
----------
visual : Visual
The visual to draw
event : None or DrawEvent
Optionally specifies the original canvas draw event that initiated
this draw.
"""
prof = Profiler()
# make sure this canvas's context is active
self.set_current()
try:
self._drawing = True
# get order to draw visuals
if visual not in self._draw_order:
self._draw_order[visual] = self._generate_draw_order()
order = self._draw_order[visual]
# draw (while avoiding branches with visible=False)
stack = []
invisible_node = None
for node, start in order:
if start:
stack.append(node)
if invisible_node is None:
if not node.visible:
# disable drawing until we exit this node's subtree
invisible_node = node
else:
if hasattr(node, 'draw'):
node.draw()
prof.mark(str(node))
else:
if node is invisible_node:
invisible_node = None
stack.pop()
finally:
self._drawing = False
def _generate_draw_order(self, node=None):
"""Return a list giving the order to draw visuals.
Each node appears twice in the list--(node, True) appears before the
node's children are drawn, and (node, False) appears after.
"""
if node is None:
node = self._scene
order = [(node, True)]
children = node.children
children.sort(key=lambda ch: ch.order)
for ch in children:
order.extend(self._generate_draw_order(ch))
order.append((node, False))
return order
def _update_scenegraph(self, event):
"""Called when topology of scenegraph has changed.
"""
self._draw_order.clear()
self.update()
def _process_mouse_event(self, event):
prof = Profiler() # noqa
deliver_types = ['mouse_press', 'mouse_wheel']
if self._send_hover_events:
deliver_types += ['mouse_move']
picked = self._mouse_handler
if picked is None:
if event.type in deliver_types:
picked = self.visual_at(event.pos)
# No visual to handle this event; bail out now
if picked is None:
return
# Create an event to pass to the picked visual
scene_event = SceneMouseEvent(event=event, visual=picked)
# Deliver the event
if picked == self._mouse_handler:
# If we already have a mouse handler, then no other node may
# receive the event
if event.type == 'mouse_release':
self._mouse_handler = None
getattr(picked.events, event.type)(scene_event)
else:
# If we don't have a mouse handler, then pass the event through
# the chain of parents until a node accepts the event.
while picked is not None:
getattr(picked.events, event.type)(scene_event)
if scene_event.handled:
if event.type == 'mouse_press':
self._mouse_handler = picked
break
if event.type in deliver_types:
# events that are not handled get passed to parent
picked = picked.parent
scene_event.visual = picked
else:
picked = None
# If something in the scene handled the scene_event, then we mark
# the original event accordingly.
event.handled = scene_event.handled
def visual_at(self, pos):
"""Return the visual at a given position
Parameters
----------
pos : tuple
The position in logical coordinates to query.
Returns
-------
visual : instance of Visual | None
The visual at the position, if it exists.
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
fbpos = tr.map(pos)[:2]
try:
id_ = self._render_picking((fbpos[0], fbpos[1], 1, 1))
vis = VisualNode._visual_ids.get(id_[0, 0], None)
except RuntimeError:
# Don't have read_pixels() support for IPython. Fall back to
# bounds checking.
return self._visual_bounds_at(pos)
return vis
def _visual_bounds_at(self, pos, node=None):
"""Find a visual whose bounding rect encompasses *pos*.
"""
if node is None:
node = self.scene
for ch in node.children:
hit = self._visual_bounds_at(pos, ch)
if hit is not None:
return hit
if (not isinstance(node, VisualNode) or not node.visible or
not node.interactive):
return None
        # Let nodes know we are picking so they can handle any special cases
        # (e.g. picking meshes). We can't do this any earlier, or child nodes
        # may be considered visible, which would cause the 'if' statement
        # above to pass when it shouldn't.
node.picking = True
bounds = [node.bounds(axis=i) for i in range(2)]
node.picking = False
if None in bounds:
return None
tr = self.scene.node_transform(node).inverse
corners = np.array([
[bounds[0][0], bounds[1][0]],
[bounds[0][0], bounds[1][1]],
[bounds[0][1], bounds[1][0]],
[bounds[0][1], bounds[1][1]]])
bounds = tr.map(corners)
xhit = bounds[:, 0].min() < pos[0] < bounds[:, 0].max()
yhit = bounds[:, 1].min() < pos[1] < bounds[:, 1].max()
if xhit and yhit:
return node
def visuals_at(self, pos, radius=10):
"""Return a list of visuals within *radius* pixels of *pos*.
Visuals are sorted by their proximity to *pos*.
Parameters
----------
pos : tuple
(x, y) position at which to find visuals.
radius : int
Distance away from *pos* to search for visuals.
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
pos = tr.map(pos)[:2]
id = self._render_picking((pos[0]-radius, pos[1]-radius,
radius * 2 + 1, radius * 2 + 1))
ids = []
seen = set()
for i in range(radius):
subr = id[radius-i:radius+i+1, radius-i:radius+i+1]
subr_ids = set(list(np.unique(subr)))
ids.extend(list(subr_ids - seen))
seen |= subr_ids
visuals = [VisualNode._visual_ids.get(x, None) for x in ids]
return [v for v in visuals if v is not None]
def _render_picking(self, crop):
"""Render the scene in picking mode, returning a 2D array of visual
IDs in the area specified by crop.
Parameters
----------
crop : array-like
The crop (x, y, w, h) of the framebuffer to read. For picking the
full canvas is rendered and cropped on read as it is much faster
than triggering transform updates across the scene with every
click.
"""
try:
self._scene.picking = True
img = self.render(bgcolor=(0, 0, 0, 0), crop=crop)
finally:
self._scene.picking = False
img = img.astype('int32') * [2**0, 2**8, 2**16, 2**24]
id_ = img.sum(axis=2).astype('int32')
return id_
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size)
def on_close(self, event):
"""Close event handler
Parameters
----------
event : instance of Event
The event.
"""
self.events.mouse_press.disconnect(self._process_mouse_event)
self.events.mouse_move.disconnect(self._process_mouse_event)
self.events.mouse_release.disconnect(self._process_mouse_event)
self.events.mouse_wheel.disconnect(self._process_mouse_event)
# -------------------------------------------------- transform handling ---
def push_viewport(self, viewport):
""" Push a viewport (x, y, w, h) on the stack. Values must be integers
relative to the active framebuffer.
Parameters
----------
viewport : tuple
The viewport as (x, y, w, h).
"""
vp = list(viewport)
# Normalize viewport before setting;
if vp[2] < 0:
vp[0] += vp[2]
vp[2] *= -1
if vp[3] < 0:
vp[1] += vp[3]
vp[3] *= -1
self._vp_stack.append(vp)
try:
self.context.set_viewport(*vp)
except Exception:
self._vp_stack.pop()
raise
self._update_transforms()
def pop_viewport(self):
""" Pop a viewport from the stack.
"""
vp = self._vp_stack.pop()
# Activate latest
if len(self._vp_stack) > 0:
self.context.set_viewport(*self._vp_stack[-1])
else:
self.context.set_viewport(0, 0, *self.physical_size)
self._update_transforms()
return vp
def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
            The framebuffer object.
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
"""
self._fb_stack.append((fbo, offset, csize))
try:
fbo.activate()
h, w = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h))
except Exception:
self._fb_stack.pop()
raise
self._update_transforms()
def pop_fbo(self):
""" Pop an FBO from the stack.
"""
fbo = self._fb_stack.pop()
fbo[0].deactivate()
self.pop_viewport()
if len(self._fb_stack) > 0:
old_fbo = self._fb_stack[-1]
old_fbo[0].activate()
self._update_transforms()
return fbo
def _current_framebuffer(self):
""" Return (fbo, origin, canvas_size) for the current
FBO on the stack, or for the canvas if there is no FBO.
"""
if len(self._fb_stack) == 0:
return None, (0, 0), self.size
else:
return self._fb_stack[-1]
def _update_transforms(self):
"""Update the canvas's TransformSystem to correct for the current
canvas size, framebuffer, and viewport.
"""
if len(self._fb_stack) == 0:
fb_size = fb_rect = None
else:
fb, origin, fb_size = self._fb_stack[-1]
fb_rect = origin + fb_size
if len(self._vp_stack) == 0:
viewport = None
else:
viewport = self._vp_stack[-1]
self.transforms.configure(viewport=viewport, fbo_size=fb_size,
fbo_rect=fb_rect)
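# Illustrative usage sketch (not part of the original module). Only names
# defined above (SceneCanvas, central_widget, render) or imported here
# (vispy's app.run) are used; the argument values are placeholders.
#
#     canvas = SceneCanvas(title='demo', size=(640, 480), bgcolor='white',
#                          show=True)
#     root = canvas.central_widget       # default Widget filling the canvas
#     img = canvas.render()              # offscreen (h, w, 4) ubyte array
#     app.run()                          # enter the vispy event loop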
|
|
#!/usr/bin/env python
import os
import re
from genomicode import genefinder, timer, Matrix, matrixlib
from genomicode import parselib, filelib, arrayannot
# retrieve_all_dates
# retrieve_diseases
# Not quite a good idea.
datatype_match = {
'RSEM_genes' : (
'Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__' +
'RSEM_genes_normalized__data.Level_3'),
'RSEM_exons' : 'exon_expression__data.Level_3',
'RSEM_isoforms' : (
'Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__' +
'RSEM_isoforms_normalized__data.Level_3'),
'humanmethylation450' : (
'Merge_methylation__humanmethylation450__jhu_usc_edu__Level_3__' +
'within_bioassay_data_set_function__data.Level_3'),
'mirnaseq' : (
'Merge_mirnaseq__illuminahiseq_mirnaseq__bcgsc_ca__Level_3__' +
'miR_gene_expression__data.Level_3'),
'clinical' : 'Merge_Clinical.Level_1',
'rppa' : '.RPPA_AnnotateWithGene.Level_3',
'cnv_gistic2' : 'CopyNumber_Gistic2.Level_4',
'agilent' : [
'Merge_transcriptome__agilentg4502a_07_1__unc_edu__Level_3__unc_lowess_normalization_gene_level__data.Level_3',
'Merge_transcriptome__agilentg4502a_07_2__unc_edu__Level_3__unc_lowess_normalization_gene_level__data.Level_3',
],
'affymetrix' : (
'Merge_transcriptome__ht_hg_u133a__broad_mit_edu__Level_3__'
'gene_rma__data.Level_3'),
"mutation" : "Mutation_Packager_Calls.Level_3",
}
datatype2resource = {
"RSEM_genes" : "stddata",
"RSEM_exons" : "stddata",
"humanmethylation450" : "stddata",
"mirnaseq" : "stddata",
"clinical" : "stddata",
"rppa" : "stddata",
"RSEM_isoforms" : "stddata",
"cnv_gistic2" : "analyses",
"agilent" : "stddata",
"affymetrix" : "stddata",
"mutation" : "stddata",
}
resources = ["stddata", "analyses"]
URL2HTML = {}
def read_url(url):
global URL2HTML
import urllib2
if url not in URL2HTML:
#print "Reading %s" % url; sys.stdout.flush()
timer.wait(2, 'tcga')
response = urllib2.urlopen(url)
html = response.read()
URL2HTML[url] = html
return URL2HTML[url]
def retieve_dates_from_resource(resource):
    # Return a dictionary of date -> url for the given resource.
    # Each url points to an HTML page for the runs on a specific date. It has
    # a table of the diseases and HREFs to the data for each disease at that
    # date.
url = None
if resource == 'stddata':
dashboard_url = (
'https://confluence.broadinstitute.org/display/GDAC/Dashboard-Stddata')
url = 'http://gdac.broadinstitute.org/runs/info/stddata__runs_list.html'
elif resource == 'analyses':
dashboard_url =(
'https://confluence.broadinstitute.org/display/GDAC/Dashboard-Analyses')
url = 'http://gdac.broadinstitute.org/runs/info/analyses__runs_list.html'
else:
raise ValueError('we do not recognize the resource %s' %resource)
# The latest data is listed on the "Dashboard" page. The old data
# is shown on a different one. Download them separately.
all_dates = {}
urls = read_and_extract_urls(dashboard_url)
diseases, date = get_disease_lastest(urls,resource)
date = date.replace("_", "")
all_dates[date] = dashboard_url
# Read the old data.
html = read_url(url)
link_tags = parselib.get_tags_and_contents(html,'a')
for i in link_tags:
url = parselib.get_href(i)
date = re.search(r'[0-9]{4}_[0-9]{2}_[0-9]{2}', url)
if not date:
continue
date = date.group(0)
date = date.replace("_", "")
all_dates[date] = url
assert all_dates, "No dates found"
return all_dates
def retrieve_all_dates():
    # Return a dictionary of resource -> dates, where dates is a dict mapping
    # date -> url.  Each url points to an HTML page for the runs on a specific
    # date.  It has a table of the diseases and HREFs to the data for the
    # diseases at that date.
resources = ['stddata','analyses']
all_dates = {} # resource->date
all_dates_list = []
for resource in resources:
all_date_from_resource = retieve_dates_from_resource(resource)
all_dates[resource] = all_date_from_resource
all_dates_list.extend(all_date_from_resource.keys())
all_dates_list = list(set(all_dates_list))
all_dates_list = sorted(all_dates_list)
return all_dates, all_dates_list
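# Illustrative sketch (the concrete dates and URLs are placeholders):
# retrieve_all_dates() returns a per-resource mapping of run dates to index
# URLs, plus a sorted, de-duplicated list of every date seen.
#
#     all_dates, all_dates_list = retrieve_all_dates()
#     # all_dates      -> {'stddata': {'20140715': 'http://...'}, 'analyses': {...}}
#     # all_dates_list -> ['20140116', ..., '20140715']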
def extract_all_hrefs(html):
x = parselib.get_tags_and_contents(html,'a')
x = [parselib.get_href(x) for x in x]
x = [x for x in x if x]
x = [x for x in x if not x.startswith("#")]
return x
def retrieve_disease_resource(date,resource):
all_dates_from_resource = retieve_dates_from_resource(resource)
if date not in all_dates_from_resource:
return None
url = all_dates_from_resource[date]
# URL:
# http://gdac.broadinstitute.org/runs/stddata__2014_07_15/data/ACC/20140715
pattern = re.compile(
r'http://%s/runs/%s__[0-9_]{10}/data/([A-Z]+)/([0-9]{8})' % (
"gdac.broadinstitute.org",resource), re.IGNORECASE)
diseases = []
html = read_url(url)
for href in extract_all_hrefs(html):
m = pattern.match(href)
if not m:
continue
x = m.group(1)
diseases.append(x)
return diseases
def retrieve_diseases(date):
all_diseases = []
for resource in resources:
disease = retrieve_disease_resource(date,resource)
if disease:
all_diseases.extend(disease)
all_diseases = list(set(all_diseases))
assert all_diseases, "could not find diseases"
return all_diseases
def download_file(disease, date, datatype):
assert len(date) == 8
long_date = "%s_%s_%s" % (date[:4], date[4:6], date[6:8])
resource = datatype2resource[datatype]
# URL:
# http://gdac.broadinstitute.org/runs/stddata__2014_07_15/data/ACC/20140715
link = "http://%s/runs/%s__%s/data/%s/%s/" % (
"gdac.broadinstitute.org", resource, long_date, disease, date)
newlinks = get_all_datas_on_page(link)
if isinstance(datatype_match[datatype],str):
match_items = [datatype_match[datatype]]
elif isinstance(datatype_match[datatype],list):
match_items = datatype_match[datatype]
else:
raise ValueError('cannot recognize datatype in datatype_match dict')
resultlinks = []
for newlink in newlinks:
for match_item in match_items:
if match_item in newlink and disease+'-FFPE' not in newlink:
data = read_url(link+newlink)
with open(newlink, "wb") as code:
code.write(data)
                print 'finished downloading %s' % newlink
resultlinks.append(newlink)
if resultlinks:
return resultlinks
    raise AssertionError, "download failed"
def get_data_type_resource(disease, date, resource):
# URL:
# http://gdac.broadinstitute.org/runs/stddata__2014_07_15/data/ACC/20140715
long_date = "%s_%s_%s" % (date[:4], date[4:6], date[6:8])
link = "http://%s/runs/%s__%s/data/%s/%s/" % (
"gdac.broadinstitute.org", resource, long_date, disease, date)
newlinks = get_all_datas_on_page(link)
result = []
for newlink in newlinks:
for datatype in datatype_match:
if datatype2resource[datatype] != resource:
continue
# Can be string or list of strings.
names = datatype_match[datatype]
if type(names) is type(""):
names = [names]
found = False
for name in names:
if newlink.find(name) >= 0:
found = True
break
if not found:
continue
#if datatype_match[datatype] not in newlink:
# continue
# Brittle
if disease+'-FFPE' in newlink:
continue
assert datatype not in result, "dup datatype"
result.append(datatype)
return result
def get_data_type(disease, date):
assert len(date) == 8
#long_date = "%s_%s_%s" % (date[:4], date[4:6], date[6:8])
result = []
for resource in resources:
result.extend(get_data_type_resource(disease,date,resource))
return result
def get_disease_lastest(urls,resource):
# Return list of diseases, latest_date
diseases = []
pattern = re.compile(
r'http://gdac.broadinstitute.org/runs/%s__[0-9_]*/[A-Z]*.html'%resource)
lastest = None
for url in urls:
disease_link = re.findall(pattern, url)
for link in disease_link:
x = re.search(r'[A-Z]+', link)
if not x:
continue
disease = x.group(0)
assert disease not in diseases
diseases.append(disease)
x = re.search(r'[0-9]{4}_[0-9]{2}_[0-9]{2}', link)
assert x, "date not found"
date = x.group(0)
assert not lastest or lastest == date
lastest = date
return diseases, lastest
def get_all_datas_on_page(page):
html = read_url(page)
links = []
link_tags = parselib.get_tags_and_contents(html,'a')
for i in link_tags:
url = parselib.get_href(i)
# This is brittle.
if url.startswith("gdac.broadinstitute.org_") and url.endswith(".gz"):
links.append(url)
return links
def read_and_extract_urls(page):
    # Read a webpage and return a list of the URLs on that page.
html = read_url(page)
x = parselib.get_tags_and_contents(html,'a')
x = [parselib.get_href(x) for x in x]
x = [x for x in x if x]
return x
def merge_files(input_list, outfile):
"""input two files and merge, write to the outfile"""
import arrayio
assert len(input_list) == 2
A_file = input_list[0]
B_file = input_list[1]
M_A = arrayio.read(A_file)
M_B = arrayio.read(B_file)
assert arrayio.tab_delimited_format.is_matrix(M_A)
assert arrayio.tab_delimited_format.is_matrix(M_B)
[M_A, M_B] = matrixlib.align_rows(M_A, M_B)
    assert M_A.nrow() > 0, 'there are no common genes between the two files'
X = []
for i in range(M_A.dim()[0]):
x = M_A._X[i] + M_B._X[i]
X.append(x)
row_names = M_A._row_names
row_order = M_A._row_order
col_names = {}
for name in M_A._col_names:
if name not in M_B._col_names:
continue
newsample_list = []
for sample in M_B._col_names[name]:
if sample in M_A._col_names[name]:
newsample = sample + '_2'
else:
newsample = sample
newsample_list.append(newsample)
#x = M_A._col_names[name] + M_B._col_names[name]
x = M_A._col_names[name] + newsample_list
col_names[name] = x
M_c = Matrix.InMemoryMatrix(X, row_names, col_names, row_order)
handle = file(outfile,'w')
arrayio.tab_delimited_format.write(M_c, handle)
handle.close()
def merge_rppa_files(in_files, out_file):
import shutil
from genomicode import filelib
assert len(in_files) == 2
x1 = [x for x in in_files if x.endswith(".antibody_annotation.txt")]
x2 = [x for x in in_files if x.endswith(".rppa.txt")]
assert len(x1) == 1
assert len(x2) == 1
annotation_file = x1[0]
data_file = x2[0]
# Actually, just return the data_file. It contains all the
# information we need.
shutil.copy2(data_file, out_file)
return
# OV.antibody_annotation.txt
# Gene Name Composite Element REF
# YWHAB 14-3-3_beta
# YWHAE 14-3-3_epsilon
# YWHAZ 14-3-3_zeta
# EIF4EBP1 4E-BP1
# EIF4EBP1 4E-BP1_pS65
# OV.rppa.txt
# Composite.Element.REF TCGA-04-1335-01A-21-1561-20
# YWHAB|14-3-3_beta -0.00855276625000018
# YWHAE|14-3-3_epsilon 0.05985423025
# YWHAZ|14-3-3_zeta -0.04074335825
# EIF4EBP1|4E-BP1 -0.62276845725
# EIF4EBP1|4E-BP1_pS65 0.00776960074999994
# EIF4EBP1|4E-BP1_pT37_T46 -0.04959447325
# Make sure these files are aligned properly.
M1 = [x for x in filelib.read_cols(annotation_file)]
M2 = [x for x in filelib.read_cols(data_file)]
assert M1 and M2
assert M1[0][0] == "Gene Name"
assert M1[0][1] == "Composite Element REF"
assert M2[0][0] == "Composite.Element.REF"
assert len(M1) == len(M2)
# Make sure the header names don't conflict.
M1[0][1] = "Antibody"
for i in range(1, len(M1)):
name1 = M1[i][0]
x = M2[i][0]
x = x.split("|")
assert len(x) == 2
name2, antibody = x
assert name1 == name2
M = []
for i in range(len(M1)):
x = M1[i] + M2[i]
M.append(x)
handle = open(out_file, 'w')
for x in M:
print >>handle, "\t".join(x)
def merge_mutation_files(in_files, out_file):
# Merge the maf files.
x = [x for x in in_files if x.endswith(".maf.txt")]
assert x, "No maf files"
in_files = x
outhandle = open(out_file, 'w')
header = None
for filename in in_files:
handle = open(filename)
line = handle.readline()
if header is None:
header = line
print >>outhandle, line,
assert line == header
for line in handle:
print >>outhandle, line,
def extract_and_merge_files(gzfile_list, datatype, resource):
assert gzfile_list is not None
result = []
for gzfile in gzfile_list:
# BUG: This fails if passing an unzipped directory.
if not gzfile.endswith('tar.gz'):
return gzfile
import tarfile
tfile = tarfile.open(gzfile, 'r:gz')
gzname = os.path.split(gzfile)[-1]
newdir = os.path.join(os.getcwd(),gzname[:-7])
tfile.extractall(newdir)
folders = os.listdir(newdir)
folder = None
for folder in folders:
if folder == '._.DS_Store':
continue
directory = os.path.join(newdir, folder)
assert os.path.exists(directory)
files = os.listdir(directory)
if resource == 'stddata':
for filename in files:
if filename.endswith('txt') and filename != 'MANIFEST.txt':
result.append(os.path.join(directory,filename))
elif resource == 'analyses':
for filename in files:
# BAD. Hard-coded for CNV.
if filename =='all_data_by_genes.txt':
result.append(os.path.join(directory,filename))
else:
            raise ValueError('unrecognized resource %s' % resource)
if datatype == "rppa":
newname = "rppa.txt"
merge_rppa_files(result, newname)
return newname
elif datatype == "mutation":
newname = "mutation.txt"
merge_mutation_files(result, newname)
return newname
# If only one file found, then return it.
elif len(result) == 1:
return result[0]
# If two files found, then merge them and return.
elif len(result) == 2:
x = os.path.split(result[0])[-1]
# Should not hard-code this.
s = "Level_3__unc_lowess_normalization_gene_level__data"
x = x.replace(
'agilentg4502a_07_1__unc_edu__%s' % s,
'agilentg4502a_07__unc_edu__%s' % s)
newname = x
merge_files(result, newname)
return newname
raise ValueError('extract_and_merge_files can only handle two files')
def format_mutation_packager(filename, outfile):
import shutil
shutil.copy2(filename, outfile)
def format_firehose_rsem(filename, output):
import arrayio
HYB_REF = "Hybridization REF"
GENE_ID = "gene_id"
DATA = arrayio.read(filename)
assert DATA._row_order == [HYB_REF]
assert DATA._col_order == ["_SAMPLE_NAME", GENE_ID]
genes = DATA.row_names(HYB_REF)
gene_symbols = [None] * len(genes)
gene_ids = [None] * len(genes)
for i in range(len(genes)):
x = genes[i].split("|")
assert len(x) == 2
gene_symbol, gene_id = x
if gene_symbol == "?":
gene_symbol = ""
gene_ids[i] = gene_id
gene_symbols[i] = gene_symbol
f = file(output,'w')
header = ["Gene ID", "Gene Symbol"] + DATA.col_names("_SAMPLE_NAME")
f.write("\t".join(header)+'\n')
for i in range(DATA.nrow()):
x = [gene_ids[i], gene_symbols[i]] + DATA._X[i]
assert len(x) == len(header)
f.write("\t".join(map(str, x))+'\n')
f.close()
def format_firehose_exonexp(filename, output):
import arrayio
HYB_REF = "Hybridization REF"
GENE_ID = "exon"
DATA = arrayio.read(filename)
assert DATA._row_order == [HYB_REF]
assert DATA._col_order == ["_SAMPLE_NAME", GENE_ID]
# Line 2 of file is:
# exon raw_counts median_length_normalized RPKM [...]
# Only want RPKM columns.
col_headers = DATA.col_names(GENE_ID)
I_sig = []
for i in range(len(col_headers)):
if i % 3 == 2:
assert col_headers[i] == "RPKM"
I_sig.append(i)
else:
assert col_headers[i] != "RPKM"
genes = DATA.row_names(HYB_REF)
chroms = [None] * len(genes)
starts = [None] * len(genes)
ends = [None] * len(genes)
strands = [None] * len(genes)
for i in range(len(genes)):
x = genes[i].split(":")
assert len(x) == 3
chrom, x, strand = x
x = x.split("-")
assert len(x) == 2
start, end = x
chroms[i] = chrom
starts[i] = start
ends[i] = end
strands[i] = strand
x = DATA.col_names("_SAMPLE_NAME")
sample_names = [x[i] for i in I_sig]
f = file(output,'w')
header = ["Chrom", "Start", "End", "Strand"] + sample_names
f.write("\t".join(header)+'\n')
prev_line = None
for i in range(DATA.nrow()):
sig = [DATA._X[i][j] for j in I_sig]
x = [chroms[i], starts[i], ends[i], strands[i]] + sig
assert len(x) == len(header)
if x == prev_line:
continue
prev_line = x
f.write("\t".join(map(str, x))+'\n')
f.close()
def annotate_firehose_methylation(filename, output):
f=file(filename,'r')
text = f.readlines(2)
f.close()
handle = text[1].split('\t')
assert handle[:5] == ['Composite Element REF','Beta_value',
'Gene_Symbol','Chromosome','Genomic_Coordinate']
f=file(filename,'r')
all_symbols = {}
symbols=[]
for i in f.readlines():
words = i.split('\t')
symbol = words[2]
symbols = symbol.split(";")
for x in symbols:
all_symbols[x] = 1
f.close()
all_symbols = sorted(all_symbols)
#Look up all the symbols in the genefinder.
symbol2id = {}
genes = genefinder.find_many_genes(all_symbols, tax_id=9606)
for (symbol, gene) in zip(all_symbols, genes):
gene_id = gene[1]
if gene_id is None:
gene_id = ""
symbol2id[symbol] = gene_id
handle = filelib.read_row(filename, header=1)
samples_names = [handle._header[i] for i in range(len(handle._header)) if not (i-1)%4]
header = ["Probe.ID", "Gene.ID", "Gene.Symbol", "Chromosome",
"Genomic.Coordinate"] + samples_names
f = file(output, 'w')
f.write("\t".join(header)+'\n')
with open(filename) as FileObj:
for lines in FileObj:
if lines.startswith('Hybridization REF') or lines.startswith('Composite Element REF'):
continue
items = lines.split('\t')
probe_id = items[0]
Gene_symbols = items[2]
            Chromosome = items[3]
Genomic_coordinate = items[4]
values = [items[i] for i in range(len(items)) if not (i-1)%4]
symbols = Gene_symbols.split(";")
ids = [symbol2id.get(x, "") for x in symbols]
#gene_symbol = ";".join(symbols)
gene_id = ";".join(map(str, ids))
            row = [probe_id, gene_id, Gene_symbols, Chromosome, Genomic_coordinate] + values
assert len(row) == len(header)
f.write("\t".join(map(str, row))+'\n')
f.close()
def format_firehose_mirna(filename, output):
matrix = [x for x in filelib.read_cols(filename)]
HYB_REF = "Hybridization REF"
GENE_ID = "miRNA_ID"
assert matrix
assert matrix[0][0] == HYB_REF
assert matrix[1][0] == GENE_ID
header0 = matrix[0]
header1 = matrix[1]
for i in range(1, len(header1), 3):
assert header1[i] == "read_count"
assert header1[i+1] == "reads_per_million_miRNA_mapped"
assert header1[i+2] == "cross-mapped"
sample_name = [header0[i] for i in range(2, len(header0), 3)]
header = ["miRNA ID"] + sample_name
f = file(output, 'w')
f.write("\t".join(header)+'\n')
for i in range(2, len(matrix)):
x = [matrix[i][j] for j in range(2, len(matrix[i]), 3)]
x = [matrix[i][0]] + x
assert len(x) == len(header)
f.write("\t".join(x)+'\n')
f.close()
def format_firehose_rppa(filename, output):
COMP_REF = "Composite.Element.REF"
COMP_REF_H = "Composite_Element_REF"
iter = filelib.read_row(filename, header=1)
#assert iter._header[0] == COMP_REF
f = file(output, 'w')
header = ["Gene Symbol", "Gene ID", "Antibody"] + iter._header[1:]
f.write("\t".join(header)+'\n')
for d in iter:
assert hasattr(d, COMP_REF_H)
x = getattr(d, COMP_REF_H)
x = x.split("|")
assert len(x) == 2
x, antibody = x
gene_symbols = [x.strip() for x in x.split()]
x = genefinder.find_many_genes(gene_symbols, tax_id="9606")
x = [x[1] for x in x]
x = [x for x in x if x]
gene_ids = x
if gene_symbols == ["CDC2"]:
gene_ids = ["983"]
gene_symbol_str = ";".join(gene_symbols)
gene_id_str = ";".join(map(str, gene_ids))
x = [gene_symbol_str, gene_id_str, antibody] + d._cols[1:]
assert len(x) == len(header)
f.write("\t".join(map(str, x))+'\n')
f.close()
def format_firehose_gistic(filename, output):
f = file(output, 'w')
iter = filelib.read_row(filename, header=1)
header = ["Gene ID", "Gene Symbol"] + iter._header[2:]
print >>f, "\t".join(header)
for d in iter:
gene_symbol = d.Gene_Symbol
gene_id = d.Locus_ID
x = [gene_id, gene_symbol] + d._cols[2:]
assert len(x) == len(header)
print >>f, "\t".join(map(str, x))
f.close()
def format_rsem_isoforms(txt_file, outfile):
import arrayio
from genomicode import arrayplatformlib
M = arrayio.read(txt_file)
# detect platform
x = arrayplatformlib.score_matrix(M, min_score=0.8)
assert x, "Cannot identify platform."
header, platform = x.header, x.platform_name
probe_ids = M.row_names(header)
#if kg5, convert to kg7
if platform == 'UCSC_human_hg19_kg5':
new_platform = 'UCSC_human_hg19_kg7'
kg7_ids = arrayannot.convert_probe_ids(probe_ids,new_platform)
kg7_header = 'Hybridization REF kg7'
M = make_matrix_new_ids(M, kg7_ids,kg7_header,1)
probe_ids = M.row_names(kg7_header)
# add LocusLink ids
LocusLink_ids = arrayannot.convert_probe_ids(probe_ids,'Entrez_ID_human')
gene_symbol_ids = arrayannot.convert_probe_ids(probe_ids,'Entrez_Symbol_human')
newMatrix = make_matrix_new_ids(M,LocusLink_ids,'Entrez_ID_human',2)
newMatrix = make_matrix_new_ids(newMatrix,gene_symbol_ids,'Entrez_Symbol_human',3)
#get rid of scaled_estimate
assert 'scaled_estimate' in newMatrix._col_names['isoform_id']
assert 'raw_count' in newMatrix._col_names['isoform_id']
col_names = {}
col_names['_SAMPLE_NAME'] =[newMatrix._col_names['_SAMPLE_NAME'][i]
for i in range(len(newMatrix._col_names['_SAMPLE_NAME']))
if not i%2]
row_names = newMatrix._row_names.copy()
row_order = newMatrix._row_order[:]
col_order = newMatrix._col_order[:]
col_order.remove('isoform_id')
synonyms = newMatrix._synonyms.copy()
X = []
for line in newMatrix._X:
line = [line[i] for i in range(len(line)) if not i%2]
X.append(line)
x = Matrix.InMemoryMatrix(
X, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
f = file(outfile,'w')
arrayio.tab_delimited_format.write(x, f)
f.close()
def format_affymetrix(filename, output):
import arrayio
HYB_REF = "Hybridization REF"
DATA = arrayio.read(filename)
assert DATA._row_order == [HYB_REF]
assert DATA._col_order == ["_SAMPLE_NAME", 'Composite Element REF']
genes = DATA.row_names(HYB_REF)
f = file(output,'w')
header = [ "Gene Symbol"] + DATA.col_names("_SAMPLE_NAME")
f.write("\t".join(header)+'\n')
for i in range(DATA.nrow()):
row = [j if j is not None else '' for j in DATA._X[i] ]
x = [genes[i]] + row
assert len(x) == len(header)
f.write("\t".join(map(str, x))+'\n')
f.close()
def format_agilent(filename, output):
import arrayio
HYB_REF = "Hybridization REF"
DATA = arrayio.read(filename)
assert DATA._row_order == [HYB_REF]
assert DATA._col_order == ["_SAMPLE_NAME", 'Composite Element REF']
genes = DATA.row_names(HYB_REF)
f = file(output,'w')
header = [ "Gene Symbol"] + DATA.col_names("_SAMPLE_NAME")
f.write("\t".join(header)+'\n')
for i in range(DATA.nrow()):
row = [j if j is not None else '' for j in DATA._X[i] ]
x = [genes[i]] + row
assert len(x) == len(header)
f.write("\t".join(map(str, x))+'\n')
f.close()
def make_matrix_new_ids(DATA,output_ids,header,index):
# Make a matrix with the new IDs.
X = DATA._X
row_names = DATA._row_names.copy()
row_order = DATA._row_order[:]
col_names = DATA._col_names.copy()
col_order = DATA._col_order[:]
synonyms = DATA._synonyms.copy()
row_order.insert(index,header)
row_names[header] = output_ids
# Write the outfile.
x = Matrix.InMemoryMatrix(
X, row_names=row_names, col_names=col_names,
row_order=row_order, col_order=col_order, synonyms=synonyms)
return x
def process_data(data, txt_file, outfile):
if data == 'RSEM_genes':
format_firehose_rsem(txt_file, outfile)
elif data == 'RSEM_exons':
format_firehose_exonexp(txt_file, outfile)
elif data == 'humanmethylation450':
annotate_firehose_methylation(txt_file, outfile)
elif data == 'mirnaseq':
format_firehose_mirna(txt_file, outfile)
elif data == 'clinical':
raise NotImplementedError("have not figured out how to process")
elif data == 'rppa':
format_firehose_rppa(txt_file, outfile)
elif data == 'cnv_gistic2':
format_firehose_gistic(txt_file, outfile)
elif data == 'RSEM_isoforms':
format_rsem_isoforms(txt_file, outfile)
elif data == 'agilent':
format_agilent(txt_file, outfile)
elif data == 'affymetrix':
format_affymetrix(txt_file, outfile)
elif data == 'mutation':
format_mutation_packager(txt_file, outfile)
else:
raise ValueError("the data type is not matched to our list")
print 'processing finished '
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--disease',
help='Which disease to download data from. Format: BRCA')
parser.add_argument(
'--date',
help='Download data from this date. If not given, get the most '
'recent. Format: 20140715')
x = sorted(datatype_match)
x = ", ".join(x)
parser.add_argument(
'--data',
help="Which type of data to download. Possibilities: %s" % x)
parser.add_argument(
'output', nargs="?",
help='output file for the processed data')
parser.add_argument(
'--list_dates', action='store_true',
help='show the dates that have data. If disease is given, show the '
'dates for this disease')
parser.add_argument(
'--list_diseases', action='store_true',
help='show the available diseases. If date is not given, '
'show for the latest date.')
parser.add_argument(
'--list_data', dest='list_data', action='store_const',
const=True, default=False,
help='show the data available for the specified disease and date; '
'if date is not given, use the latest one')
parser.add_argument(
'--download_only', action='store_true',
help='Download the raw data file without processing.')
parser.add_argument(
'--download_and_extract', action='store_true',
help='Download the raw data file with extracting.')
parser.add_argument(
'--process_only', action='store_true',
help="Process a previously downloaded file. Should be either the "
"original .tar.gz archive, or the text file to process.")
parser.add_argument('--input', help='input file for process')
args = parser.parse_args()
if args.data:
assert args.data in datatype_match.keys(), \
'the data type you entered is not recognized'
if args.date:
assert re.search(r'[0-9]{8}', args.date)
if args.download_only:
assert args.disease, 'please specify the disease'
assert args.data, 'please specify the data'
if args.process_only:
assert args.data, 'please specify the data'
if args.list_dates:
assert not args.date
assert not args.data, "Not implemented"
print "Dates"
if args.disease:
raise NotImplementedError
else:
all_dates, all_dates_list = retrieve_all_dates()
for date in all_dates_list:
print date
return
elif args.list_diseases:
assert not args.disease
all_dates, all_dates_list = retrieve_all_dates()
date = all_dates_list[-1]
if args.date:
date = args.date
all_diseases = retrieve_diseases(date)
print "Diseases available on %s:" % date
for name in sorted(all_diseases):
if args.data:
all_data = get_data_type(name, date)
if args.data not in all_data:
continue
print name
elif args.list_data:
assert args.disease, "disease must be given."
all_dates, all_dates_list = retrieve_all_dates()
date = all_dates_list[-1]
if args.date:
date = args.date
all_data = get_data_type(args.disease, date)
for d in all_data:
print d
return
if args.process_only:
assert args.input, "Please specify --input file"
assert os.path.exists(args.input), '%s does not exist' % args.input
txt_file = extract_and_merge_files(
[args.input], args.data, datatype2resource[args.data])
process_data(args.data, txt_file, args.output)
elif args.download_only:
assert args.disease, "disease must be given."
assert args.data, "data must be given."
all_dates, all_dates_list = retrieve_all_dates()
date = sorted(all_dates_list)[-1]
if args.date:
date = args.date
download_file(args.disease,date,args.data)
elif args.download_and_extract:
assert args.disease, "disease must be given."
assert args.data, "data must be given."
all_dates, all_dates_list = retrieve_all_dates()
date = sorted(all_dates_list)[-1]
if args.date:
date = args.date
filenames = download_file(args.disease, date,args.data)
txt_file = extract_and_merge_files(
filenames, args.data, datatype2resource[args.data])
else:
assert args.disease, "Please specify a disease to download."
assert args.data, "data must be given."
assert args.output, "Please specify output path."
all_dates, all_dates_list = retrieve_all_dates()
date = all_dates_list[-1]
if args.date:
date = args.date
diseases_in_date = retrieve_diseases(date)
assert args.disease in diseases_in_date, (
'the disease %s is not found for the date %s' % (
args.disease,date))
## assert args.data in all_data,('%s is not found in %s for %s' %(
## args.data,require_date,args.disease))
filenames = download_file(args.disease, date, args.data)
txt_file = extract_and_merge_files(
filenames, args.data, datatype2resource[args.data])
process_data(args.data, txt_file, args.output)
if __name__ == '__main__':
main()
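# Illustrative command lines (script name and values are hypothetical):
#   python download_firehose.py --list_diseases
#   python download_firehose.py --disease BRCA --data RSEM_genes --date 20140715 brca_rsem_genes.txt
#   python download_firehose.py --disease BRCA --data mirnaseq --download_only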
|
|
# -*- coding: utf-8 -*-
"""
Check that SendInput can work the way we want it to
The tips and tricks at http://www.pinvoke.net/default.aspx/user32.sendinput
are useful!
"""
import time
import ctypes
__all__ = ['KeySequenceError', 'SendKeys']
try:
str_class = basestring
def enforce_unicode(text):
return unicode(text)
except NameError:
str_class = str
def enforce_unicode(text):
return text
#pylint: disable-msg=R0903
DEBUG = 0
MapVirtualKey = ctypes.windll.user32.MapVirtualKeyW
SendInput = ctypes.windll.user32.SendInput
VkKeyScan = ctypes.windll.user32.VkKeyScanW
VkKeyScan.restype = ctypes.c_short
VkKeyScan.argtypes = [ctypes.c_wchar]
DWORD = ctypes.c_ulong
LONG = ctypes.c_long
WORD = ctypes.c_ushort
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
class MOUSEINPUT(ctypes.Structure):
"Needed for complete definition of INPUT structure - not used"
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert ctypes.sizeof(MOUSEINPUT) == 24, ctypes.sizeof(MOUSEINPUT)
assert ctypes.alignment(MOUSEINPUT) == 2, ctypes.alignment(MOUSEINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
class KEYBDINPUT(ctypes.Structure):
"A particular keyboard event"
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert ctypes.sizeof(KEYBDINPUT) == 16, ctypes.sizeof(KEYBDINPUT)
assert ctypes.alignment(KEYBDINPUT) == 2, ctypes.alignment(KEYBDINPUT)
class HARDWAREINPUT(ctypes.Structure):
"Needed for complete definition of INPUT structure - not used"
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4300
('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD),
]
assert ctypes.sizeof(HARDWAREINPUT) == 8, ctypes.sizeof(HARDWAREINPUT)
assert ctypes.alignment(HARDWAREINPUT) == 2, ctypes.alignment(HARDWAREINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
class UNION_INPUT_STRUCTS(ctypes.Union):
"The C Union type representing a single Event of any type"
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
]
assert ctypes.sizeof(UNION_INPUT_STRUCTS) == 24, \
ctypes.sizeof(UNION_INPUT_STRUCTS)
assert ctypes.alignment(UNION_INPUT_STRUCTS) == 2, \
ctypes.alignment(UNION_INPUT_STRUCTS)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310
class INPUT(ctypes.Structure):
"See: http://msdn.microsoft.com/en-us/library/ms646270%28VS.85%29.aspx"
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310
('type', DWORD),
# Unnamed field renamed to '_'
('_', UNION_INPUT_STRUCTS),
]
assert ctypes.sizeof(INPUT) == 28, ctypes.sizeof(INPUT)
assert ctypes.alignment(INPUT) == 2, ctypes.alignment(INPUT)
INPUT_KEYBOARD = 1
KEYEVENTF_EXTENDEDKEY = 1
KEYEVENTF_KEYUP = 2
KEYEVENTF_UNICODE = 4
KEYEVENTF_SCANCODE = 8
VK_SHIFT = 16
VK_CONTROL = 17
VK_MENU = 18
# 'codes' recognized as {CODE( repeat)?}
CODES = {
'BACK': 8,
'BACKSPACE':8,
'BKSP': 8,
'BREAK': 3,
'BS': 8,
'CAP': 20,
'CAPSLOCK': 20,
'DEL': 46,
'DELETE': 46,
'DOWN': 40,
'END': 35,
'ENTER': 13,
'ESC': 27,
'F1': 112,
'F2': 113,
'F3': 114,
'F4': 115,
'F5': 116,
'F6': 117,
'F7': 118,
'F8': 119,
'F9': 120,
'F10': 121,
'F11': 122,
'F12': 123,
'F13': 124,
'F14': 125,
'F15': 126,
'F16': 127,
'F17': 128,
'F18': 129,
'F19': 130,
'F20': 131,
'F21': 132,
'F22': 133,
'F23': 134,
'F24': 135,
'HELP': 47,
'HOME': 36,
'INS': 45,
'INSERT': 45,
'LEFT': 37,
'LWIN': 91,
'NUMLOCK': 144,
'PGDN': 34,
'PGUP': 33,
'PRTSC': 44,
'RIGHT': 39,
'RMENU': 165,
'RWIN': 92,
'SCROLLLOCK':145,
'SPACE': 32,
'TAB': 9,
'UP': 38,
'VK_ACCEPT': 30,
'VK_ADD': 107,
'VK_APPS': 93,
'VK_ATTN': 246,
'VK_BACK': 8,
'VK_CANCEL': 3,
'VK_CAPITAL': 20,
'VK_CLEAR': 12,
'VK_CONTROL': 17,
'VK_CONVERT': 28,
'VK_CRSEL': 247,
'VK_DECIMAL': 110,
'VK_DELETE': 46,
'VK_DIVIDE': 111,
'VK_DOWN': 40,
'VK_END': 35,
'VK_EREOF': 249,
'VK_ESCAPE': 27,
'VK_EXECUTE': 43,
'VK_EXSEL': 248,
'VK_F1': 112,
'VK_F2': 113,
'VK_F3': 114,
'VK_F4': 115,
'VK_F5': 116,
'VK_F6': 117,
'VK_F7': 118,
'VK_F8': 119,
'VK_F9': 120,
'VK_F10': 121,
'VK_F11': 122,
'VK_F12': 123,
'VK_F13': 124,
'VK_F14': 125,
'VK_F15': 126,
'VK_F16': 127,
'VK_F17': 128,
'VK_F18': 129,
'VK_F19': 130,
'VK_F20': 131,
'VK_F21': 132,
'VK_F22': 133,
'VK_F23': 134,
'VK_F24': 135,
'VK_FINAL': 24,
'VK_HANGEUL': 21,
'VK_HANGUL': 21,
'VK_HANJA': 25,
'VK_HELP': 47,
'VK_HOME': 36,
'VK_INSERT': 45,
'VK_JUNJA': 23,
'VK_KANA': 21,
'VK_KANJI': 25,
'VK_LBUTTON': 1,
'VK_LCONTROL':162,
'VK_LEFT': 37,
'VK_LMENU': 164,
'VK_LSHIFT': 160,
'VK_LWIN': 91,
'VK_MBUTTON': 4,
'VK_MENU': 18,
'VK_MODECHANGE': 31,
'VK_MULTIPLY': 106,
'VK_NEXT': 34,
'VK_NONAME': 252,
'VK_NONCONVERT': 29,
'VK_NUMLOCK': 144,
'VK_NUMPAD0': 96,
'VK_NUMPAD1': 97,
'VK_NUMPAD2': 98,
'VK_NUMPAD3': 99,
'VK_NUMPAD4': 100,
'VK_NUMPAD5': 101,
'VK_NUMPAD6': 102,
'VK_NUMPAD7': 103,
'VK_NUMPAD8': 104,
'VK_NUMPAD9': 105,
'VK_OEM_CLEAR': 254,
'VK_PA1': 253,
'VK_PAUSE': 19,
'VK_PLAY': 250,
'VK_PRINT': 42,
'VK_PRIOR': 33,
'VK_PROCESSKEY': 229,
'VK_RBUTTON': 2,
'VK_RCONTROL': 163,
'VK_RETURN': 13,
'VK_RIGHT': 39,
'VK_RMENU': 165,
'VK_RSHIFT': 161,
'VK_RWIN': 92,
'VK_SCROLL': 145,
'VK_SELECT': 41,
'VK_SEPARATOR': 108,
'VK_SHIFT': 16,
'VK_SNAPSHOT': 44,
'VK_SPACE': 32,
'VK_SUBTRACT': 109,
'VK_TAB': 9,
'VK_UP': 38,
'ZOOM': 251,
}
# reverse the CODES dict to make it easy to look up a particular code name
CODE_NAMES = dict((entry[1], entry[0]) for entry in CODES.items())
# modifier keys
MODIFIERS = {
'+': VK_SHIFT,
'^': VK_CONTROL,
'%': VK_MENU,
}
class KeySequenceError(Exception):
"""Exception raised when a key sequence string has a syntax error"""
def __str__(self):
return ' '.join(self.args)
class KeyAction(object):
"""Class that represents a single 'keyboard' action
It represents either a PAUSE action (not really keyboard) or a keyboard
action (press or release or both) of a particular key.
"""
def __init__(self, key, down = True, up = True):
self.key = key
if isinstance(self.key, str_class):
self.key = enforce_unicode(key)
self.down = down
self.up = up
def _get_key_info(self):
"""Return virtual_key, scan_code, and flags for the action
This is one of the methods that will be overridden by sub classes"""
return 0, ord(self.key), KEYEVENTF_UNICODE
def GetInput(self):
"Build the INPUT structure for the action"
actions = 1
# if both up and down
if self.up and self.down:
actions = 2
inputs = (INPUT * actions)()
vk, scan, flags = self._get_key_info()
for inp in inputs:
inp.type = INPUT_KEYBOARD
inp._.ki.wVk = vk
inp._.ki.wScan = scan
inp._.ki.dwFlags |= flags
# if we are releasing - then let it up
if self.up:
inputs[-1]._.ki.dwFlags |= KEYEVENTF_KEYUP
return inputs
def Run(self):
"Execute the action"
inputs = self.GetInput()
return SendInput(
len(inputs),
ctypes.byref(inputs),
ctypes.sizeof(INPUT))
def _get_down_up_string(self):
"""Return a string that will show whether the string is up or down
return 'down' if the key is a press only
return 'up' if the key is up only
return '' if the key is up & down (as default)
"""
down_up = ""
if not (self.down and self.up):
if self.down:
down_up = "down"
elif self.up:
down_up = "up"
return down_up
def key_description(self):
"Return a description of the key"
vk, scan, flags = self._get_key_info()
desc = ''
if vk:
if vk in CODE_NAMES:
desc = CODE_NAMES[vk]
else:
desc = "VK %d"% vk
else:
desc = "%s"% self.key
return desc
def __str__(self):
parts = []
parts.append(self.key_description())
up_down = self._get_down_up_string()
if up_down:
parts.append(up_down)
return "<%s>"% (" ".join(parts))
__repr__ = __str__
class VirtualKeyAction(KeyAction):
"""Represents a virtual key action e.g. F9 DOWN, etc
Overrides necessary methods of KeyAction"""
def _get_key_info(self):
"Virtual keys have extended flag set"
# copied more or less verbatim from
# http://www.pinvoke.net/default.aspx/user32.sendinput
if (
(self.key >= 33 and self.key <= 46) or
(self.key >= 91 and self.key <= 93) ):
flags = KEYEVENTF_EXTENDEDKEY;
else:
flags = 0
# This works for %{F4} - ALT + F4
#return self.key, 0, 0
# this works for Tic Tac Toe i.e. +{RIGHT} SHIFT + RIGHT
return self.key, MapVirtualKey(self.key, 0), flags
class EscapedKeyAction(KeyAction):
"""Represents an escaped key action e.g. F9 DOWN, etc
Overrides necessary methods of KeyAction"""
def _get_key_info(self):
"""EscapedKeyAction doesn't send it as Unicode and the vk and
scan code are generated differently"""
vkey_scan = LoByte(VkKeyScan(self.key))
return (vkey_scan, MapVirtualKey(vkey_scan, 0), 0)
def key_description(self):
"Return a description of the key"
return "KEsc %s"% self.key
class PauseAction(KeyAction):
"Represents a pause action"
def __init__(self, how_long):
self.how_long = how_long
def Run(self):
"Pause for the lenght of time specified"
time.sleep(self.how_long)
def __str__(self):
return "<PAUSE %1.2f>"% (self.how_long)
__repr__ = __str__
#def GetInput(self):
# print `self.key`
# keys = KeyAction.GetInput(self)
#
# shift_state = HiByte(VkKeyScan(self.key))
#
# shift_down = shift_state & 0x100 # 1st bit
# ctrl_down = shift_state & 0x80 # 2nd bit
# alt_down = shift_state & 0x40 # 3rd bit
#
# print bin(shift_state), shift_down, ctrl_down, alt_down
#
# print keys
# keys = [k for k in keys]
#
# modifiers = []
# if shift_down:
# keys[0:0] = VirtualKeyAction(VK_SHIFT, up = False).GetInput()
# keys.append(VirtualKeyAction(VK_SHIFT, down = False).GetInput())
# if ctrl_down:
# keys[0:0] = VirtualKeyAction(VK_CONTROL, up = False).GetInput()
# keys.append(VirtualKeyAction(VK_CONTROL, down = False).GetInput())
# if alt_down:
# keys[0:0] = VirtualKeyAction(VK_ALT, up = False).GetInput()
# keys.append(VirtualKeyAction(VK_ALT, down = False).GetInput())
#
# print keys
# new_keys = (INPUT * len(keys)) ()
#
# for i, k in enumerate(keys):
# if hasattr(k, 'type'):
# new_keys[i] = k
# else:
# for sub_key in k:
# new_keys[i] = sub_key
#
# return new_keys
#
def handle_code(code):
"Handle a key or sequence of keys in braces"
code_keys = []
# it is a known code (e.g. {DOWN}, {ENTER}, etc)
if code in CODES:
code_keys.append(VirtualKeyAction(CODES[code]))
# it is an escaped modifier e.g. {%}, {^}, {+}
elif len(code) == 1:
code_keys.append(KeyAction(code))
# it is a repetition or a pause {DOWN 5}, {PAUSE 1.3}
elif ' ' in code:
to_repeat, count = code.rsplit(None, 1)
if to_repeat == "PAUSE":
try:
pause_time = float(count)
except ValueError:
raise KeySequenceError('invalid pause time %s'% count)
code_keys.append(PauseAction(pause_time))
else:
try:
count = int(count)
except ValueError:
raise KeySequenceError(
'invalid repetition count %s'% count)
# If the value in to_repeat is a VK e.g. DOWN
# we need to add the code repeated
if to_repeat in CODES:
code_keys.extend(
[VirtualKeyAction(CODES[to_repeat])] * count)
# otherwise parse the keys and we get back a KeyAction
else:
to_repeat = parse_keys(to_repeat)
if isinstance(to_repeat, list):
keys = to_repeat * count
else:
keys = [to_repeat] * count
code_keys.extend(keys)
else:
raise RuntimeError("Unknown code: %s"% code)
return code_keys
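# Illustrative sketch of what handle_code() returns (comment-only example,
# based on the code above):
#   handle_code("ENTER")     -> [VirtualKeyAction(13)]
#   handle_code("DOWN 3")    -> [VirtualKeyAction(40)] * 3   (three DOWN presses)
#   handle_code("PAUSE 1.5") -> [PauseAction(1.5)]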
def parse_keys(string,
with_spaces = False,
with_tabs = False,
with_newlines = False,
modifiers = None):
"Return the parsed keys"
keys = []
if not modifiers:
modifiers = []
index = 0
while index < len(string):
c = string[index]
index += 1
# check if one of CTRL, SHIFT, ALT has been pressed
if c in MODIFIERS.keys():
modifier = MODIFIERS[c]
# remember that we are currently modified
modifiers.append(modifier)
# hold down the modifier key
keys.append(VirtualKeyAction(modifier, up = False))
if DEBUG:
print("MODS+", modifiers)
continue
# Apply modifiers over a bunch of characters (not just one!)
elif c == "(":
# find the end of the bracketed text
end_pos = string.find(")", index)
if end_pos == -1:
raise KeySequenceError('`)` not found')
keys.extend(
parse_keys(string[index:end_pos], modifiers = modifiers))
index = end_pos + 1
# Escape or named key
elif c == "{":
end_pos = string.find("}", index)
if end_pos == -1:
raise KeySequenceError('`}` not found')
code = string[index:end_pos]
index = end_pos + 1
keys.extend(handle_code(code))
# unmatched ")"
elif c == ')':
raise KeySequenceError('`)` should be preceded by `(`')
# unmatched "}"
elif c == '}':
raise KeySequenceError('`}` should be preceded by `{`')
# so it is a normal character
else:
# don't output white space unless flags to output have been set
if (c == ' ' and not with_spaces or
c == '\t' and not with_tabs or
c == '\n' and not with_newlines):
continue
# output newline
if c in ('~', '\n'):
keys.append(VirtualKeyAction(CODES["ENTER"]))
# safest are the virtual keys - so if our key is a virtual key
# use a VirtualKeyAction
#if ord(c) in CODE_NAMES:
# keys.append(VirtualKeyAction(ord(c)))
elif modifiers:
keys.append(EscapedKeyAction(c))
else:
keys.append(KeyAction(c))
# as we have handled the text - release the modifiers
while modifiers:
if DEBUG:
print("MODS-", modifiers)
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
# just in case there were any modifiers left pressed - release them
while modifiers:
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
return keys
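# Illustrative sketch of parse_keys() output (comment-only example, based on
# the code above):
#   parse_keys("^a") ->
#       [VirtualKeyAction(VK_CONTROL, up=False),    # hold CTRL down
#        EscapedKeyAction("a"),                     # 'a' sent while modified
#        VirtualKeyAction(VK_CONTROL, down=False)]  # release CTRL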
def LoByte(val):
"Return the low byte of the value"
return val & 0xff
def HiByte(val):
"Return the high byte of the value"
return (val & 0xff00) >> 8
def SendKeys(keys,
pause=0.05,
with_spaces=False,
with_tabs=False,
with_newlines=False,
turn_off_numlock=True):
"Parse the keys and type them"
keys = parse_keys(keys, with_spaces, with_tabs, with_newlines)
for k in keys:
k.Run()
time.sleep(pause)
def main():
"Send some test strings"
actions = """
{LWIN}
{PAUSE .25}
r
{PAUSE .25}
Notepad.exe{ENTER}
{PAUSE 1}
Hello{SPACE}World!
{PAUSE 1}
%{F4}
{PAUSE .25}
n
"""
SendKeys(actions, pause = .1)
keys = parse_keys(actions)
for k in keys:
print(k)
k.Run()
time.sleep(.1)
# test_strings = [
# "\n"
# "(aa)some text\n",
# "(a)some{ }text\n",
# "(b)some{{}text\n",
# "(c)some{+}text\n",
# "(d)so%me{ab 4}text",
# "(e)so%me{LEFT 4}text",
# "(f)so%me{ENTER 4}text",
# "(g)so%me{^aa 4}text",
# "(h)some +(asdf)text",
# "(i)some %^+(asdf)text",
# "(j)some %^+a text+",
# "(k)some %^+a tex+{&}",
# "(l)some %^+a tex+(dsf)",
# "",
# ]
# for s in test_strings:
# print(repr(s))
# keys = parse_keys(s, with_newlines = True)
# print(keys)
# for k in keys:
# k.Run()
# time.sleep(.1)
# print()
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
role_name: str,
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}')
path_format_arguments = {
"roleName": _SERIALIZER.url("role_name", role_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class CloudServiceRolesOperations(object):
"""CloudServiceRolesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
role_name: str,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> "_models.CloudServiceRole":
"""Gets a role from a cloud service.
:param role_name: Name of the role.
:type role_name: str
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CloudServiceRole, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.CloudServiceRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
role_name=role_name,
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CloudServiceRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> Iterable["_models.CloudServiceRoleListResult"]:
"""Gets a list of all roles in a cloud service. Use nextLink property in the response to get the
next page of roles. Do this till nextLink is null to fetch all the roles.
:param resource_group_name:
:type resource_group_name: str
:param cloud_service_name:
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CloudServiceRoleListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.CloudServiceRoleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceRoleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CloudServiceRoleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles'} # type: ignore
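# Illustrative usage sketch (an assumption about the usual azure-mgmt-compute
# client wiring; this generated module is not normally used directly):
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute import ComputeManagementClient
#   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   role = client.cloud_service_roles.get("<role-name>", "<resource-group>", "<cloud-service>")
#   for r in client.cloud_service_roles.list("<resource-group>", "<cloud-service>"):
#       print(r.name)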
|
|
#!/usr/bin/python
'''
VTK engine room for mrMeshPy viewer
The main vtk processing is done by functions here - although some hardcore
processing is handled in subroutines of other imported modules.
A core concept here is the tracking (keeping in scope) of the "targetVTKWindow"
- this is a vtkRenderWindowInteractor instance in the main program UI (user
interface) - by creating multiple instances of vtk windows we can load
multiple meshes. Some functions reference this specifically with a reference
index passed from mrVista --- mainWindowUI.vtkInstances[int(theMeshInstance)]
while others just reference the most recently added instance (e.g. when adding
a new mesh) --- mainWindowUI.vtkInstances[-1]
Note that it is the mainWindowUI that is passed to all functions so that all
functions have the content of the main window in scope.
Andre' Gouws 2017
'''
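# Illustrative sketch of the two lookup patterns described above (comment only;
# names follow the docstring, the index values are hypothetical):
#   targetVTKWindow = mainWindowUI.vtkInstances[int(theMeshInstance)]  # explicit instance from mrVista
#   newestVTKWindow = mainWindowUI.vtkInstances[-1]                    # most recently added mesh window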
import vtk
from numpy import *
import time
from vtk.util import numpy_support
debug = True
# local modules
from mp_unpackIncomingData import unpackData
from mp_VTKProcessing import *
from mp_VTKDrawing import *
def loadNewMesh(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
#first get all the data we are expecting from the server
## NB this assumes that the order of sending by the server is
# 1) vertices
# 2) triangles
# 3) color data r (rgba) for each vertex
# 4) color data g (rgba) for each vertex
# 5) color data b (rgba) for each vertex
# 6) color data a (rgba) for each vertex
if debug:
print('received request for new mesh with Args:')
print(commandArgs)
# sanity check
if ('vertices' in commandArgs[0]) and ('triangles' in commandArgs[1]):
pass
else:
return "error - expecting vertices, then triangles!"
# load the surfaces data
verticesArgs = commandArgs[0].strip().split(',')
vertices = unpackData(verticesArgs[1], int(verticesArgs[2]), the_TCPserver)
vertices = array(vertices,'f')
vertices = vertices.reshape((len(vertices)/3,3))
trianglesArgs = commandArgs[1].strip().split(',')
triangles = unpackData(trianglesArgs[1], int(trianglesArgs[2]), the_TCPserver)
triangles = array(triangles,'f')
if debug: print(triangles)
triangles = triangles.reshape((len(triangles)/3,3))
if debug: print(triangles)
# load the surface colour data
rVecArgs = commandArgs[2].strip().split(',')
r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
r_vec = array(r_vec,'uint8')
if debug: print(r_vec)
gVecArgs = commandArgs[3].strip().split(',')
g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
g_vec = array(g_vec,'uint8')
bVecArgs = commandArgs[4].strip().split(',')
b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
b_vec = array(b_vec,'uint8')
aVecArgs = commandArgs[5].strip().split(',')
a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
a_vec = array(a_vec,'uint8')
if debug:
print(len(r_vec))
print(len(g_vec))
print(len(b_vec))
print(len(a_vec))
#combine into numpy array
colorDat = squeeze(array(squeeze([r_vec,g_vec,b_vec,a_vec]),'B',order='F').transpose())
# convert this to a VTK unsigned char array
scalars = numpy_support.numpy_to_vtk(colorDat,0)
curr_scalars = vtk.vtkUnsignedCharArray()
curr_scalars.DeepCopy(scalars)
## ---- ok, we have the data, lets turn it into vtk stuff
# Process vertices
points = vtk.vtkPoints()
for i in range(vertices.shape[0]):
points.InsertPoint(i,vertices[i][0],vertices[i][1],vertices[i][2])
# Process faces (triangles)
polys = vtk.vtkCellArray()
nTriangles = triangles.shape[0]
for i in range(nTriangles):
polys.InsertNextCell(3)
for j in range(3):
polys.InsertCellPoint(int(triangles[i][j]))
# check
if debug: print(points)
if debug: print(polys)
if debug: print(scalars)
if debug: print(currVTKInstance)
# Assemble as PolyData
polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetPolys(polys)
polyData.GetPointData().SetScalars(scalars)
## TODO ? smoothing on first load?
smooth = vtk.vtkSmoothPolyDataFilter()
smooth.SetNumberOfIterations(0)
smooth.SetRelaxationFactor(0.0)
smooth.FeatureEdgeSmoothingOff()
smooth.SetInputData(polyData)
pdm = vtk.vtkPolyDataMapper()
pdm.SetScalarModeToUsePointData()
pdm.SetInputConnection(smooth.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(pdm)
iren = mainWindowUI.vtkInstances[-1]
## ---- engine room for drawing on the surface
# add a picker that allows us to pick points on the surface
picker = vtk.vtkCellPicker()
picker.SetTolerance(0.0001)
mainWindowUI.vtkInstances[-1].SetPicker(picker)
mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds = [] #place holder for picked vtk point IDs so we can track
mainWindowUI.vtkInstances[-1].pickedPointIds = mainWindowUI.vtkInstances[-1]._Iren.pickedPointIds
mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues = [] #place holder for the original scalar values of picked points so we can track
mainWindowUI.vtkInstances[-1].pickedPointOrigValues = mainWindowUI.vtkInstances[-1]._Iren.pickedPointOrigValues
mainWindowUI.vtkInstances[-1]._Iren.pickedPoints = vtk.vtkPoints() #place holder for the picked vtk points so we can track
mainWindowUI.vtkInstances[-1].pickedPoints = mainWindowUI.vtkInstances[-1]._Iren.pickedPoints
mainWindowUI.vtkInstances[-1]._Iren.inDrawMode = 0 #TODO
mainWindowUI.vtkInstances[-1].inDrawMode = mainWindowUI.vtkInstances[-1]._Iren.inDrawMode
# drawing functions imported from mp_VTKDrawing
mainWindowUI.vtkInstances[-1].AddObserver('LeftButtonPressEvent', drawingPickPoint, 1.0)
mainWindowUI.vtkInstances[-1].AddObserver('RightButtonPressEvent', drawingMakeROI, 1.0)
ren = mainWindowUI.vtkInstances[-1].ren
mainWindowUI.vtkInstances[-1]._Iren.ren = ren
# ADD A LIGHT SOURCE TODO: MAKE THIS OPTIONAL/DEFAULT?
lightKit = vtk.vtkLightKit()
lightKit.SetKeyLightIntensity(0.5)
# TODO: SOME OPTIONS TO EXPLORE
#lightKit.MaintainLuminanceOn()
#lightKit.SetKeyLightIntensity(1.0)
## warmth of the lights
#lightKit.SetKeyLightWarmth(0.65)
#lightKit.SetFillLightWarmth(0.6)
#lightKit.SetHeadLightWarmth(0.45)
## intensity ratios
## back lights will be very dim
lightKit.SetKeyToFillRatio(1.)
lightKit.SetKeyToHeadRatio(2.)
lightKit.SetKeyToBackRatio(1.)
lightKit.AddLightsToRenderer(ren)
ren.AddActor(actor)
ren.SetBackground(1,1,1)
ren.ResetCamera()
ren.Render()
mainWindowUI.vtkInstances[-1].Render()
# lets put some of the data objects in the scope of the
# main window so that they can be manipulated later.
mainWindowUI.vtkInstances[-1].curr_actor = actor
mainWindowUI.vtkInstances[-1].curr_smoother = smooth
mainWindowUI.vtkInstances[-1].curr_polydata = polyData
mainWindowUI.vtkInstances[-1].curr_mapper = pdm
mainWindowUI.vtkInstances[-1].curr_camera = ren.GetActiveCamera()
# and the raw mesh coordinate data.. why not
mainWindowUI.vtkInstances[-1].curr_points = points
mainWindowUI.vtkInstances[-1].curr_polys = polys
mainWindowUI.vtkInstances[-1].curr_scalars = curr_scalars #Deep copied
# turns out that later processes access the inherited renderwindowinteractor (?)
# so lets put all the above in the scope of that too
mainWindowUI.vtkInstances[-1]._Iren.curr_actor = actor
mainWindowUI.vtkInstances[-1]._Iren.curr_smoother = smooth
mainWindowUI.vtkInstances[-1]._Iren.curr_polydata = polyData
mainWindowUI.vtkInstances[-1]._Iren.curr_mapper = pdm
mainWindowUI.vtkInstances[-1]._Iren.curr_camera = ren.GetActiveCamera()
mainWindowUI.vtkInstances[-1]._Iren.curr_points = points
mainWindowUI.vtkInstances[-1]._Iren.curr_polys = polys
mainWindowUI.vtkInstances[-1]._Iren.curr_scalars = curr_scalars #Deep copied
# and so we can access ui controls (e.g. statusbar) from the inherited window
mainWindowUI.vtkInstances[-1]._Iren.parent_ui = mainWindowUI
def KeyPress(obj, evt):
key = obj.GetKeySym()
if key == 'l':
currVTKinstance = len(mainWindowUI.vtkInstances)
print(key)
print(mainWindowUI.vtkInstances[currVTKinstance-1])
#let's also track key presses per instance esp for the draw routine :)
mainWindowUI.vtkInstances[-1].AddObserver("KeyPressEvent",KeyPress)
mainWindowUI.tabWidget.setCurrentIndex(len(mainWindowUI.vtkInstances)-1) #zero index
def smoothMesh(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
#lets try to get the apt window
try:
targetVTKWindow = mainWindowUI.vtkInstances[mainWindowUI.vtkDict[theMeshInstance]]
except:
print ('No mesh instance with id:%s currently available - may need a re-synch' %theMeshInstance)
#return error
return 1
# lets show the correct tab
mainWindowUI.tabWidget.setCurrentIndex(int(mainWindowUI.vtkDict[theMeshInstance]))
#mainWindowUI.tabWidget.repaint()
mainWindowUI.tabWidget.update()
#lets get the original data
the_smoother = targetVTKWindow.curr_smoother
the_mapper = targetVTKWindow.curr_mapper
if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars())
if debug: print(targetVTKWindow.curr_actor.GetMapper().GetInput().GetPointData().GetScalars().GetTuple(1000))
#expecting a string that reads something like 'iterations,200,relaxationfactor,1.2'
# sanity check
if ('iterations' in commandArgs[0]) and ('relaxationfactor' in commandArgs[0]):
smoothingArgs = commandArgs[0].strip().split(',')
iterations = int(smoothingArgs[1])
relaxationfactor = float(smoothingArgs[3])
else:
return "error - expecting vertices, then curvature, then triangles!"
if debug: print 'starting smoothing callback'
newActor = VTK_smoothing(the_smoother, the_mapper, iterations, relaxationfactor)
if debug: print 'smoothing callback returned new actor'
if debug: print 'removing old actor'
targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
if debug: print 'adding new actor'
targetVTKWindow.ren.AddActor(newActor)
if debug: print 'added new actor - changing curr actor pointer'
targetVTKWindow.curr_actor = newActor #lets keep track
if debug: print 'trying to update '
# run mesh update to reset the color map (smoothing "messes" this up)
updateMeshData(theMeshInstance, [], mainWindowUI, the_TCPserver)
if debug: print 'update completed'
#return success
return 0
def updateMeshData(theMeshInstance, commandArgs, mainWindowUI, the_TCPserver):
# here the base mesh is already loaded and we are simply updating with the
# current View settings in from the vista session WITH THE COLOR VALUES FROM
# VISTA - i.e. do not go through a lookuptable
#lets try to get the apt window
try:
targetVTKWindow = mainWindowUI.vtkInstances[mainWindowUI.vtkDict[theMeshInstance]]
except:
print ('No mesh instance with id:%s currently available - may need a re-synch' %theMeshInstance)
#return error
return 1
# lets show the correct tab
mainWindowUI.tabWidget.setCurrentIndex(int(mainWindowUI.vtkDict[theMeshInstance])) #zero index
#mainWindowUI.tabWidget.repaint()
mainWindowUI.tabWidget.update()
#lets get the original data
the_polyData = targetVTKWindow.curr_polydata
the_mapper = targetVTKWindow.curr_mapper
#first get all the data we are expecting from the server
## NB this assumes that the order of sending by the server is
# 1) r_vector - red component
# 2) g_vector - green component
# 3) b_vector - blue component
# 4) a_vector - alpha component
if debug:
print('received request for UPDATE DIRECT mesh with Args:')
print(commandArgs)
if len(commandArgs) != 0 : #new data has come from MATLAB so recompute
# load the surface colour data
rVecArgs = commandArgs[0].strip().split(',')
r_vec = unpackData(rVecArgs[1], int(rVecArgs[2]), the_TCPserver)
r_vec = array(r_vec,'uint8')
if debug: print(r_vec)
gVecArgs = commandArgs[1].strip().split(',')
g_vec = unpackData(gVecArgs[1], int(gVecArgs[2]), the_TCPserver)
g_vec = array(g_vec,'uint8')
bVecArgs = commandArgs[2].strip().split(',')
b_vec = unpackData(bVecArgs[1], int(bVecArgs[2]), the_TCPserver)
b_vec = array(b_vec,'uint8')
aVecArgs = commandArgs[3].strip().split(',')
a_vec = unpackData(aVecArgs[1], int(aVecArgs[2]), the_TCPserver)
a_vec = array(a_vec,'uint8')
if debug:
print(len(r_vec))
print(len(g_vec))
print(len(b_vec))
print(len(a_vec))
#combine into numpy array
colorDat = squeeze(array(squeeze([r_vec,g_vec,b_vec,a_vec]),'B',order='F').transpose())
# convert this to a VTK unsigned char array
vtkColorArray = numpy_support.numpy_to_vtk(colorDat,0)
# keep a "deep" copy - this is to workaround some artifacts generated
# by vtk algorithms (e.g. smoothing) that also smooth the color data
# on the surface and then automatically update the inherited color map
# - we allow vtk to do this but then overwrite the recomputed color
# map AFTER the algorithms have run
deepCopyScalars = vtk.vtkUnsignedCharArray()
deepCopyScalars.DeepCopy(vtkColorArray)
targetVTKWindow.curr_scalars = deepCopyScalars
#TODO - this may have impact on later processing - investigate
else:
# no new data from MATLAB, probably just an internal re-draw call
# after something like smoothing - just grab the current deep
# copy of the required scalars
vtkColorArray = targetVTKWindow.curr_scalars
# OK - we have the data - let's update the mesh
newActor = VTK_updateMesh(targetVTKWindow, vtkColorArray, mainWindowUI)
targetVTKWindow.ren.AddActor(newActor)
targetVTKWindow.ren.RemoveActor(targetVTKWindow.curr_actor)
targetVTKWindow.curr_actor = newActor #lets keep track
targetVTKWindow.ren.Render()
targetVTKWindow.Render()
print('success with direct mesh update routine')
#return success
return 0
## --------------------------------------------------------------------------------
# test example animation
def rotateMeshAnimation(currVTKInstance, commandArgs, mainWindowUI, the_TCPserver):
#rotation args
rotations = commandArgs[0].strip().split(',')
rotations = unpackData(rotations[1], int(rotations[2]), the_TCPserver)
if debug: print(rotations)
targetVTKWindow = mainWindowUI.vtkInstances[int(currVTKInstance)] #NB zero indexing
camera = targetVTKWindow.ren.GetActiveCamera()
if debug: print(camera)
for i in range(len(rotations)):
camera.Azimuth(rotations[i])
#targetVTKWindow.ren.Render()
targetVTKWindow.iren.Render()
time.sleep(0.02)
the_TCPserver.socket.write(str('send useful message back here TODO'))
## --------------------------------------------------------------------------------
|
|
#!/usr/bin/env python
"""Command line tool for merging PRs."""
import collections
import pathlib
import sys
import textwrap
import click
import plumbum
import requests
from plumbum import cmd
IBIS_HOME = pathlib.Path(__file__).parent.parent
GITHUB_API_BASE = "https://api.github.com/repos/ibis-project/ibis"
git = cmd.git["-C", IBIS_HOME]
def merge_pr(
pr_num: int,
base_ref: str,
target_ref: str,
commit_title: str,
body: str,
pr_repo_desc: str,
original_head: str,
remote: str,
merge_method: str,
github_user: str,
password: str,
) -> None:
"""Merge a pull request."""
git_log = git[
"log",
"{remote}/{target_ref}..{base_ref}".format(
remote=remote, target_ref=target_ref, base_ref=base_ref
),
]
commit_authors = git_log["--pretty=format:%an <%ae>"]().splitlines()
author_count = collections.Counter(commit_authors)
distinct_authors = [author for author, _ in author_count.most_common()]
commits = git_log["--pretty=format:%h [%an] %s"]().splitlines()
merge_message_pieces = []
if body:
merge_message_pieces.append("\n".join(textwrap.wrap(body)))
merge_message_pieces.extend(map("Author: {}".format, distinct_authors))
# The string "Closes #{pull_request_number:d}" is required for GitHub to
# correctly close the PR
merge_message_pieces.append(
(
"\nCloses #{pr_num:d} from {pr_repo_desc} and squashes the "
"following commits:\n"
).format(pr_num=pr_num, pr_repo_desc=pr_repo_desc)
)
merge_message_pieces += commits
commit_message = "\n".join(merge_message_pieces)
resp = requests.put(
"{GITHUB_API_BASE}/pulls/{pr_num:d}/merge".format(
GITHUB_API_BASE=GITHUB_API_BASE, pr_num=pr_num
),
json={
'commit_title': commit_title,
'commit_message': commit_message,
'merge_method': merge_method,
},
auth=(github_user, password),
)
status_code = resp.status_code
if status_code == 200:
resp_json = resp.json()
assert resp_json["merged"]
click.echo(resp_json["message"])
elif status_code == 405 or status_code == 409:
resp_json = resp.json()
raise click.ClickException(resp_json["message"])
else:
resp.raise_for_status()
@click.command()
@click.option(
"-p",
"--pull-request-number",
type=int,
prompt="Which pull request would you like to merge? (e.g., 34)",
help="The pull request number to merge.",
)
@click.option(
"-M",
"--merge-method",
type=click.Choice(("merge", "squash", "rebase")),
default="squash",
help="The method to use for merging the PR.",
show_default=True,
)
@click.option(
"-r",
"--remote",
default="upstream",
help="A valid git remote.",
show_default=True,
)
@click.option("-u", "--github-user", help="Your GitHub user name.")
@click.option(
"-P",
"--password",
help="Your GitHub password for authentication and authorization.",
)
def main(
pull_request_number: int,
merge_method: str,
remote: str,
github_user: str,
password: str,
) -> None: # noqa: D103
try:
git["fetch", remote]()
except plumbum.commands.processes.ProcessExecutionError as e:
raise click.ClickException(e.stderr)
try:
git[
"fetch",
remote,
"pull/{pull_request_number:d}/head".format(
pull_request_number=pull_request_number
),
]()
except plumbum.commands.processes.ProcessExecutionError as e:
raise click.ClickException(e.stderr)
original_head = git["rev-parse", "--abbrev-ref", "HEAD"]().strip()
if not original_head:
original_head = git["rev-parse", "HEAD"]().strip()
resp = requests.get(
"{GITHUB_API_BASE}/pulls/{pull_request_number:d}".format(
GITHUB_API_BASE=GITHUB_API_BASE,
pull_request_number=pull_request_number,
)
)
if resp.status_code == 404:
pr_json = resp.json()
message = pr_json.get("message", None)
if message is not None:
raise click.ClickException(
"PR {pull_request_number:d} does not exist.".format(
pull_request_number=pull_request_number
)
)
else:
resp.raise_for_status()
pr_json = resp.json()
# no-op if already merged
if pr_json["merged"]:
click.echo(
"#{pr_num:d} already merged. Nothing to do.".format(
pr_num=pull_request_number
)
)
sys.exit(0)
if not pr_json["mergeable"]:
raise click.ClickException(
(
"Pull request #{pr_num:d} cannot be merged in its current "
"form. See "
"https://github.com/ibis-project/ibis/pulls/{pr_num:d} for "
"more details."
).format(pr_num=pull_request_number)
)
url = pr_json["url"]
commit_title = pr_json["title"]
body = pr_json["body"]
target_ref = pr_json["base"]["ref"]
user_login = pr_json["user"]["login"]
base_ref = pr_json["head"]["ref"]
pr_repo_desc = "{user_login}/{base_ref}".format(
user_login=user_login, base_ref=base_ref
)
click.echo(
"=== Pull Request #{pull_request_number:d} ===".format(
pull_request_number=pull_request_number
)
)
click.echo(
(
"title\t{commit_title}\n"
"source\t{pr_repo_desc}\n"
"target\t{remote}/{target_ref}\n"
"url\t{url}"
).format(
commit_title=commit_title,
pr_repo_desc=pr_repo_desc,
remote=remote,
target_ref=target_ref,
url=url,
)
)
base_ref_commit = (
git[
"ls-remote",
remote,
"refs/pull/{pull_request_number:d}/head".format(
pull_request_number=pull_request_number
),
]()
.strip()
.split()[0]
)
merge_pr(
pull_request_number,
base_ref_commit,
target_ref,
commit_title,
body,
pr_repo_desc,
original_head,
remote,
merge_method,
github_user,
password,
)
if __name__ == "__main__":
main()
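# Illustrative invocation (script name and values are hypothetical):
#   python merge_pr.py --pull-request-number 1234 --merge-method squash \
#       --remote upstream --github-user someuser --password <token-or-password>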
|
|
"""bot - Bot Module
This module defines the Bot Component which connects to an IRC
Network and reacts to IRC Events. The Bot Component consists
of the TCPClient and IRC Components.
"""
import re
from socket import gethostname
from traceback import format_exc
from circuits.net.events import connect
from circuits.net.sockets import TCPClient
from circuits import handler, BaseComponent
from circuits.protocols.irc import IRC, NICK, NOTICE, PASS, PRIVMSG, USER, ERR_NICKNAMEINUSE
from cidict import cidict
import kdb
from .utils import log
from .events import cmd
from .plugin import BasePlugin
def wrapvalue(command, event, value):
if value is None:
return []
return value if isinstance(value, list) else [value]
class Bot(BaseComponent):
channel = "bot"
def init(self, data, config):
self.data = data
self.config = config
self.terminate = False
self.host = self.config["host"]
self.port = self.config["port"]
self.auth = {
"host": gethostname(),
"server": self.host,
"nick": self.config["nick"],
"ident": kdb.__name__,
"name": self.config.get("settings", {}).get("name", kdb.__description__),
}
if "password" in self.config:
self.auth["password"] = self.config["password"]
# command -> plugin
self.command = cidict()
# plugin name -> commands
self.commands = cidict()
# plugin name -> plugin
self.plugins = cidict()
self.data.init(
{
"state": {
"host": self.auth["host"],
"server": self.auth["server"],
"nick": self.auth["nick"],
"ident": self.auth["ident"],
"name": self.auth["name"],
}
}
)
self.transport = TCPClient(channel=self.channel).register(self)
self.protocol = IRC(channel=self.channel).register(self)
def is_addressed(self, source, target, message):
nick = self.data.state["nick"]
if nick is None:
return False, target, message
match = re.match("^@?{0:s}[,: ]*(.*)$(?i)".format(nick), message)
if target.lower() == nick.lower() or match is not None:
if match is not None:
message = match.group(1)
if target.lower() == nick.lower():
return True, source[0], message
else:
return True, target, message
else:
return False, target, message
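# Illustrative behaviour of is_addressed() (comment-only example, assuming the
# bot's current nick is "kdb"):
#   source=("alice",...), target="#chan", message="kdb: help"  -> (True, "#chan", "help")
#   source=("alice",...), target="kdb",   message="help"       -> (True, "alice", "help")
#   source=("alice",...), target="#chan", message="hello all"  -> (False, "#chan", "hello all")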
@handler("registered", channel="*")
def _on_registered(self, component, manager):
if component.channel == "commands":
for event in component.events():
if event not in self.command:
self.command[event] = component
if component.parent.name in self.commands:
events = self.commands[component.parent.name]
events = events.union(component.events())
self.commands[component.parent.name] = events
else:
self.commands[component.parent.name] = set(component.events())
if isinstance(component, BasePlugin):
if component.name not in self.plugins:
self.plugins[component.name] = component
@handler("unregistered", channel="*")
def _on_unregistered(self, component, manager):
if component.channel == "commands":
for event in component.events():
if event in self.command:
del self.command[event]
if isinstance(component, BasePlugin):
if component.name in self.commands:
del self.commands[component.name]
if component.name in self.plugins:
del self.plugins[component.name]
@handler("ready")
def _on_ready(self, component):
self.fire(connect(self.host, self.port))
@handler("connected")
def _on_connected(self, host, port=None):
if "password" in self.auth:
self.fire(PASS(self.auth["password"]))
auth = self.auth.get
ident = auth("ident")
host = auth("host")
server = auth("server")
name = auth("name")
self.fire(USER(ident, host, server, name))
nick = auth("nick")
self.fire(NICK(nick))
@handler("disconnected")
def _on_disconnected(self):
if self.terminate:
raise SystemExit(0)
@handler("terminate")
def _on_terminate(self):
self.terminate = True
@handler("nick")
def _on_nick(self, source, newnick):
if source[0].lower() == self.data.state["nick"].lower():
self.data["state"]["nick"] = newnick
@handler("numeric")
def _on_numeric(self, source, numeric, *args):
if numeric == ERR_NICKNAMEINUSE:
newnick = "{0:s}_".format(args[1])
self.data["state"]["nick"] = newnick
self.fire(NICK(newnick))
@handler("privmsg", "notice")
def _on_privmsg_or_notice(self, event, source, target, message):
addressed, target, message = self.is_addressed(
source, target, message
)
Reply = PRIVMSG if event.name == "privmsg" else NOTICE
if addressed:
tokens = message.split(" ", 1)
command = tokens[0].encode("utf-8").lower()
args = (len(tokens) > 1 and tokens[1]) or ""
if command not in self.command:
msg = log("Unknown Command: {0:s}", command)
self.fire(Reply(target, msg))
else:
event = cmd.create(command, source, target, args)
try:
value = yield self.call(event, "commands")
if value.errors:
etype, evalue, etraceback = value.value
msg = log(
"ERROR: {0:s}: ({1:s})", evalue, repr(message)
)
log(format_exc())
self.fire(Reply(target, msg))
else:
for msg in wrapvalue(command, event, value.value):
self.fire(Reply(target, msg))
except Exception as error:
msg = log("ERROR: {0:s}: ({1:s})", error, repr(message))
log(format_exc())
self.fire(Reply(target, msg))
|
|
# -*- coding: utf-8 -*-
import io
import textwrap
import string
import pytest
import decimal
import canmatrix.formats.dbc
def test_long_signal_name_imports():
long_signal_name = u'FAILURE_ZELL_UNTERTEMPERATUR_ENTLADEN_ALARM_IDX_01'
assert len(long_signal_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 testFrame1: 1 TEST_ECU
SG_ someShortenedDummyName: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemSignalLongSymbol" SG_ 1 someShortenedDummyName "{}";
''').format(long_signal_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert matrix.frames[0].signals[0].name == long_signal_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
long_name_found = False
name_found = False
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("SG_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[5][1:-2] == long_signal_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_create_define():
defaults = {}
test_string = canmatrix.formats.dbc.create_define("my_data_type", canmatrix.Define('ENUM "A","B"'), "BA_", defaults)
assert test_string == 'BA_DEF_ BA_ "my_data_type" ENUM "A","B";\n'
def test_create_attribute_string():
test_string = canmatrix.formats.dbc.create_attribute_string("my_attribute", "BO_", "name", "value", True)
assert test_string == 'BA_ "my_attribute" BO_ name "value";\n'
test_string = canmatrix.formats.dbc.create_attribute_string("my_attribute", "BO_", "name", 1.23, False)
assert test_string == 'BA_ "my_attribute" BO_ name 1.23;\n'
def test_create_comment_string():
test_string = canmatrix.formats.dbc.create_comment_string("BO_", "ident", "some comment", "utf8", "utf8", "")
assert test_string == b'CM_ BO_ ident "some comment";\n'
def test_parse_comment_from_dbc():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 someFrame: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
CM_ SG_ 1 someSignal "resistance setting (0-100%)" ;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert matrix.frames[0].signals[0].comment == "resistance setting (0-100%)"
def test_parse_multi_line_comment():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 someFrame: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
CM_ SG_ 1 someSignal "Debug request message from the ECU to the BMS.
** ignore for now, more definition to be provided in Rev 14 regarding which messages to change if we have this debug flag implemented. " ;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert matrix.frames[0].signals[0].comment == 'Debug request message from the ECU to the BMS.\n** ignore for now, more definition to be provided in Rev 14 regarding which messages to change if we have this debug flag implemented. '
def test_long_frame_name_imports():
long_frame_name = u'A_VERY_LONG_FRAME_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_frame_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 shortendeFrameName: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemMessageLongSymbol" BO_ 1 "{}";
''').format(long_frame_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
long_name_found = False
name_found = False
assert matrix.frames[0].name == long_frame_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("BO_"):
assert len(line.split()[2][:-1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[4][1:-2] == long_frame_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_long_ecu_name_imports():
long_ecu_name = u'A_VERY_LONG_ECU_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_ecu_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: SoMEShortenedEcuName
BO_ 1 testFrame1: 1 SoMEShortenedEcuName
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemNodeLongSymbol" BU_ SoMEShortenedEcuName "{}";
''').format(long_ecu_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
long_name_found = False
name_found = False
assert matrix.ecus[0].name == long_ecu_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("BU_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[4][1:-2] == long_ecu_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_long_envvar_name_imports():
long_envvar_name = u'A_VERY_LONG_ENVIROMENT_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_envvar_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 frameName: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
EV_ someShortendEnvVar: 0 [0|0] "" 0 2 DUMMY_NODE_VECTOR0 Vector__XXX;
BA_ "SystemEnvVarLongSymbol" EV_ someShortendEnvVar "{}";
''').format(long_envvar_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert list(matrix.env_vars)[0] == long_envvar_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
long_name_found = False
name_found = False
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("EV_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[3][1:-2] == long_envvar_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_enum_with_comma():
dbc = io.BytesIO(textwrap.dedent(u'''\
BA_DEF_ "example0" ENUM "Val1",",";
BA_DEF_ BO_ "example1" ENUM "Val 1","vector_leerstring",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~";
BA_DEF_ SG_ "example2" ENUM "Val1",",";
BA_DEF_ EV_ "example3" ENUM "Val1",",";
BA_DEF_ BU_ "example4" ENUM "Val1",",";
BA_DEF_DEF_ "example0" ",";
BA_DEF_DEF_ "example1" ",";
BA_DEF_DEF_ "example2" ",";
BA_DEF_DEF_ "example3" ",";
BA_DEF_DEF_ "example4" ",";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frame_defines[u'example1'].values == ["Val 1", "", ""] + list(" '()[]/-|{};:<>.?!@#$%^&=`~")
assert matrix.signal_defines[u'example2'].values == ['Val1', ',']
assert matrix.ecu_defines[u'example4'].values == ['Val1', ',']
@pytest.mark.parametrize(
'character',
[
['{}'.format(c if c != '"' else '\\"')]
for c in string.punctuation
],
)
def test_enum_with_special_character(character):
dbc = io.BytesIO(textwrap.dedent(u'''\
BA_DEF_ BO_ "example1" ENUM "Val 1","{}";
''').format(character[0]).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frame_defines[u'example1'].values == ["Val 1", character[0]]
def test_export_of_unknown_defines():
db = canmatrix.CanMatrix()
db.add_frame_defines("Receivable", 'BOOL False True')
db.add_frame_defines("Sendable", 'BOOL False True')
for (dataType, define) in db.frame_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
db.add_signal_defines("LongName", 'STR')
for (dataType, define) in db.signal_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
frame = canmatrix.Frame("someFrame")
signal = canmatrix.Signal("SomeSignal")
signal.add_attribute("LongName", "EnableCalcIDCTrip Calc. IDC trip")
frame.add_signal(signal)
db.add_frame(frame)
db.add_ecu_defines("someName", 'STRING')
for (dataType, define) in db.ecu_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition == define.definition
db.add_global_defines("someGlobaName", 'BOOL')
for (dataType, define) in db.global_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.startswith("BA_DEF_ "):
assert line.endswith("STRING;")
if line.startswith("BA_ "):
assert line.endswith('";')
def test_braces_in_attributes():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 20 frameName: 1 someEcu
SG_ sometext: 1|2@0+ (1,0) [0|0] "" someOtherEcu
BA_ "Signal Age [ms]" SG_ 20 sometext 5000;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
def test_defines_with_spaces():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 123 someFrame: 1 someOtherEcu
EV_ someEnvVar: 0 [0|0] "" 0 2 DUMMY_NODE_VECTOR0 Vector__XXX;
BA_DEF_ BU_ "Node Address" INT 0 255;
BA_DEF_ BO_ "Period [ms]" INT 0 5000;
BA_DEF_ BU_ "Description X" STRING;
BA_DEF_ EV_ "some attrib" STRING;
BA_ "Node Address" BU_ someOtherEcu 42;
BA_ "Description X" BU_ someOtherEcu "Some Some Text";
BA_ "Period [ms]" BO_ 123 3000;
BA_ "some attrib" EV_ someEnvVar "some space";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.ecu_defines["Node Address"].type == "INT"
assert matrix.ecu_defines["Node Address"].min == 0
assert matrix.ecu_defines["Node Address"].max == 255
assert matrix.frame_defines["Period [ms]"].min == 0
assert matrix.frame_defines["Period [ms]"].max == 5000
assert matrix.frames[0].attributes["Period [ms]"] == '3000'
assert matrix.env_vars["someEnvVar"]["attributes"]["some attrib"] == '"some space"'
assert matrix.ecus[0].attributes["Description X"] == "Some Some Text"
def test_writing_complex_multiplex():
db = canmatrix.CanMatrix()
frame = canmatrix.Frame("someFrame")
frame.is_complex_multiplexed = True
signal = canmatrix.Signal("mx")
signal.mux_val_max = 5
signal.mux_val_min = 1
signal.muxer_for_signal = 4
frame.add_signal(signal)
db.add_frame(frame)
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc")
    # The written DBC must contain an SG_MUL_VAL_ entry for the complex
    # multiplexed signal.
    assert any("SG_MUL_VAL" in line
               for line in outdbc.getvalue().decode('utf8').split('\n'))
def test_defines_with_special_chars():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 123 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ SG_ "Accuracy" STRING;
BA_ "Accuracy" SG_ 123 someSignal "+/- 10.2 at 55.1%";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].signals[0].attributes["Accuracy"] == "+/- 10.2 at 55.1%"
def test_j1939_frametype():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 2147483648 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ BO_ "VFrameFormat" ENUM "StandardCAN","ExtendedCAN","J1939PG";
BA_ "VFrameFormat" BO_ 2147483648 2;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].is_j1939 == True
# negative test
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 2147483648 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ BO_ "VFrameFormat" ENUM "StandardCAN","ExtendedCAN","J1939PG";
BA_ "VFrameFormat" BO_ 2147483648 0;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].is_j1939 == False
def test_attributes_with_spaces_before_semicolon():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 8 Frame_1: 8 Vector__XXX
BO_ 9 Frame_2: 8 Vector__XXX
BA_DEF_ BO_ "someAttribute" STRING ;
BA_ "someAttribute" BO_ 8 "str" ;
BA_DEF_DEF_ "someAttribute" "asd" ;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].attributes["someAttribute"] == 'str'
assert matrix.frames[1].attribute("someAttribute", matrix) == 'asd'
def test_cycle_time_handling():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 17 Frame_1: 8 Vector__XXX
SG_ sig2 : 8|8@1- (1,0) [0|0] "" Vector__XXX
SG_ sig1 : 0|8@1- (1,0) [0|0] "" Vector__XXX
BA_DEF_ BO_ "GenMsgCycleTime" INT 0 3600000;
BA_DEF_ SG_ "GenSigCycleTime" INT 0 3600000;
BA_DEF_DEF_ "GenMsgCycleTime" 0;
BA_DEF_DEF_ "GenSigCycleTime" 0;
BA_ "GenMsgCycleTime" BO_ 17 100;
BA_ "GenSigCycleTime" SG_ 17 sig2 20;
BA_ "GenSigCycleTime" SG_ 17 sig1 10;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].cycle_time == 100
assert matrix.frames[0].signal_by_name("sig1").cycle_time == 10
assert matrix.frames[0].signal_by_name("sig2").cycle_time == 20
# assert "GenMsgCycleTime" not in matrix.frame_defines
# assert "GenSigCycleTime" not in matrix.signal_defines
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
assert 'BA_ "GenMsgCycleTime" BO_ 17 100;' in outdbc.getvalue().decode('utf8')
assert 'BA_ "GenSigCycleTime" SG_ 17 sig2 20;' in outdbc.getvalue().decode('utf8')
assert 'BA_ "GenSigCycleTime" SG_ 17 sig1 10;' in outdbc.getvalue().decode('utf8')
outdbc = io.BytesIO()
canmatrix.formats.dump({"aa":matrix}, outdbc, "kcd")
def test_keep_cycle_time_defines():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 17 Frame_1: 8 Vector__XXX
SG_ sig1 : 0|8@1- (1,0) [0|0] "" Vector__XXX
BA_DEF_ BO_ "GenMsgCycleTime" INT 0 50000 ;
BA_DEF_DEF_ "GenMsgCycleTime" 0 ;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
assert 'BA_DEF_ BO_ "GenMsgCycleTime" INT 0 50000' in outdbc.getvalue().decode('utf8')
assert 'BA_DEF_DEF_ "GenMsgCycleTime" 0' in outdbc.getvalue().decode('utf8')
def test_unique_signal_names():
db = canmatrix.CanMatrix()
frame = canmatrix.Frame("some Frame")
frame.add_signal(canmatrix.Signal("signal_name", size=1, start_bit=1))
frame.add_signal(canmatrix.Signal("signal_name", size=2, start_bit=9))
db.add_frame(frame)
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc")
assert "signal_name0" in outdbc.getvalue().decode('utf8')
assert "signal_name1" in outdbc.getvalue().decode('utf8')
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc", dbcUniqueSignalNames=False)
assert "signal_name0" not in outdbc.getvalue().decode('utf8')
assert "signal_name1" not in outdbc.getvalue().decode('utf8')
assert "signal_name" in outdbc.getvalue().decode('utf8')
def test_signal_initial_value():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 17 Frame_1: 8 Vector__XXX
SG_ sig1 : 0|8@1- (1,0) [0|0] "" Vector__XXX
BA_DEF_ SG_ "GenSigStartValue" FLOAT 0 100000000000;
BA_ "GenSigStartValue" SG_ 17 sig1 2.7;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].signal_by_name("sig1").initial_value == decimal.Decimal("2.7")
# assert "GenSigStartValue" not in matrix.signal_defines
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
assert 'BA_ "GenSigStartValue" SG_ 17 sig1 2.7;' in outdbc.getvalue().decode('utf8')
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient checker for any ops, graphs.
The gradient checker verifies numerically that an op/graph properly
computes the gradients
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.platform import tf_logging as logging
def _product(t):
if isinstance(t, int):
return t
else:
y = 1
for x in t:
y *= x
return y
def _extra_feeds(extra_feed_dict, new_feeds):
if not extra_feed_dict:
return new_feeds
r = {}
r.update(extra_feed_dict)
r.update(new_feeds)
return r
def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,
extra_feed_dict):
"""Computes the theoretical Jacobian for dy/dx.
Computes the theoretical Jacobian using the ops generated by
compute_gradient().
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
    x_data: a numpy array as the input data for x
dy: the tensor "dy".
dy_shape: the dimensions of dy as a tuple or an array of ints.
dx: Tensor or IndexedSlices representing dx
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "dy_size" columns where "x_size" is the number of elements in x and
"dy_size" is the number of elements in dy.
Raises:
ValueError: If `dy` is empty but the gradient is nonzero.
"""
# Complex vectors are treated as vectors of twice as many reals.
if x.dtype.is_complex:
x_shape = tuple(x_shape) + (2,)
  dy_factor = 2 if dy.dtype.is_complex else 1
# To compute the jacobian, we treat x and y as one-dimensional vectors.
x_size = _product(x_shape)
x_val_size = _product(x_shape[1:]) # This is used for sparse gradients
dy_size = _product(dy_shape) * dy_factor
# Allocate 2-D Jacobian, with x dimensions smashed into the first
# dimension and y dimensions smashed into the second.
jacobian = np.zeros((x_size, dy_size),
dtype=x.dtype.real_dtype.as_numpy_dtype)
  # For each entry of dy, we set it to 1 and everything else to 0, then
  # compute the backprop -- this gives us one column of the Jacobian matrix.
  dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)
  dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)
sess = ops.get_default_session()
for col in range(dy_size):
dy_data_flat[col] = 1
if isinstance(dx, ops.IndexedSlices):
backprop_indices, backprop_values = sess.run(
[dx.indices, dx.values],
feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
for i, v in zip(backprop_indices, backprop_values):
r_begin = i * x_val_size
r_end = r_begin + x_val_size
jacobian[r_begin:r_end, col] += v.flat
else:
assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
jacobian[:, col] = backprop.ravel().view(jacobian.dtype)
dy_data_flat[col] = 0
# If the output is empty, run the gradients at least once and make sure
# they produce zeros.
if not dy_size:
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
if backprop.shape != x_data.shape:
raise ValueError("Empty gradient has wrong shape: expected %s, got %s" %
(x_data.shape, backprop.shape))
if np.any(backprop):
raise ValueError("Empty tensor with nonzero gradients")
logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
return jacobian
def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
extra_feed_dict):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
y: the tensor "y".
y_shape: the dimensions of y as a tuple or an array of ints.
delta: the amount of perturbation we give to the input
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in x and
"y_size" is the number of elements in y.
"""
# To compute the jacobian, we treat x and y as one-dimensional vectors
x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1)
y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1)
x_dtype = x.dtype.real_dtype.as_numpy_dtype
y_dtype = y.dtype.real_dtype.as_numpy_dtype
# Make sure we have the right types
x_data = np.asarray(x_data, dtype=x.dtype.as_numpy_dtype)
scale = np.asarray(2 * delta, dtype=y_dtype)[()]
jacobian = np.zeros((x_size, y_size), dtype=x_dtype)
  # For each entry of x, we slightly perturb it by adding and subtracting a
  # delta, then compute the difference between the outputs. This gives us one
  # row of the Jacobian matrix.
for row in range(x_size):
x_pos = x_data.copy()
x_neg = x_data.copy()
x_pos.ravel().view(x_dtype)[row] += delta
y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos}))
x_neg.ravel().view(x_dtype)[row] -= delta
y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg}))
diff = (y_pos - y_neg) / scale
jacobian[row, :] = diff.ravel().view(y_dtype)
logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
return jacobian
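# For reference, the perturbation loop above is a central difference per input
# entry: jacobian[row, :] = (y(x + delta * e_row) - y(x - delta * e_row)) / (2 * delta).
# A standalone numpy sketch of the same idea (illustrative only; f, v and the
# shapes are made up for this example):
#
#   f = lambda v: np.array([v[0] ** 2, v[0] * v[1]])
#   v, delta = np.array([1.0, 2.0]), 1e-3
#   jac = np.zeros((2, 2))
#   for row in range(2):
#       e = np.zeros(2); e[row] = delta
#       jac[row, :] = (f(v + e) - f(v - e)) / (2 * delta)
#   # jac is approximately [[2., 2.], [0., 1.]]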
def _compute_dx_and_dy(x, y, y_shape):
"""Returns a node to compute gradient of x wrt y."""
# We make up a dy so that we can compute the gradients. We don't really use
# the value of dy -- we will always feed it. We need to add an identity node
# so that we can always feed it properly. Otherwise, for the Add operation,
# dx is the same as dy and we cannot fetch the tensor that we are feeding.
with x.graph.as_default():
    dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
dy = array_ops.identity(dy_orig)
# We compute the gradients for x wrt. y
grads = gradients.gradients([y], x, dy)
assert len(grads) == 1
return grads[0], dy_orig
def _compute_gradient(x,
x_shape,
dx,
y,
y_shape,
dy,
x_init_value=None,
delta=1e-3,
extra_feed_dict=None):
"""Computes the theoretical and numerical jacobian."""
t = dtypes.as_dtype(x.dtype)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128]
assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
if x_init_value is not None:
i_shape = list(x_init_value.shape)
assert (list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
x_shape, i_shape)
x_data = x_init_value
else:
x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
if t.is_complex:
x_data.imag = np.random.random_sample(x_shape)
jacob_t = _compute_theoretical_jacobian(
x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
jacob_n = _compute_numeric_jacobian(
x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
return jacob_t, jacob_n
def _compute_gradient_list(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Compute gradients for a list of x values."""
assert isinstance(x, list)
dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
if x_init_value is None:
x_init_value = [None] * len(x)
ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei,
delta, extra_feed_dict=extra_feed_dict)
for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy,
x_init_value)]
return ret
def compute_gradient(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Computes and returns the theoretical and numerical Jacobian.
If `x` or `y` is complex, the Jacobian will still be real but the
corresponding Jacobian dimension(s) will be twice as large. This is required
  even if both input and output are complex since TensorFlow graphs are not
necessarily holomorphic, and may have gradients not expressible as complex
numbers. For example, if `x` is complex with shape `[m]` and `y` is complex
with shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with
J[:m, :n] = d(Re y)/d(Re x)
J[:m, n:] = d(Im y)/d(Re x)
J[m:, :n] = d(Re y)/d(Im x)
J[m:, n:] = d(Im y)/d(Im x)
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
Two 2-d numpy arrays representing the theoretical and numerical
Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and "y_size" is the
number of elements in y. If x is a list, returns a list of two numpy arrays.
"""
if extra_feed_dict is None:
extra_feed_dict = {}
if isinstance(x, list):
return _compute_gradient_list(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
else:
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
dx, dy = _compute_dx_and_dy(x, y, y_shape)
ret = _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta,
extra_feed_dict=extra_feed_dict)
return ret
def compute_gradient_error(x, x_shape, y, y_shape, x_init_value=None, delta=1e-3, init_targets=None,
extra_feed_dict=None):
"""Computes the gradient error.
Computes the maximum error for dy/dx between the computed Jacobian and the
numerically estimated Jacobian.
This function will modify the tensors passed in as it adds more operations
  and hence changes the consumers of the operations of the input tensors.
This function adds operations to the current session. To compute the error
using a particular device, such as a GPU, use the standard methods for
setting a device (e.g. using with sess.graph.device() or setting a device
function in the session constructor).
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): Remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
The maximum error in between the two Jacobians.
"""
grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
if isinstance(grad, tuple):
grad = [grad]
error = 0
for j_t, j_n in grad:
if j_t.size or j_n.size: # Handle zero size tensors correctly
error = np.maximum(error, np.fabs(j_t - j_n).max())
return error
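# A minimal usage sketch, assuming a TF1-style graph with an active default
# session (the extra import, placeholder shape, and elementwise op below are
# illustrative and not part of this module):
#
#   from tensorflow.python.client import session as session_lib
#
#   with ops.Graph().as_default(), session_lib.Session():
#       x = array_ops.placeholder(dtypes.float32, shape=[2, 3])
#       y = x * x
#       err = compute_gradient_error(x, [2, 3], y, [2, 3])
#       assert err < 1e-3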
|
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from osf.utils import permissions
@pytest.fixture()
def admin():
return AuthUserFactory()
@pytest.fixture()
def base_url():
return '/{}nodes/'.format(API_BASE)
@pytest.fixture()
def read_contrib():
return AuthUserFactory()
@pytest.fixture()
def write_contrib():
return AuthUserFactory()
@pytest.fixture()
def valid_contributors(admin, read_contrib, write_contrib):
return [
admin._id,
read_contrib._id,
write_contrib._id,
]
@pytest.fixture()
def private_node_one(admin, read_contrib, write_contrib):
private_node_one = ProjectFactory(
is_public=False,
creator=admin,
title='Private One')
private_node_one.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
private_node_one.add_contributor(
write_contrib,
permissions=permissions.WRITE,
save=True)
return private_node_one
@pytest.fixture()
def private_node_one_anonymous_link(private_node_one):
private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
private_node_one_anonymous_link.nodes.add(private_node_one)
private_node_one_anonymous_link.save()
return private_node_one_anonymous_link
@pytest.fixture()
def private_node_one_private_link(private_node_one):
private_node_one_private_link = PrivateLinkFactory(anonymous=False)
private_node_one_private_link.nodes.add(private_node_one)
private_node_one_private_link.save()
return private_node_one_private_link
@pytest.fixture()
def private_node_one_url(private_node_one):
return '/{}nodes/{}/'.format(API_BASE, private_node_one._id)
@pytest.fixture()
def private_node_two(admin, read_contrib, write_contrib):
private_node_two = ProjectFactory(
is_public=False,
creator=admin,
title='Private Two')
private_node_two.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
private_node_two.add_contributor(
write_contrib,
permissions=permissions.WRITE,
save=True)
return private_node_two
@pytest.fixture()
def private_node_two_url(private_node_two):
return '/{}nodes/{}/'.format(API_BASE, private_node_two._id)
@pytest.fixture()
def public_node_one(admin, read_contrib, write_contrib):
public_node_one = ProjectFactory(
is_public=True, creator=admin, title='Public One')
public_node_one.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
public_node_one.add_contributor(
write_contrib,
permissions=permissions.WRITE,
save=True)
return public_node_one
@pytest.fixture()
def public_node_one_anonymous_link(public_node_one):
public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
public_node_one_anonymous_link.nodes.add(public_node_one)
public_node_one_anonymous_link.save()
return public_node_one_anonymous_link
@pytest.fixture()
def public_node_one_private_link(public_node_one):
public_node_one_private_link = PrivateLinkFactory(anonymous=False)
public_node_one_private_link.nodes.add(public_node_one)
public_node_one_private_link.save()
return public_node_one_private_link
@pytest.fixture()
def public_node_one_url(public_node_one):
return '/{}nodes/{}/'.format(API_BASE, public_node_one._id)
@pytest.fixture()
def public_node_two(admin, read_contrib, write_contrib):
public_node_two = ProjectFactory(
is_public=True, creator=admin, title='Public Two')
public_node_two.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
public_node_two.add_contributor(
write_contrib,
permissions=permissions.WRITE,
save=True)
return public_node_two
@pytest.fixture()
def public_node_two_url(public_node_two):
return '/{}nodes/{}/'.format(API_BASE, public_node_two._id)
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.usefixtures(
'admin',
'read_contrib',
'write_contrib',
'valid_contributors',
'private_node_one',
'private_node_one_anonymous_link',
'private_node_one_private_link',
'private_node_one_url',
'private_node_two',
'private_node_two_url',
'public_node_one',
'public_node_one_anonymous_link',
'public_node_one_private_link',
'public_node_one_url',
'public_node_two',
'public_node_two_url')
class TestNodeDetailViewOnlyLinks:
def test_private_node(
self, app, admin, read_contrib, valid_contributors,
private_node_one, private_node_one_url,
private_node_one_private_link,
private_node_one_anonymous_link,
public_node_one_url,
public_node_one_private_link,
public_node_one_anonymous_link):
# test_private_node_with_link_works_when_using_link
res_normal = app.get(private_node_one_url, auth=read_contrib.auth)
assert res_normal.status_code == 200
res_linked = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res_linked.status_code == 200
assert res_linked.json['data']['attributes']['current_user_permissions'] == [
permissions.READ]
# Remove any keys that will be different for view-only responses
res_normal_json = res_normal.json
res_linked_json = res_linked.json
user_can_comment = res_normal_json['data']['attributes'].pop(
'current_user_can_comment')
view_only_can_comment = res_linked_json['data']['attributes'].pop(
'current_user_can_comment')
assert user_can_comment
assert not view_only_can_comment
# test_private_node_with_link_unauthorized_when_not_using_link
res = app.get(private_node_one_url, expect_errors=True)
assert res.status_code == 401
# test_private_node_with_link_anonymous_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_private_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id
res = app.get(private_node_one_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
}, auth=admin.auth)
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_public_node_with_link_anonymous_does_not_expose_user_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
embeds = res.json['data'].get('embeds', None)
assert embeds is None or 'contributors' not in embeds
# test_public_node_with_link_non_anonymous_does_expose_contributor_id
res = app.get(public_node_one_url, {
'view_only': public_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_public_node_with_link_unused_does_expose_contributor_id
res = app.get(public_node_one_url, {
'embed': 'contributors',
})
assert res.status_code == 200
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert contributor['id'].split('-')[1] in valid_contributors
# test_view_only_link_does_not_grant_write_permission
payload = {
'data': {
'attributes': {
'title': 'Cannot touch this'},
'id': private_node_one._id,
'type': 'nodes',
}
}
res = app.patch_json_api(private_node_one_url, payload, {
'view_only': private_node_one_private_link.key,
}, expect_errors=True)
assert res.status_code == 401
        # test_view_only_link_from_another_project_does_not_grant_view_permission
res = app.get(private_node_one_url, {
'view_only': public_node_one_private_link.key,
}, expect_errors=True)
assert res.status_code == 401
# test_private_project_logs_with_anonymous_link_does_not_expose_user_id
res = app.get(private_node_one_url + 'logs/', {
'view_only': str(private_node_one_anonymous_link.key),
})
assert res.status_code == 200
body = res.body.decode()
for id in valid_contributors:
assert id not in body
# test_private_project_with_anonymous_link_does_not_expose_registrations_or_forks
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
})
assert res.status_code == 200
attributes = res.json['data']['attributes']
relationships = res.json['data']['relationships']
if 'embeds' in res.json['data']:
embeds = res.json['data']['embeds']
else:
embeds = {}
assert 'current_user_can_comment' not in attributes
assert 'citation' not in relationships
assert 'custom_citation' not in attributes
assert 'node_license' not in attributes
assert 'registrations' not in relationships
assert 'forks' not in relationships
assert 'registrations' not in embeds
assert 'forks' not in embeds
# test_deleted_anonymous_VOL_gives_401_for_unauthorized
private_node_one_anonymous_link.is_deleted = True
private_node_one_anonymous_link.save()
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
}, expect_errors=True)
assert res.status_code == 401
# test_deleted_anonymous_VOL_does_not_anonymize_data_for_authorized
res = app.get(private_node_one_url, {
'view_only': private_node_one_anonymous_link.key,
}, auth=admin.auth)
assert res.status_code == 200
assert 'anonymous' not in res.json['meta']
attributes = res.json['data']['attributes']
relationships = res.json['data']['relationships']
assert 'current_user_can_comment' in attributes
assert 'citation' in relationships
assert 'custom_citation' in attributes
assert 'node_license' in attributes
assert 'forks' in relationships
# test_bad_view_only_link_does_not_modify_permissions
res = app.get(private_node_one_url + 'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, expect_errors=True)
assert res.status_code == 401
res = app.get(private_node_one_url + 'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, auth=admin.auth)
assert res.status_code == 200
# test_view_only_key_in_relationships_links
res = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res.status_code == 200
res_relationships = res.json['data']['relationships']
for key, value in res_relationships.items():
if isinstance(value, list):
for relationship in value:
links = relationship.get('links', {})
if links.get('related', False):
assert private_node_one_private_link.key in links['related']['href']
if links.get('self', False):
assert private_node_one_private_link.key in links['self']['href']
else:
links = value.get('links', {})
if links.get('related', False):
assert private_node_one_private_link.key in links['related']['href']
if links.get('self', False):
assert private_node_one_private_link.key in links['self']['href']
# test_view_only_key_in_self_and_html_links
res = app.get(
private_node_one_url,
{'view_only': private_node_one_private_link.key})
assert res.status_code == 200
links = res.json['data']['links']
assert private_node_one_private_link.key in links['self']
assert private_node_one_private_link.key in links['html']
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
@pytest.mark.usefixtures(
'admin',
'read_contrib',
'write_contrib',
'valid_contributors',
'private_node_one',
'private_node_one_anonymous_link',
'private_node_one_private_link',
'private_node_one_url',
'private_node_two',
'private_node_two_url',
'public_node_one',
'public_node_one_anonymous_link',
'public_node_one_private_link',
'public_node_one_url',
'public_node_two',
'public_node_two_url')
class TestNodeListViewOnlyLinks:
def test_node_list_view_only_links(
self, app, valid_contributors,
private_node_one,
private_node_one_private_link,
private_node_one_anonymous_link,
base_url):
# test_private_link_does_not_show_node_in_list
res = app.get(base_url, {
'view_only': private_node_one_private_link.key,
})
assert res.status_code == 200
nodes = res.json['data']
node_ids = []
for node in nodes:
node_ids.append(node['id'])
assert private_node_one._id not in node_ids
# test_anonymous_link_does_not_show_contributor_id_in_node_list
res = app.get(base_url, {
'view_only': private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
nodes = res.json['data']
assertions = 0
for node in nodes:
embeds = node.get('embeds', None)
assert embeds is None or 'contributors' not in embeds
assertions += 1
assert assertions != 0
# test_non_anonymous_link_does_show_contributor_id_in_node_list
res = app.get(base_url, {
'view_only': private_node_one_private_link.key,
'embed': 'contributors',
})
assert res.status_code == 200
nodes = res.json['data']
assertions = 0
for node in nodes:
contributors = node['embeds']['contributors']['data']
for contributor in contributors:
assertions += 1
assert contributor['id'].split('-')[1] in valid_contributors
assert assertions != 0
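# The contributor-visibility checks above repeat the same loop over embedded
# contributor ids. A helper along these lines could factor it out (a sketch
# only, not part of the OSF test suite; the helper name is made up here):
def _assert_contributor_ids_visible(res, valid_contributors):
    contributors = res.json['data']['embeds']['contributors']['data']
    for contributor in contributors:
        assert contributor['id'].split('-')[1] in valid_contributors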
|
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from graphite.render.attime import parseTimeReference, parseATTime, parseTimeOffset, getUnitString
from datetime import datetime, timedelta
from django.utils import timezone
from django.test import TestCase
import pytz
import mock
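# Each mocked datetime class below subclasses datetime, leaves construction
# untouched via __new__, and overrides now() to return a fixed instant;
# mock.patch then substitutes it for the datetime class used inside
# graphite.render.attime, so the tests run against a frozen "now".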
class MockedDateTime(datetime):
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
@classmethod
def now(cls, tzinfo=None):
return cls(2015, 3, 8, 12, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', MockedDateTime)
class ATTimeTimezoneTests(TestCase):
default_tz = timezone.get_current_timezone()
specified_tz = pytz.timezone("America/Los_Angeles")
def test_should_return_absolute_time(self):
time_string = '12:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz(self):
time_string = '12:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
    def test_absolute_time_YYYYMMDD(self):
time_string = '20150110'
expected_time = self.default_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
def test_midnight(self):
expected_time = self.default_tz.localize(datetime.strptime("0:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight", self.specified_tz)
self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
def test_offset_with_tz(self):
expected_time = self.default_tz.localize(datetime.strptime("5:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+5h", self.specified_tz)
self.assertEqual(actual_time, expected_time.astimezone(self.specified_tz))
def test_relative_day_with_tz(self):
expected_time = self.default_tz.localize(datetime.strptime("0:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_day_and_offset_with_tz(self):
expected_time = self.default_tz.localize(datetime.strptime("3:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_should_return_current_time(self):
expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now")
self.assertEqual(actual_time, expected_time)
def test_now_should_respect_tz(self):
expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_time_in_alternate_zone(self):
expected_time = self.specified_tz.localize(datetime.strptime("04:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("-1h", self.specified_tz)
self.assertEqual(actual_time.hour, expected_time.hour)
def test_should_handle_dst_boundary(self):
expected_time = self.default_tz.localize(datetime.strptime("02:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+2h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
class AnotherMockedDateTime(datetime):
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
@classmethod
def now(cls, tzinfo=None):
return cls(2015, 1, 1, 11, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', AnotherMockedDateTime)
class parseTimeReferenceTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
def test_parse_empty_return_now(self):
time_ref = parseTimeReference('')
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_None_return_now(self):
time_ref = parseTimeReference(None)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_random_string_raise_Exception(self):
with self.assertRaises(Exception):
time_ref = parseTimeReference("random")
def test_parse_now_return_now(self):
time_ref = parseTimeReference("now")
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_colon_raises_ValueError(self):
with self.assertRaises(ValueError):
time_ref = parseTimeReference(":")
def test_parse_hour_return_hour_of_today(self):
time_ref = parseTimeReference("8:50")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_am(self):
time_ref = parseTimeReference("8:50am")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_pm(self):
time_ref = parseTimeReference("8:50pm")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
self.assertEquals(time_ref, expected)
def test_parse_noon(self):
time_ref = parseTimeReference("noon")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
self.assertEquals(time_ref, expected)
def test_parse_midnight(self):
time_ref = parseTimeReference("midnight")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_teatime(self):
time_ref = parseTimeReference("teatime")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
self.assertEquals(time_ref, expected)
def test_parse_yesterday(self):
time_ref = parseTimeReference("yesterday")
expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_tomorrow(self):
time_ref = parseTimeReference("tomorrow")
expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/25/15")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YYYY(self):
time_ref = parseTimeReference("02/25/2015")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20140606")
expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_onedigits(self):
time_ref = parseTimeReference("january8")
expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_twodigits(self):
time_ref = parseTimeReference("january10")
expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
with self.assertRaises(ValueError):
time_ref = parseTimeReference("january800")
def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
with self.assertRaises(Exception):
time_ref = parseTimeReference("january")
def test_parse_monday_return_monday_before_now(self):
time_ref = parseTimeReference("monday")
expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
self.assertEquals(time_ref, expected)
class Bug551771MockedDateTime(datetime):
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
@classmethod
def now(cls, tzinfo=None):
return cls(2010, 3, 30, 00, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', Bug551771MockedDateTime)
class parseTimeReferenceTestBug551771(TestCase):
zone = pytz.utc
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/23/10")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20100223")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
class parseTimeOffsetTest(TestCase):
def test_parse_None_returns_empty_timedelta(self):
time_ref = parseTimeOffset(None)
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_integer_raises_TypeError(self):
with self.assertRaises(TypeError):
time_ref = parseTimeOffset(1)
def test_parse_string_starting_neither_with_minus_nor_digit_raises_KeyError(self):
with self.assertRaises(KeyError):
time_ref = parseTimeOffset("Something")
def test_parse_m_as_unit_raises_Exception(self):
with self.assertRaises(Exception):
time_ref = parseTimeOffset("1m")
def test_parse_digits_only_raises_exception(self):
with self.assertRaises(Exception):
time_ref = parseTimeOffset("10")
def test_parse_alpha_only_raises_KeyError(self):
with self.assertRaises(KeyError):
time_ref = parseTimeOffset("month")
def test_parse_minus_only_returns_zero(self):
time_ref = parseTimeOffset("-")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_plus_only_returns_zero(self):
time_ref = parseTimeOffset("+")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_ten_days(self):
time_ref = parseTimeOffset("10days")
expected = timedelta(10)
self.assertEquals(time_ref, expected)
def test_parse_zero_days(self):
time_ref = parseTimeOffset("0days")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_minus_ten_days(self):
time_ref = parseTimeOffset("-10days")
expected = timedelta(-10)
self.assertEquals(time_ref, expected)
def test_parse_five_seconds(self):
time_ref = parseTimeOffset("5seconds")
expected = timedelta(seconds=5)
self.assertEquals(time_ref, expected)
def test_parse_five_minutes(self):
time_ref = parseTimeOffset("5minutes")
expected = timedelta(minutes=5)
self.assertEquals(time_ref, expected)
def test_parse_five_hours(self):
time_ref = parseTimeOffset("5hours")
expected = timedelta(hours=5)
self.assertEquals(time_ref, expected)
def test_parse_five_weeks(self):
time_ref = parseTimeOffset("5weeks")
expected = timedelta(weeks=5)
self.assertEquals(time_ref, expected)
def test_parse_one_month_returns_thirty_days(self):
time_ref = parseTimeOffset("1month")
expected = timedelta(30)
self.assertEquals(time_ref, expected)
def test_parse_two_months_returns_sixty_days(self):
time_ref = parseTimeOffset("2months")
expected = timedelta(60)
self.assertEquals(time_ref, expected)
def test_parse_twelve_months_returns_360_days(self):
time_ref = parseTimeOffset("12months")
expected = timedelta(360)
self.assertEquals(time_ref, expected)
def test_parse_one_year_returns_365_days(self):
time_ref = parseTimeOffset("1year")
expected = timedelta(365)
self.assertEquals(time_ref, expected)
def test_parse_two_years_returns_730_days(self):
time_ref = parseTimeOffset("2years")
expected = timedelta(730)
self.assertEquals(time_ref, expected)
class getUnitStringTest(TestCase):
def test_get_seconds(self):
test_cases = ['s', 'se', 'sec', 'second', 'seconds']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'seconds')
def test_get_minutes(self):
test_cases = ['min', 'minute', 'minutes']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'minutes')
def test_get_hours(self):
test_cases = ['h', 'ho', 'hour', 'hours']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'hours')
def test_get_days(self):
test_cases = ['d', 'da', 'day', 'days']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'days')
def test_get_weeks(self):
test_cases = ['w', 'we', 'week', 'weeks']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'weeks')
def test_get_months(self):
test_cases = ['mon', 'month', 'months']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'months')
def test_get_years(self):
test_cases = ['y', 'ye', 'year', 'years']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'years')
def test_m_raises_Exception(self):
with self.assertRaises(Exception):
result = getUnitString("m")
def test_integer_raises_Exception(self):
with self.assertRaises(Exception):
result = getUnitString(1)
class LeapYearMockedDateTime(datetime):
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
@classmethod
def now(cls, tzinfo=None):
return cls(2016, 2, 29, 00, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', LeapYearMockedDateTime)
class parseATTimeTestLeapYear(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2015, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2012, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2016, 1, 30, 0, 0))
self.assertEquals(time_ref, expected)
class LeapYearMockedDateTime2(datetime):
def __new__(cls, *args, **kwargs):
return datetime.__new__(datetime, *args, **kwargs)
@classmethod
def now(cls, tzinfo=None):
return cls(2013, 2, 28, 00, 0, 0, tzinfo=tzinfo)
@mock.patch('graphite.render.attime.datetime', LeapYearMockedDateTime2)
class parseATTimeTestLeapYear2(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2012, 2, 29, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2009, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2013, 1, 29, 0, 0))
self.assertEquals(time_ref, expected)
class parseATTimeTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
@unittest.expectedFailure
def test_parse_noon_plus_yesterday(self):
time_ref = parseATTime("noon+yesterday")
expected = datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day - 1, 12, 00)
self.assertEquals(time_ref, expected)
|
|
import pytest
from tests.utils import MockCoroutine
import io
import time
import base64
import hashlib
from http import client
from unittest import mock
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.s3compat import S3CompatProvider
from waterbutler.providers.s3compat.metadata import S3CompatFileMetadata
from waterbutler.providers.s3compat.metadata import S3CompatFolderMetadata
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
}
@pytest.fixture
def credentials():
return {
'host': 'Target Host',
'access_key': 'Dont dead',
'secret_key': 'open inside',
}
@pytest.fixture
def settings():
return {
'bucket': 'that kerning',
'encrypt_uploads': False
}
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=1454684930.0)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def provider(auth, credentials, settings):
return S3CompatProvider(auth, credentials, settings)
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def folder_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>my-image.jpg</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>my-third-image.jpg</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"1b2cf535f27731c974343645a3985328"</ETag>
<Size>64994</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<CommonPrefixes>
<Prefix> photos/</Prefix>
</CommonPrefixes>
</ListBucketResult>'''
@pytest.fixture
def just_a_folder_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>naptime/</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
@pytest.fixture
def contents_and_self():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>thisfolder/</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>thisfolder/item1</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>thisfolder/item2</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
@pytest.fixture
def folder_empty_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
</ListBucketResult>'''
@pytest.fixture
def file_metadata():
return {
'Content-Length': 9001,
'Last-Modified': 'SomeTime',
'Content-Type': 'binary/octet-stream',
'ETag': '"fba9dede5f27731c9771645a39863328"',
'X-AMZ-SERVER-SIDE-ENCRYPTION': 'AES256'
}
@pytest.fixture
def version_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>bucket</Name>
<Prefix>my</Prefix>
<KeyMarker/>
<VersionIdMarker/>
<MaxKeys>5</MaxKeys>
<IsTruncated>false</IsTruncated>
<Version>
<Key>my-image.jpg</Key>
<VersionId>3/L4kqtJl40Nr8X8gdRQBpUMLUo</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<Version>
<Key>my-image.jpg</Key>
<VersionId>QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</VersionId>
<IsLatest>false</IsLatest>
<LastModified>2009-10-10T17:50:30.000Z</LastModified>
<ETag>"9b2cf535f27731c974343645a3985328"</ETag>
<Size>166434</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<Version>
<Key>my-image.jpg</Key>
<VersionId>UIORUnfndfhnw89493jJFJ</VersionId>
<IsLatest>false</IsLatest>
<LastModified>2009-10-11T12:50:30.000Z</LastModified>
<ETag>"772cf535f27731c974343645a3985328"</ETag>
<Size>64</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
</ListVersionsResult>'''
def location_response(location):
return (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
'{}</LocationConstraint>'
).format(location)
def list_objects_response(keys, truncated=False):
response = '''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>'''
response += '<IsTruncated>' + str(truncated).lower() + '</IsTruncated>'
response += ''.join(map(
lambda x: '<Contents><Key>{}</Key></Contents>'.format(x),
keys
))
response += '</ListBucketResult>'
return response.encode('utf-8')
def bulk_delete_body(keys):
payload = '<?xml version="1.0" encoding="UTF-8"?>'
payload += '<Delete>'
payload += ''.join(map(
lambda x: '<Object><Key>{}</Key></Object>'.format(x),
keys
))
payload += '</Delete>'
payload = payload.encode('utf-8')
md5 = base64.b64encode(hashlib.md5(payload).digest())
headers = {
'Content-Length': str(len(payload)),
'Content-MD5': md5.decode('ascii'),
'Content-Type': 'text/xml',
}
return (payload, headers)
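# Illustrative sketch (not part of the original test module): what bulk_delete_body
# produces for two of the keys used in the fixtures above. The payload is the S3
# multi-object-delete XML body; the headers carry its length and base64-encoded MD5.
def _bulk_delete_body_example():
    payload, headers = bulk_delete_body(['thisfolder/item1', 'thisfolder/item2'])
    assert payload.startswith(b'<?xml version="1.0" encoding="UTF-8"?><Delete>')
    assert b'<Object><Key>thisfolder/item1</Key></Object>' in payload
    assert headers['Content-Length'] == str(len(payload))
    assert headers['Content-Type'] == 'text/xml'
    return payload, headers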
def build_folder_params(path):
return {'prefix': path.path, 'delimiter': '/'}
class TestValidatePath:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_file(self, provider, file_metadata, mock_time):
file_path = 'foobah'
params = {'prefix': '/' + file_path + '/', 'delimiter': '/'}
good_metadata_url = provider.bucket.new_key('/' + file_path).generate_url(100, 'HEAD')
bad_metadata_url = provider.bucket.generate_url(100)
aiohttpretty.register_uri('HEAD', good_metadata_url, headers=file_metadata)
aiohttpretty.register_uri('GET', bad_metadata_url, params=params, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + file_path)
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + file_path + '/')
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + file_path)
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_validate_v1_path_folder(self, provider, folder_metadata, mock_time):
folder_path = 'Photos'
params = {'prefix': '/' + folder_path + '/', 'delimiter': '/'}
good_metadata_url = provider.bucket.generate_url(100)
bad_metadata_url = provider.bucket.new_key('/' + folder_path).generate_url(100, 'HEAD')
aiohttpretty.register_uri(
'GET', good_metadata_url, params=params,
body=folder_metadata, headers={'Content-Type': 'application/xml'}
)
aiohttpretty.register_uri('HEAD', bad_metadata_url, status=404)
try:
wb_path_v1 = await provider.validate_v1_path('/' + folder_path + '/')
except Exception as exc:
pytest.fail(str(exc))
with pytest.raises(exceptions.NotFoundError) as exc:
await provider.validate_v1_path('/' + folder_path)
assert exc.value.code == client.NOT_FOUND
wb_path_v0 = await provider.validate_path('/' + folder_path + '/')
assert wb_path_v1 == wb_path_v0
@pytest.mark.asyncio
async def test_normal_name(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_folder(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@pytest.mark.asyncio
async def test_root(self, provider, mock_time):
path = await provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
class TestCRUD:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': 'attachment'})
aiohttpretty.register_uri('GET', url[:url.index('?')], body=b'delicious', auto_length=True)
result = await provider.download(path)
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_version(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(
100,
query_parameters={'versionId': 'someversion'},
response_headers={'response-content-disposition': 'attachment'},
)
aiohttpretty.register_uri('GET', url[:url.index('?')], body=b'delicious', auto_length=True)
result = await provider.download(path, version='someversion')
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_display_name(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': "attachment; filename*=UTF-8''tuna"})
aiohttpretty.register_uri('GET', url[:url.index('?')], body=b'delicious', auto_length=True)
result = await provider.download(path, displayName='tuna')
content = await result.read()
assert content == b'delicious'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_not_found(self, provider, mock_time):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': 'attachment'})
aiohttpretty.register_uri('GET', url[:url.index('?')], status=404)
with pytest.raises(exceptions.DownloadError):
await provider.download(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_download_folder_400s(self, provider, mock_time):
with pytest.raises(exceptions.DownloadError) as e:
await provider.download(WaterButlerPath('/cool/folder/mom/'))
assert e.value.code == 400
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_update(self, provider, file_content, file_stream, file_metadata, mock_time):
path = WaterButlerPath('/foobah')
content_md5 = hashlib.md5(file_content).hexdigest()
url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', metadata_url, headers=file_metadata)
aiohttpretty.register_uri('PUT', url, status=201, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = await provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert not created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload_encrypted(self, provider, file_content, file_stream, file_metadata, mock_time):
# Set trigger for encrypt_key=True in s3compat.provider.upload
provider.encrypt_uploads = True
path = WaterButlerPath('/foobah')
content_md5 = hashlib.md5(file_content).hexdigest()
url = provider.bucket.new_key(path.path).generate_url(100, 'PUT', encrypt_key=True)
metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
],
)
aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = await provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert metadata.extra['encryption'] == 'AES256'
assert created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_delete(self, provider, mock_time):
path = WaterButlerPath('/some-file')
url = provider.bucket.new_key(path.path).generate_url(100, 'DELETE')
aiohttpretty.register_uri('DELETE', url, status=200)
await provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_folder_delete(self, provider, contents_and_self, mock_time):
path = WaterButlerPath('/some-folder/')
params = {'prefix': 'some-folder/'}
query_url = provider.bucket.generate_url(100, 'GET')
aiohttpretty.register_uri(
'GET',
query_url,
params=params,
body=contents_and_self,
status=200,
)
target_items = ['thisfolder/', 'thisfolder/item1', 'thisfolder/item2']
delete_urls = []
for i in target_items:
delete_url = provider.bucket.new_key(i).generate_url(
100,
'DELETE',
)
delete_urls.append(delete_url)
aiohttpretty.register_uri('DELETE', delete_url, status=204)
await provider.delete(path)
assert aiohttpretty.has_call(method='GET', uri=query_url, params=params)
for delete_url in delete_urls:
assert aiohttpretty.has_call(method='DELETE', uri=delete_url)
class TestMetadata:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder(self, provider, folder_metadata, mock_time):
path = WaterButlerPath('/darp/')
url = provider.bucket.generate_url(100)
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, body=folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 3
assert result[0].name == ' photos'
assert result[1].name == 'my-image.jpg'
assert result[2].extra['md5'] == '1b2cf535f27731c974343645a3985328'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_folder_self_listing(self, provider, contents_and_self, mock_time):
path = WaterButlerPath('/thisfolder/')
url = provider.bucket.generate_url(100)
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, body=contents_and_self)
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
for fobj in result:
assert fobj.name != path.path
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_just_a_folder_metadata_folder(self, provider, just_a_folder_metadata, mock_time):
path = WaterButlerPath('/')
url = provider.bucket.generate_url(100)
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, body=just_a_folder_metadata,
headers={'Content-Type': 'application/xml'})
result = await provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 1
assert result[0].kind == 'folder'
# @pytest.mark.asyncio
# @pytest.mark.aiohttpretty
# async def test_must_have_slash(self, provider, just_a_folder_metadata, mock_time):
# with pytest.raises(exceptions.InvalidPathError):
# await provider.metadata('')
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file(self, provider, file_metadata, mock_time):
path = WaterButlerPath('/Foo/Bar/my-image.jpg')
url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', url, headers=file_metadata)
result = await provider.metadata(path)
assert isinstance(result, metadata.BaseFileMetadata)
assert result.path == str(path)
assert result.name == 'my-image.jpg'
assert result.extra['md5'] == 'fba9dede5f27731c9771645a39863328'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_metadata_file_missing(self, provider, mock_time):
path = WaterButlerPath('/notfound.txt')
url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', url, status=404)
with pytest.raises(exceptions.MetadataError):
await provider.metadata(path)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_upload(self, provider, file_content, file_stream, file_metadata, mock_time):
path = WaterButlerPath('/foobah')
content_md5 = hashlib.md5(file_content).hexdigest()
url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
],
)
aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = await provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_raise_409(self, provider, just_a_folder_metadata, mock_time):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, body=just_a_folder_metadata,
headers={'Content-Type': 'application/xml'})
with pytest.raises(exceptions.FolderNamingConflict) as e:
await provider.create_folder(path)
assert e.value.code == 409
assert e.value.message == 'Cannot create folder "alreadyexists", because a file or folder already exists with that name'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_must_start_with_slash(self, provider, mock_time):
path = WaterButlerPath('/alreadyexists')
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 400
assert e.value.message == 'Path must be a directory'
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_errors_out(self, provider, mock_time):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
params = build_folder_params(path)
create_url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
aiohttpretty.register_uri('GET', url, params=params, status=404)
aiohttpretty.register_uri('PUT', create_url, status=403)
with pytest.raises(exceptions.CreateFolderError) as e:
await provider.create_folder(path)
assert e.value.code == 403
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_errors_out_metadata(self, provider, mock_time):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, status=403)
with pytest.raises(exceptions.MetadataError) as e:
await provider.create_folder(path)
assert e.value.code == 403
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_creates(self, provider, mock_time):
path = WaterButlerPath('/doesntalreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
params = build_folder_params(path)
create_url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
aiohttpretty.register_uri('GET', url, params=params, status=404)
aiohttpretty.register_uri('PUT', create_url, status=200)
resp = await provider.create_folder(path)
assert resp.kind == 'folder'
assert resp.name == 'doesntalreadyexists'
assert resp.path == '/doesntalreadyexists/'
class TestOperations:
# @pytest.mark.asyncio
# @pytest.mark.aiohttpretty
# async def test_copy(self, provider, file_metadata, mock_time):
# dest_path = WaterButlerPath('/dest')
# source_path = WaterButlerPath('/source')
# headers = {'x-amz-copy-source': '/{}/{}'.format(provider.settings['bucket'], source_path.path)}
# metadata_url = provider.bucket.new_key(dest_path.path).generate_url(100, 'HEAD')
# url = provider.bucket.new_key(dest_path.path).generate_url(100, 'PUT', headers=headers)
# aiohttpretty.register_uri('PUT', url, status=200)
# aiohttpretty.register_uri('HEAD', metadata_url, headers=file_metadata)
# resp = await provider.copy(provider, source_path, dest_path)
# # TODO: matching url content for request
# assert resp['kind'] == 'file'
# assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
# assert aiohttpretty.has_call(method='PUT', uri=url, headers=headers)
@pytest.mark.asyncio
@pytest.mark.aiohttpretty
async def test_version_metadata(self, provider, version_metadata, mock_time):
path = WaterButlerPath('/my-image.jpg')
url = provider.bucket.generate_url(100, 'GET', query_parameters={'versions': ''})
params = build_folder_params(path)
aiohttpretty.register_uri('GET', url, params=params, status=200, body=version_metadata)
data = await provider.revisions(path)
assert isinstance(data, list)
assert len(data) == 3
for item in data:
assert hasattr(item, 'extra')
assert hasattr(item, 'version')
assert hasattr(item, 'version_identifier')
assert aiohttpretty.has_call(method='GET', uri=url, params=params)
async def test_equality(self, provider, mock_time):
assert provider.can_intra_copy(provider)
assert provider.can_intra_move(provider)
|
|
""" OpenOffice to HTML Converter for PubTal
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import xml.sax, zipfile, StringIO, cgi, re, os.path
try:
import logging
except:
from pubtal import InfoLogging as logging
import OOFilter
from pubtal import HTMLWriter
OFFICE_URI='http://openoffice.org/2000/office'
TEXT_URI='http://openoffice.org/2000/text'
STYLE_URI='http://openoffice.org/2000/style'
TABLE_URI='http://openoffice.org/2000/table'
FORMAT_URI='http://www.w3.org/1999/XSL/Format'
DUBLIN_URI='http://purl.org/dc/elements/1.1/'
META_URI='http://openoffice.org/2000/meta'
XLINK_URI='http://www.w3.org/1999/xlink'
SVG_URI='http://www.w3.org/2000/svg'
DRAW_URI='http://openoffice.org/2000/drawing'
# These are the fo styles that will be treated as CSS styles.
SUPPORTED_FO_STYLES = {'text-align':1, 'font-weight':1, 'font-style':1, 'margin-left':1}
# These lists act as filters on which styles are applied to which kind of elements.
HEADING_STYLE_FILTER = ['text-align', 'margin-left']
PARAGRAPH_STYLE_FILTER = ['text-align', 'underline', 'line-through', 'overline'
,'font-weight', 'font-style', 'vertical-align', 'margin-left']
SPAN_STYLE_FILTER = PARAGRAPH_STYLE_FILTER
# These are the assumed defaults for paragraphs - OO setting these will be ignored.
DEFAULT_PARAGRAPH_STYLES = { 'text-align': 'start', 'font-weight': 'normal'
,'font-style': 'normal', 'margin-left': '0cm'}
class OpenOfficeConverter:
""" Convert OpenOffice format to HTML, XHTML or PlainText
"""
def __init__ (self):
self.log = logging.getLogger ("PubTal.OOC")
self.contentParser = SXWContentParser ()
def convert (self, fileName, config={}):
archive = zipfile.ZipFile (fileName, 'r')
self.contentParser.parseContent (archive, config)
archive.close()
def getMetaInfo (self):
return self.contentParser.getMetaInfo()
def getContent (self):
return self.contentParser.getContent()
def getFootNotes (self):
return self.contentParser.getFootNotes()
def getPictures (self):
return self.contentParser.getPictures()
class SXWContentParser (xml.sax.handler.DTDHandler):
""" Parse the content of an OpenOffice (SXW) archive into HTML, XHTML or PlainText
"""
def __init__ (self):
self.log = logging.getLogger ("PubTal.OOC.SWXContentParser")
self.saxFilter = OOFilter.SAXFilter ()
def parseContent (self, archive, config):
self.officeHandler = OfficeHandler(config)
self.styleHandler = StyleHandler(config)
self.textHandler = TextHandler (self.styleHandler, config)
self.tableHandler = TableHandler (self.styleHandler, self.textHandler.result, config)
self.drawHandler = DrawHandler (self.styleHandler, self.textHandler, config)
self.saxFilter.setHandler (OFFICE_URI, self.officeHandler)
self.saxFilter.setHandler (DUBLIN_URI, self.officeHandler)
self.saxFilter.setHandler (META_URI, self.officeHandler)
self.saxFilter.setHandler (STYLE_URI, self.styleHandler)
self.saxFilter.setHandler (TEXT_URI, self.textHandler)
self.saxFilter.setHandler (TABLE_URI, self.tableHandler)
self.saxFilter.setHandler (DRAW_URI, self.drawHandler)
self.saxFilter.setHandler (SVG_URI, self.drawHandler)
self.ourParser = xml.sax.make_parser()
self.log.debug ("Setting features of parser")
self.ourParser.setFeature (xml.sax.handler.feature_external_ges, 0)
self.ourParser.setFeature (xml.sax.handler.feature_namespaces, 1)
self.ourParser.setContentHandler (self.saxFilter)
# Initialise our variables
self.pictureList = []
self.log.debug ("Parsing meta data.")
sxwContent = archive.read ('meta.xml')
contentFile = StringIO.StringIO (sxwContent)
self.ourParser.parse (contentFile)
self.log.debug ("Parsing styles.")
sxwContent = archive.read ('styles.xml')
contentFile = StringIO.StringIO (sxwContent)
self.ourParser.parse (contentFile)
self.log.debug ("Parsing actual content.")
sxwContent = archive.read ('content.xml')
contentFile = StringIO.StringIO (sxwContent)
self.ourParser.parse (contentFile)
# Read pictures
for pictureFilename, newFilename in self.drawHandler.getBundledPictures():
self.pictureList.append ((newFilename, archive.read (pictureFilename)))
def getMetaInfo (self):
return self.officeHandler.getMetaInfo()
def getContent (self):
return self.textHandler.getContent()
def getFootNotes (self):
return self.textHandler.getFootNotes()
def getPictures (self):
return self.pictureList
class OfficeHandler:
def __init__ (self, config):
self.log = logging.getLogger ("PubTal.OOC.OfficeHandler")
self.metaData = {}
self.keywords = []
self.charData = []
self.cleanSmartQuotes = config.get ('CleanSmartQuotes', 0)
self.cleanHyphens = config.get ('CleanHyphens', 0)
def startElementNS (self, name, qname, atts):
self.charData = []
if (name[1] == 'document-content'):
try:
version = atts [(OFFICE_URI,'version')]
self.log.debug ("Open Office format %s found." % version)
if (float (version) != 1.0):
self.log.warn ("Only OpenOffice format 1.0 is supported, version %s detected." % version)
except Exception, e:
msg = "Error determining OO version. Error: " + str (e)
self.log.error (msg)
raise OpenOfficeFormatException (msg)
def endElementNS (self, name, qname):
data = u"".join (self.charData)
self.charData = []
if (name[0] == META_URI):
if (name [1] == 'keyword'):
self.keywords.append (data)
elif (name [1] == 'creation-date'):
self.metaData [name [1]] = data
if (name[0] == DUBLIN_URI):
self.metaData [name [1]] = data
def characters (self, data):
if (self.cleanSmartQuotes):
data = data.replace (u'\u201c', '"')
data = data.replace (u'\u201d', '"')
if (self.cleanHyphens):
data = data.replace (u'\u2013', '-')
self.charData.append (data)
def getMetaInfo (self):
self.metaData ['keywords'] = self.keywords
return self.metaData
class StyleHandler:
def __init__ (self, config):
self.log = logging.getLogger ("PubTal.OOC.StyleHandler")
self.textStyleMap = {}
self.paragraphStyleMap = {}
self.currentStyleFamily = None
self.currentStyle = None
def startElementNS (self, name, qname, atts):
realName = name [1]
if (realName == 'style'):
try:
self.currentStyle = {}
self.currentStyle ['name'] = atts [(STYLE_URI, 'name')]
self.currentStyleFamily = atts [(STYLE_URI, 'family')]
self.currentStyle ['parent-name'] = atts.get ((STYLE_URI, 'parent-style-name'), None)
except Exception, e:
msg = "Error parsing style information. Error: " + str (e)
self.log.error (msg)
raise OpenOfficeFormatException (msg)
if (realName == 'properties' and self.currentStyle is not None):
for uri, attName in atts.keys():
if (uri == FORMAT_URI):
if SUPPORTED_FO_STYLES.has_key (attName):
attValue = atts [(FORMAT_URI, attName)]
self.currentStyle [attName] = attValue
if (uri == STYLE_URI):
attValue = atts [(STYLE_URI, attName)]
if (attValue != 'none'):
if (attName == 'text-underline'):
self.currentStyle ['underline'] = 'underline'
if (attName == 'text-crossing-out'):
self.currentStyle ['line-through'] = 'line-through'
if (attName == 'text-position'):
actualPosition = attValue [0:attValue.find (' ')]
self.currentStyle ['vertical-align'] = actualPosition
def endElementNS (self, name, qname):
if (name[1] == 'style'):
if (self.currentStyle is not None):
name = self.currentStyle ['name']
if (self.currentStyleFamily == "paragraph"):
self.log.debug ("Recording paragraph style %s" % name)
self.paragraphStyleMap [name] = self.currentStyle
elif (self.currentStyleFamily == "text"):
self.log.debug ("Recording text style %s" % name)
self.textStyleMap [name] = self.currentStyle
else:
self.log.debug ("Unsupported style family %s" % self.currentStyleFamily)
self.currentStyle = None
self.currentStyleFamily = None
def characters (self, data):
pass
def getTextStyle (self, name):
return self.styleLookup (name, self.textStyleMap)
def getParagraphStyle (self, name):
return self.styleLookup (name, self.paragraphStyleMap)
def styleLookup (self, name, map):
foundStyle = {}
styleHierachy = []
lookupName = name
while (lookupName is not None):
lookupStyle = map.get (lookupName, None)
if (lookupStyle is not None):
styleHierachy.append (lookupStyle)
lookupName = lookupStyle ['parent-name']
else:
self.log.debug ("Style %s not found!" % lookupName)
lookupName = None
styleHierachy.reverse()
for style in styleHierachy:
foundStyle.update (style)
return foundStyle
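# Illustrative sketch (not part of the original PubTal module): how styleLookup merges
# a style with its parents. The child style is applied after its parent, so it wins on
# conflicts. The style names 'Default' and 'Code' here are made up for the example.
def _style_lookup_example():
    toyMap = {'Default': {'name': 'Default', 'parent-name': None,
                          'text-align': 'start', 'font-weight': 'normal'},
              'Code': {'name': 'Code', 'parent-name': 'Default',
                       'font-weight': 'bold'}}
    handler = StyleHandler({})
    merged = handler.styleLookup('Code', toyMap)
    # merged now holds text-align 'start' from 'Default' and font-weight 'bold' from 'Code'.
    return merged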
class TextHandler:
def __init__ (self, styleHandler, config):
self.log = logging.getLogger ("PubTal.OOC.TextHandler")
self.styleHandler = styleHandler
# Check for the kind of output we are generating
outputType = config.get ('output-type', 'HTML')
self.outputPlainText = 0
if (outputType == 'HTML'):
self.outputXHTML = 0
elif (outputType == 'XHTML'):
self.outputXHTML = 1
elif (outputType == 'PlainText'):
# Plain text trumps outputXHTML
self.outputPlainText = 1
else:
msg = "Attempt to configure for unsupported output-type %s. " + outputType
self.log.error (msg)
raise OpenOfficeFormatException (msg)
if (self.outputPlainText):
# We do not preserve spaces because our output is not space clean.
self.result = HTMLWriter.PlainTextWriter(outputStream=StringIO.StringIO(), outputXHTML=1, preserveSpaces = 0)
else:
self.result = HTMLWriter.HTMLWriter(outputStream=StringIO.StringIO(), outputXHTML=self.outputXHTML, preserveSpaces = 0)
# We use this stack to re-direct output into footnotes.
self.resultStack = []
# We treat footnotes and endnotes the same.
self.footNoteID = None
self.footnotes = []
self.charData = []
# The closeTagsStack holds one entry per open OO text tag.
# Those that have corresponding HTML tags have text, everything else has None
self.closeTagsStack = []
# The effectiveStyleStack holds the effective style (e.g. paragraph) and is used to filter out
# un-needed style changes.
self.effectiveStyleStack = [DEFAULT_PARAGRAPH_STYLES]
self.cleanSmartQuotes = config.get ('CleanSmartQuotes', 0)
self.cleanHyphens = config.get ('CleanHyphens', 0)
self.preserveSpaces = config.get ('preserveSpaces', 1)
def startElementNS (self, name, qname, atts):
#self.log.debug ("Start: %s" % name[1])
realName = name [1]
styleName = atts.get ((TEXT_URI, 'style-name'), None)
if (realName == 'h'):
self.charData = []
# We have a heading - get the level and style.
try:
headingLevel = int (atts [(TEXT_URI, 'level')])
applicableStyle = self.styleHandler.getParagraphStyle (styleName)
if (headingLevel > 6):
self.log.warn ("Heading level of %s used, but HTML only supports up to level 6." % str (headingLevel))
headingLevel = 6
self.result.startElement ('h%s' % str (headingLevel), self.getCSSStyle (applicableStyle, HEADING_STYLE_FILTER))
self.closeTagsStack.append ('h%s' % str (headingLevel))
except Exception, e:
msg = "Error parsing heading. Error: " + str (e)
self.log.error (msg)
raise OpenOfficeFormatException (msg)
elif (realName == 'p'):
# We have a paragraph
self.charData = []
applicableStyle = self.styleHandler.getParagraphStyle (styleName)
if (styleName == "Preformatted Text"):
# We have PRE text
self.result.startElement ('pre', self.getCSSStyle (applicableStyle, PARAGRAPH_STYLE_FILTER))
self.closeTagsStack.append ('pre')
elif (styleName == "Quotations"):
# We have a block quote.
self.result.startElement ('blockquote')
self.result.startElement ('p', self.getCSSStyle (applicableStyle, PARAGRAPH_STYLE_FILTER))
self.closeTagsStack.append (['p', 'blockquote'])
else:
self.result.startElement ('p', self.getCSSStyle (applicableStyle, PARAGRAPH_STYLE_FILTER))
self.closeTagsStack.append ('p')
# Footnotes can start with either paragraphs or lists.
if (self.footNoteID is not None):
self.result.startElement ('a', ' name="%s" style="vertical-align: super" href="#src%s"'% (self.footNoteID, self.footNoteID))
self.result.write (str (len (self.footnotes) + 1))
self.result.endElement ('a')
self.footNoteID = None
elif (realName == 'ordered-list'):
self.charData = []
applicableStyle = self.styleHandler.getParagraphStyle (styleName)
self.result.startElement ('ol', self.getCSSStyle (applicableStyle, PARAGRAPH_STYLE_FILTER))
self.closeTagsStack.append ('ol')
# Footnotes can start with either paragraphs or lists.
if (self.footNoteID is not None):
self.result.startElement ('a', ' name="%s" style="vertical-align: super" href="#src%s"'% (self.footNoteID, self.footNoteID))
self.result.write (str (len (self.footnotes) + 1))
self.result.endElement ('a')
self.footNoteID = None
elif (realName == 'unordered-list'):
self.charData = []
applicableStyle = self.styleHandler.getParagraphStyle (styleName)
self.result.startElement ('ul', self.getCSSStyle (applicableStyle, PARAGRAPH_STYLE_FILTER))
self.closeTagsStack.append ('ul')
# Footnotes can start with either paragraphs or lists.
if (self.footNoteID is not None):
self.result.startElement ('a', ' name="%s" style="vertical-align: super" href="#src%s"'% (self.footNoteID, self.footNoteID))
self.result.write (str (len (self.footnotes) + 1))
self.result.endElement ('a')
self.footNoteID = None
elif (realName == 'list-item'):
applicableStyle = self.styleHandler.getTextStyle (styleName)
self.result.startElement ('li', self.getCSSStyle (applicableStyle, SPAN_STYLE_FILTER))
self.closeTagsStack.append ('li')
elif (realName == 'span'):
# We have some text formatting - write out any data already accumulated.
self.writeData()
applicableStyle = self.styleHandler.getTextStyle (styleName)
if (styleName == "Source Text"):
# We have PRE text
self.result.startElement ('code', self.getCSSStyle (applicableStyle, SPAN_STYLE_FILTER))
self.closeTagsStack.append ('code')
else:
cssStyle = self.getCSSStyle (applicableStyle, SPAN_STYLE_FILTER)
if (len (cssStyle) > 0):
self.result.startElement ('span', cssStyle)
self.closeTagsStack.append ('span')
else:
#self.log.debug ("Suppressing span - no change in style.")
self.closeTagsStack.append (None)
elif (realName == 'a'):
self.writeData()
linkDest = atts.get ((XLINK_URI, 'href'), None)
if (linkDest is not None):
self.result.startElement ('a', ' href="%s"' % linkDest)
self.closeTagsStack.append ('a')
else:
self.closeTagsStack.append (None)
# Links are underlined - we want this done by the style sheet, so ignore the underline.
newEffectiveStyle = {}
newEffectiveStyle.update (self.effectiveStyleStack[-1])
newEffectiveStyle ['underline'] = 'underline'
self.effectiveStyleStack.append (newEffectiveStyle)
elif (realName == 'footnote' or realName == 'endnote'):
try:
footnoteID = atts[(TEXT_URI, 'id')]
except Exception, e:
msg = "Error getting footnoteid. Error: " + str (e)
self.log.error (msg)
raise OpenOfficeFormatException (msg)
# Write out any data we have currently stored.
self.writeData()
# Now write out the link to the footnote
self.result.startElement ('a', ' name="src%s" style="vertical-align: super" href="#%s"' % (footnoteID, footnoteID))
self.result.write (str (len (self.footnotes) + 1))
self.result.endElement ('a')
self.resultStack.append (self.result)
if (self.outputPlainText):
self.result = HTMLWriter.PlainTextWriter (outputStream = StringIO.StringIO(), outputXHTML=1, preserveSpaces = 0)
else:
self.result = HTMLWriter.HTMLWriter(outputStream = StringIO.StringIO(), outputXHTML=self.outputXHTML, preserveSpaces = 0)
self.closeTagsStack.append (None)
# Re-set the style stack for the footnote
self.effectiveStyleStack.append (DEFAULT_PARAGRAPH_STYLES)
# Keep this footnote id around for the first paragraph.
self.footNoteID = footnoteID
elif (realName == 'footnote-body' or realName == 'endnote-body'):
self.closeTagsStack.append (None)
# Keep the effective style as-is
self.effectiveStyleStack.append (self.effectiveStyleStack[-1])
elif (realName == 'bookmark-start' or realName == 'bookmark'):
try:
bookmarkName = atts[(TEXT_URI, 'name')]
except Exception, e:
msg = "Error getting bookmark name. Error: " + str (e)
self.log.error (msg)
raise OpenOfficeFormatException (msg)
self.writeData()
self.result.startElement ('a', ' name="%s"' % bookmarkName)
self.closeTagsStack.append ('a')
# Keep the effective style as-is
self.effectiveStyleStack.append (self.effectiveStyleStack[-1])
elif (realName == 'line-break'):
self.writeData()
self.result.lineBreak()
self.closeTagsStack.append (None)
# Keep the effective style as-is
self.effectiveStyleStack.append (self.effectiveStyleStack[-1])
elif (realName == 's'):
# An extra space or two
# Remove the leading space if possible so that we can output '&nbsp; ' instead of ' &nbsp;'
removedSpace = 0
if (len (self.charData) > 0):
if (self.charData [-1][-1] == u" "):
self.charData [-1] = self.charData [-1][:-1]
removedSpace = 1
self.writeData()
count = int (atts.get ((TEXT_URI, 'c'), 1))
if (self.preserveSpaces):
for spaces in xrange (count):
self.result.nonbreakingSpace()
if (removedSpace):
# Add it back now
self.charData.append (u" ")
# Keep the effective style as-is, and ignore the close element
self.effectiveStyleStack.append (self.effectiveStyleStack[-1])
self.closeTagsStack.append (None)
else:
# We have no HTML output associated with this OO tag.
self.closeTagsStack.append (None)
# Keep the effective style as-is
self.effectiveStyleStack.append (self.effectiveStyleStack[-1])
def endElementNS (self, name, qname):
if (len (self.closeTagsStack) > 0):
htmlTag = self.closeTagsStack.pop()
if (htmlTag is not None):
self.writeData()
if (type (htmlTag) == type ([])):
for a in htmlTag:
self.result.endElement (a)
else:
self.result.endElement (htmlTag)
# Remove this effective style.
self.effectiveStyleStack.pop()
if (name[1] == 'footnote' or name[1] == 'endnote'):
# We have just closed a footnote or endnote - record the result, pop the stack.
outputFile = self.result.getOutput()
self.footnotes.append (outputFile.getvalue())
outputFile.close()
self.result = self.resultStack.pop()
def characters (self, data):
if (self.cleanSmartQuotes):
data = data.replace (u'\u201c', '"')
data = data.replace (u'\u201d', '"')
if (self.cleanHyphens):
data = data.replace (u'\u2013', '-')
self.charData.append (data)
def writeData (self):
data = u"".join (self.charData)
self.result.write (cgi.escape (data))
self.charData = []
def getCSSStyle (self, applicableStyle, styleList):
#self.log.debug ("Filtering styles %s for styles %s" % (str (applicableStyle), str (styleList)))
textDecoration = []
cssStyles = []
# Take a look at the effective styles.
effectiveStyles = self.effectiveStyleStack [-1]
# Store the new effective style for future comparison
newEffectiveStyle = {}
newEffectiveStyle.update (effectiveStyles)
for style in styleList:
if (applicableStyle.has_key (style)):
if (style in ["underline", "line-through", "overline"]):
if (not effectiveStyles.has_key (style)):
textDecoration.append (style)
else:
# We check to see whether the effective style already has this value
# I.e. handle paragraph of font-style=normal and span of font-style=normal
styleValue = applicableStyle [style]
if (effectiveStyles.has_key (style)):
if (effectiveStyles[style] != styleValue):
cssStyles.append (u"%s:%s" % (style, styleValue))
else:
#self.log.debug ("Style %s already in effect with value %s" % (style, styleValue))
pass
else:
cssStyles.append (u"%s:%s" % (style, styleValue))
# Note this new effective style
newEffectiveStyle [style] = styleValue
if (len (textDecoration) > 0):
cssStyles.append (u"text-decoration: %s" % u",".join (textDecoration))
#self.log.debug ("Adding real effective style (%s) to stack." % str (newEffectiveStyle))
self.effectiveStyleStack.append (newEffectiveStyle)
cssStyleList = ";".join (cssStyles)
if (len (cssStyleList) > 0):
return ' style="%s"' % cssStyleList
return ''
def getContent (self):
return self.result.getOutput().getvalue()
def getFootNotes (self):
return self.footnotes
class DrawHandler:
def __init__ (self, styleHandler, textHandler, config):
self.log = logging.getLogger ("PubTal.OOC.DrawHandler")
self.styleHandler = styleHandler
self.result = textHandler.result
self.textHandler = textHandler
self.charData = []
# The effectiveStyleStack holds the effective style (e.g. paragraph) and is used to filter out
# un-needed style changes.
self.effectiveStyleStack = [DEFAULT_PARAGRAPH_STYLES]
self.closeTagsStack = []
self.bundledPictureList = []
self.currentImage = None
# Check for the kind of output we are generating
self.cleanSmartQuotes = config.get ('CleanSmartQuotes', 0)
self.cleanHyphens = config.get ('CleanHyphens', 0)
self.picturePrefix = os.path.join ('Pictures', config.get ('DestinationFile', '').replace ('.', '_'))
self.log.debug ("Determined picture prefix as %s" % self.picturePrefix)
def getBundledPictures (self):
return self.bundledPictureList
def startElementNS (self, name, qname, atts):
theURI = name [0]
realName = name [1]
if (theURI == DRAW_URI):
if (realName == 'image'):
styleName = atts.get ((DRAW_URI, 'style-name'), None)
href = atts.get ((XLINK_URI, 'href'), None)
if (href is None):
self.log.warn ("No href attribute found for image!")
self.closeTagsStack = None
return
# Deal with bundled pictures
if (href.startswith ('#Pictures/')):
self.log.debug ("Found bundled picture %s" % href)
archivePicName = href [1:]
href = self.picturePrefix + archivePicName[9:]
self.bundledPictureList.append ((archivePicName, href))
alt = atts.get ((DRAW_URI, 'name'), None)
self.currentImage = {'href': href, 'alt': alt}
self.closeTagsStack.append (None)
elif (realName == 'a'):
linkDest = atts.get ((XLINK_URI, 'href'), None)
if (linkDest is not None):
self.textHandler.writeData()
self.result.startElement ('a', ' href="%s"' % linkDest)
self.closeTagsStack.append ('a')
else:
self.closeTagsStack.append (None)
elif (theURI == SVG_URI):
if (realName == 'desc'):
self.charData = []
self.closeTagsStack.append (None)
else:
self.closeTagsStack.append (None)
def endElementNS (self, name, qname):
if (len (self.closeTagsStack) > 0):
htmlTag = self.closeTagsStack.pop()
if (htmlTag is not None):
self.result.endElement (htmlTag)
# Remove this effective style.
#self.effectiveStyleStack.pop()
theURI = name [0]
realName = name [1]
if (theURI == SVG_URI):
if (realName == 'desc'):
# We have an image description - note it!
altText = cgi.escape (u"".join (self.charData))
self.charData = []
if (self.currentImage is not None):
self.currentImage ['alt'] = altText
elif (theURI == DRAW_URI):
if (realName == 'image'):
self.textHandler.writeData()
self.result.startElement ('img', ' src="%s" alt="%s"' % (self.currentImage ['href'], self.currentImage ['alt']))
self.result.endElement ('img')
self.currentImage = None
def characters (self, data):
if (self.cleanSmartQuotes):
data = data.replace (u'\u201c', '"')
data = data.replace (u'\u201d', '"')
if (self.cleanHyphens):
data = data.replace (u'\u2013', '-')
self.charData.append (data)
class TableHandler:
def __init__ (self, styleHandler, resultWriter, config):
self.log = logging.getLogger ("PubTal.OOC.TextHandler")
self.styleHandler = styleHandler
self.result = resultWriter
self.closeTagsStack = []
self.tableStatusStack = []
def startElementNS (self, name, qname, atts):
#self.log.debug ("Start: %s" % name[1])
realName = name [1]
styleName = atts.get ((TABLE_URI, 'style-name'), None)
if (realName == 'table' or realName == 'sub-table'):
self.result.startElement ('table')
self.closeTagsStack.append ('table')
self.tableStatusStack.append ({'inHeader':0, 'firstRow': 1})
elif (realName == 'table-header-rows'):
status = self.tableStatusStack [-1]
status ['inHeader'] = 1
self.result.startElement ('thead')
self.closeTagsStack.append ('thead')
elif (realName == 'table-row'):
status = self.tableStatusStack [-1]
if ((not status ['inHeader']) and (status ['firstRow'])):
status ['firstRow'] = 0
self.result.startElement ('tbody')
self.result.startElement ('tr')
self.closeTagsStack.append ('tr')
elif (realName == 'table-cell'):
status = self.tableStatusStack [-1]
colSpan = int (atts.get ((TABLE_URI, 'number-columns-spanned'), 0))
if (colSpan != 0):
colSpanTxt = ' colspan="%s"' % str (colSpan)
else:
colSpanTxt = ''
if (status ['inHeader']):
self.result.startElement ('th', colSpanTxt)
self.closeTagsStack.append ('th')
else:
self.result.startElement ('td', colSpanTxt)
self.closeTagsStack.append ('td')
else:
self.closeTagsStack.append (None)
def endElementNS (self, name, qname):
realName = name [1]
# We check for table because we want to insert tbody close before table close.
if (len (self.tableStatusStack) > 0):
status = self.tableStatusStack [-1]
if (realName == 'table' or realName == 'sub-table'):
if (not status ['firstRow']):
# The table actually had content.
self.result.endElement ('tbody')
if (len (self.closeTagsStack) > 0):
htmlTag = self.closeTagsStack.pop()
if (htmlTag is not None):
self.result.endElement (htmlTag)
# We check for table header rows here.
if (realName == 'table-header-rows'):
status ['inHeader'] = 0
if (realName == 'table'):
# Pop this table status off the stack
self.tableStatusStack.pop()
def characters (self, data):
pass
class OpenOfficeFormatException (Exception):
pass
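# Illustrative usage sketch (not part of the original module). Assuming an OpenOffice
# 1.0 .sxw document on disk, the converter reads meta.xml, styles.xml and content.xml
# from the archive and exposes the generated markup. The file name and config values
# below are hypothetical examples.
def _converter_example (fileName='example.sxw'):
    converter = OpenOfficeConverter()
    converter.convert (fileName, config={'output-type': 'HTML', 'CleanSmartQuotes': 1})
    meta = converter.getMetaInfo()    # Dublin Core fields, keywords, creation-date
    html = converter.getContent()     # body markup built by HTMLWriter
    notes = converter.getFootNotes()  # one HTML fragment per footnote/endnote
    return meta, html, notes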
|
|
#!/usr/bin/env python
# Author: Will Skywalker
# Air Traffic Control Simulator
# License: Apache 2.0
from __future__ import division
import random, math, time, json
import sim_gui, sound
__version__ = '0.0.1'
APPROACHING_POINTS = (((220, 50), 100, 190),
((560, 50), 120, 255),
((760, 50), 180, 260),
((790, 240), 220, 320),
((790, 540), 230, 350),
((640, 680), 280, 360),
((370, 680), 0, 90),
((220, 630), 35, 85))
COMPANY_NUMBER = 12
def choose_airport(name):
return json.load(open('maps/'+name+'.json'))
class Airport(object):
def __init__(self, the_map):
self._info = the_map
self._runway = the_map['runway']
self._runway_point = the_map['runway_point']
self._runway_available = True
self.full_name = the_map['full_name']
# self._wind_direction = wind_direction
# self._wind_speed = wind_speed
self._ready_line = {}
self._waiting_line = {}
self._arrival_line = {}
def get_companies(self):
return self._info['companies']
def get_codes(self):
return self._info['code']
def get_modes(self):
return self._info['mode']
def get_runway(self):
return (self._runway[0][0]+215, self._runway[0][1]+40,
self._runway[1][0]+215, self._runway[1][1]+40)
def get_arrival_line(self):
return self._arrival_line
def get_ready_line(self):
return self._ready_line
def get_waiting_line(self):
return self._waiting_line
def update(self):
for each in self._arrival_line.values():
each.update()
if each.get_height() == 0:
del self._arrival_line[each.get_number()]
raise EOFError, each.get_number()+': '+ \
sound.male_report("We have landed at Runway "+each.get_landing_way()+'. Thank you.')
def control_plane(self, code, order, num=0, *otras):
if code.upper() in self._ready_line:
self._ready_line[code.upper()].receive_order(order, num)
elif code.upper() in self._waiting_line:
self._waiting_line[code.upper()].receive_order(order, num)
elif code.upper() in self._arrival_line:
self._arrival_line[code.upper()].receive_order(order, num)
def new_arrival_plane(self):
codenum = random.randrange(COMPANY_NUMBER)
num = random.randrange(30, 4000)
point = random.choice(APPROACHING_POINTS)
self._arrival_line[self._info['code'][codenum]+str(num)] = Plane(
self._info['companies'][codenum], random.choice(self._info['mode']),
self._info['code'][codenum]+str(num), 'Arrival',
random.choice([5000, 6000, 7000, 8000]),
random.randrange(240, 300), random.randrange(point[1], point[2]),
point[0])
return self._info['code'][codenum], self._info['companies'][codenum]+' ', num
class Plane(Airport):
def __init__(self, company, model, number, state='Ready', height=0,
speed=0, direction=0, place=[799, 799]):
self._company = company
self._model = model
self._number = number
self._state = state
self._target_height = self._height = height
self._target_speed = self._speed = speed
self._target_direction = self._direction = direction
self._place = list(place)
def get_info(self):
return self._model
def get_number(self):
return self._number
def get_company(self):
return self._company
def get_state(self):
return self._state
def get_speed(self):
return self._speed
def get_height(self):
return self._height
# def get_direction(self):
# return self._direction
def get_place(self):
return self._place
def get_landing_way(self):
return self._landing_way
def receive_order(self, order, num):
time.sleep(1.5)
if order.lower() == 'c':
if len(num) == 3:
if 0 <= int(num) <= 360:
self._target_direction = int(num)
sound.male_report('Roger, heading '+' '.join(list(num)))
elif len(num) == 1 and int(num) > 1:
self._target_height = int(num)*1000
sound.male_report('Roger, maintain '+num+'000 inches.')
elif order.lower() == 's':
self._target_speed = int(num)
sound.male_report('Roger, speed changing to '+num+' knots.')
elif order.lower() == 'l':
if self._height > 3000:
raise ValueError, self._number+': '+ \
sound.male_report("Negative, we are too high to land now.")
elif (315 > abs(self._direction - the_map['runway_point'][num][1]) > 45)or \
math.sqrt((self._place[0]-the_map['runway_point'][num][0][0])**2 \
+(self._place[1]-the_map['runway_point'][num][0][1])**2) > 150:
raise ValueError, self._number+': '+ \
sound.male_report("Negative, we are too far away from the runway.")
else:
self._state = 'Landing'
self._target_speed = random.randrange(120, 160)
self._target_height = random.randrange(600, 900)
sound.male_report("Roger, we are approaching the runway.")
self._target_direction = math.degrees(math.atan2((the_map['runway_point']\
[num][0][0]-self._place[0]),
(the_map['runway_point'][num][0][1]\
-self._place[1])))%360
self._landing_way = num
else:
raise ValueError, self._number+': '+ \
sound.male_report("Negative, your order is invalid.")
def update(self):
# print self._number, self._direction
self._place[0] += self._speed * math.sin((self._direction)/180*math.pi) / 200
self._place[1] += self._speed * math.cos((self._direction)/180*math.pi) / -200
if self._height != self._target_height:
self._height = self._height+50 if self._height<self._target_height else self._height-50
if abs(self._speed - self._target_speed) > 5:
self._speed = self._speed+5 if self._speed<self._target_speed else self._speed-5
if self._direction != self._target_direction:
self._direction = (self._direction+1)%360 \
if 0<(self._target_direction-self._direction)<180 \
or (self._target_direction-self._direction)<-180 \
else (self._direction-1)%360
if self._state == 'Landing':
self._target_direction = math.degrees(math.atan2((the_map['runway_point']\
[self._landing_way][0][0]-self._place[0]),
(self._place[1]-the_map['runway_point']\
[self._landing_way][0][1])))%360
compare = math.degrees(math.atan2((540-self._place[0]), (self._place[1]-395)))%360
if abs(self._place[0]-the_map['runway_point'][self._landing_way][0][0])<5 and \
abs(self._place[1]-the_map['runway_point'][self._landing_way][0][1])<5:
self._target_height = 0
self._target_direction = the_map['runway_point'][self._landing_way][1]
self._target_speed = 1
self._state = 'Landed'
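# Illustrative sketch (not part of the original simulator): the one-degree-per-tick
# turn rule used in Plane.update, restated as a standalone helper. The plane turns
# clockwise when the target heading lies within 180 degrees clockwise of the current
# heading, otherwise counter-clockwise. The helper name is made up for this example.
def _step_heading(current, target):
    if current == target:
        return current
    diff = target - current
    if 0 < diff < 180 or diff < -180:
        return (current + 1) % 360
    return (current - 1) % 360
# _step_heading(350, 10) -> 351 (turn clockwise through north)
# _step_heading(10, 350) -> 9   (turn counter-clockwise)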
if __name__ == '__main__':
the_map = choose_airport('kaitak')
sim_gui.SimulatorGUI(Airport(choose_airport('kaitak')))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class CleanCopier(object):
"""NodeTransformer-like visitor that copies an AST."""
def __init__(self, preserve_annos):
super(CleanCopier, self).__init__()
self.preserve_annos = preserve_annos
def copy(self, node):
"""Returns a deep copy of node (excluding some fields, see copy_clean)."""
if isinstance(node, list):
return [self.copy(n) for n in node]
elif isinstance(node, tuple):
return tuple(self.copy(n) for n in node)
elif not isinstance(node, (gast.AST, ast.AST)):
# Assuming everything that's not an AST, list or tuple is a value type
# and may simply be assigned.
return node
assert isinstance(node, (gast.AST, ast.AST))
new_fields = {}
for f in node._fields:
if not f.startswith('__') and hasattr(node, f):
new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
if self.preserve_annos:
for k in self.preserve_annos:
anno.copyanno(node, new_node, k)
return new_node
def copy_clean(node, preserve_annos=None):
"""Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
"""
return CleanCopier(preserve_annos).copy(node)
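# Illustrative sketch (not part of the original module): copying a small expression
# tree with copy_clean. The copy is a distinct object graph, so mutating it leaves the
# original node intact.
def _copy_clean_example():
  node = parser.parse_expression('a + b')
  duplicate = copy_clean(node)
  assert duplicate is not node
  duplicate.left.id = 'c'  # the original left operand is still 'a'
  assert node.left.id == 'a'
  return duplicate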
class SymbolRenamer(gast.NodeTransformer):
"""Transformer that can rename symbols to a simple names."""
def __init__(self, name_map):
self.name_map = name_map
def _process(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)
# All annotations get carried over.
for k in anno.keys(node):
anno.copyanno(node, new_node, k)
return new_node
return self.generic_visit(node)
def visit_Name(self, node):
return self._process(node)
def visit_Attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
return self._process(node)
# Attributes of dynamic objects will not have a QN.
return self.generic_visit(node)
def rename_symbols(node, name_map):
"""Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
elif isinstance(node, tuple):
return tuple(renamer.visit(n) for n in node)
return renamer.visit(node)
def keywords_to_dict(keywords):
"""Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
keys.append(gast.Str(kw.arg))
values.append(kw.value)
return gast.Dict(keys=keys, values=values)
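# Illustrative sketch (not part of the original module): the keyword arguments of a
# parsed call become a gast.Dict literal whose keys are the argument names as string
# nodes.
def _keywords_to_dict_example():
  call = parser.parse_expression('f(x=1, y=2)')
  d = keywords_to_dict(call.keywords)
  assert [k.s for k in d.keys] == ['x', 'y']
  return d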
class PatternMatcher(gast.NodeVisitor):
"""Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
self.pattern_stack = []
self.matches = True
def compare_and_visit(self, node, pattern):
self.pattern_stack.append(self.pattern)
self.pattern = pattern
self.generic_visit(node)
self.pattern = self.pattern_stack.pop()
def no_match(self):
self.matches = False
return False
def is_wildcard(self, p):
if isinstance(p, (list, tuple)) and len(p) == 1:
p, = p
if isinstance(p, gast.Name) and p.id == '_':
return True
if p == '_':
return True
return False
def generic_visit(self, node):
if not self.matches:
return
pattern = self.pattern
for f in node._fields:
if f.startswith('__'):
continue
if not hasattr(node, f):
if hasattr(pattern, f) and getattr(pattern, f):
return self.no_match()
else:
continue
if not hasattr(pattern, f):
return self.no_match()
v = getattr(node, f)
p = getattr(pattern, f)
if self.is_wildcard(p):
continue
if isinstance(v, (list, tuple)):
if not isinstance(p, (list, tuple)) or len(v) != len(p):
return self.no_match()
for v_item, p_item in zip(v, p):
self.compare_and_visit(v_item, p_item)
elif isinstance(v, (gast.AST, ast.AST)):
if not isinstance(v, type(p)) and not isinstance(p, type(v)):
return self.no_match()
self.compare_and_visit(v, p)
else:
# Assume everything else is a value type.
if v != p:
return self.no_match()
def matches(node, pattern):
"""Basic pattern matcher for AST.
The pattern may contain wildcards represented by the symbol '_'. A node
matches a pattern if for every node in the tree, either there is a node of
the same type in pattern, or a Name node with id='_'.
Args:
node: ast.AST
pattern: ast.AST
Returns:
bool
"""
if isinstance(pattern, str):
pattern = parser.parse_expression(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
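# Illustrative sketch (not part of the original module): the '_' wildcard matches any
# subtree, so a call to test_fn matches regardless of its single argument, while a
# call to a differently named function does not.
def _matches_example():
  node = parser.parse_expression('test_fn(x + 1)')
  assert matches(node, 'test_fn(_)')
  assert not matches(node, 'other_fn(_)')
  return node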
# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
def apply_to_single_assignments(targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
It tries to break down the unpacking if possible. In effect, it is
equivalent to passing the assigned values in SSA form to apply_fn.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
targets: Union[List[ast.AST, ...], Tuple[ast.AST, ...], ast.AST], should be
used with the targets field of an ast.Assign node
values: ast.AST
apply_fn: Callable[[ast.AST, ast.AST], None], called with the
respective nodes of each single assignment
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
idx = parser.parse_expression(str(i))
value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
apply_to_single_assignments(target_el, value_el, apply_fn)
else:
apply_fn(target, values)
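# Illustrative sketch (not part of the original module): unpacking `a, b = c, d` into
# the individual pairs (a, c) and (b, d). The Name nodes follow the three-argument
# gast.Name form used elsewhere in this file; the collected pairs are just identifiers.
def _apply_to_single_assignments_example():
  targets = gast.Tuple(
      elts=[gast.Name('a', gast.Store(), None), gast.Name('b', gast.Store(), None)],
      ctx=gast.Store())
  values = gast.Tuple(
      elts=[gast.Name('c', gast.Load(), None), gast.Name('d', gast.Load(), None)],
      ctx=gast.Load())
  pairs = []
  apply_to_single_assignments(targets, values,
                              lambda t, v: pairs.append((t.id, v.id)))
  assert pairs == [('a', 'c'), ('b', 'd')]
  return pairs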
def parallel_walk(node, other):
"""Walks two ASTs in parallel.
The two trees must have identical structure.
Args:
node: Union[ast.AST, Iterable[ast.AST]]
other: Union[ast.AST, Iterable[ast.AST]]
Yields:
Tuple[ast.AST, ast.AST]
Raises:
ValueError: if the two trees don't have identical structure.
"""
if isinstance(node, (list, tuple)):
node_stack = list(node)
else:
node_stack = [node]
if isinstance(other, (list, tuple)):
other_stack = list(other)
else:
other_stack = [other]
while node_stack and other_stack:
assert len(node_stack) == len(other_stack)
n = node_stack.pop()
o = other_stack.pop()
if (not isinstance(n, (ast.AST, gast.AST)) or
not isinstance(o, (ast.AST, gast.AST)) or
n.__class__.__name__ != o.__class__.__name__):
raise ValueError('inconsistent nodes: {} and {}'.format(n, o))
yield n, o
for f in n._fields:
n_child = getattr(n, f, None)
o_child = getattr(o, f, None)
if f.startswith('__') or n_child is None or o_child is None:
continue
if isinstance(n_child, (list, tuple)):
if (not isinstance(o_child, (list, tuple)) or
len(n_child) != len(o_child)):
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
node_stack.extend(n_child)
other_stack.extend(o_child)
elif isinstance(n_child, (gast.AST, ast.AST)):
node_stack.append(n_child)
other_stack.append(o_child)
elif n_child != o_child:
raise ValueError(
'inconsistent values for field {}: {} and {}'.format(
f, n_child, o_child))
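# Editorial usage sketch: parallel_walk yields corresponding node pairs from two
# structurally identical trees, e.g. for copying annotations from an original
# AST onto a transformed copy of it.
#
#   for original_node, copied_node in parallel_walk(original_tree, copied_tree):
#       ...  # e.g. transfer per-node metadata from original_node to copied_node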
|
|
from Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'StringInterpolationExpr': 64,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
'FunctionCallArgument': 96,
'TupleElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
'FunctionCallArgumentList': 164,
'TupleElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringInterpolationSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
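# Editorial usage sketch (hypothetical call site; `SYNTAX_NODES` is assumed to
# be the node-definition list maintained alongside this table):
#
#   verify_syntax_node_serialization_codes(SYNTAX_NODES,
#                                          SYNTAX_NODE_SERIALIZATION_CODES)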
|
|
"""
Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_published=True)
class CustomQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
class BaseCustomManager(models.Manager):
def __init__(self, arg):
super().__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
class CustomInitQuerySet(models.QuerySet):
# QuerySet with an __init__() method that takes an additional argument.
def __init__(self, custom_optional_arg=None, model=None, query=None, using=None, hints=None):
super().__init__(model=model, query=query, using=using, hints=hints)
class DeconstructibleCustomManager(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super().__init__(a)
class FunPeopleManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(fun=True)
class BoringPeopleManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(fun=False)
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=False)
favorite_book = models.ForeignKey('Book', models.SET_NULL, null=True, related_name='favorite_books')
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = PersonManager()
fun_people = FunPeopleManager()
boring_people = BoringPeopleManager()
custom_queryset_default_manager = CustomQuerySet.as_manager()
custom_queryset_custom_manager = CustomManager('hello')
custom_init_queryset_manager = CustomInitQuerySet.as_manager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
class FunPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=True)
favorite_book = models.ForeignKey(
'Book',
models.SET_NULL,
null=True,
related_name='fun_people_favorite_books',
)
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = FunPeopleManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
fun_authors = models.ManyToManyField(FunPerson, related_name='books')
favorite_things = GenericRelation(
Person,
content_type_field='favorite_thing_type',
object_id_field='favorite_thing_id',
)
fun_people_favorite_things = GenericRelation(
FunPerson,
content_type_field='favorite_thing_type',
object_id_field='favorite_thing_id',
)
def __str__(self):
return self.title
class FastCarManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(top_speed__gt=150)
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
class FastCarAsBase(Car):
class Meta:
proxy = True
base_manager_name = 'fast_cars'
class FastCarAsDefault(Car):
class Meta:
proxy = True
default_manager_name = 'fast_cars'
class RestrictedManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_public=True)
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
class AbstractPerson(models.Model):
abstract_persons = models.Manager()
objects = models.CharField(max_length=30)
class Meta:
abstract = True
class PersonFromAbstract(AbstractPerson):
pass
|
|
"""Tests for ETL pipelines"""
from contextlib import contextmanager
from importlib import reload
from unittest.mock import patch
from course_catalog.constants import PlatformType
from course_catalog.etl import pipelines
from course_catalog.etl.constants import (
ProgramLoaderConfig,
CourseLoaderConfig,
LearningResourceRunLoaderConfig,
OfferedByLoaderConfig,
)
@contextmanager
def reload_mocked_pipeline(*patchers):
"""Create a context that is rolled back after executing the pipeline"""
mocks = [patcher.start() for patcher in patchers]
reload(pipelines)
yield mocks
for patcher in patchers:
patcher.stop()
reload(pipelines)
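# Editorial note on the helper above: the reload appears to be needed because
# the pipeline callables in course_catalog.etl.pipelines are presumably composed
# at import time, so the patches only take effect if the module is re-imported
# while they are active; reloading again afterwards restores the real pipelines.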
def test_micromasters_etl():
"""Verify that micromasters etl pipeline executes correctly"""
values = [1, 2, 3]
with reload_mocked_pipeline(
patch("course_catalog.etl.micromasters.extract", autospec=True),
patch(
"course_catalog.etl.micromasters.transform",
return_value=values,
autospec=True,
),
patch("course_catalog.etl.loaders.load_programs", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_programs = patches
result = pipelines.micromasters_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_programs.assert_called_once_with(
PlatformType.micromasters.value,
mock_transform.return_value,
config=ProgramLoaderConfig(
courses=CourseLoaderConfig(
offered_by=OfferedByLoaderConfig(additive=True),
runs=LearningResourceRunLoaderConfig(
offered_by=OfferedByLoaderConfig(additive=True)
),
)
),
)
assert result == mock_load_programs.return_value
def test_xpro_programs_etl():
"""Verify that xpro programs etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.xpro.extract_programs", autospec=True),
patch("course_catalog.etl.xpro.transform_programs", autospec=True),
patch("course_catalog.etl.loaders.load_programs", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_programs = patches
result = pipelines.xpro_programs_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_programs.assert_called_once_with(
PlatformType.xpro.value, mock_transform.return_value
)
assert result == mock_load_programs.return_value
def test_xpro_courses_etl():
"""Verify that xpro courses etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.xpro.extract_courses", autospec=True),
patch("course_catalog.etl.xpro.transform_courses", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses = patches
result = pipelines.xpro_courses_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_courses.assert_called_once_with(
PlatformType.xpro.value, mock_transform.return_value
)
assert result == mock_load_courses.return_value
def test_mitx_etl():
"""Verify that mitx etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.mitx.extract", autospec=True),
patch("course_catalog.etl.mitx.transform", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
patch("course_catalog.etl.ocw.upload_mitx_course_manifest", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses, mock_upload_manifest = patches
result = pipelines.mitx_etl()
mock_extract.assert_called_once_with()
# each of these should be called with the return value of the extract
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_upload_manifest.assert_called_once_with(mock_extract.return_value)
# load_courses should be called *only* with the return value of transform
mock_load_courses.assert_called_once_with(
PlatformType.mitx.value,
mock_transform.return_value,
config=CourseLoaderConfig(
offered_by=OfferedByLoaderConfig(additive=True),
runs=LearningResourceRunLoaderConfig(
offered_by=OfferedByLoaderConfig(additive=True)
),
),
)
assert result == mock_load_courses.return_value
def test_oll_etl():
"""Verify that OLL etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.oll.extract", autospec=True),
patch("course_catalog.etl.oll.transform", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses = patches
result = pipelines.oll_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_courses.assert_called_once_with(
PlatformType.oll.value, mock_transform.return_value
)
assert result == mock_load_courses.return_value
def test_see_etl():
"""Verify that SEE etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.see.extract", autospec=True),
patch("course_catalog.etl.see.transform", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses = patches
result = pipelines.see_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_courses.assert_called_once_with(
PlatformType.see.value,
mock_transform.return_value,
config=CourseLoaderConfig(prune=False),
)
assert result == mock_load_courses.return_value
def test_mitpe_etl():
"""Verify that MITPE etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.mitpe.extract", autospec=True),
patch("course_catalog.etl.mitpe.transform", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses = patches
result = pipelines.mitpe_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_courses.assert_called_once_with(
PlatformType.mitpe.value,
mock_transform.return_value,
config=CourseLoaderConfig(prune=False),
)
assert result == mock_load_courses.return_value
def test_csail_etl():
"""Verify that CSAIL etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.csail.extract", autospec=True),
patch("course_catalog.etl.csail.transform", autospec=True),
patch("course_catalog.etl.loaders.load_courses", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_courses = patches
result = pipelines.csail_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_courses.assert_called_once_with(
PlatformType.csail.value,
mock_transform.return_value,
config=CourseLoaderConfig(prune=False),
)
assert result == mock_load_courses.return_value
def test_youtube_etl():
"""Verify that youtube etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.youtube.extract", autospec=True),
patch("course_catalog.etl.youtube.transform", autospec=True),
patch("course_catalog.etl.loaders.load_video_channels", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_video_channels = patches
result = pipelines.youtube_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_video_channels.assert_called_once_with(mock_transform.return_value)
assert result == mock_load_video_channels.return_value
def test_podcast_etl():
"""Verify that podcast etl pipeline executes correctly"""
with reload_mocked_pipeline(
patch("course_catalog.etl.podcast.extract", autospec=True),
patch("course_catalog.etl.podcast.transform", autospec=True),
patch("course_catalog.etl.loaders.load_podcasts", autospec=True),
) as patches:
mock_extract, mock_transform, mock_load_podcasts = patches
result = pipelines.podcast_etl()
mock_extract.assert_called_once_with()
mock_transform.assert_called_once_with(mock_extract.return_value)
mock_load_podcasts.assert_called_once_with(mock_transform.return_value)
assert result == mock_load_podcasts.return_value
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
import shutil
import socket
import tempfile
import threading
from six import BytesIO
from six.moves import http_client
import tensorflow as tf
from werkzeug import serving
from tensorflow.tensorboard import main as tensorboard
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins import base_plugin
class FakePlugin(base_plugin.TBPlugin):
"""A plugin with no functionality."""
def __init__(self, plugin_name, is_active_value, routes_mapping):
"""Constructs a fake plugin.
Args:
plugin_name: The name of this plugin.
is_active_value: Whether the plugin is active.
routes_mapping: A dictionary mapping from route (string URL path) to the
method called when a user issues a request to that route.
"""
self.plugin_name = plugin_name
self._is_active_value = is_active_value
self._routes_mapping = routes_mapping
def get_plugin_apps(self, multiplexer, logdir):
"""Returns a mapping from routes to handlers offered by this plugin.
Args:
multiplexer: The event multiplexer.
logdir: The path to the directory containing logs.
Returns:
A dictionary mapping from routes to handlers offered by this plugin.
"""
return self._routes_mapping
def is_active(self):
"""Returns whether this plugin is active.
Returns:
A boolean. Whether this plugin is active.
"""
return self._is_active_value
class TensorboardServerTest(tf.test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
def setUp(self):
self.logdir = self.get_temp_dir()
self._GenerateTestData(run_name='run1')
self._multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
plugins = [
FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),
FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})
]
app = application.TensorBoardWSGIApp(
self.logdir, plugins, self._multiplexer, reload_interval=0)
try:
self._server = serving.BaseWSGIServer('localhost', 0, app)
# 0 to pick an unused port.
except IOError:
# BaseWSGIServer has a preference for IPv4. If that didn't work, try again
# with an explicit IPv6 address.
self._server = serving.BaseWSGIServer('::1', 0, app)
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers=None):
"""Perform a GET request for the given path."""
if headers is None:
headers = {}
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': self.logdir})
def testPluginsListing(self):
"""Test the format of the data/plugins_listing endpoint."""
parsed_object = self._getJson('/data/plugins_listing')
# Plugin foo is active. Plugin bar is not.
self.assertEqual(parsed_object, {'foo': True, 'bar': False})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
self.assertEqual(run_json, ['run1'])
def testRunsAppendOnly(self):
"""Test that new runs appear after old ones in /data/runs."""
# We use three runs: the 'run1' that we already created in our
# `setUp` method, plus runs with names lexicographically before and
# after it (so that just sorting by name doesn't have a chance of
# working).
fake_wall_times = {
'run1': 1234.0,
'avocado': 2345.0,
'zebra': 3456.0,
'mysterious': None,
}
stubs = tf.test.StubOutForTesting()
# pylint: disable=invalid-name
def FirstEventTimestamp_stub(multiplexer_self, run_name):
del multiplexer_self
matches = [candidate_name
for candidate_name in fake_wall_times
if run_name.endswith(candidate_name)]
self.assertEqual(len(matches), 1, '%s (%s)' % (matches, run_name))
wall_time = fake_wall_times[matches[0]]
if wall_time is None:
raise ValueError('No event timestamp could be found')
else:
return wall_time
# pylint: enable=invalid-name
stubs.SmartSet(self._multiplexer,
'FirstEventTimestamp',
FirstEventTimestamp_stub)
def add_run(run_name):
self._GenerateTestData(run_name)
self._multiplexer.AddRunsFromDirectory(self.logdir)
self._multiplexer.Reload()
# Add one run: it should come last.
add_run('avocado')
self.assertEqual(self._getJson('/data/runs'),
['run1', 'avocado'])
# Add another run: it should come last, too.
add_run('zebra')
self.assertEqual(self._getJson('/data/runs'),
['run1', 'avocado', 'zebra'])
# And maybe there's a run for which we somehow have no timestamp.
add_run('mysterious')
self.assertEqual(self._getJson('/data/runs'),
['run1', 'avocado', 'zebra', 'mysterious'])
stubs.UnsetAll()
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(
response.getheader('Cache-Control'),
'private, max-age=3600',
msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs', '/data/logdir'):
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def _GenerateTestData(self, run_name):
"""Generates the test data directory.
The test data has a single run of the given name, containing:
- a graph definition and metagraph definition
Arguments:
run_name: the directory under self.logdir into which to write
events
"""
run_path = os.path.join(self.logdir, run_name)
os.makedirs(run_path)
writer = tf.summary.FileWriter(run_path)
# Add a simple graph event.
graph_def = tf.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = tf.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
writer.flush()
writer.close()
class TensorboardServerPluginNameTest(tf.test.TestCase):
def _test(self, name, should_be_okay):
temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir)
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
plugins = [
FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),
FakePlugin(plugin_name=name, is_active_value=True, routes_mapping={}),
FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})
]
if should_be_okay:
application.TensorBoardWSGIApp(
temp_dir, plugins, multiplexer, reload_interval=0)
else:
with self.assertRaisesRegexp(ValueError, r'invalid name'):
application.TensorBoardWSGIApp(
temp_dir, plugins, multiplexer, reload_interval=0)
def testEmptyName(self):
self._test('', False)
def testNameWithSlashes(self):
self._test('scalars/data', False)
def testNameWithSpaces(self):
self._test('my favorite plugin', False)
def testSimpleName(self):
self._test('scalars', True)
def testComprehensiveName(self):
self._test('Scalar-Dashboard_3000.1', True)
class TensorboardServerPluginRouteTest(tf.test.TestCase):
def _test(self, route, should_be_okay):
temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir)
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
plugins = [
FakePlugin(
plugin_name='foo',
is_active_value=True,
routes_mapping={route: lambda environ, start_response: None}),
]
if should_be_okay:
application.TensorBoardWSGIApp(
temp_dir, plugins, multiplexer, reload_interval=0)
else:
with self.assertRaisesRegexp(ValueError, r'invalid route'):
application.TensorBoardWSGIApp(
temp_dir, plugins, multiplexer, reload_interval=0)
def testNormalRoute(self):
self._test('/runs', True)
def testEmptyRoute(self):
self._test('', False)
def testSlashlessRoute(self):
self._test('runaway', False)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
def testRunName(self):
logdir = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testMultipleDirectories(self):
logdir = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testNormalizesPaths(self):
logdir = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testAbsolutifies(self):
logdir = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsGCSPath(self):
logdir = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsHDFSPath(self):
logdir = 'hdfs://foo/path'
expected = {'hdfs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotNormalizeGCSPath(self):
logdir = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRunNameWithGCSPath(self):
logdir = 'lol:gs://foo/path'
expected = {'gs://foo/path': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
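# Editorial summary of the logdir spec exercised above: a spec is a
# comma-separated list of entries, each either a bare path or `run_name:path`;
# a colon that follows a slash is treated as part of the path, plain paths are
# absolutized and normalized, and gs:// or hdfs:// paths are passed through
# untouched.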
class TensorBoardAssetsTest(tf.test.TestCase):
def testTagFound(self):
tag = application.get_tensorboard_tag()
self.assertTrue(tag)
app = application.standard_tensorboard_wsgi('', True, 60, [])
self.assertEqual(app.tag, tag)
class TensorBoardPluginsTest(tf.test.TestCase):
def testPluginsAdded(self):
def foo_handler():
pass
def bar_handler():
pass
plugins = [
FakePlugin(
plugin_name='foo',
is_active_value=True,
routes_mapping={'/foo_route': foo_handler}),
FakePlugin(
plugin_name='bar',
is_active_value=True,
routes_mapping={'/bar_route': bar_handler}),
]
# The application should have added routes for both plugins.
app = application.standard_tensorboard_wsgi('', True, 60, plugins)
# The routes are prefixed with /data/plugin/[plugin name].
self.assertDictContainsSubset({
'/data/plugin/foo/foo_route': foo_handler,
'/data/plugin/bar/bar_route': bar_handler,
}, app.data_applications)
class TensorboardSimpleServerConstructionTest(tf.test.TestCase):
"""Tests that the default HTTP server is constructed without error.
Mostly useful for IPv4/IPv6 testing. This test should run with only IPv4, only
IPv6, and both IPv4 and IPv6 enabled.
"""
class _StubApplication(object):
tag = ''
def testMakeServerBlankHost(self):
# Test that we can bind to all interfaces without throwing an error
server, url = tensorboard.make_simple_server(
self._StubApplication(),
host='',
port=0) # Grab any available port
self.assertTrue(server)
self.assertTrue(url)
def testSpecifiedHost(self):
one_passed = False
try:
_, url = tensorboard.make_simple_server(
self._StubApplication(),
host='127.0.0.1',
port=0)
self.assertStartsWith(actual=url, expected_start='http://127.0.0.1:')
one_passed = True
except socket.error:
# IPv4 is not supported
pass
try:
_, url = tensorboard.make_simple_server(
self._StubApplication(),
host='::1',
port=0)
self.assertStartsWith(actual=url, expected_start='http://[::1]:')
one_passed = True
except socket.error:
# IPv6 is not supported
pass
self.assertTrue(one_passed) # We expect either IPv4 or IPv6 to be supported
class TensorBoardApplicationConstructionTest(tf.test.TestCase):
def testExceptions(self):
logdir = '/fake/foo'
multiplexer = event_multiplexer.EventMultiplexer()
# Fails if there is an unnamed plugin
with self.assertRaises(ValueError):
# This plugin lacks a name.
plugins = [
FakePlugin(plugin_name=None, is_active_value=True, routes_mapping={})
]
application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
# Fails if there are two plugins with same name
with self.assertRaises(ValueError):
plugins = [
FakePlugin(
plugin_name='foo', is_active_value=True, routes_mapping={}),
FakePlugin(
plugin_name='foo', is_active_value=True, routes_mapping={}),
]
application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
if __name__ == '__main__':
tf.test.main()
|
|
"""
Author: Nathan Sprague
"""
import numpy as np
import theano
import unittest
import numpy.testing
import lasagne
import deep_q_rl.q_network as q_network
class ChainMDP(object):
"""Simple markov chain style MDP. Three "rooms" and one absorbing
state. States are encoded for the q_network as arrays with
indicator entries. E.g. [1, 0, 0, 0] encodes state 0, and [0, 1,
0, 0] encodes state 1. The absorbing state is [0, 0, 0, 1]
Action 0 moves the agent left, departing the maze if it is in state 0.
Action 1 moves the agent to the right, departing the maze if it is in
state 2.
The agent receives a reward of .7 for departing the chain on the left, and
a reward of 1.0 for departing the chain on the right.
Assuming deterministic actions and a discount rate of .5, the
correct Q-values are:
.7|.25, .35|.5, .25|1.0, 0|0
"""
def __init__(self, success_prob=1.0):
self.num_actions = 2
self.num_states = 4
self.success_prob = success_prob
self.actions = [np.array([[0]], dtype='int32'),
np.array([[1]], dtype='int32')]
self.reward_zero = np.array([[0]], dtype=theano.config.floatX)
self.reward_left = np.array([[.7]], dtype=theano.config.floatX)
self.reward_right = np.array([[1.0]], dtype=theano.config.floatX)
self.states = []
for i in range(self.num_states):
self.states.append(np.zeros((1, 1, 1, self.num_states),
dtype=theano.config.floatX))
self.states[-1][0, 0, 0, i] = 1
def act(self, state, action_index):
"""
action 0 is left, 1 is right.
"""
state_index = np.nonzero(state[0, 0, 0, :])[0][0]
next_index = state_index
if np.random.random() < self.success_prob:
next_index = state_index + action_index * 2 - 1
# Exit left
if next_index == -1:
return self.reward_left, self.states[-1], np.array([[True]])
# Exit right
if next_index == self.num_states - 1:
return self.reward_right, self.states[-1], np.array([[True]])
if np.random.random() < self.success_prob:
return (self.reward_zero,
self.states[state_index + action_index * 2 - 1],
np.array([[False]]))
else:
return (self.reward_zero, self.states[state_index],
np.array([[False]]))
class LinearTests(unittest.TestCase):
"""With no neural network, and simple sgd updates, the deep
Q-learning code operates as good-ol-fashioned Q-learning. These
tests check that the basic updates code is working correctly.
"""
def setUp(self):
# Divide the desired learning rate by two, because loss is
# defined as L^2, not 1/2 L^2.
self.learning_rate = .1 / 2.0
self.discount = .5
self.mdp = ChainMDP()
def all_q_vals(self, net):
""" Helper method to get the entire Q-table """
q_vals = np.zeros((self.mdp.num_states, self.mdp.num_actions))
for i in range(self.mdp.num_states):
q_vals[i, :] = net.q_vals(self.mdp.states[i])
return q_vals
def train(self, net, steps):
mdp = self.mdp
for _ in range(steps):
state = mdp.states[np.random.randint(0, mdp.num_states-1)]
action_index = np.random.randint(0, mdp.num_actions)
reward, next_state, terminal = mdp.act(state, action_index)
net.train(state, mdp.actions[action_index], reward, next_state,
terminal)
def test_updates_sgd_no_freeze(self):
freeze_interval = -1
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
mdp = self.mdp
# Depart left:
state = mdp.states[0]
action_index = 0
reward, next_state, terminal = mdp.act(state, action_index)
net.train(state, mdp.actions[action_index], reward, next_state,
terminal)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.07, 0], [0, 0], [0, 0], [0, 0]])
# Depart right:
state = mdp.states[-2]
action_index = 1
reward, next_state, terminal = mdp.act(state, action_index)
net.train(state, mdp.actions[action_index], reward, next_state,
terminal)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.07, 0], [0, 0], [0, .1], [0, 0]])
# Move into leftmost state
state = mdp.states[1]
action_index = 0
reward, next_state, terminal = mdp.act(state, action_index)
net.train(state, mdp.actions[action_index], reward, next_state,
terminal)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.07, 0], [0.0035, 0], [0, .1],
[0, 0]])
def test_convergence_sgd_no_freeze(self):
freeze_interval = -1
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
self.train(net, 1000)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.7, .25], [.35, .5],
[.25, 1.0], [0., 0.]], 3)
def test_convergence_random_initialization(self):
""" This test will only pass if terminal states are handled
correctly. Otherwise the random initialization of the value of the
terminal state will propagate back.
"""
freeze_interval = -1
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
# Randomize initial q-values:
params = lasagne.layers.helper.get_all_param_values(net.l_out)
rand = np.random.random(params[0].shape)
rand = numpy.array(rand, dtype=theano.config.floatX)
lasagne.layers.helper.set_all_param_values(net.l_out, [rand])
self.train(net, 1000)
numpy.testing.assert_almost_equal(self.all_q_vals(net)[0:3,:],
[[.7, .25],
[.35, .5],
[.25, 1.0]], 3)
def test_convergence_sgd_permanent_freeze(self):
freeze_interval = 1000000
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
self.train(net, 1000)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.7, 0], [0, 0],
[0, 1.0], [0., 0.]], 3)
def test_convergence_sgd_frequent_freeze(self):
freeze_interval = 2
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
self.train(net, 1000)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.7, .25], [.35, .5],
[.25, 1.0], [0., 0.]], 3)
def test_convergence_sgd_one_freeze(self):
freeze_interval = 500
net = q_network.DeepQLearner(self.mdp.num_states, 1,
self.mdp.num_actions, 1,
self.discount,
self.learning_rate, 0, 0, 0, 0,
freeze_interval, 1, 'linear',
'sgd', 'sum', 1.0)
self.train(net, freeze_interval * 2)
numpy.testing.assert_almost_equal(self.all_q_vals(net),
[[.7, 0], [.35, .5],
[0, 1.0], [0., 0.]], 3)
if __name__ == "__main__":
unittest.main()
|
|
'''A multi-producer, multi-consumer queue.'''
try:
import threading
except ImportError:
import dummy_threading as threading
from collections import deque
from heapq import heappush, heappop
try:
from time import monotonic as time
except ImportError:
from time import time
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
'Exception raised by Queue.get(block=0)/get_nowait().'
pass
class Full(Exception):
'Exception raised by Queue.put(block=0)/put_nowait().'
pass
class Queue:
'''Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
'''
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
heappush(self.queue, item)
def _get(self):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# hardirqs Summarize hard IRQ (interrupt) event time.
# For Linux, uses BCC, eBPF.
#
# USAGE: hardirqs [-h] [-T] [-N] [-C] [-d] [-c CPU] [interval] [outputs]
#
# Thanks Amer Ather for help understanding irq behavior.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Oct-2015 Brendan Gregg Created this.
# 22-May-2021 Hengqi Chen Migrated to kernel tracepoints.
# 07-Mar-2022 Rocky Xing Added CPU filter support.
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
# arguments
examples = """examples:
./hardirqs # sum hard irq event time
./hardirqs -d # show hard irq event time as histograms
./hardirqs 1 10 # print 1 second summaries, 10 times
./hardirqs -NT 1 # 1s summaries, nanoseconds, and timestamps
./hardirqs -c 1 # sum hard irq event time on CPU 1 only
"""
parser = argparse.ArgumentParser(
description="Summarize hard irq event time as histograms",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-N", "--nanoseconds", action="store_true",
help="output in nanoseconds")
parser.add_argument("-C", "--count", action="store_true",
help="show event counts instead of timing")
parser.add_argument("-d", "--dist", action="store_true",
help="show distributions as histograms")
parser.add_argument("-c", "--cpu", type=int,
help="trace this CPU only")
parser.add_argument("interval", nargs="?", default=99999999,
help="output interval, in seconds")
parser.add_argument("outputs", nargs="?", default=99999999,
help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
countdown = int(args.outputs)
if args.count and (args.dist or args.nanoseconds):
print("The --count option can't be used with time-based options")
exit()
if args.count:
factor = 1
label = "count"
elif args.nanoseconds:
factor = 1
label = "nsecs"
else:
factor = 1000
label = "usecs"
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/interrupt.h>
// Add cpu_id as part of the key for the irq entry event to handle the case in
// which an irq is triggered while the idle thread (swapper/x, tid=0) is running
// on a cpu core. See pull requests #2804 and #3733 for more detail.
typedef struct entry_key {
u32 tid;
u32 cpu_id;
} entry_key_t;
typedef struct irq_key {
char name[32];
u64 slot;
} irq_key_t;
typedef struct irq_name {
char name[32];
} irq_name_t;
BPF_HASH(start, entry_key_t);
BPF_HASH(irqnames, entry_key_t, irq_name_t);
BPF_HISTOGRAM(dist, irq_key_t);
"""
bpf_text_count = """
TRACEPOINT_PROBE(irq, irq_handler_entry)
{
struct entry_key key = {};
irq_name_t name = {};
u32 cpu = bpf_get_smp_processor_id();
FILTER_CPU
key.tid = bpf_get_current_pid_tgid();
key.cpu_id = cpu;
TP_DATA_LOC_READ_STR(&name.name, name, sizeof(name));
irqnames.update(&key, &name);
return 0;
}
TRACEPOINT_PROBE(irq, irq_handler_exit)
{
struct entry_key key = {};
u32 cpu = bpf_get_smp_processor_id();
FILTER_CPU
key.tid = bpf_get_current_pid_tgid();
key.cpu_id = cpu;
    // Check that the irq handler's return value is not IRQ_NONE to make sure
    // the current event belongs to this irq handler.
if (args->ret != IRQ_NONE) {
irq_name_t *namep;
namep = irqnames.lookup(&key);
if (namep == 0) {
return 0; // missed irq name
}
char *name = (char *)namep->name;
irq_key_t key = {.slot = 0 /* ignore */};
bpf_probe_read_kernel(&key.name, sizeof(key.name), name);
dist.atomic_increment(key);
}
irqnames.delete(&key);
return 0;
}
"""
bpf_text_time = """
TRACEPOINT_PROBE(irq, irq_handler_entry)
{
u64 ts = bpf_ktime_get_ns();
irq_name_t name = {};
struct entry_key key = {};
u32 cpu = bpf_get_smp_processor_id();
FILTER_CPU
key.tid = bpf_get_current_pid_tgid();
key.cpu_id = cpu;
TP_DATA_LOC_READ_STR(&name.name, name, sizeof(name));
irqnames.update(&key, &name);
start.update(&key, &ts);
return 0;
}
TRACEPOINT_PROBE(irq, irq_handler_exit)
{
u64 *tsp, delta;
irq_name_t *namep;
struct entry_key key = {};
u32 cpu = bpf_get_smp_processor_id();
key.tid = bpf_get_current_pid_tgid();
key.cpu_id = cpu;
    // Check that the irq handler's return value is not IRQ_NONE to make sure
    // the current event belongs to this irq handler.
if (args->ret != IRQ_NONE) {
// fetch timestamp and calculate delta
tsp = start.lookup(&key);
namep = irqnames.lookup(&key);
if (tsp == 0 || namep == 0) {
return 0; // missed start
}
char *name = (char *)namep->name;
delta = bpf_ktime_get_ns() - *tsp;
// store as sum or histogram
STORE
}
start.delete(&key);
irqnames.delete(&key);
return 0;
}
"""
if args.count:
bpf_text += bpf_text_count
else:
bpf_text += bpf_text_time
# code substitutions
if args.dist:
bpf_text = bpf_text.replace('STORE',
'irq_key_t key = {.slot = bpf_log2l(delta / %d)};' % factor +
'bpf_probe_read_kernel(&key.name, sizeof(key.name), name);' +
'dist.atomic_increment(key);')
else:
bpf_text = bpf_text.replace('STORE',
'irq_key_t key = {.slot = 0 /* ignore */};' +
'bpf_probe_read_kernel(&key.name, sizeof(key.name), name);' +
'dist.atomic_increment(key, delta);')
if args.cpu is not None:
bpf_text = bpf_text.replace('FILTER_CPU',
'if (cpu != %d) { return 0; }' % int(args.cpu))
else:
bpf_text = bpf_text.replace('FILTER_CPU', '')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# load BPF program
b = BPF(text=bpf_text)
if args.count:
print("Tracing hard irq events... Hit Ctrl-C to end.")
else:
print("Tracing hard irq event time... Hit Ctrl-C to end.")
# output
exiting = 0 if args.interval else 1
dist = b.get_table("dist")
while (1):
try:
sleep(int(args.interval))
except KeyboardInterrupt:
exiting = 1
print()
if args.timestamp:
print("%-8s\n" % strftime("%H:%M:%S"), end="")
if args.dist:
dist.print_log2_hist(label, "hardirq")
else:
print("%-26s %11s" % ("HARDIRQ", "TOTAL_" + label))
for k, v in sorted(dist.items(), key=lambda dist: dist[1].value):
print("%-26s %11d" % (k.name.decode('utf-8', 'replace'), v.value / factor))
dist.clear()
countdown -= 1
if exiting or countdown == 0:
exit()
|
|
from django.db import models
from datetime import date
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse
from wagtail.wagtailcore.models import Page, Orderable
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, MultiFieldPanel, InlinePanel
)
from wagtail.wagtailsearch import index
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase, Tag
from modelcluster.fields import ParentalKey
from utils.models import LinkFields, RelatedLink, CarouselItem
from .event_utils import export_event
EVENT_AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
class EventIndexPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('events.EventIndexPage', related_name='related_links')
class EventIndexPage(Page):
intro = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
@property
def events(self):
events = EventPage.objects.live().descendant_of(self)
events = events.filter(date_from__gte=date.today())
events = events.order_by('date_from')
return events
def get_context(self, request):
# Get events
events = self.events
# Filter by tag
tag = request.GET.get('tag')
if tag:
events = events.filter(tags__name=tag)
# Pagination
page = request.GET.get('page')
        paginator = Paginator(events, 9) # Show 9 events per page
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
# Update template context
context = super(EventIndexPage, self).get_context(request)
context['events'] = events
context['tags'] = Tag.objects.filter(
events_eventpagetag_items__isnull=False,
events_eventpagetag_items__content_object__live=True
).distinct().order_by('name')
return context
EventIndexPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
EventIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class EventPageCarouselItem(Orderable, CarouselItem):
page = ParentalKey('events.EventPage', related_name='carousel_items')
class EventPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('events.EventPage', related_name='related_links')
class EventPageSpeaker(Orderable, LinkFields):
page = ParentalKey('events.EventPage', related_name='speakers')
full_name = models.CharField("Name", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def name_display(self):
return self.full_name
panels = [
FieldPanel('full_name'),
ImageChooserPanel('image'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class EventPageTag(TaggedItemBase):
content_object = ParentalKey('events.EventPage',
related_name='tagged_items')
class EventPage(Page):
date_from = models.DateField("Start date")
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models.CharField(
max_length=255, choices=EVENT_AUDIENCE_CHOICES, null=True, blank=True
)
location = models.CharField(max_length=255, null=True, blank=True)
body = RichTextField(blank=True)
cost = models.CharField(max_length=255, null=True, blank=True)
signup_link = models.URLField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
tags = ClusterTaggableManager(through=EventPageTag, blank=True)
search_fields = Page.search_fields + [
index.SearchField('get_audience_display'),
index.SearchField('location'),
index.SearchField('body'),
]
parent_page_types = ['events.EventIndexPage']
@property
def event_index(self):
return self.get_ancestors().type(EventIndexPage).last()
def serve(self, request):
if "format" in request.GET:
if request.GET['format'] == 'ical':
# Export to ical format
response = HttpResponse(
export_event(self, 'ical'),
content_type='text/calendar',
)
content_dispo = 'attachment; filename=' + self.slug + '.ics'
response['Content-Disposition'] = content_dispo
return response
else:
message = 'Could not export event\n\nUnrecognised format: ' + \
request.GET['format']
return HttpResponse(message, content_type='text/plain')
else:
return super(EventPage, self).serve(request)
EventPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('date_from'),
FieldPanel('date_to'),
FieldPanel('time_from'),
FieldPanel('time_to'),
FieldPanel('location'),
FieldPanel('audience'),
FieldPanel('cost'),
FieldPanel('signup_link'),
FieldPanel('tags'),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('speakers', label="Speakers"),
InlinePanel('related_links', label="Related links"),
]
EventPage.promote_panels = Page.promote_panels + [
ImageChooserPanel('feed_image'),
]
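# --- Illustrative sketch (not part of the original module) ---
# The try/except pagination pattern used in EventIndexPage.get_context() above,
# exercised against a plain sliceable object instead of a queryset; the page
# size and the requested page value below are arbitrary demo inputs.
if __name__ == '__main__':
    demo_paginator = Paginator(range(25), 9)
    requested_page = 'not-a-number'
    try:
        demo_page = demo_paginator.page(requested_page)
    except PageNotAnInteger:
        demo_page = demo_paginator.page(1)
    except EmptyPage:
        demo_page = demo_paginator.page(demo_paginator.num_pages)
    print(list(demo_page.object_list))  # first page: [0, 1, ..., 8]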
|
|
# Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, volumes, and floating ips."""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import importutils
import six
import webob
from neutron.common import exceptions
from neutron.i18n import _LI, _LW
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
QUOTA_DB_MODULE = 'neutron.db.quota.driver'
QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE
QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver'
default_quota_items = ['network', 'subnet', 'port']
quota_opts = [
cfg.ListOpt('quota_items',
default=default_quota_items,
deprecated_for_removal=True,
help=_('Resource name(s) that are supported in quota '
'features. This option is now deprecated for '
'removal.')),
cfg.IntOpt('default_quota',
default=-1,
help=_('Default number of resource allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_network',
default=10,
help=_('Number of networks allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_subnet',
default=10,
               help=_('Number of subnets allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_port',
default=50,
help=_('Number of ports allowed per tenant. '
'A negative value means unlimited.')),
cfg.StrOpt('quota_driver',
default=QUOTA_DB_DRIVER,
help=_('Default driver to use for quota checks')),
cfg.BoolOpt('track_quota_usage',
default=True,
                help=_('Keep track in the database of current resource '
'quota usage. Plugins which do not leverage the '
'neutron database should set this flag to False')),
]
# Register the configuration options
cfg.CONF.register_opts(quota_opts, 'QUOTAS')
class ConfDriver(object):
"""Configuration driver.
Driver to perform necessary checks to enforce quotas and obtain
quota information. The default driver utilizes the default values
in neutron.conf.
"""
def _get_quotas(self, context, resources):
"""Get quotas.
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
context.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
"""
quotas = {}
for resource in resources.values():
quotas[resource.name] = resource.default
return quotas
def limit_check(self, context, tenant_id,
resources, values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
        :param tenant_id: The tenant_id to check quota.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
"""
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
if unders:
raise exceptions.InvalidQuotaValue(unders=sorted(unders))
# Get the applicable quotas
quotas = self._get_quotas(context, resources)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
overs = [key for key, val in values.items()
if quotas[key] >= 0 and quotas[key] < val]
if overs:
raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
@staticmethod
def get_tenant_quotas(context, resources, tenant_id):
quotas = {}
sub_resources = dict((k, v) for k, v in resources.items())
for resource in sub_resources.values():
quotas[resource.name] = resource.default
return quotas
@staticmethod
def get_all_quotas(context, resources):
return []
@staticmethod
def delete_tenant_quota(context, tenant_id):
msg = _('Access to this resource was denied.')
raise webob.exc.HTTPForbidden(msg)
@staticmethod
def update_quota_limit(context, tenant_id, resource, limit):
msg = _('Access to this resource was denied.')
raise webob.exc.HTTPForbidden(msg)
class QuotaEngine(object):
"""Represent the set of recognized quotas."""
_instance = None
@classmethod
def get_instance(cls):
if not cls._instance:
cls._instance = cls()
return cls._instance
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
self._driver = None
self._driver_class = quota_driver_class
def get_driver(self):
if self._driver is None:
_driver_class = (self._driver_class or
cfg.CONF.QUOTAS.quota_driver)
if (_driver_class == QUOTA_DB_DRIVER and
QUOTA_DB_MODULE not in sys.modules):
# If quotas table is not loaded, force config quota driver.
_driver_class = QUOTA_CONF_DRIVER
LOG.info(_LI("ConfDriver is used as quota_driver because the "
"loaded plugin does not support 'quotas' table."))
if isinstance(_driver_class, six.string_types):
_driver_class = importutils.import_object(_driver_class)
if isinstance(_driver_class, ConfDriver):
versionutils.report_deprecated_feature(
LOG, _LW("The quota driver neutron.quota.ConfDriver is "
"deprecated as of Liberty. "
"neutron.db.quota.driver.DbQuotaDriver should "
"be used in its place"))
self._driver = _driver_class
LOG.info(_LI('Loaded quota_driver: %s.'), _driver_class)
return self._driver
def count(self, context, resource_name, *args, **kwargs):
"""Count a resource.
For countable resources, invokes the count() function and
returns its result. Arguments following the context and
resource are passed directly to the count function declared by
the resource.
:param context: The request context, for access checks.
:param resource_name: The name of the resource, as a string.
"""
# Get the resource
res = resource_registry.get_resource(resource_name)
if not res or not hasattr(res, 'count'):
raise exceptions.QuotaResourceUnknown(unknown=[resource_name])
return res.count(context, *args, **kwargs)
def limit_check(self, context, tenant_id, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction. The
values to check are given as keyword arguments, where the key
identifies the specific quota limit to check, and the value is
the proposed value.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a countable resource.
If any of the proposed values exceeds the respective quota defined
for the tenant, an OverQuota exception will be raised.
The exception will include a sorted list with the resources
which exceed the quota limit. Otherwise, the method returns nothing.
:param context: Request context
:param tenant_id: Tenant for which the quota limit is being checked
:param values: Dict specifying requested deltas for each resource
"""
# Verify that resources are managed by the quota engine
requested_resources = set(values.keys())
managed_resources = set([res for res in
resource_registry.get_all_resources()
if res in requested_resources])
# Make sure we accounted for all of them...
unknown_resources = requested_resources - managed_resources
if unknown_resources:
raise exceptions.QuotaResourceUnknown(
unknown=sorted(unknown_resources))
return self.get_driver().limit_check(
context, tenant_id, resource_registry.get_all_resources(), values)
QUOTAS = QuotaEngine.get_instance()
def register_resources_from_config():
# This operation is now deprecated. All the neutron core and extended
# resource for which quota limits are enforced explicitly register
# themselves with the quota engine.
versionutils.report_deprecated_feature(
LOG, _LW("Registering resources to apply quota limits to using the "
"quota_items option is deprecated as of Liberty."
"Resource REST controllers should take care of registering "
"resources with the quota engine."))
for resource_item in (set(cfg.CONF.QUOTAS.quota_items) -
set(default_quota_items)):
resource_registry.register_resource_by_name(resource_item)
register_resources_from_config()
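# --- Illustrative sketch (not part of the original module) ---
# Standalone demonstration of the over-quota comparison performed in
# ConfDriver.limit_check() above, using plain dicts instead of registered
# resources; the resource names and limits are made up for the example.
if __name__ == '__main__':
    demo_quotas = {'network': 10, 'port': -1}   # a negative limit means unlimited
    demo_values = {'network': 12, 'port': 500}
    demo_overs = [key for key, val in demo_values.items()
                  if demo_quotas[key] >= 0 and demo_quotas[key] < val]
    print(sorted(demo_overs))  # ['network'] exceeds its limit; 'port' is unlimited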
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import subprocess
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
# Run the Testcases:
test = test_gbp_nsp_func()
if test.test_gbp_nsp_func_1() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_1')
if test.test_gbp_nsp_func_2() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_2')
if test.test_gbp_nsp_func_3() == 0:
test.cleanup(tc_name='TESTCASE_GBP_NSP_FUNC_3')
test.cleanup()
utils_libs.report_results('test_gbp_nsp_func', 'test_results.txt')
sys.exit(1)
class test_gbp_nsp_func(object):
# Initialize logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_nsp_func.log'
subprocess.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_nsp_func.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_log.addHandler(hdlr)
_log.setLevel(logging.INFO)
_log.setLevel(logging.DEBUG)
def __init__(self):
"""
Init def
"""
self._log.info(
"\n## START OF GBP NETWORK_SERVICE_POLICY FUNCTIONALITY "
"TESTSUITE\n")
self.gbpcfg = config_libs.Gbp_Config()
self.gbpverify = verify_libs.Gbp_Verify()
self.nsp_name = 'demo_nsp'
def cleanup(self, tc_name=''):
if tc_name != '':
self._log.info('%s: FAILED' % (tc_name))
for obj in ['group', 'nsp']:
self.gbpcfg.gbp_del_all_anyobj(obj)
def test_gbp_nsp_func_1(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_1: TO CREATE/REFER/DELETE/VERIFY "
"NTK-SVC-POLICY in PTG\n"
"TEST_STEPS::\n"
"Create two NSPs one with type:ip-pool & ip-single, "
"value:self_subnet and self_subnet\n"
"Verify the attributes & values\n"
"Create two PTGs and reference each one of the above "
"NSP in one of the PTG\n"
"Verify the NSP reference in the PTGs\n"
"Delete the PTG and the NSP\n"
"Verify that NSP got deleted\n"
"##############################################################\n")
# Testcase work-flow starts
        # Create and verify two NSPolicies, both with type=ip_single and
        # value=self_subnet
self._log.info(
'\n## Step 1: Create NSPolicy with type=ip_single & '
'name:self_subnet ##\n')
nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_1',
network_service_params="type=ip_single,name=vip_ip1,"
"value=self_subnet")
if nsp1_uuid == 0:
self._log.info(
"\n## Step 1A: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_2',
network_service_params="type=ip_single,name=vip_ip2,"
"value=self_subnet")
if nsp2_uuid == 0:
self._log.info(
"\n## Step 1B: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
# Verify
self._log.info(
"\n## Step 2: Verify NSPolicies are successfully created")
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'nsp',
nsp1_uuid,
name='demo_nsp_1',
network_service_params='{"type": "ip_single", "name": '
'"vip_ip1", "value": '
'"self_subnet"}') == 0:
self._log.info(
"\n## Step 2A: Verify NSPolicy demo_nsp_1 with valued "
"attributes, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1,
'nsp',
nsp2_uuid,
name='demo_nsp_2',
network_service_params='{"type": "ip_single", '
'"name": "vip_ip2", "value": '
'"self_subnet"}') == 0:
self._log.info(
"\n## Step 2A: Verify NSPolicy demo_nsp_2 with "
"valued attributes, Failed")
return 0
# Create two PTGs, each referencing one of the two NSPs
self._log.info(
"\n## Step 3: Create and Verify two PTGs each "
"referencing one of the two NSPs")
uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
if uuid == 0:
self._log.info(
"\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
return 0
else:
ptg1_uuid = uuid[0]
_uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_2', network_service_policy=nsp2_uuid)
if _uuid == 0:
self._log.info(
"\n## Step 3B: Create PTG using NSP demo_nsp_2,Failed")
return 0
else:
ptg2_uuid = _uuid[0]
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_1 seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg2_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_2 seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 3D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg2_uuid,
network_service_policy_id=nsp2_uuid) == 0:
self._log.info(
"\n## Step 3D: Verify PTG demo_ptg_2 references NSP "
"demo_nsp_2, Failed")
return 0
# Delete PTGs & NSPs
self._log.info(
"\n## Step 4: Delete and Verify two PTGs each referencing "
"one of the two NSPs")
ptg_list = [ptg1_uuid, ptg2_uuid]
nsp_list = [nsp1_uuid, nsp2_uuid]
for i in range(len(ptg_list)):
if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg_list[i]) == 0:
self._log.info(
"\n## Step 4A: Deletion of PTG %s, Failed" %
(ptg_list[i]))
return 0
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_list[i]) == 0:
self._log.info(
"\n## Step 4B: Deletion of NSP %s, Failed" %
(nsp_list[i]))
return 0
# Verify
for n in range(len(nsp_list)):
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp_list[n]) != 0:
self._log.info("\n## Step 4C: Verify deletion of NSP, Failed")
return 0
self._log.info("\n## TESTCASE_GBP_NSP_FUNC_1: PASSED")
return 1
def test_gbp_nsp_func_2(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_2: TO CREATE/UPDATE/DELETE/VERIFY a PTG "
"with NTK-SVC-POLICY with MULTIPLE PTGs\n"
"TEST_STEPS::\n"
"Create two NSPolicy Object with non-default params\n"
"Create PTG using one of the two NSPs\n"
"Verify the PTG and NSP are reflecting in each other in the DB\n"
"Update the PTG to use the second NSP\n"
"Verify the PTG and NSP are reflecting in each other in the DB\n"
"Update/Revert the PTG so that it refers to the initial NSP\n"
"Delete all PTG, NSP\n"
"Verify that PTG and NSPs got deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create NSPolicy with non-default attrs
self._log.info('\n## Step 1: Create two NSPolicy ##\n')
nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_1',
network_service_params="type=ip_single,name=vip_ip1,"
"value=self_subnet")
if nsp1_uuid == 0:
self._log.info(
"\n## Step 1A: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
nsp2_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_2',
network_service_params="type=ip_single,name=vip_ip2,"
"value=self_subnet")
if nsp2_uuid == 0:
self._log.info(
"\n## Step 1B: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
# Create PTG, referencing one of the two NSPs
self._log.info(
"\n## Step 3: Create and Verify PTG referencing one of "
"the two NSPs")
uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
if uuid == 0:
self._log.info(
"\n## Step 3A: Create PTG using NSP demo_nsp_1,Failed")
return 0
else:
ptg1_uuid = uuid[0]
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 3B: Verify PTG demo_ptg_1 seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 3C: Verify PTG demo_ptg_1 references "
"NSP demo_nsp_1, Failed")
return 0
self._log.info(
"\n## Step 4: Update and Verify the PTG with the second NSP")
# Update the PTG with second NSP and Verify
if self.gbpcfg.gbp_policy_cfg_all(
2, 'group', ptg1_uuid, network_service_policy=nsp2_uuid) == 0:
self._log.info(
"\n## Step 4A: Updating NSP attribute of PTG, Failed")
return 0
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) != 0:
self._log.info(
"\n## Step 4B: Verify PTG demo_ptg_1 is NOT seen "
"in NSP demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 4C: Verify PTG demo_ptg_1 is seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp2_uuid) == 0:
self._log.info(
"\n## Step 4D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_2, Failed")
return 0
self._log.info(
"\n## Step 5: Update/Revert the NSP attr of PTG and Verify")
# Update the PTG by reverting the NSP to its initial one
if self.gbpcfg.gbp_policy_cfg_all(
2, 'group', ptg1_uuid, network_service_policy=nsp1_uuid) == 0:
self._log.info(
"\n## Step 5A: Reverting the NSP attribute of PTG by "
"update action, Failed")
return 0
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp2_uuid, policy_target_groups=ptg1_uuid) != 0:
self._log.info(
"\n## Step 5B: Verify PTG demo_ptg_1 is NOT seen in NSP "
"demo_nsp_2, Failed")
return 0
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 5C: Verify PTG demo_ptg_1 is seen in NSP "
"demo_nsp_1, Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 5D: Verify PTG demo_ptg_1 references NSP "
"demo_nsp_1, Failed")
return 0
self._log.info(
"\n## Step 6: Delete and Verify two PTGs each referencing "
"one of the two NSPs")
# Delete PTG & NSP
if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg1_uuid) == 0:
self._log.info("\n## Step 6A: Deletion of PTG,Failed")
return 0
nsp_list = [nsp1_uuid, nsp2_uuid]
for i in range(len(nsp_list)):
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp_list[i]) == 0:
self._log.info(
"\n## Step 6B: Deletion of NSP %s, Failed" %
(nsp_list[i]))
return 0
# Verify
for n in range(len(nsp_list)):
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp_list[n]) != 0:
self._log.info("\n## Step 6C: Verify deletion of NSP, Failed")
return 0
self._log.info("\n## TESTCASE_GBP_NSP_FUNC_2: PASSED")
return 1
def test_gbp_nsp_func_3(self):
self._log.info(
"\n############################################################\n"
"TESTCASE_GBP_NSP_FUNC_3: TO CREATE/DELETE/VERIFY "
"NTK-SVC-POLICY while REFERENCED IN PTG\n"
"TEST_STEPS::\n"
"Create NSPolicy Object with non-default params\n"
"Create PTG referencing the NSP\n"
"Verify the PTG and NSP are reflecting in each other in the DB\n"
"Delete and Verify the deletion of referenced NSP fails\n"
"Delete PTG & NSP, Verify that PTG and NSPs got deleted\n"
"##############################################################\n")
# Testcase work-flow starts
# Create NSPolicy with non-default attrs
self._log.info(
'\n## Step 1: Create NSPolicy with non-default params ##\n')
nsp1_uuid = self.gbpcfg.gbp_policy_cfg_all(
1,
'nsp',
'demo_nsp_1',
network_service_params="type=ip_single,name=vip_ip1,"
"value=self_subnet")
if nsp1_uuid == 0:
self._log.info(
"\n## Step 1A: Create NSPolicy with type=ip_single & "
"name:self_subnet == Failed")
return 0
# Create PTG, referencing one of the two NSPs
self._log.info(
"\n## Step 2: Create and Verify PTG referencing the NSP")
uuid = self.gbpcfg.gbp_policy_cfg_all(
1, 'group', 'demo_ptg_1', network_service_policy=nsp1_uuid)
if uuid == 0:
self._log.info(
"\n## Step 2A: Create PTG using NSP demo_nsp_1,Failed")
return 0
else:
ptg1_uuid = uuid[0]
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(
1, 'nsp', nsp1_uuid, policy_target_groups=ptg1_uuid) == 0:
self._log.info(
"\n## Step 2B: Verify PTG demo_ptg_1 seen in NSP demo_nsp_1, "
"Failed")
return 0
if self.gbpverify.gbp_policy_verify_all(
1, 'group', ptg1_uuid,
network_service_policy_id=nsp1_uuid) == 0:
self._log.info(
"\n## Step 2C: Verify PTG demo_ptg_1 references "
"NSP demo_nsp_1, Failed")
return 0
# Delete the referenced NSP
self._log.info(
"\n## Step 3: Delete the NSP while it is still referenced "
"in a PTG")
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp1_uuid) != 0:
self._log.info(
"\n## Step 3A: Deletion of Referenced NSP DID NOT fail")
return 0
# Delete PTG & NSP
self._log.info("\n## Step 4: Delete PTG followed by NSP and Verify")
if self.gbpcfg.gbp_policy_cfg_all(0, 'group', ptg1_uuid) == 0:
self._log.info("\n## Step 4A: Deletion of PTG,Failed")
return 0
if self.gbpcfg.gbp_policy_cfg_all(0, 'nsp', nsp1_uuid) == 0:
self._log.info("\n## Step 4B: Deletion of NSP,Failed")
return 0
# Verify
if self.gbpverify.gbp_l2l3ntk_pol_ver_all(1, 'nsp', nsp1_uuid) != 0:
self._log.info("\n## Step 4C: Verify deletion of NSP, Failed")
return 0
self._log.info("\n## TESTCASE_GBP_NSP_FUNC_3: PASSED")
return 1
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for MusicBrainz API wrapper.
"""
from __future__ import division, absolute_import, print_function
from test import _common
from beets.autotag import mb
from beets import config
import unittest
import mock
class MBAlbumInfoTest(_common.TestCase):
def _make_release(self, date_str='2009', tracks=None, track_length=None,
track_artist=False):
release = {
'title': 'ALBUM TITLE',
'id': 'ALBUM ID',
'asin': 'ALBUM ASIN',
'disambiguation': 'R_DISAMBIGUATION',
'release-group': {
'type': 'Album',
'first-release-date': date_str,
'id': 'RELEASE GROUP ID',
'disambiguation': 'RG_DISAMBIGUATION',
},
'artist-credit': [
{
'artist': {
'name': 'ARTIST NAME',
'id': 'ARTIST ID',
'sort-name': 'ARTIST SORT NAME',
},
'name': 'ARTIST CREDIT',
}
],
'date': '3001',
'medium-list': [],
'label-info-list': [{
'catalog-number': 'CATALOG NUMBER',
'label': {'name': 'LABEL NAME'},
}],
'text-representation': {
'script': 'SCRIPT',
'language': 'LANGUAGE',
},
'country': 'COUNTRY',
'status': 'STATUS',
}
if tracks:
track_list = []
for i, recording in enumerate(tracks):
track = {
'recording': recording,
'position': i + 1,
'number': 'A1',
}
if track_length:
# Track lengths are distinct from recording lengths.
track['length'] = track_length
if track_artist:
# Similarly, track artists can differ from recording
# artists.
track['artist-credit'] = [
{
'artist': {
'name': 'TRACK ARTIST NAME',
'id': 'TRACK ARTIST ID',
'sort-name': 'TRACK ARTIST SORT NAME',
},
'name': 'TRACK ARTIST CREDIT',
}
]
track_list.append(track)
release['medium-list'].append({
'position': '1',
'track-list': track_list,
'format': 'FORMAT',
'title': 'MEDIUM TITLE',
})
return release
def _make_track(self, title, tr_id, duration, artist=False):
track = {
'title': title,
'id': tr_id,
}
if duration is not None:
track['length'] = duration
if artist:
track['artist-credit'] = [
{
'artist': {
'name': 'RECORDING ARTIST NAME',
'id': 'RECORDING ARTIST ID',
'sort-name': 'RECORDING ARTIST SORT NAME',
},
'name': 'RECORDING ARTIST CREDIT',
}
]
return track
def test_parse_release_with_year(self):
release = self._make_release('1984')
d = mb.album_info(release)
self.assertEqual(d.album, 'ALBUM TITLE')
self.assertEqual(d.album_id, 'ALBUM ID')
self.assertEqual(d.artist, 'ARTIST NAME')
self.assertEqual(d.artist_id, 'ARTIST ID')
self.assertEqual(d.original_year, 1984)
self.assertEqual(d.year, 3001)
self.assertEqual(d.artist_credit, 'ARTIST CREDIT')
def test_parse_release_type(self):
release = self._make_release('1984')
d = mb.album_info(release)
self.assertEqual(d.albumtype, 'album')
def test_parse_release_full_date(self):
release = self._make_release('1987-03-31')
d = mb.album_info(release)
self.assertEqual(d.original_year, 1987)
self.assertEqual(d.original_month, 3)
self.assertEqual(d.original_day, 31)
def test_parse_tracks(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(len(t), 2)
self.assertEqual(t[0].title, 'TITLE ONE')
self.assertEqual(t[0].track_id, 'ID ONE')
self.assertEqual(t[0].length, 100.0)
self.assertEqual(t[1].title, 'TITLE TWO')
self.assertEqual(t[1].track_id, 'ID TWO')
self.assertEqual(t[1].length, 200.0)
def test_parse_track_indices(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[1].medium_index, 2)
self.assertEqual(t[1].index, 2)
def test_parse_medium_numbers_single_medium(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.mediums, 1)
t = d.tracks
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[1].medium, 1)
def test_parse_medium_numbers_two_mediums(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=[tracks[0]])
second_track_list = [{
'recording': tracks[1],
'position': '1',
'number': 'A1',
}]
release['medium-list'].append({
'position': '2',
'track-list': second_track_list,
})
d = mb.album_info(release)
self.assertEqual(d.mediums, 2)
t = d.tracks
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[0].index, 1)
self.assertEqual(t[1].medium, 2)
self.assertEqual(t[1].medium_index, 1)
self.assertEqual(t[1].index, 2)
def test_parse_release_year_month_only(self):
release = self._make_release('1987-03')
d = mb.album_info(release)
self.assertEqual(d.original_year, 1987)
self.assertEqual(d.original_month, 3)
def test_no_durations(self):
tracks = [self._make_track('TITLE', 'ID', None)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.tracks[0].length, None)
def test_track_length_overrides_recording_length(self):
tracks = [self._make_track('TITLE', 'ID', 1.0 * 1000.0)]
release = self._make_release(tracks=tracks, track_length=2.0 * 1000.0)
d = mb.album_info(release)
self.assertEqual(d.tracks[0].length, 2.0)
def test_no_release_date(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertFalse(d.original_year)
self.assertFalse(d.original_month)
self.assertFalse(d.original_day)
def test_various_artists_defaults_false(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertFalse(d.va)
def test_detect_various_artists(self):
release = self._make_release(None)
release['artist-credit'][0]['artist']['id'] = \
mb.VARIOUS_ARTISTS_ID
d = mb.album_info(release)
self.assertTrue(d.va)
def test_parse_artist_sort_name(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.artist_sort, 'ARTIST SORT NAME')
def test_parse_releasegroupid(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.releasegroup_id, 'RELEASE GROUP ID')
def test_parse_asin(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.asin, 'ALBUM ASIN')
def test_parse_catalognum(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.catalognum, 'CATALOG NUMBER')
def test_parse_textrepr(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.script, 'SCRIPT')
self.assertEqual(d.language, 'LANGUAGE')
def test_parse_country(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.country, 'COUNTRY')
def test_parse_status(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.albumstatus, 'STATUS')
def test_parse_media(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(None, tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.media, 'FORMAT')
def test_parse_disambig(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.albumdisambig,
'RG_DISAMBIGUATION, R_DISAMBIGUATION')
def test_parse_disctitle(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(None, tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(t[0].disctitle, 'MEDIUM TITLE')
self.assertEqual(t[1].disctitle, 'MEDIUM TITLE')
def test_missing_language(self):
release = self._make_release(None)
del release['text-representation']['language']
d = mb.album_info(release)
self.assertEqual(d.language, None)
def test_parse_recording_artist(self):
tracks = [self._make_track('a', 'b', 1, True)]
release = self._make_release(None, tracks=tracks)
track = mb.album_info(release).tracks[0]
self.assertEqual(track.artist, 'RECORDING ARTIST NAME')
self.assertEqual(track.artist_id, 'RECORDING ARTIST ID')
self.assertEqual(track.artist_sort, 'RECORDING ARTIST SORT NAME')
self.assertEqual(track.artist_credit, 'RECORDING ARTIST CREDIT')
def test_track_artist_overrides_recording_artist(self):
tracks = [self._make_track('a', 'b', 1, True)]
release = self._make_release(None, tracks=tracks, track_artist=True)
track = mb.album_info(release).tracks[0]
self.assertEqual(track.artist, 'TRACK ARTIST NAME')
self.assertEqual(track.artist_id, 'TRACK ARTIST ID')
self.assertEqual(track.artist_sort, 'TRACK ARTIST SORT NAME')
self.assertEqual(track.artist_credit, 'TRACK ARTIST CREDIT')
def test_data_source(self):
release = self._make_release()
d = mb.album_info(release)
self.assertEqual(d.data_source, 'MusicBrainz')
class ParseIDTest(_common.TestCase):
def test_parse_id_correct(self):
id_string = "28e32c71-1450-463e-92bf-e0a46446fc11"
out = mb._parse_id(id_string)
self.assertEqual(out, id_string)
def test_parse_id_non_id_returns_none(self):
id_string = "blah blah"
out = mb._parse_id(id_string)
self.assertEqual(out, None)
def test_parse_id_url_finds_id(self):
id_string = "28e32c71-1450-463e-92bf-e0a46446fc11"
id_url = "http://musicbrainz.org/entity/%s" % id_string
out = mb._parse_id(id_url)
self.assertEqual(out, id_string)
class ArtistFlatteningTest(_common.TestCase):
def _credit_dict(self, suffix=''):
return {
'artist': {
'name': 'NAME' + suffix,
'sort-name': 'SORT' + suffix,
},
'name': 'CREDIT' + suffix,
}
def _add_alias(self, credit_dict, suffix='', locale='', primary=False):
alias = {
'alias': 'ALIAS' + suffix,
'locale': locale,
'sort-name': 'ALIASSORT' + suffix
}
if primary:
alias['primary'] = 'primary'
if 'alias-list' not in credit_dict['artist']:
credit_dict['artist']['alias-list'] = []
credit_dict['artist']['alias-list'].append(alias)
def test_single_artist(self):
a, s, c = mb._flatten_artist_credit([self._credit_dict()])
self.assertEqual(a, 'NAME')
self.assertEqual(s, 'SORT')
self.assertEqual(c, 'CREDIT')
def test_two_artists(self):
a, s, c = mb._flatten_artist_credit(
[self._credit_dict('a'), ' AND ', self._credit_dict('b')]
)
self.assertEqual(a, 'NAMEa AND NAMEb')
self.assertEqual(s, 'SORTa AND SORTb')
self.assertEqual(c, 'CREDITa AND CREDITb')
def test_alias(self):
credit_dict = self._credit_dict()
self._add_alias(credit_dict, suffix='en', locale='en', primary=True)
self._add_alias(credit_dict, suffix='en_GB', locale='en_GB',
primary=True)
self._add_alias(credit_dict, suffix='fr', locale='fr')
self._add_alias(credit_dict, suffix='fr_P', locale='fr', primary=True)
self._add_alias(credit_dict, suffix='pt_BR', locale='pt_BR')
# test no alias
config['import']['languages'] = ['']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('NAME', 'SORT', 'CREDIT'))
# test en primary
config['import']['languages'] = ['en']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('ALIASen', 'ALIASSORTen', 'CREDIT'))
# test en_GB en primary
config['import']['languages'] = ['en_GB', 'en']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('ALIASen_GB', 'ALIASSORTen_GB', 'CREDIT'))
# test en en_GB primary
config['import']['languages'] = ['en', 'en_GB']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('ALIASen', 'ALIASSORTen', 'CREDIT'))
# test fr primary
config['import']['languages'] = ['fr']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('ALIASfr_P', 'ALIASSORTfr_P', 'CREDIT'))
# test for not matching non-primary
config['import']['languages'] = ['pt_BR', 'fr']
flat = mb._flatten_artist_credit([credit_dict])
self.assertEqual(flat, ('ALIASfr_P', 'ALIASSORTfr_P', 'CREDIT'))
class MBLibraryTest(unittest.TestCase):
def test_match_track(self):
with mock.patch('musicbrainzngs.search_recordings') as p:
p.return_value = {
'recording-list': [{
'title': 'foo',
'id': 'bar',
'length': 42,
}],
}
ti = list(mb.match_track('hello', 'there'))[0]
p.assert_called_with(artist='hello', recording='there', limit=5)
self.assertEqual(ti.title, 'foo')
self.assertEqual(ti.track_id, 'bar')
def test_match_album(self):
mbid = 'd2a6f856-b553-40a0-ac54-a321e8e2da99'
with mock.patch('musicbrainzngs.search_releases') as sp:
sp.return_value = {
'release-list': [{
'id': mbid,
}],
}
with mock.patch('musicbrainzngs.get_release_by_id') as gp:
gp.return_value = {
'release': {
'title': 'hi',
'id': mbid,
'medium-list': [{
'track-list': [{
'recording': {
'title': 'foo',
'id': 'bar',
'length': 42,
},
'position': 9,
'number': 'A1',
}],
'position': 5,
}],
'artist-credit': [{
'artist': {
'name': 'some-artist',
'id': 'some-id',
},
}],
'release-group': {
'id': 'another-id',
}
}
}
ai = list(mb.match_album('hello', 'there'))[0]
sp.assert_called_with(artist='hello', release='there', limit=5)
gp.assert_called_with(mbid, mock.ANY)
self.assertEqual(ai.tracks[0].title, 'foo')
self.assertEqual(ai.album, 'hi')
def test_match_track_empty(self):
with mock.patch('musicbrainzngs.search_recordings') as p:
til = list(mb.match_track(' ', ' '))
self.assertFalse(p.called)
self.assertEqual(til, [])
def test_match_album_empty(self):
with mock.patch('musicbrainzngs.search_releases') as p:
ail = list(mb.match_album(' ', ' '))
self.assertFalse(p.called)
self.assertEqual(ail, [])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
from tornado.websocket import WebSocketHandler
from pystil.context import url, MESSAGE_QUEUE, log, Hdr
from pystil.db import CriterionView as Visit
from tornado.options import options
from tornado.ioloop import IOLoop
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
from sqlalchemy.sql.compiler import SQLCompiler
from sqlalchemy.sql.expression import between
from pystil.utils import visit_to_table_line
from sqlalchemy import func
from datetime import datetime, timedelta
from functools import partial
import psycopg2
import psycopg2.extras  # NamedTupleCursor factory used in QueryWebSocket.execute()
import momoko
dsn = 'dbname=%s user=%s password=%s host=%s port=%s' % (
options.db_name, options.db_user, options.db_password,
options.db_host, options.db_port)
adb = momoko.Pool(dsn=dsn, size=5)
@url(r'/last_visits')
class LastVisitsWebSocket(Hdr, WebSocketHandler):
waiters = set()
def open(self):
log.info('Opening last visits websocket')
site = self.get_secure_cookie('_pystil_site')
site = site and site.decode('utf-8').split('|')[0]
if site is not None:
self.site = site
LastVisitsWebSocket.waiters.add(self)
else:
log.warn('Last visits websocket open without secure cookie')
self.close()
def on_message(self, message):
if message == '/count':
self.write_message(
'INFO|There are %d clients' % len(LastVisitsWebSocket.waiters))
elif message == '/queue_count':
self.write_message(
'INFO|There are %d waiting messages' % MESSAGE_QUEUE.qsize())
elif message == '/site':
self.write_message(
'INFO|You are on %s' % self.site)
def on_close(self):
if self in LastVisitsWebSocket.waiters:
LastVisitsWebSocket.waiters.remove(self)
def broadcast(message):
for client in set(LastVisitsWebSocket.waiters):
try:
client.write_message(message)
except:
client.log.warn('Error broadcasting to %r' % client, exc_info=True)
LastVisitsWebSocket.waiters.remove(client)
client.close()
@url(r'/query')
class QueryWebSocket(Hdr, WebSocketHandler):
def open(self):
log.info('Opening query websocket')
site = self.get_secure_cookie('_pystil_site')
site = site and site.decode('utf-8').split('|')[0]
self.query = None
self.state = None
if site is None:
log.warn('Query websocket open without secure cookie')
self.close()
return
def on_message(self, message):
command = message.split('|')[0]
query = '|'.join(message.split('|')[1:])
if command == 'criterion':
criterion = query.split('|')[0]
value = '|'.join(query.split('|')[1:])
if criterion == 'date':
try:
value = datetime.strptime(
value.replace('+', ' '), '%Y-%m-%d %H:%M:%S')
except ValueError:
try:
                        value = datetime.strptime(value, '%Y-%m-%d')
except ValueError:
value = datetime.now()
filter_ = between(Visit.date,
value.date(),
value.date() + timedelta(days=1))
elif criterion in (
'referrer', 'asn', 'browser_name', 'site',
'browser_version', 'browser_name_version', 'query'):
filter_ = getattr(Visit, criterion).ilike('%%%s%%' % value)
else:
filter_ = func.lower(
getattr(Visit, criterion)) == value.lower()
query = (self.db
.query(Visit)
.filter(filter_))
dialect = query.session.bind.dialect
compiler = SQLCompiler(dialect, query.statement)
compiler.compile()
self.count = 0
self.stop = 20
self.state = 'start'
self.execute(compiler.string, compiler.params)
elif command == 'more':
if self.state == 'paused':
self.stop += 20
self.state = 'executing'
self.cursor.execute(
'FETCH FORWARD 1 FROM visit_cur;')
elif command == '/status':
for i, conn in enumerate(adb._pool):
if conn.busy():
self.write_message(
'INFO|Connection %d is busy: '
'Executing? %s Closed? %d Status? %s (%d)' % (
i, conn.connection.isexecuting(),
conn.connection.closed,
conn.connection.get_transaction_status(),
conn.connection.get_backend_pid()))
else:
self.write_message('INFO|Connection %d is free' % i)
def execute(self, query, parameters):
self.momoko_connection = adb._get_connection()
if not self.momoko_connection:
if self.state == 'start':
log.info('No connection')
self.write_message('BUSY|Server busy, waiting for slot')
self.state = 'busy'
if self.ws_connection:
return adb._ioloop.add_callback(partial(
self.execute, query, parameters))
if not self.ws_connection:
return
self.state = 'executing'
self.write_message('BEGIN|Searching')
self.connection = self.momoko_connection.connection
self.cursor = self.connection.cursor(
cursor_factory=psycopg2.extras.NamedTupleCursor)
self.cursor.execute(
'BEGIN;'
'DECLARE visit_cur SCROLL CURSOR FOR '
'%s;'
'SELECT null as id;' % query, parameters)
# 'FETCH FORWARD 1 FROM visit_cur;' % query, parameters)
self.momoko_connection.ioloop.add_handler(
self.momoko_connection.fileno,
self.io_callback,
IOLoop.WRITE)
def io_callback(self, fd=None, events=None):
try:
state = self.connection.poll()
except psycopg2.extensions.QueryCanceledError:
log.info('Canceling request %r' % self, exc_info=True)
self.cursor.execute('ROLLBACK -- CANCEL')
self.state = 'terminated'
except (psycopg2.Warning, psycopg2.Error):
log.exception('Poll error')
self.momoko_connection.ioloop.remove_handler(
self.momoko_connection.fileno)
raise
else:
if state == POLL_OK:
if self.state == 'terminated':
self.momoko_connection.ioloop.remove_handler(
self.momoko_connection.fileno)
return
rows = self.cursor.fetchmany()
if not rows:
self.state = 'terminated'
self.cursor.execute('CLOSE visit_cur; ROLLBACK; -- NOROWS')
try:
self.write_message('END|Done found %d visit%s' % (
self.count, 's' if self.count > 1 else ''))
except:
pass
else:
try:
for row in rows:
if row.id:
self.count += 1
self.write_message(
'VISIT|' + visit_to_table_line(row))
except Exception as e:
log.warn('During write', exc_info=True)
self.state = 'terminated'
self.cursor.execute(
'CLOSE visit_cur; ROLLBACK; -- WSERROR')
try:
self.write_message(
'END|%s: %s' % (type(e), str(e)))
except:
pass
else:
if self.count < self.stop:
self.cursor.execute(
'FETCH FORWARD 1 FROM visit_cur;')
else:
self.state = 'paused'
try:
self.write_message(
'PAUSE|Paused on %d visits' % self.count)
except:
pass
elif state == POLL_READ:
self.momoko_connection.ioloop.update_handler(
self.momoko_connection.fileno, IOLoop.READ)
elif state == POLL_WRITE:
self.momoko_connection.ioloop.update_handler(
self.momoko_connection.fileno, IOLoop.WRITE)
else:
raise psycopg2.OperationalError(
'poll() returned {0}'.format(state))
def on_close(self):
log.info('Closing %r' % self)
if self.state is None or self.state == 'terminated':
return
if self.state == 'paused':
self.state = 'terminated'
self.cursor.execute('CLOSE visit_cur; ROLLBACK; -- ONCLOSE')
elif hasattr(self, 'connection'):
self.connection.cancel()
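# Illustrative sketch only (not wired into the handlers above): the
# execute()/io_callback() pair drives a server-side SCROLL cursor through the
# IOLoop without blocking; done synchronously with plain psycopg2, the same
# DECLARE / FETCH FORWARD / CLOSE protocol looks roughly like this.
def _fetch_visits_sync_sketch(connection, compiled_query, params, page=20):
    """Yield rows from a compiled SELECT through a named server-side cursor."""
    cursor = connection.cursor()
    # Open a transaction and declare the scrollable cursor for the query.
    cursor.execute('BEGIN;'
                   'DECLARE visit_cur SCROLL CURSOR FOR '
                   '%s;' % compiled_query, params)
    try:
        while True:
            # Pull the next page of rows from the server-side cursor.
            cursor.execute('FETCH FORWARD %d FROM visit_cur;' % page)
            rows = cursor.fetchall()
            if not rows:
                break
            for row in rows:
                yield row
    finally:
        # Release the cursor and the transaction, as io_callback() does.
        cursor.execute('CLOSE visit_cur; ROLLBACK;')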
|
|
# Copyright 2014, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import types
import six
from six.moves import StringIO
from sdict import adict
import click
from tower_cli import models, resources
from tower_cli.api import client
from tower_cli.conf import settings
from tower_cli.utils import debug, exceptions as exc
from tests.compat import unittest, mock
class ResourceMetaTests(unittest.TestCase):
"""A set of tests to establish that the ResourceMeta metaclass works
in the way we expect.
"""
def test_commands(self):
"""Establish that commands are appropriately classified within
the resource, and that the stock commands are not present on a
BaseResource subclass.
"""
# Create the resource.
class MyResource(models.BaseResource):
endpoint = '/bogus/'
@resources.command
def foo(self):
pass
@resources.command
def bar(self):
pass
def boring_method(self):
pass
# Establish that the commands are present on the resource where
# we expect, and that the defined methods are still plain methods.
#
# Note: We can use something like types.FunctionType or
# types.UnboundMethodType to test against directly, but using a
# regular method is preferable because of differences between
# the type internals in Python 2 vs. Python 3.
#
# By just getting the desirable control type from another method
# on the resource, we are ensuring that it "just matches" regardless
# of which version of Python is in use.
self.assertIsInstance(MyResource.foo, type(MyResource.boring_method))
self.assertIsInstance(MyResource.bar, type(MyResource.boring_method))
self.assertEqual(set(MyResource.commands), set(['foo', 'bar']))
def test_inherited_commands(self):
"""Establish that the stock commands are automatically present
on classes inherited from Resource.
"""
# Create the resource.
class MyResource(models.Resource):
endpoint = '/bogus/'
# Establish it has the commands we expect.
self.assertEqual(set(MyResource.commands),
set(['create', 'modify', 'list', 'get', 'delete']))
def test_subclassed_commands(self):
"""Establish that commands overridden in subclasses retain their
superclass implementation options.
"""
# Create the subclass resource, overriding a superclass command.
class MyResource(models.Resource):
endpoint = '/bogus/'
@resources.command
def list(self, **kwargs):
return super(MyResource, self).list(**kwargs)
# Establish that it has one of the options added to the
# superclass list command.
self.assertTrue(any([i.name == 'query'
for i in MyResource.list.__click_params__]))
def test_fields(self):
"""Establish that fields are appropriately classified within
the resource.
"""
# Create the resource.
class MyResource(models.Resource):
endpoint = '/bogus/'
foo = models.Field(unique=True)
bar = models.Field()
# Establish that our fields lists are the length we expect.
self.assertEqual(len(MyResource.fields), 2)
self.assertEqual(len(MyResource.unique_fields), 1)
# Establish that the fields are present in fields.
self.assertEqual(MyResource.fields[0].name, 'foo')
self.assertEqual(MyResource.fields[1].name, 'bar')
self.assertEqual(MyResource.unique_fields, set(['foo']))
def test_error_no_endpoint(self):
"""Establish that Resource subclasses are required to have
an endpoint, and attempting to create one that lacks an endpoint
raises TypeError.
"""
with self.assertRaises(TypeError):
class MyResource(models.Resource):
pass
def test_endpoint_normalization(self):
"""Establish that the endpoints have leading and trailing slashes
added if they are not present on a resource.
"""
class MyResource(models.Resource):
endpoint = 'foo'
self.assertEqual(MyResource.endpoint, '/foo/')
class SubcommandTests(unittest.TestCase):
"""A set of tests for establishing that the Subcommand class created
    on the basis of a Resource class works in the way we expect.
"""
def setUp(self):
"""Install a resource instance sufficient for testing common
things with subcommands.
"""
class BasicResource(models.Resource):
endpoint = '/basic/'
name = models.Field(unique=True)
self.resource = BasicResource()
self.command = self.resource.as_command()
def test_command_instance(self):
"""Establish that the command based on a resource is, in fact, a
click MultiCommand.
"""
# Assert that it is a click command, and that it has the commands
# available on the resource.
self.assertIsInstance(self.command, click.MultiCommand)
def test_list_commands(self):
"""Establish that the `list_commands` method for the command
corresponds to the commands available on the resource.
"""
self.assertEqual(set(self.resource.commands),
set(self.command.list_commands(None)))
def test_get_command(self):
"""Establish that the `get_command` method returns the appropriate
resource method wrapped as a click command.
"""
list_command = self.command.get_command(None, 'list')
# Ensure that this is a click command.
self.assertIsInstance(list_command, click.core.Command)
# Ensure that this command has an option corresponding to the
# "name" unique field.
self.assertEqual(list_command.params[0].name, 'name')
self.assertEqual(list_command.params[0].opts, ['--name'])
def test_get_command_error(self):
"""Establish that if `get_command` is called against a command that
does not actually exist on the resource, that we raise UsageError.
"""
with self.assertRaises(exc.UsageError):
self.command.get_command(None, 'bogus')
def test_command_with_pk(self):
"""Establish that the `get_command` method appropriately adds a
primary key argument if the method has a "pk" positional argument.
"""
# Create a resource with an appropriate command.
class PKResource(models.BaseResource):
endpoint = '/pkr/'
@resources.command
def my_method(self, pk):
pass
# Get the command version of my_method.
my_method = PKResource().as_command().get_command(None, 'my_method')
# Establish that the `my_method` command does, in fact, have a PK
# click argument attached.
self.assertEqual(my_method.params[-1].name, 'pk')
def test_use_fields_as_options_false(self):
"""Establish that the `use_fields_as_options` attribute is honored
if set to False.
"""
# Create a resource with a command that doesn't expect its
# fields to become options.
class NoOptResource(models.BaseResource):
endpoint = '/nor/'
f1 = models.Field()
f2 = models.Field()
@resources.command(use_fields_as_options=False)
def noopt(self):
pass
# Make the resource into a command, and get the noopt subcommand.
noopt = NoOptResource().as_command().get_command(None, 'noopt')
# Establish that the noopt command does NOT have fields as options.
self.assertFalse(any([o.name == 'f1' for o in noopt.params]))
self.assertFalse(any([o.name == 'f2' for o in noopt.params]))
def test_use_fields_as_options_enumerated(self):
"""Establish that the `use_fields_as_options` attribute is honored
if set to a tuple containing a subset of fields.
"""
        # Create a resource with a command that expects only a subset of
        # its fields to become options.
class NoOptResource(models.BaseResource):
endpoint = '/nor/'
f1 = models.Field()
f2 = models.Field()
@resources.command(use_fields_as_options=('f2',))
def noopt(self):
pass
# Make the resource into a command, and get the noopt subcommand.
noopt = NoOptResource().as_command().get_command(None, 'noopt')
        # Establish that only the enumerated field (f2) becomes an option.
self.assertFalse(any([o.name == 'f1' for o in noopt.params]))
self.assertTrue(any([o.name == 'f2' for o in noopt.params]))
def test_fields_not_options(self):
"""Establish that a field which is not an option is not made into
an option for commands.
"""
# Create a resource with a field that is an option and another
# field that isn't.
class NoOptionResource(models.Resource):
endpoint = '/nor/'
yes = models.Field()
no = models.Field(is_option=False)
# Make the resource into a command, and get a reasonably-arbitrary
# subcommand.
cmd = NoOptionResource().as_command().get_command(None, 'list')
# Establish that "yes" is an option on the command and "no" is not.
self.assertTrue(any([o.name == 'yes' for o in cmd.params]))
self.assertFalse(any([o.name == 'no' for o in cmd.params]))
def test_field_explicit_key(self):
"""Establish that if a field is given an explicit key, that they
key is used for the field name instead of the implicit name.
"""
# Create a resource with a field that has an explicit key.
class ExplicitKeyResource(models.Resource):
endpoint = '/ekr/'
option_name = models.Field('internal_name')
# Make the resource into a command, and get a reasonably-arbitrary
# subcommand.
cmd = ExplicitKeyResource().as_command().get_command(None, 'get')
# Establish that the field has an option of --option-name, and
# a name of internal_name.
opt = cmd.params[0]
self.assertEqual(opt.name, 'internal_name')
self.assertEqual(opt.opts, ['--option-name'])
def test_docstring_replacement_an(self):
"""Establish that for resources with names beginning with vowels,
        that the automatic docstring replacement is grammatically correct.
"""
        # Create a resource with an appropriate name.
class Oreo(models.Resource):
resource_name = 'Oreo cookie' # COOOOOOKIES!!!!
endpoint = '/oreo/'
# Get the Oreo resource's create method.
create = Oreo().as_command().get_command(None, 'create')
self.assertIn('Create an Oreo cookie', create.help)
def test_docstring_replacement_y(self):
"""Establish that for resources with names ending in y, that plural
replacement is correct.
"""
        # Create a resource with an appropriate name.
class Oreo(models.Resource):
resource_name = 'telephony'
endpoint = '/telephonies/'
        # Get the Oreo resource's list method.
create = Oreo().as_command().get_command(None, 'list')
self.assertIn('list of telephonies', create.help)
def test_echo_method(self):
"""Establish that the _echo_method subcommand class works in the
way we expect.
"""
func = self.command._echo_method(lambda: {'foo': 'bar'})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='json'):
func()
secho.assert_called_once_with(json.dumps({'foo': 'bar'}, indent=2))
def test_echo_method_changed_false(self):
"""Establish that the _echo_method subcommand decorator works
in the way we expect if we get an unchanged designation.
"""
func = self.command._echo_method(lambda: {'changed': False})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='json', color=True):
func()
answer = json.dumps({'changed': False}, indent=2)
secho.assert_called_once_with(answer, fg='green')
def test_echo_method_changed_true(self):
"""Establish that the _echo_method subcommand decorator works
        in the way we expect if we get a changed designation.
"""
func = self.command._echo_method(lambda: {'changed': True})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='json', color=True):
func()
answer = json.dumps({'changed': True}, indent=2)
secho.assert_called_once_with(answer, fg='yellow')
def test_echo_method_human_formatted(self):
"""Establish that the `_echo_method` properly returns human formatting
when it gets back a list of objects.
"""
func = self.command._echo_method(lambda: {'results': [
{'id': 1, 'name': 'Durham, NC'},
{'id': 2, 'name': 'Austin, TX'},
]})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertIn('1 Durham, NC', output)
self.assertIn('2 Austin, TX', output)
def test_echo_method_human_formatted_changed(self):
"""Establish that if there is a change and no id is returned,
we print a generic OK message.
"""
func = self.command._echo_method(lambda: {'changed': False})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertEqual(output, 'OK. (changed: false)')
def test_echo_method_human_formatted_no_records(self):
"""Establish that if there are no records sent to the human formatter,
that it prints a terse message to that effect.
"""
func = self.command._echo_method(lambda: {'results': []})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertEqual(output, 'No records found.')
def test_echo_method_human_formatted_single_result(self):
"""Establish that a single result sent to the human formatter
shows a table with a single row as expected.
"""
f = self.command._echo_method(lambda: {'id': 1, 'name': 'Durham, NC'})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
f()
output = secho.mock_calls[-1][1][0]
self.assertIn('1 Durham, NC', output)
def test_echo_method_human_boolean_formatting(self):
"""Establish that booleans are formatted right-aligned, lower-cased
in human output.
"""
func = self.command._echo_method(lambda: {'results': [
{'id': 1, 'name': 'Durham, NC'},
{'id': 2, 'name': True},
]})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertIn('1 Durham, NC', output)
self.assertIn('2 true', output)
def test_echo_method_human_pagination(self):
"""Establish that pagination works in human formatting, and it
prints the way we expect.
"""
func = self.command._echo_method(lambda: {'results': [
{'id': 1, 'name': 'Durham, NC'},
{'id': 2, 'name': True},
], 'next': 3, 'count': 10, 'previous': 1})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertIn('(Page 2 of 5.)', output)
def test_echo_method_human_pagination_last_page(self):
"""Establish that pagination works in human formatting, and it
        prints the way we expect on the final page.
"""
func = self.command._echo_method(lambda: {'results': [
{'id': 1, 'name': 'Durham, NC'},
], 'next': None, 'count': 3, 'previous': 1})
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertIn('(Page 2 of 2.)', output)
def test_echo_method_human_custom_output(self):
"""Establish that a custom dictionary with no ID is made into a
table and printed as expected.
"""
func = self.command._echo_method(lambda: adict(foo='bar', spam='eggs'))
with mock.patch.object(click, 'secho') as secho:
with settings.runtime_values(format='human'):
func()
output = secho.mock_calls[-1][1][0]
self.assertIn('foo spam', output)
self.assertIn('bar eggs', output)
class ResourceTests(unittest.TestCase):
"""A set of tests to establish that the Resource class works in the
way that we expect.
"""
def setUp(self):
# Create a resource class that can be used across this particular
# suite.
class FooResource(models.Resource):
endpoint = '/foo/'
name = models.Field(unique=True)
description = models.Field(required=False)
self.res = FooResource()
def test_get(self):
"""Establish that the Resource class' `get` method works in the
way that we expect.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'description': 'bar',
'name': 'foo'})
result = self.res.get(42)
self.assertEqual(result['id'], 42)
self.assertEqual(result['name'], 'foo')
def test_list_no_kwargs(self):
"""Establish that the Resource class' `list` method correctly
requests the resource and parses out a list of results.
"""
with client.test_mode as t:
t.register_json('/foo/', {'count': 2, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
{'id': 2, 'name': 'spam', 'description': 'eggs'},
], 'next': None, 'previous': None})
result = self.res.list()
self.assertEqual(t.requests[0].url,
'https://20.12.4.21/api/v1/foo/')
self.assertEqual(result['count'], 2)
self.assertEqual(result['results'][0]['id'], 1)
def test_list_all_pages(self):
"""Establish that the Resource class' `list` method correctly
accepts the --all-pages flag and checks follow-up pages.
"""
with client.test_mode as t:
# Register the first, second, and third page.
t.register_json('/foo/', {'count': 3, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
], 'next': '/foo/?page=2', 'previous': None})
t.register_json('/foo/?page=2', {'count': 3, 'results': [
{'id': 2, 'name': 'spam', 'description': 'eggs'},
], 'next': '/foo/?page=3', 'previous': None})
t.register_json('/foo/?page=3', {'count': 3, 'results': [
{'id': 3, 'name': 'bacon', 'description': 'cheese'},
], 'next': None, 'previous': None})
# Get the list
result = self.res.list(all_pages=True)
# Assert that there are three results, and three requests.
self.assertEqual(len(t.requests), 3)
self.assertEqual(len(result['results']), 3)
def test_list_custom_kwargs(self):
"""Establish that if we pass custom keyword arguments to list, that
they are included in the final request.
"""
with client.test_mode as t:
t.register_json('/foo/?bar=baz', {'count': 0, 'results': [],
'next': None, 'previous': None})
result = self.res.list(query=[('bar', 'baz')])
self.assertTrue(t.requests[0].url.endswith('bar=baz'))
def test_list_duplicate_kwarg(self):
"""Establish that if we attempt to query on the same field twice,
that we get an error.
"""
with client.test_mode as t:
with self.assertRaises(exc.BadRequest):
result = self.res.list(name='Batman',
query=[('name', 'Robin')])
self.assertEqual(len(t.requests), 0)
def test_get_unexpected_zero_results(self):
"""Establish that if a read method gets 0 results when it should have
gotten one or more, that it raises NotFound.
"""
with client.test_mode as t:
t.register_json('/foo/?name=spam', {'count': 0, 'results': []})
with self.assertRaises(exc.NotFound):
result = self.res.get(name='spam')
def test_get_no_debug_header(self):
"""Establish that if get is called with include_debug_header=False,
no debug header is issued.
"""
with mock.patch.object(type(self.res), 'read') as read:
with mock.patch.object(debug, 'log') as dlog:
read.return_value = {'results': [True]}
result = self.res.get(42, include_debug_header=False)
self.assertEqual(dlog.call_count, 0)
self.assertTrue(result)
def test_get_unexpected_multiple_results(self):
"""Establish that if a read method gets more than one result when
it should have gotten one and exactly one, that it raises
MultipleResults.
"""
# Register the response to the request URL.
# Note that this response should represent bad data, since name is
# generally unique within Tower. This doesn't matter for the purpose
# of this test; what's important is that if we expected one and exactly
# one result and we get two or more, that we complain in an expected
# (and later, handled) way.
with client.test_mode as t:
t.register_json('/foo/?name=spam', {'count': 2, 'results': [
{'id': 1, 'name': 'spam'},
{'id': 2, 'name': 'spam'},
], 'next': None, 'previous': None})
with self.assertRaises(exc.MultipleResults):
result = self.res.get(name='spam')
def test_list_with_none_kwargs(self):
"""Establish that if `list` is called with keyword arguments with
None values, that these are ignored.
This is to ensure that click's eagerness to send None values doesn't
cause problems.
"""
# Register the request and make the call.
with client.test_mode as t:
t.register_json('/foo/?name=foo', {'count': 1, 'results': [
{'id': 1, 'name': 'foo', 'description': 'bar'},
], 'next': None, 'previous': None})
result = self.res.list(name='foo', description=None)
self.assertEqual(len(t.requests), 1)
# Ensure that there are no other query param arguments other
# than `?name=foo` in the request URL.
self.assertNotIn('&', t.requests[0].url)
self.assertTrue(t.requests[0].url.endswith('?name=foo'))
def test_list_with_pagination(self):
"""Establish that the `list` method returns pages as integers
if it is given pages at all.
"""
with client.test_mode as t:
t.register_json('/foo/', {'count': 10, 'results': [
{'id': 1, 'name': 'bar'},
], 'next': '/api/v1/foo/?page=2', 'previous': None})
result = self.res.list()
self.assertEqual(result['next'], 2)
def test_reading_with_file(self):
"""Establish that if we get a file-like object, that it is
appropriately read.
"""
# Note: This is primarily for a case of longer input that belongs
# in files (such as SSH RSA/DSA private keys), but in this case we're
# using something trivial; we need only provide a proof of concept
# to test against.
sio = StringIO('bar')
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': [],
'next': None, 'previous': None})
result = self.res.list(name=sio)
self.assertTrue(t.requests[0].url.endswith('?name=bar'))
def test_create(self):
"""Establish that a standard create call works in the way that
we expect.
"""
with client.test_mode as t:
# `create` will attempt to see if the record already exists;
# mock this to state that it does not.
t.register_json('/foo/?name=bar', {'count': 0, 'results': [],
'next': None, 'previous': None})
t.register_json('/foo/', {'changed': True, 'id': 42},
method='POST')
result = self.res.create(name='bar')
self.assertEqual(t.requests[0].method, 'GET')
self.assertEqual(t.requests[1].method, 'POST')
def test_create_already_existing(self):
"""Establish that if we attempt to create a record that already exists,
that no action ends up being taken.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar'},
], 'next': None, 'previous': None})
result = self.res.create(name='bar')
self.assertEqual(len(t.requests), 1)
self.assertFalse(result['changed'])
def test_create_missing_required_fields(self):
"""Establish that if we attempt to create a record and don't specify
all required fields, that we raise BadRequest.
"""
# Create a resource with a required field that isn't the name
# field.
class BarResource(models.Resource):
endpoint = '/bar/'
name = models.Field(unique=True)
required = models.Field()
res = BarResource()
# Attempt to write the resource and prove that it fails.
with client.test_mode as t:
t.register_json('/bar/?name=foo', {'count': 0, 'results': [],
'next': None, 'previous': None})
with self.assertRaises(exc.BadRequest):
res.create(name='foo')
def test_modify(self):
"""Establish that the modify method works in the way we expect,
given a normal circumstance.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
t.register_json('/foo/42/',
{'changed': True, 'id': 42}, method='PATCH')
result = self.res.modify(42, description='spam')
self.assertTrue(result['changed'])
self.assertEqual(t.requests[1].body, '{"description": "spam"}')
def test_modify_no_changes(self):
"""Establish that the modify method does not actually attempt
a modification if there are no changes.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
result = self.res.modify(42, description='baz')
self.assertFalse(result['changed'])
self.assertEqual(len(t.requests), 1)
def test_modify_ignore_kwargs_none(self):
"""Establish that we ignore keyword arguments set to None when
performing writes.
"""
with client.test_mode as t:
t.register_json('/foo/42/', {'id': 42, 'name': 'bar',
'description': 'baz'})
result = self.res.modify(42, name=None, description='baz')
self.assertFalse(result['changed'])
self.assertEqual(len(t.requests), 1)
self.assertNotIn('name', t.requests[0].url)
def test_write_file_like_object(self):
"""Establish that our write method, if it gets a file-like object,
correctly reads it and uses the file's value as what it sends.
"""
sio = StringIO('bar')
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar', 'description': 'baz'},
], 'next': None, 'previous': None})
result = self.res.modify(name=sio, description='baz')
self.assertFalse(result['changed'])
self.assertIn('name=bar', t.requests[0].url)
def test_delete_with_pk(self):
"""Establish that calling `delete` and providing a primary key
works in the way that we expect.
"""
with client.test_mode as t:
t.register('/foo/42/', '', method='DELETE')
result = self.res.delete(42)
self.assertTrue(result['changed'])
def test_delete_without_pk(self):
"""Establish that calling `delete` with keyword arguments works
in the way that we expect.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar', 'description': 'baz'},
], 'next': None, 'previous': None})
t.register('/foo/42/', '', method='DELETE')
result = self.res.delete(name='bar')
self.assertEqual(len(t.requests), 2)
self.assertTrue(t.requests[1].url.endswith('/foo/42/'))
self.assertTrue(result['changed'])
def test_delete_with_pk_already_missing(self):
"""Establish that calling `delete` on a record that does not exist
returns back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/42/', '', method='DELETE', status_code=404)
result = self.res.delete(42)
self.assertFalse(result['changed'])
def test_delete_with_pk_already_missing_exc(self):
"""Establish that calling `delete` on a record that does not
exist raises an exception if requested.
"""
with client.test_mode as t:
t.register_json('/foo/42/', '', method='DELETE', status_code=404)
with self.assertRaises(exc.NotFound):
result = self.res.delete(42, fail_on_missing=True)
def test_delete_without_pk_already_missing(self):
"""Establish that calling `delete` on a record without a primary
key correctly sends back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': []})
result = self.res.delete(name='bar')
self.assertFalse(result['changed'])
def test_delete_without_pk_already_missing_exc(self):
"""Establish that calling `delete` on a record without a primary
key correctly sends back an unchanged response.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 0, 'results': []})
with self.assertRaises(exc.NotFound):
result = self.res.delete(name='bar', fail_on_missing=True)
def test_assoc_already_present(self):
"""Establish that the _assoc method returns an unchanged status
message if it attempts to associate two records that are already
associated.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [
{'id': 84},
], 'next': None, 'previous': None})
result = self.res._assoc('bar', 42, 84)
self.assertFalse(result['changed'])
def test_assoc_not_already_present(self):
"""Establish that the _assoc method returns an changed status
message and associates objects if appropriate.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []})
t.register_json('/foo/42/bar/', {}, method='POST')
result = self.res._assoc('bar', 42, 84)
self.assertEqual(json.loads(t.requests[1].body),
{'associate': True, 'id': 84})
self.assertTrue(result['changed'])
def test_disassoc_not_already_present(self):
"""Establish that the _disassoc method returns an unchanged status
        message if it attempts to disassociate two records that are not
associated.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 0, 'results': []})
result = self.res._disassoc('bar', 42, 84)
self.assertFalse(result['changed'])
def test_disassoc_already_present(self):
"""Establish that the _assoc method returns an changed status
message and associates objects if appropriate.
"""
with client.test_mode as t:
t.register_json('/foo/42/bar/?id=84', {'count': 1, 'results': [
{'id': 84},
], 'next': None, 'previous': None})
t.register_json('/foo/42/bar/', {}, method='POST')
result = self.res._disassoc('bar', 42, 84)
self.assertEqual(json.loads(t.requests[1].body),
{'disassociate': True, 'id': 84})
self.assertTrue(result['changed'])
def test_lookup_with_unique_field_not_present(self):
"""Establish that a if _lookup is invoked without any unique
field specified, that BadRequest is raised.
"""
with client.test_mode as t:
with self.assertRaises(exc.BadRequest):
self.res._lookup(description='abcd')
def test_lookup_errant_found(self):
"""Establish that if _lookup is invoked and finds a record when it
should not, that an appropriate exception is raised.
"""
with client.test_mode as t:
t.register_json('/foo/?name=bar', {'count': 1, 'results': [
{'id': 42, 'name': 'bar'},
], 'next': None, 'previous': None})
with self.assertRaises(exc.Found):
self.res._lookup(name='bar', fail_on_found=True)
class MonitorableResourcesTests(unittest.TestCase):
"""Estblaish that the MonitorableResource abstract class works in the
way that we expect.
"""
def test_status_not_implemented(self):
"""Establish that the abstract MonitorableResource's status
method raises NotImplementedError.
"""
with self.assertRaises(NotImplementedError):
models.MonitorableResource().status(None)
|
|
"""Here some convenience getter methods for models."""
import re
# TODO: store original_key in the 'info' field
# TODO: these should work:
# nicety -> niceties
# orchestrate -> orchestration
# stratum -> strata
# man -> men // check if it is a suffix for example craftsman -> craftsmen
# glass -> glasses
# dig -> digging // doubling the last letter when gerund form
# affluent -> affluence // making noun from adjective
# TODO:
# detect if a word directly after slash is present also somewhere before,
# if yes, then elongate the boundary of slash operator arguments
# vanish into thin air/vanish from the face of the earth (=used for emphasis)
IRREGULARS = ['beat,beat,beaten', 'become,became,become', 'begin,began,begun', 'bend,bent,bent', 'bet,bet,bet',
'bite,bit,bitten', 'bleed,bled,bled', 'blow,blew,blown', 'break,broke,broken', 'breed,bred,bred',
'bring,brought,brought', 'build,built,built', 'burn,burnt,burned,burnt,burned', 'buy,bought,bought',
'catch,caught,caught', 'choose,chose,chosen', 'come,came,come', 'cost,cost,cost', 'cut,cut,cut', 'do,did,done',
'dig,dug,dug', 'draw,drew,drawn', 'dream,dreamt,dreamed,dreamt,dreamed', 'drink,drank,drunk', 'drive,drove,driven',
'eat,ate,eaten', 'fall,fell,fallen', 'feed,fed,fed', 'feel,felt,felt', 'fight,fought,fought', 'find,found,found',
'fly,flew,flown', 'forget,forgot,forgotten', 'forgive,forgave,forgiven', 'freeze,froze,frozen', 'get,got,got',
'give,gave,given', 'go,went,gone', 'grow,grew,grown', 'have,had,had', 'hear,heard,heard', 'hide,hid,hidden',
'hit,hit,hit', 'hold,held,held', 'hurt,hurt,hurt', 'keep,kept,kept', 'know,knew,known', 'lay,laid,laid',
'lead,led,led', 'lean,leant,leaned,leant,leaned', 'leave,left,left', 'lend,lent,lent', 'let,let,let',
'lose,lost,lost', 'make,made,made', 'mean,meant,meant', 'meet,met,met', 'pay,paid,paid', 'put,put,put',
'quit,quit,quit', 'read,read,read', 'ride,rode,ridden', 'ring,rang,rung', 'rise,rose,risen', 'run,ran,run',
'say,said,said', 'see,saw,seen', 'sell,sold,sold', 'send,sent,sent', 'set,set,set', 'shake,shook,shaken',
'shine,shone,shone', 'shoe,shod,shod', 'shoot,shot,shot', 'show,showed,shown', 'shrink,shrank,shrunk',
'shut,shut,shut', 'sing,sang,sung', 'sink,sank,sunk', 'sit,sat,sat', 'sleep,slept,slept', 'speak,spoke,spoken',
'spend,spent,spent', 'spill,spilt,spilled,spilt,spilled', 'spread,spread,spread', 'speed,sped,sped',
'stand,stood,stood', 'steal,stole,stolen', 'stick,stuck,stuck', 'sting,stung,stung', 'stink,stank,stunk',
'swear,swore,sworn', 'sweep,swept,swept', 'swim,swam,swum', 'swing,swung,swung', 'take,took,taken',
'teach,taught,taught', 'tear,tore,torn', 'tell,told,told', 'think,thought,thought', 'throw,threw,thrown',
'understand,understood,understood', 'wake,woke,woken', 'wear,wore,worn', 'win,won,won', 'write,wrote,written']
class EraseAlgorithm(object):
def __init__(self):
self.irregulars = {}
for i in IRREGULARS:
i = i.split(",")
self.irregulars[i[0]] = "/".join(i)
def match_words(self, txt, pat, try_irreg=True):
"""The order of arguments is relevant here."""
txt = txt.lower()
pat = pat.lower()
if pat in self.irregulars and try_irreg:
return self.match_words(txt, self.irregulars[pat], try_irreg=False)
if '/' in pat:
pat = pat.split('/')
for i in pat:
if self.match_words(txt, i, try_irreg=try_irreg):
return True
return False
placeholders = [
"you", "your", "someone", "somebody", "something",
"a", "an"
]
if pat in placeholders:
return True
if txt.startswith(pat) and len(txt) - len(pat) <= 2:
return True
if len(txt) > 5 and txt[-3:] == "ing":
txt = txt[:-3]
if txt.startswith(pat) and len(txt) - len(pat) <= 1:
return True
if pat.startswith(txt) and len(pat) - len(txt) <= 1:
return True
return False
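    # Illustrative behaviour of the matcher above: match_words("quitting",
    # "quit") is True, because "quit" expands through IRREGULARS and the
    # gerund suffix of the text word is stripped, while match_words("quit",
    # "quitting") is False once the prefix-length checks fail -- hence
    # "the order of arguments is relevant".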
def anonymize_word(self, wrd):
return ("_" * len(wrd))
def try_erase_lists(self, text, pattern):
"""Take two lists of words and return erased list of words on
success and None on failure."""
res = []
res_key = []
while True:
if pattern == []:
break
elif text == []:
return None
            # Scan ahead at most six words of the text for one that matches
            # pattern[0]; the skipped words are kept verbatim, the matching
            # word is blanked out and remembered as part of the key.
            for offset in range(min(len(text), 6)):
                if self.match_words(text[offset], pattern[0]):
                    res.extend(text[:offset])
                    res.append(self.anonymize_word(text[offset]))
                    res_key.append(text[offset])
                    text = text[offset + 1:]
                    pattern = pattern[1:]
                    break
            else:
                # pattern[0] was not found within the lookahead window.
                return None
while text != []:
res.append(text[0])
text = text[1:]
return (res, res_key)
def erase_lists(self, text, pattern):
res = []
while text != []:
tmp = self.try_erase_lists(text, pattern)
if tmp is not None:
(r, r_k) = tmp
return (res + r, r_k)
res.append(text[0])
text = text[1:]
return None
def split_sentence(self, text):
text = text.replace(",", " , ")
text = text.replace(".", " . ")
text = text.replace("'", " ' ")
text = text.replace(" - ", " ~ ")
text = text.replace("-", " - ")
text = text.split()
return text
def join_sentence(self, text):
res = " ".join(text)
res = res.replace(" ,", ",")
res = res.replace(" .", ".")
res = res.replace(" ' ", "'")
res = res.replace(" - ", "-")
res = res.replace(" ~ ", " - ")
return res
def remove_irrelevant_parts(self, sentence):
res = sentence
# remove 'something' and 'someone'
res = filter(
lambda i: not(i.startswith("some") and len(i) > 4), res)
return res
def remove_parentheses(self, text):
return re.sub(r'\([^)]*\)', '', text)
def glue_short_words(self, text):
# text = text.replace(" the ", " the^")
# text = text.replace(" a ", " a^")
# text = text.replace(" an ", " an^")
        # maybe not a good idea:
#text = text.replace(" my ", " my^")
#text = text.replace(" your ", " your^")
#text = text.replace(" our ", " our^")
#text = text.replace(" his ", " his^")
#text = text.replace(" her ", " her^")
return text
def erase_sentence(self, text, pat):
pat = self.remove_parentheses(pat)
pat = pat.replace("do something", "")
pat = pat.replace("doing something", "")
pat = pat.replace("did something", "")
pat = pat.replace("does something", "")
pat = pat.replace("etc", "")
text = self.glue_short_words(text)
pat = self.glue_short_words(pat)
text = self.split_sentence(text)
pat = self.split_sentence(pat)
if len(pat) > 2:
pat = self.remove_irrelevant_parts(pat)
# print(text)
# print(pat)
res_comp = self.erase_lists(text, pat)
if res_comp is None:
return None
res, res_key = res_comp
res = self.join_sentence(res)
res_key = self.join_sentence(res_key)
res_key = res_key.lower()
res = res.replace("^", " ")
res_key = res_key.replace("^", " ")
return (res, res_key)
erase_alg = EraseAlgorithm()
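# Minimal usage sketch of the eraser above (illustrative values only):
#
#     erase_alg.erase_sentence("He decided to quit his job.", "quit")
#
# returns ("He decided to ____ his job.", "quit"): the key word is blanked
# out of the sentence and handed back separately, lower-cased, as the key.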
class Base(object):
@classmethod
def erase(cls, text, s):
if isinstance(s, basestring):
s = [s]
s = reversed(sorted(s, key=len))
for i in s:
text = text.replace(i, " _ "*len(i) + " ")
return text
def get_original_keys(self):
if self.original_key is None:
return []
k = self.original_key
k = [i.strip() for i in k.split("|")]
return k
def format_key_html(self):
k = self.get_keys()
res = "<b>"
res += "</b><i> or </i><b>".join(k)
res += "</b>"
return res
def format_original_key_html(self):
k = self.get_original_keys()
res = "<b>"
res += "</b><i> or </i><b>".join(k)
res += "</b>"
return res
class BaseSense(Base):
def get_erased_definition(self):
res_content = None
res_key = None
for key in sorted(self.get_keys(), key=len):
res_comp = erase_alg.erase_sentence(self.definition, key)
if res_comp is not None:
res_content, res_key = res_comp
break
if res_content is None:
return self.definition
else:
return res_content
def create_anki_note(self):
"""Create a note only with definition, without any examples.
Return 2-tuple: (front, back)
"""
front = self.get_erased_definition()
back = self.format_key_html()
return (front, back)
class Example(Base):
def get_keys(self):
if self.sense:
return self.get_original_keys() or self.sense.get_keys()
else:
return self.get_original_keys() or self.subsense.get_keys()
def get_erased_content(self):
res_content = None
res_key = None
for key in sorted(self.get_keys(), key=len):
res_comp = erase_alg.erase_sentence(self.content, key)
if res_comp is not None:
res_content, res_key = res_comp
break
if res_content is None:
return self.content
else:
return res_content
def create_anki_note(self):
res_content = None
res_key = None
for key in sorted(self.get_keys(), key=len):
res_comp = erase_alg.erase_sentence(self.content, key)
if res_comp is not None:
res_content, res_key = res_comp
break
if res_content is None:
res_content = self.content
res_key = self.format_key_html()
else:
res_key = "<b>" + res_key + "</b>"
front = "<i>" + res_content + "</i> <br />"
front += "<span style='color: grey;'> ("
if self.sense is not None:
front += self.sense.get_erased_definition()
else:
front += self.subsense.get_erased_definition()
front += ") </span>"
back = res_key
'''
print("\n\n==============")
print(front)
print(back)
print("==============\n\n")
'''
return (front, back)
class SubSense(BaseSense):
def get_keys(self):
return self.get_original_keys() or self.sense.get_keys()
class Sense(BaseSense):
def get_keys(self):
return self.get_original_keys() or self.entry.get_keys()
class Link(object):
pass
class Entry(Base):
def get_keys(self):
return self.get_original_keys()
|
|
# -*- coding: utf-8 -*-
from io import BytesIO, BufferedReader
from pytest import raises
from watson.auth.providers.session.decorators import auth, login, logout, forgotten, reset
from watson.events import types
from watson.framework import controllers, exceptions
from tests.watson.auth import support
class SampleController(controllers.Action):
@auth(login_redirect='login')
def index_action(self):
return 'index'
@login
def login_action(self, form):
return 'login'
@logout
def logout_action(self):
return 'logout'
@logout(redirect='/custom-logout')
def logout_custom_action(self):
return 'logout'
class BaseDecoratorCase(object):
def setup(self):
controller = support.app.container.get(
'tests.watson.auth.decorators.test_session.SampleController')
event = types.Event('test', params={
'context': {
'request': self._generate_request()
}
})
controller.event = event
self.controller = controller
def _generate_request(self, **kwargs):
request = support.Request.from_environ(
support.sample_environ(**kwargs), 'watson.http.sessions.Memory')
request.user = None
return request
class TestLogin(BaseDecoratorCase):
def test_no_post(self):
response = self.controller.login_action()
assert response == 'login'
def test_invalid_user(self):
post_data = 'username=simon&password=test'
environ = support.sample_environ(
PATH_INFO='/login',
REQUEST_METHOD='POST',
CONTENT_LENGTH=len(post_data))
environ['wsgi.input'] = BufferedReader(
BytesIO(post_data.encode('utf-8')))
self.controller.request = self._generate_request(**environ)
response = self.controller.login_action()
assert response.headers['location'].endswith('/login')
def test_invalid_form(self):
post_data = 'username=simon'
environ = support.sample_environ(
REQUEST_METHOD='POST',
CONTENT_LENGTH=len(post_data))
environ['wsgi.input'] = BufferedReader(
BytesIO(post_data.encode('utf-8')))
self.controller.request = self._generate_request(**environ)
self.controller.login_action()
assert len(self.controller.flash_messages) == 1
def test_valid_user(self):
post_data = 'username=admin&password=test'
environ = support.sample_environ(
REQUEST_METHOD='POST',
CONTENT_LENGTH=len(post_data))
environ['wsgi.input'] = BufferedReader(
BytesIO(post_data.encode('utf-8')))
self.controller.request = self._generate_request(**environ)
response = self.controller.login_action()
assert response.headers['location'] == '/'
def test_already_authenticated_user(self):
self.controller.request.user = support.regular_user
response = self.controller.login_action()
assert response.headers['location'] == '/'
def test_login_redirect_to_referrer(self):
post_data = 'username=admin&password=test'
environ = support.sample_environ(
REQUEST_METHOD='POST',
CONTENT_LENGTH=len(post_data),
PATH_INFO='/login',
QUERY_STRING='redirect=http%3A%2F%2F127.0.0.1%2Fexisting-url%253Fto-here%2526and-here')
environ['wsgi.input'] = BufferedReader(
BytesIO(post_data.encode('utf-8')))
self.controller.request = self._generate_request(**environ)
response = self.controller.login_action()
assert response.headers['location'].endswith('/existing-url?to-here&and-here')
class TestLogout(BaseDecoratorCase):
def setup(self):
post_data = 'username=admin&password=test'
environ = support.sample_environ(
REQUEST_METHOD='POST',
CONTENT_LENGTH=len(post_data))
environ['wsgi.input'] = BufferedReader(
BytesIO(post_data.encode('utf-8')))
self.controller = support.app.container.get(
'tests.watson.auth.decorators.test_session.SampleController')
self.controller.request = self._generate_request(**environ)
self.controller.login_action()
def test_logout(self):
self.controller.logout_action()
assert not self.controller.request.user
def test_redirect_logout(self):
response = self.controller.logout_custom_action()
assert response.headers['location'] == '/custom-logout'
class TestAuth(BaseDecoratorCase):
def test_unauthenticated(self):
response = self.controller.index_action()
assert response.headers['location'].startswith('/login')
# def test_authenticated(self):
# self.controller.request.session['watson.user'] = 'regular'
# response = self.controller.index_action()
# assert response == 'index'
# def test_authenticated_injected_user(self):
# self.controller.request.session['watson.user'] = 'someone'
# response = self.controller.index_action()
# assert response.headers['location'] == '/login'
# def test_authenticated_no_matching_role(self):
# self.controller.request.session['watson.user'] = 'regular'
# response = self.controller.admin_role_action()
# assert response.headers['location'] == 'unauthorized'
# def test_authenticated_matching_role(self):
# self.controller.request.session['watson.user'] = 'admin'
# response = self.controller.admin_role_action()
# assert response == 'admin role'
# def test_authenticated_no_matching_permission(self):
# self.controller.request.session['watson.user'] = 'regular'
# response = self.controller.permissions_action()
# assert response.headers['location'] == 'unauthorized'
# def test_authenticated_matching_permission(self):
# self.controller.request.session['watson.user'] = 'admin'
# response = self.controller.permissions_action()
# assert response == 'permissions'
# def test_authenticated_matching_role_permission(self):
# self.controller.request.session['watson.user'] = 'admin'
# response = self.controller.permissions_role()
# assert response == 'permissions and role'
# def test_unauthenticated_custom_url(self):
# response = self.controller.unauthed_custom_url_action()
# assert response.headers['location'] == '/unauthed-test'
# def test_unauthorized_custom_url(self):
# self.controller.request.session['watson.user'] = 'regular'
# response = self.controller.unauthorized_custom_url_action()
# assert response.headers['location'] == '/unauthorized-test'
# def test_raises_404_unauthorized(self):
# with raises(exceptions.NotFoundError):
# self.controller.request.session['watson.user'] = 'regular'
# self.controller.unauthorized_404_action()
# def test_redirect_to_login_with_existing_url(self):
# environ = support.sample_environ(
# PATH_INFO='/existing-url', QUERY_STRING='to-here&and-here')
# self.controller.request = self._generate_request(**environ)
# response = self.controller.unauthed_url_redirect()
# assert response.headers['location'] == 'login?redirect=http%3A%2F%2F127.0.0.1%2Fexisting-url%3Fto-here%26and-here'
# def test_valid_requires(self):
# self.controller.request.session['watson.user'] = 'regular'
# assert self.controller.valid_action() == 'valid'
# def test_invalid_requires(self):
# self.controller.request.session['watson.user'] = 'regular'
# response = self.controller.invalid_action()
# assert response.headers['location'] == '/unauthorized-test'
# class TestForgottenPassword(object):
# def setup(self):
# controller = support.app.container.get(
# 'tests.watson.auth.test_decorators.SampleController')
# controller.request = Request.from_environ(sample_environ(), 'watson.http.sessions.Memory')
# self.controller = controller
# def test_valid_user(self):
# post_data = 'username=test'
# environ = sample_environ(PATH_INFO='/forgotten-password',
# REQUEST_METHOD='POST',
# CONTENT_LENGTH=len(post_data))
# environ['wsgi.input'] = BufferedReader(
# BytesIO(post_data.encode('utf-8')))
# self.controller.request = Request.from_environ(environ, 'watson.http.sessions.Memory')
# response = self.controller.forgotten_action()
# assert response.headers['location'] == 'http://127.0.0.1/forgotten-password'
# def test_invalid_user(self):
# post_data = 'username=doesnt_exist'
# environ = sample_environ(PATH_INFO='/forgotten-password',
# REQUEST_METHOD='POST',
# CONTENT_LENGTH=len(post_data))
# environ['wsgi.input'] = BufferedReader(
# BytesIO(post_data.encode('utf-8')))
# self.controller.request = Request.from_environ(environ, 'watson.http.sessions.Memory')
# response = self.controller.forgotten_action()
# assert response == 'forgotten'
# class TestResetPassword(object):
# def setup(self):
# controller = support.app.container.get(
# 'tests.watson.auth.test_decorators.SampleController')
# controller.request = Request.from_environ(sample_environ(), 'watson.http.sessions.Memory')
# self.controller = controller
# def test_valid_token(self):
# authenticator = support.app.container.get('auth_authenticator')
# manager = managers.ForgottenPasswordToken(
# config=support.app.container.get('application.config')['auth']['forgotten_password'],
# session=authenticator.session,
# mailer=support.app.container.get('mailer'),
# email_field='email')
# user = authenticator.get_user('test')
# token = manager.create_token(
# user, request=support.request)
# post_data = 'password=test1&confirm_password=test1'
# environ = sample_environ(PATH_INFO='/reset-password',
# QUERY_STRING='token={}'.format(token.token),
# REQUEST_METHOD='POST',
# CONTENT_LENGTH=len(post_data))
# environ['wsgi.input'] = BufferedReader(
# BytesIO(post_data.encode('utf-8')))
# self.controller.request = Request.from_environ(environ, 'watson.http.sessions.Memory')
# response = self.controller.reset_action()
# assert response.headers['location'] == '/'
# def test_invalid_token(self):
# post_data = ''
# environ = sample_environ(PATH_INFO='/reset-password',
# REQUEST_METHOD='POST',
# CONTENT_LENGTH=len(post_data))
# environ['wsgi.input'] = BufferedReader(
# BytesIO(post_data.encode('utf-8')))
# self.controller.request = Request.from_environ(environ, 'watson.http.sessions.Memory')
# response = self.controller.reset_action()
# assert response.headers['location'] == '/'
|
|
# Common utilities and Python wrappers for qemu-iotests
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import subprocess
import string
import unittest
import sys; sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'scripts', 'qmp'))
import qmp
import struct
__all__ = ['imgfmt', 'imgproto', 'test_dir', 'qemu_img', 'qemu_io',
           'VM', 'QMPTestCase', 'notrun', 'main']
# This will not work if arguments or path contain spaces but is necessary if we
# want to support the override options that ./check supports.
qemu_img_args = os.environ.get('QEMU_IMG', 'qemu-img').strip().split(' ')
qemu_io_args = os.environ.get('QEMU_IO', 'qemu-io').strip().split(' ')
qemu_args = os.environ.get('QEMU', 'qemu').strip().split(' ')
imgfmt = os.environ.get('IMGFMT', 'raw')
imgproto = os.environ.get('IMGPROTO', 'file')
test_dir = os.environ.get('TEST_DIR', '/var/tmp')
output_dir = os.environ.get('OUTPUT_DIR', '.')
cachemode = os.environ.get('CACHEMODE')
socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper')
def qemu_img(*args):
'''Run qemu-img and return the exit code'''
devnull = open('/dev/null', 'r+')
return subprocess.call(qemu_img_args + list(args), stdin=devnull, stdout=devnull)
def qemu_img_verbose(*args):
'''Run qemu-img without suppressing its output and return the exit code'''
return subprocess.call(qemu_img_args + list(args))
def qemu_img_pipe(*args):
'''Run qemu-img and return its output'''
return subprocess.Popen(qemu_img_args + list(args), stdout=subprocess.PIPE).communicate()[0]
def qemu_io(*args):
'''Run qemu-io and return the stdout data'''
args = qemu_io_args + list(args)
return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
def compare_images(img1, img2):
'''Return True if two image files are identical'''
return qemu_img('compare', '-f', imgfmt,
'-F', imgfmt, img1, img2) == 0
def create_image(name, size):
'''Create a fully-allocated raw image with sector markers'''
file = open(name, 'w')
i = 0
while i < size:
sector = struct.pack('>l504xl', i / 512, i / 512)
file.write(sector)
i = i + 512
file.close()
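def verify_image_markers(name, size):
    '''Companion sketch to create_image() (assumed helper, not used by the
    harness itself): check that every 512-byte sector starts and ends with
    its sector number, i.e. the '>l504xl' layout written above'''
    file = open(name, 'rb')
    i = 0
    while i < size:
        first, last = struct.unpack('>l504xl', file.read(512))
        if first != i / 512 or last != i / 512:
            file.close()
            return False
        i = i + 512
    file.close()
    return True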
class VM(object):
'''A QEMU VM'''
def __init__(self):
self._monitor_path = os.path.join(test_dir, 'qemu-mon.%d' % os.getpid())
self._qemu_log_path = os.path.join(test_dir, 'qemu-log.%d' % os.getpid())
self._args = qemu_args + ['-chardev',
'socket,id=mon,path=' + self._monitor_path,
'-mon', 'chardev=mon,mode=control',
'-qtest', 'stdio', '-machine', 'accel=qtest',
'-display', 'none', '-vga', 'none']
self._num_drives = 0
# This can be used to add an unused monitor instance.
def add_monitor_telnet(self, ip, port):
args = 'tcp:%s:%d,server,nowait,telnet' % (ip, port)
self._args.append('-monitor')
self._args.append(args)
def add_drive(self, path, opts=''):
'''Add a virtio-blk drive to the VM'''
options = ['if=virtio',
'format=%s' % imgfmt,
'cache=%s' % cachemode,
'file=%s' % path,
'id=drive%d' % self._num_drives]
if opts:
options.append(opts)
self._args.append('-drive')
self._args.append(','.join(options))
self._num_drives += 1
return self
def pause_drive(self, drive, event=None):
'''Pause drive r/w operations'''
if not event:
self.pause_drive(drive, "read_aio")
self.pause_drive(drive, "write_aio")
return
self.qmp('human-monitor-command',
command_line='qemu-io %s "break %s bp_%s"' % (drive, event, drive))
def resume_drive(self, drive):
self.qmp('human-monitor-command',
command_line='qemu-io %s "remove_break bp_%s"' % (drive, drive))
def hmp_qemu_io(self, drive, cmd):
'''Write to a given drive using an HMP command'''
return self.qmp('human-monitor-command',
command_line='qemu-io %s "%s"' % (drive, cmd))
def add_fd(self, fd, fdset, opaque, opts=''):
'''Pass a file descriptor to the VM'''
options = ['fd=%d' % fd,
'set=%d' % fdset,
'opaque=%s' % opaque]
if opts:
options.append(opts)
self._args.append('-add-fd')
self._args.append(','.join(options))
return self
def send_fd_scm(self, fd_file_path):
# In iotest.py, the qmp should always use unix socket.
assert self._qmp.is_scm_available()
bin = socket_scm_helper
        if not os.path.exists(bin):
            print "Scm helper program not found at path '%s'." % bin
return -1
fd_param = ["%s" % bin,
"%d" % self._qmp.get_sock_fd(),
"%s" % fd_file_path]
devnull = open('/dev/null', 'rb')
p = subprocess.Popen(fd_param, stdin=devnull, stdout=sys.stdout,
stderr=sys.stderr)
return p.wait()
def launch(self):
'''Launch the VM and establish a QMP connection'''
devnull = open('/dev/null', 'rb')
qemulog = open(self._qemu_log_path, 'wb')
try:
self._qmp = qmp.QEMUMonitorProtocol(self._monitor_path, server=True)
self._popen = subprocess.Popen(self._args, stdin=devnull, stdout=qemulog,
stderr=subprocess.STDOUT)
self._qmp.accept()
except:
os.remove(self._monitor_path)
raise
def shutdown(self):
'''Terminate the VM and clean up'''
        if self._popen is not None:
self._qmp.cmd('quit')
self._popen.wait()
os.remove(self._monitor_path)
os.remove(self._qemu_log_path)
self._popen = None
underscore_to_dash = string.maketrans('_', '-')
def qmp(self, cmd, **args):
'''Invoke a QMP command and return the result dict'''
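        # Underscores in the Python keyword arguments are translated to dashes
        # in the QMP argument names, e.g. qmp('block-stream', device='drive0',
        # on_error='report') sends the argument as 'on-error'.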
qmp_args = dict()
for k in args.keys():
qmp_args[k.translate(self.underscore_to_dash)] = args[k]
return self._qmp.cmd(cmd, args=qmp_args)
def get_qmp_event(self, wait=False):
        '''Poll for one queued QMP event and return it'''
return self._qmp.pull_event(wait=wait)
def get_qmp_events(self, wait=False):
'''Poll for queued QMP events and return a list of dicts'''
events = self._qmp.get_events(wait=wait)
self._qmp.clear_events()
return events
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
class QMPTestCase(unittest.TestCase):
'''Abstract base class for QMP test cases'''
def dictpath(self, d, path):
'''Traverse a path in a nested dict'''
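        # Path components are separated by '/', with an optional [index] suffix
        # for list elements, e.g. 'data/device' or 'return[0]/inserted/file'.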
for component in path.split('/'):
m = index_re.match(component)
if m:
component, idx = m.groups()
idx = int(idx)
if not isinstance(d, dict) or component not in d:
self.fail('failed path traversal for "%s" in "%s"' % (path, str(d)))
d = d[component]
if m:
if not isinstance(d, list):
self.fail('path component "%s" in "%s" is not a list in "%s"' % (component, path, str(d)))
try:
d = d[idx]
except IndexError:
self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d)))
return d
def assert_qmp_absent(self, d, path):
try:
result = self.dictpath(d, path)
except AssertionError:
return
self.fail('path "%s" has value "%s"' % (path, str(result)))
def assert_qmp(self, d, path, value):
'''Assert that the value for a specific path in a QMP dict matches'''
result = self.dictpath(d, path)
self.assertEqual(result, value, 'values not equal "%s" and "%s"' % (str(result), str(value)))
def assert_no_active_block_jobs(self):
result = self.vm.qmp('query-block-jobs')
self.assert_qmp(result, 'return', [])
def cancel_and_wait(self, drive='drive0', force=False, resume=False):
'''Cancel a block job and wait for it to finish, returning the event'''
result = self.vm.qmp('block-job-cancel', device=drive, force=force)
self.assert_qmp(result, 'return', {})
if resume:
self.vm.resume_drive(drive)
cancelled = False
result = None
while not cancelled:
for event in self.vm.get_qmp_events(wait=True):
if event['event'] == 'BLOCK_JOB_COMPLETED' or \
event['event'] == 'BLOCK_JOB_CANCELLED':
self.assert_qmp(event, 'data/device', drive)
result = event
cancelled = True
self.assert_no_active_block_jobs()
return result
def wait_until_completed(self, drive='drive0', check_offset=True):
'''Wait for a block job to finish, returning the event'''
completed = False
while not completed:
for event in self.vm.get_qmp_events(wait=True):
if event['event'] == 'BLOCK_JOB_COMPLETED':
self.assert_qmp(event, 'data/device', drive)
self.assert_qmp_absent(event, 'data/error')
if check_offset:
self.assert_qmp(event, 'data/offset', self.image_len)
self.assert_qmp(event, 'data/len', self.image_len)
completed = True
self.assert_no_active_block_jobs()
return event
def notrun(reason):
'''Skip this test suite'''
# Each test in qemu-iotests has a number ("seq")
seq = os.path.basename(sys.argv[0])
open('%s/%s.notrun' % (output_dir, seq), 'wb').write(reason + '\n')
print '%s not run: %s' % (seq, reason)
sys.exit(0)
def main(supported_fmts=[]):
'''Run tests'''
if supported_fmts and (imgfmt not in supported_fmts):
notrun('not suitable for this image format: %s' % imgfmt)
# We need to filter out the time taken from the output so that qemu-iotest
# can reliably diff the results against master output.
import StringIO
output = StringIO.StringIO()
class MyTestRunner(unittest.TextTestRunner):
def __init__(self, stream=output, descriptions=True, verbosity=1):
unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
# unittest.main() will use sys.exit() so expect a SystemExit exception
try:
unittest.main(testRunner=MyTestRunner)
finally:
sys.stderr.write(re.sub(r'Ran (\d+) tests? in [\d.]+s', r'Ran \1 tests', output.getvalue()))
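# A minimal sketch of how these helpers are typically combined in a test case
# (illustrative only; the image name, size and format below are assumptions,
# not taken from any particular test in this suite):
#
#   class TestExample(QMPTestCase):
#       image_len = 1024 * 1024
#
#       def setUp(self):
#           self.test_img = os.path.join(test_dir, 'test.img')
#           qemu_img('create', '-f', imgfmt, self.test_img, str(self.image_len))
#           self.vm = VM().add_drive(self.test_img)
#           self.vm.launch()
#
#       def tearDown(self):
#           self.vm.shutdown()
#           os.remove(self.test_img)
#
#       def test_query_block(self):
#           result = self.vm.qmp('query-block')
#           self.assert_qmp(result, 'return[0]/device', 'drive0')
#
#   if __name__ == '__main__':
#       main(supported_fmts=['qcow2'])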
|
|
import mock
import csv
import furl
import pytz
import pytest
from datetime import datetime, timedelta
from nose import tools as nt
from django.test import RequestFactory
from django.http import Http404
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from website import settings
from framework.auth import Auth
from osf.models.user import OSFUser
from osf.models.tag import Tag
from osf_tests.factories import (
UserFactory,
AuthUserFactory,
ProjectFactory,
TagFactory,
UnconfirmedUserFactory
)
from admin_tests.utilities import setup_view, setup_log_view, setup_form_view
from admin.users import views
from admin.users.forms import WorkshopForm, UserSearchForm
from osf.models.admin_log_entry import AdminLogEntry
pytestmark = pytest.mark.django_db
class TestUserView(AdminTestCase):
def test_no_guid(self):
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request)
with nt.assert_raises(AttributeError):
view.get_object()
def test_load_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
res = view.get_object()
nt.assert_is_instance(res, dict)
def test_name_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
temp_object = view.get_object()
view.object = temp_object
res = view.get_context_data()
nt.assert_equal(res[views.UserView.context_object_name], temp_object)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.UserView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
user.user_permissions.add(view_permission)
user.save()
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
response = views.UserView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestResetPasswordView(AdminTestCase):
def setUp(self):
super(TestResetPasswordView, self).setUp()
self.user = UserFactory()
self.request = RequestFactory().get('/fake_path')
self.request.user = self.user
self.plain_view = views.ResetPasswordView
self.view = setup_view(self.plain_view(), self.request, guid=self.user._id)
def test_get_initial(self):
self.view.user = self.user
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_equal(res['guid'], self.user._id)
nt.assert_equal(res['emails'], [(r, r) for r in self.user.emails.values_list('address', flat=True)])
def test_reset_password_context(self):
self.view.user = self.user
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_in((self.user.emails.first().address, self.user.emails.first().address), self.view.initial['emails'])
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.ResetPasswordView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
response = views.ResetPasswordView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestDisableUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.UserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_reactivate_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
self.view().delete(self.request)
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user(self):
view = setup_view(views.UserDeleteView(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestHamUserRestore(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.HamUserRestoreView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
self.spam_confirmed, created = Tag.objects.get_or_create(name='spam_confirmed')
self.ham_confirmed, created = Tag.objects.get_or_create(name='ham_confirmed')
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_enable_user(self):
self.user.disable_account()
self.user.save()
nt.assert_true(self.user.is_disabled)
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_false(self.user.all_tags.filter(name=self.spam_confirmed.name).exists())
nt.assert_true(self.user.all_tags.filter(name=self.ham_confirmed.name).exists())
class TestDisableSpamUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.public_node = ProjectFactory(creator=self.user, is_public=True)
self.private_node = ProjectFactory(creator=self.user, is_public=False)
self.request = RequestFactory().post('/fake_path')
self.view = views.SpamUserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_spam_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
self.public_node.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_true(self.user.all_tags.filter(name='spam_confirmed').exists())
nt.assert_false(self.public_node.is_public)
nt.assert_equal(AdminLogEntry.objects.count(), count + 3)
def test_no_user(self):
view = setup_view(self.view(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class SpamUserListMixin(object):
def setUp(self):
spam_flagged = TagFactory(name='spam_flagged')
spam_confirmed = TagFactory(name='spam_confirmed')
ham_confirmed = TagFactory(name='ham_confirmed')
self.flagged_user = UserFactory()
self.flagged_user.tags.add(spam_flagged)
self.flagged_user.save()
self.spam_user = UserFactory()
self.spam_user.tags.add(spam_confirmed)
self.spam_user.save()
self.ham_user = UserFactory()
self.ham_user.tags.add(ham_confirmed)
self.ham_user.save()
self.request = RequestFactory().post('/fake_path')
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(self.url)
request.user = user
with self.assertRaises(PermissionDenied):
self.plain_view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
spam_permission = Permission.objects.get(codename='view_spam')
user.user_permissions.add(view_permission)
user.user_permissions.add(spam_permission)
user.save()
request = RequestFactory().get(self.url)
request.user = user
response = self.plain_view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestFlaggedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestFlaggedSpamUserList, self).setUp()
self.plain_view = views.UserFlaggedSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:flagged-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.flagged_user._id)
class TestConfirmedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedSpamUserList, self).setUp()
self.plain_view = views.UserKnownSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.spam_user._id)
class TestConfirmedHamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedHamUserList, self).setUp()
self.plain_view = views.UserKnownHamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-ham')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.ham_user._id)
class TestRemove2Factor(AdminTestCase):
def setUp(self):
super(TestRemove2Factor, self).setUp()
self.user = AuthUserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.User2FactorDeleteView
self.setup_view = setup_log_view(self.view(), self.request, guid=self.user._id)
self.url = reverse('users:remove2factor', kwargs={'guid': self.user._id})
@mock.patch('osf.models.user.OSFUser.delete_addon')
def test_remove_two_factor_get(self, mock_delete_addon):
self.setup_view.delete(self.request)
mock_delete_addon.assert_called_with('twofactor')
def test_integration_delete_two_factor(self):
user_addon = self.user.get_or_add_addon('twofactor')
nt.assert_not_equal(user_addon, None)
user_settings = self.user.get_addon('twofactor')
nt.assert_not_equal(user_settings, None)
count = AdminLogEntry.objects.count()
self.setup_view.delete(self.request)
post_addon = self.user.get_addon('twofactor')
nt.assert_equal(post_addon, None)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user_permissions_raises_error(self):
guid = self.user._id
request = RequestFactory().get(self.url)
request.user = self.user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
guid = self.user._id
change_permission = Permission.objects.get(codename='change_osfuser')
self.user.user_permissions.add(change_permission)
self.user.save()
request = RequestFactory().get(self.url)
request.user = self.user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestUserWorkshopFormView(AdminTestCase):
def setUp(self):
self.user = AuthUserFactory()
self.auth = Auth(self.user)
self.view = views.UserWorkshopFormView()
self.node = ProjectFactory(creator=self.user)
self.mock_data = mock.patch.object(
csv,
'reader',
            # parse data into the proper format, handling None values as csv.reader would
side_effect=(lambda values: [[item or '' for item in value] for value in values])
)
self.mock_data.start()
def tearDown(self):
self.mock_data.stop()
def _setup_workshop(self, date):
self.workshop_date = date
self.data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, self.user.username, None],
]
self.user_exists_by_name_data = [
['number', 'date', 'location', 'topic', 'name', 'email', 'other'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, self.user.fullname, 'unknown@example.com', None],
]
self.user_not_found_data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, 'fake@example.com', None],
]
def _add_log(self, date):
self.node.add_log('log_added', params={'project': self.node._id}, auth=self.auth, log_date=date, save=True)
def test_correct_number_of_columns_added(self):
self._setup_workshop(self.node.date_created)
added_columns = ['OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Data']
result_csv = self.view.parse(self.data)
nt.assert_equal(len(self.data[0]) + len(added_columns), len(result_csv[0]))
def test_user_activity_day_of_workshop_and_before(self):
self._setup_workshop(self.node.date_created)
# add logs 0 to 48 hours back
for time_mod in range(9):
self._add_log(self.node.date_created - timedelta(hours=(time_mod * 6)))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_after_workshop(self):
self._setup_workshop(self.node.date_created - timedelta(hours=25))
self._add_log(self.node.date_created)
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
# 1 node created, 1 bookmarks collection created (new user), 1 node log
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Test workshop 30 days ago
self._setup_workshop(self.node.date_created - timedelta(days=30))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Test workshop a year ago
self._setup_workshop(self.node.date_created - timedelta(days=365))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day(self):
node_date = self.node.date_created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day_plus_hour(self):
node_date = self.node.date_created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1, hours=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day_minus_hour(self):
node_date = self.node.date_created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1) - timedelta(hours=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
def test_user_osf_account_not_found(self):
self._setup_workshop(self.node.date_created)
result_csv = self.view.parse(self.user_not_found_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, '')
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_found_by_name(self):
self._setup_workshop(self.node.date_created)
result_csv = self.view.parse(self.user_exists_by_name_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, self.user.id)
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_form_valid(self):
request = RequestFactory().post('/fake_path')
data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, '9/1/16', None, None, None, self.user.username, None],
]
uploaded = SimpleUploadedFile('test_name', bytes(csv.reader(data)), content_type='text/csv')
form = WorkshopForm(data={'document': uploaded})
form.is_valid()
form.cleaned_data['document'] = uploaded
setup_form_view(self.view, request, form)
class TestUserSearchView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory(fullname='Broken Matt Hardy')
self.user_2 = AuthUserFactory(fullname='Jeff Hardy')
self.user_3 = AuthUserFactory(fullname='Reby Sky')
self.user_4 = AuthUserFactory(fullname='King Maxel Hardy')
self.user_2_alternate_email = 'brothernero@delapidatedboat.com'
self.user_2.emails.create(address=self.user_2_alternate_email)
self.user_2.save()
self.request = RequestFactory().get('/fake_path')
self.view = views.UserFormView()
self.view = setup_form_view(self.view, self.request, form=UserSearchForm())
def test_search_user_by_guid(self):
form_data = {
'guid': self.user_1.guids.first()._id
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_name(self):
form_data = {
'name': 'Hardy'
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/search/Hardy/')
def test_search_user_by_username(self):
form_data = {
'email': self.user_1.username
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_alternate_email(self):
form_data = {
'email': self.user_2_alternate_email
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_2.guids.first()._id))
def test_search_user_list(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'Hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
def test_search_user_list_case_insensitive(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
class TestGetLinkView(AdminTestCase):
def test_get_user_confirmation_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
user_token = user.email_verifications.keys()[0]
ideal_link_path = '/confirm/{}/{}/'.format(user._id, user_token)
link = view.get_link(user)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_user_confirmation_link_with_expired_token(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
old_user_token = user.email_verifications.keys()[0]
user.email_verifications[old_user_token]['expiration'] = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(hours=24)
user.save()
link = view.get_link(user)
new_user_token = user.email_verifications.keys()[0]
link_path = str(furl.furl(link).path)
ideal_link_path = '/confirm/{}/{}/'.format(user._id, new_user_token)
nt.assert_equal(link_path, ideal_link_path)
def test_get_password_reset_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetPasswordResetLink()
view = setup_view(view, request, guid=user._id)
link = view.get_link(user)
user_token = user.verification_key_v2.get('token')
nt.assert_is_not_none(user_token)
ideal_link_path = '/resetpassword/{}/{}'.format(user._id, user_token)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_unclaimed_node_links(self):
project = ProjectFactory()
unregistered_contributor = project.add_unregistered_contributor(fullname='Brother Nero', email='matt@hardyboyz.biz', auth=Auth(project.creator))
project.save()
request = RequestFactory().get('/fake_path')
view = views.GetUserClaimLinks()
view = setup_view(view, request, guid=unregistered_contributor._id)
links = view.get_claim_links(unregistered_contributor)
unclaimed_records = unregistered_contributor.unclaimed_records
nt.assert_equal(len(links), 1)
nt.assert_equal(len(links), len(unclaimed_records.keys()))
link = links[0]
nt.assert_in(project._id, link)
nt.assert_in(unregistered_contributor.unclaimed_records[project._id]['token'], link)
class TestUserReindex(AdminTestCase):
def setUp(self):
super(TestUserReindex, self).setUp()
self.request = RequestFactory().post('/fake_path')
self.user = AuthUserFactory()
@mock.patch('website.search.search.update_user')
def test_reindex_user_elastic(self, mock_reindex_elastic):
count = AdminLogEntry.objects.count()
view = views.UserReindexElastic()
view = setup_log_view(view, self.request, guid=self.user._id)
view.delete(self.request)
nt.assert_true(mock_reindex_elastic.called)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
|
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Example master TV measurement system / tester
=============================================
Purpose and Usage
-----------------
This is an example command line tool that runs a measurement system, acting in
the role of the CSA, to check the synchronisation timing of a master TV that
it connects to. The measurements are taken by an Arduino Due microcontroller
connected via USB and with light sensor(s) and audio input(s) connected. These
observe the pattern of flashes/beeps from a test video sequence played by the
TV.
The final output is a measurement (to the nearest millisecond) of when the
flashes/beeps occurred compared to when they were expected to occur (given the
time line information being conveyed via the protocol).
See the main README.md for more information and example usage.
Use `--help` at the command line for information on arguments.
How it works
------------
This module controls the arduino (activating the appropriate pins for reading), samples the
flashes / beeps, detects the centre point timing of each observed flash / beep, and analyses
those centre points to look for the best match against the expected centre point timings.
We instantiate a CSS-WC client and a CSS-TS client here.
The wall clock client communicates with a wall clock server (specified on the command line) using
the CSS-WC protocol, so that at any instant the local wall clock value in the measuring system
closely approximates the server's wall clock.
The TV master is rendering video content. In the general case (not handled in this system), that content
could be available on the TV via different timelines (e.g from the broadcast transport stream, in which
case the timeline is expressed using presentation time stamps; or it could come e.g. from a catch-up
service, in which the timeline is expressed using whatever has been used to encode the catch-up
video delivered over IP).
Using the CSS-TS protocol, the client (here) requests the content by its id (actually by a content
id stem) and specifies which timeline to look for (both are command line arguments).
If the TV is rendering this content using this timeline, the CSS-TS master on the TV creates a
control timestamp, which records the value of the timeline clock and the value of the wallclock
at the same instant, and sends it to the client. Assuming the TV is correctly rendering the
content, there is a linear relationship between its timeline clock and its wallclock; the control
timestamp provides a point on this line.
Used in conjunction with the timeline speed multiplier (1.0 for normal playback), which provides the slope of this line,
we can therefore calculate what the value of the timeline clock should be using our local wallclock (which tracks the
remote wallclock, and adjusts as needed based on CSS-WC information).
This is handled by a local clock object, a CorrelatedClock whose tick rate is set to match that
of the requested timeline (command line parameter). This clock is correlated with the local wallclock.
Any adjustment needed to the local wallclock (based on observing the CSS-WC protocol) will cause the
correlated clock to adjust, and in turn this causes a response in a TSClientClockController object,
resulting in a new control timestamp being reported to the measuring system (actually within measurer.py)
Any such changes are remembered, along with changes of dispersion in the wallclock, picked up
by the local wallclock's algorithm, and these are available to reconstitute the changes in timeline/
wallclock relationship occurring during the data capture.
We know, from the command line, the starting value of the timeline. We also know the relationship
between the arduino due's clock and our local wall clock ... this means we can capture the flashes/beeps
and compute the corresponding timeline clock value, based on the remembered control timestamp information
and the remembered dispersion data.
These can be compared against the expected times provided by the json file.
The overall process is:
Parse the command line.
Create a wall clock client, and a CSS-TS client, the CorrelatedClock
with the same tick rate as the timeline, and the TSClientClockController.
Create a dispersion recorder hooked up to the wallclock algorithm.
Create the servers for the CSS-CII, WC and TS protocols using parsed command line.
Create the synchronisation timeline whose control timestamps will be served to the CSA.
The sync timeline has an associated clock whose tick rate is a command line argument
Create a Measurer object
The constructor connects to the Arduino and sends it commands to indicate which
light-sensor and audio inputs are to be sampled
Provide the Measurer with the TSClientClockController so that the measurer can hook
into the controller to get notification of changes in the timeline behaviour reported
over CSS_TS.
Start up the clients (wallclock and TSClientClockController). These will attempt to connect
to their servers, and timeout if unsuccessful.
Wait for the operator to indicate when the client device under test is ready.
For example, a few seconds of test video clip may have already played before
the operator arranged for the device under test to attempt to
synchronise and hence get paused because the test system has sent an initial pause
command over the CSS-TS protocol.
The measurer object then starts the capture (it is given the dispersion recorder to
use during the capture).
It sends the data capture command to the arduino, which eventually returns once its sample
buffer is full. The arduino returns the captured data along with precise timings
for the start and end of this capture.
The measurer then examines the captured data to find the times of the centre points of the beeps and/or flashes
across the channels of data received (one channel per pin).
Finally, the timing for each channel is analysed to find the best match of these centre points (the observed data)
against the expected timings for the video clip and statistics generated.
'''
import sys
import time
from ws4py.server.cherrypyserver import WebSocketPlugin
from dvbcss.protocol.cii import TimelineOption
from dvbcss.clock import SysClock, CorrelatedClock, measurePrecision, TunableClock
from dvbcss.protocol.client.wc.algorithm import LowestDispersionCandidate
from dvbcss.protocol.client.wc import WallClockClient
from dvbcss.protocol.client.ts import TSClientClockController
from measurer import Measurer
from measurer import DubiousInput
from dispersion import DispersionRecorder
import stats
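# Illustrative helper (not used by the measurement code itself): the linear
# timeline/wallclock relationship described in the module docstring means that,
# given one control timestamp (a wall clock value paired with a timeline value)
# plus the two tick rates and the timeline speed multiplier, the expected
# timeline position for any later wall clock reading can be computed directly.
# The function name and parameters are assumptions chosen for this sketch.
def _expected_timeline_ticks(wcNow, wcAtCorrelation, timelineAtCorrelation,
                             wcTickRate, timelineTickRate, speed=1.0):
    """Map a local wall clock reading onto the timeline using a control timestamp."""
    elapsedWcSecs = float(wcNow - wcAtCorrelation) / wcTickRate
    return timelineAtCorrelation + elapsedWcSecs * timelineTickRate * speed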
def createCSSClientObjects(cmdParser):
"""\
Create the client objects needed to engage in the CSS-WC and CSS-TS protocols.
    :param cmdParser: the command line parser object
"""
args = cmdParser.args
sysclock=SysClock()
wallClock=TunableClock(sysclock,tickRate=1000000000) # nanos
# measure precision of wall clock empirically
wcPrecisionNanos = measurePrecision(wallClock) * 1000000000
algorithm = LowestDispersionCandidate(wallClock,repeatSecs=0.3,timeoutSecs=0.3)
wcClient=WallClockClient(cmdParser.wcBind, args.wcUrl[0], wallClock, algorithm)
timelineClock = CorrelatedClock(wallClock, args.timelineClockFrequency)
# start recording dispersion of the wall clock
dispRecorder=DispersionRecorder(algorithm)
dispRecorder.start()
print "Connecting, requesting timeline for:"
print " Any contentId beginning with:", args.contentIdStem
print " and using timeline selector: ", args.timelineSelector
print
ts = TSClientClockController(args.tsUrl[0], args.contentIdStem, args.timelineSelector, timelineClock, correlationChangeThresholdSecs=0.0)
return (ts, timelineClock, args.timelineClockFrequency, wcClient, wallClock, wcPrecisionNanos, dispRecorder)
def startCSSClients(wallClockClient, tsClientClockController):
"""\
Start the wallclock and TS client clock controller. The CSS protocols
commence.
"""
wallClockClient.start()
tsClientClockController.connect()
if __name__ == "__main__":
from testsetupcmdline import TVTesterCmdLineParser
import time
cmdParser = TVTesterCmdLineParser()
cmdParser.setupArguments()
cmdParser.parseArguments()
cmdParser.printTestSetup()
syncTimelineClockController, \
syncTimelineClock, \
syncClockTickRate, \
wallClockClient, \
wallClock, \
wcPrecisionNanos, \
dispRecorder = createCSSClientObjects(cmdParser)
# Arduino Due micros() function precision known to be 1us
# http://forum.arduino.cc/index.php?PHPSESSID=dulptiubbkqqer7p5hv2fqc583&topic=147505.msg1108517#msg1108517
acPrecisionNanos = 1000
CONNECT_TIMEOUT = 10.0
TIMELINE_AVAILABLE_TIMEOUT = 5.0
# once clients are started, need to catch keyboard interrupt to close them
# down in event of ctrl-c to exit the app
try:
measurer = Measurer("client", \
cmdParser.pinsToMeasure, \
cmdParser.pinExpectedTimes, \
cmdParser.pinEventDurations, \
cmdParser.args.videoStartTicks, \
wallClock, \
syncTimelineClock, \
syncClockTickRate, \
wcPrecisionNanos, \
acPrecisionNanos, \
cmdParser.measurerTime)
measurer.setSyncTimeLinelockController(syncTimelineClockController)
print "Connecting..."
startCSSClients(wallClockClient, syncTimelineClockController)
# wait for a few seconds as we try to connect to CSS-TS
timeout = time.time() + CONNECT_TIMEOUT
while not syncTimelineClockController.connected and time.time() < timeout:
time.sleep(0.1)
if not syncTimelineClockController.connected:
sys.stderr.write("\nTimed out trying to connect to CSS-TS. Aborting.\n\n")
sys.exit(1)
print "Connected."
# check we're receiving control timestamps for a valid timeline
print "Syncing to timeline..."
timeout = time.time() + TIMELINE_AVAILABLE_TIMEOUT
while not syncTimelineClockController.timelineAvailable and time.time() < timeout:
time.sleep(0.1)
if not syncTimelineClockController.timelineAvailable:
sys.stderr.write("\n\nWaited a while, but timeline was not available. Aborting.\n\n")
sys.exit(1)
print "Synced to timeline."
# finally check if dispersion is sane before proceeding
currentDispersion = wallClockClient.algorithm.getCurrentDispersion()
if currentDispersion > 1000000000*1.0:
sys.stderr.write("\n\nWall clock client synced with dispersion +/- %f.3 milliseconds." % (currentDispersion / 1000000.0))
sys.stderr.write("\nWhich is greater than +/- 1 second. Aborting.\n\n")
sys.exit(1)
print
print "Beginning to measure"
measurer.capture()
# sanity check we are still connected to the CSS-TS server
        if not (syncTimelineClockController.connected and syncTimelineClockController.timelineAvailable):
            sys.stderr.write("\n\nLost connection to CSS-TS or timeline became unavailable. Aborting.\n\n")
sys.exit(1)
measurer.detectBeepsAndFlashes(dispersionFunc = dispRecorder.dispersionAt)
for channel in measurer.getComparisonChannels():
try:
index, expected, timeDifferencesAndErrors = measurer.doComparison(channel)
print
print "Results for channel: %s" % channel["pinName"]
print "----------------------------"
stats.calcAndPrintStats(index, expected, timeDifferencesAndErrors, cmdParser.args.toleranceSecs[0])
except DubiousInput:
print
print "Cannot reliably measure on pin: %s" % channel["pinName"]
print "Is input plugged into pin? Is the input level is too low?"
except KeyboardInterrupt:
pass
finally:
pass
sys.exit(0)
|
|
from uio.utils import fix_ctypes_struct, cached_getter, add_field
import ctypes
from ctypes import c_uint8 as u8, c_uint16 as u16, c_uint32 as u32
from .eirq import EIrq
ctr_t = u16 # counter value
# ePWM has a main irq and a tripzone irq. The tripzone irq uses an EIrq register
# block just like eCAP and eQEP do. The main irq's register block however is
# similar but not quite compatible.
class Irq( ctypes.Structure ):
_fields_ = [
("event", u16, 3), #rw
# 0 -
# 1 zero
# 2 max
# 3 -
# 4 up-a
# 5 down-a
# 6 up-b
# 7 down-b
("enabled", u16, 1), #rw
("", u16, 12),
("divider", u16, 2), #rw
("counter", u16, 2), #r-
("", u16, 12),
("pending", u16), #r-
("_clear", u16), #-c
("_set", u16), #-s
]
@fix_ctypes_struct
class EPwm( ctypes.Structure ):
_fields_ = [
#-------- time base ------------------------------------------------
("config", u16),
# bits 0- 1 rw counter mode: 0 up 1 down 2 updown 3 freeze
# bit 2 rw load counter on: 0 never 1 sync-in
# bit 3 rw load maximum on: 0 zero 1 write
# bits 4- 5 rw sync-out on: 0 sync-in 1 zero 2 cmp-b 3 none
# bit 6 -x trigger sync-in
# bits 7- 9 rw fine divider / 2 (0 = disabled, i.e. /1)
# bits 10-12 rw coarse divider, log2
# bit 13 rw updown counter direction on sync-in counter load
# bits 14-15 rw debug suspend: 0 hard 1 soft 2 disabled
("status", u16),
# bit 0 r- counter direction
# bit 1 rc sync-in event
# bit 2 rc max event
("", u16), #rw
("ld_counter", ctr_t), #rw
("counter", ctr_t), #rw
("ld_maximum", ctr_t), #rw
("", u16),
#-------- comparators ----------------------------------------------
#
# Four events generated:
# up-a if counter == cmp-a while counting up
# up-b if counter == cmp-b while counting up
# down-a if counter == min( cmp-a, maximum ) while counting down
# down-b if counter == min( cmp-b, maximum ) while counting down
#
# In updown mode, at an endpoint (where the direction is reversed),
# the new direction applies.
("cmp_load", u16),
# bits 0- 1 rw load cmp-a ev: 0 zero 1 max 2 both 3 none
# bits 2- 3 rw load cmp-b ev: 0 zero 1 max 2 both 3 none
# bit 4 rw load cmp-a on write (bits 0-1 ignored)
# bit 5 z-
# bit 6 rw load cmp-b on write (bits 2-3 ignored)
# bit 7 z-
# bit 8 r- load cmp-a pending
# bit 9 r- load cmp-b pending
("", u16), #r>
("ld_compare", ctr_t * 2), #r>
#-------- output control -------------------------------------------
("pwm_config", u16 * 2),
# bits 0- 1 rw action on zero
# bits 2- 3 rw action on max
# bits 4- 5 rw action on up-a
# bits 6- 7 rw action on down-a
# bits 8- 9 rw action on up-b
# bits 10-11 rw action on down-b
# 0 none 1 clear 2 set 3 toggle
#
# If counting up, the actions considered are:
# zero, up-a, up-b, max
# If counting down, the actions considered are:
# max, down-a, down-b, zero
# When an endpoint is reached in updown mode, first the actions of
# the old direction are considered and then those of the new
# direction.
#
# If multiple conditions match, the last action takes effect.
#
# A software triggered action (see below) always takes precedence.
("sw_action", u16),
# bits 0- 1 rw manual pwm-a action
# bit 2 -x trigger manual pwm-a action
#
# bits 3- 4 rw manual pwm-b action
# bit 5 -x trigger manual pwm-b action
#
# bits 6- 7 rw load sw_force on: 0 zero 1 max 2 both 3 write
("sw_force", u16),
# bits 0- 1 rw force pwm-a: 0 no 1 low 2 high
# bits 2- 3 rw force pwm-b: 0 no 1 low 2 high
#-------- dead-band ------------------------------------------------
#
# Optionally replaces pwm-a by rising edge delayed (red) signal.
# Optionally replaces pwm-b by falling edge delayed (fed) signal.
#
# Both edge delay units can use either pwm-a or pwm-b as input, and
# optionally have output inverted.
("db_config", u16),
# bit 0 rw replace pwm-b by fed output
# bit 1 rw replace pwm-a by red output
# bit 2 rw invert red output
# bit 3 rw invert fed output
# bit 4 rw red input: 0 pwm-a 1 pwm-b
# bit 5 rw fed input: 0 pwm-a 1 pwm-b
("db_rise_delay", u16),
("db_fall_delay", u16),
# bits 0- 9 rw rising/falling edge delay (time base clocks)
#-------- trip-zone ------------------------------------------------
("tz_enabled", u16),
# bits 0- 7 rw enable tripzone 0-7 inputs for auto-reset trip
# bits 8-15 rw enable tripzone 0-7 inputs for manual-reset trip
("", u16),
("tz_config", u16),
# bits 0- 1 rw on trip force pwm-a: 0 hZ 1 high 2 low 3 no
# bits 2- 3 rw on trip force pwm-b: 0 hZ 1 high 2 low 3 no
("tz_irq", EIrq),
# bit 1 auto-reset trip
# bit 2 manual-reset trip
#-------- irq output -----------------------------------------------
("irq", Irq),
#-------- chopper --------------------------------------------------
("chopper", u16),
# bit 0 rw chopper enabled
# bits 1- 4 rw initial pulse width (cycles / 8 - 1)
# bits 5- 7 rw chopper period - 1
# bits 8-10 rw duty cycle (0..6 = 1/8..7/8)
("", u8 * (0x5c - 0x3e)),
#-------- identification -------------------------------------------
#
# added in Freon/Primus?
("ident", u32), #r- 4'4d1'09'03 (v1.3.1)
]
@property
def divider( self ):
cfg = self.config
fine = ( cfg >> 7 ) & 7
coarse = ( cfg >> 10 ) & 7
if fine:
return fine << ( 1 + coarse )
else:
return 1 << coarse
@divider.setter
def divider( self, value ):
if value not in range( 1, 1793 ):
raise ValueError( "Invalid ePWM divider" )
cfg = self.config & ~0x1f80
if value != 1:
coarse = ( value & -value ).bit_length() - 1 # count trailing zeros
coarse = min( coarse, 8 )
fine = value >> coarse
if coarse == 0 or fine > 7:
raise ValueError( "ePWM divider %d (== %d << %d) not supported" % ( value, fine, coarse ) )
cfg |= fine << 7
cfg |= ( coarse - 1 ) << 10
self.config = cfg
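    # Example encoding (derived from the getter/setter above): divider = 12 is
    # stored as fine = 3 with a coarse field of 1, since 12 == 3 << (1 + 1);
    # divider = 1 clears both fields.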
def stop( self ):
self.ld_compare[:] = ( 0, 0 )
if ( self.config & 3 ) != 3:
while self.cmp_load >> 8:
pass
self.config |= 3
def initialize( self, period, divider=1 ):
if period not in range( 2, 65536 ):
raise ValueError( "Invalid ePWM period" )
self.stop()
self.pwm_a_config = 2 << 0 | 1 << 4 # set on counter zero, clear on compare A
self.pwm_b_config = 2 << 0 | 1 << 8 # set on counter zero, clear on compare B
self.cmp_load = 1 << 0 | 1 << 2 # reload cmp when counter reaches max
self.config |= 1 << 15
self.ld_maximum = period - 1
# set period immediately and reset counter
self.config |= 1 << 3
self.counter = 0
self.ld_maximum = period - 1
self.config &= ~( 1 << 3 )
self.config &= ~3 # start counter
add_field( EPwm, 'ld_compare_a', EPwm.ld_compare.offset + 0, ctr_t )
add_field( EPwm, 'ld_compare_b', EPwm.ld_compare.offset + 2, ctr_t )
add_field( EPwm, 'pwm_a_config', EPwm.pwm_config.offset + 0, u16 )
add_field( EPwm, 'pwm_b_config', EPwm.pwm_config.offset + 2, u16 )
assert ctypes.sizeof(EPwm) == 0x60
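# Usage sketch (illustrative only): assuming 'pwm' is an EPwm instance mapped
# over the peripheral's registers (e.g. via a uio device), a 50% duty cycle on
# output A with a 1000-tick period could be set up roughly like this:
#
#   pwm.divider = 1          # time base clock = module clock
#   pwm.initialize( 1000 )   # period of 1000 time base clocks
#   pwm.ld_compare_a = 500   # output A: set at zero, clear at compare A
#
# How the structure gets mapped onto the device's memory depends on the
# surrounding uio framework and is not shown here.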
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
schema, Unicode)
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from nova.db.sqlalchemy import types
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.TimestampMixin,
models.ModelBase):
metadata = None
def __copy__(self):
"""Implement a safe copy.copy().
SQLAlchemy-mapped objects travel with an object
called an InstanceState, which is pegged to that object
specifically and tracks everything about that object. It's
critical within all attribute operations, including gets
and deferred loading. This object definitely cannot be
shared among two instances, and must be handled.
The copy routine here makes use of session.merge() which
already essentially implements a "copy" style of operation,
which produces a new instance with a new InstanceState and copies
all the data along mapped attributes without using any SQL.
The mode we are using here has the caveat that the given object
must be "clean", e.g. that it has no database-loaded state
that has been updated and not flushed. This is a good thing,
as creating a copy of an object including non-flushed, pending
database state is probably not a good idea; neither represents
what the actual row looks like, and only one should be flushed.
"""
session = orm.Session()
copy = session.merge(self, load=False)
session.expunge(copy)
return copy
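    # Usage note (illustrative): copy.copy() on any NovaBase-derived model
    # instance ends up here, e.g.
    #   import copy
    #   detached_clone = copy.copy(service_obj)
    # producing a detached clone with its own InstanceState and no SQL issued.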
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
last_seen_up = Column(DateTime, nullable=True)
forced_down = Column(Boolean, default=False)
version = Column(Integer, default=0)
class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = (
schema.UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name="uniq_compute_nodes0host0hypervisor_hostname0deleted"),
)
id = Column(Integer, primary_key=True)
service_id = Column(Integer, nullable=True)
    # FIXME(sbauza): Host field is nullable because some old Juno compute nodes
# can still report stats from an old ResourceTracker without setting this
# field.
# This field has to be set non-nullable in a later cycle (probably Lxxx)
# once we are sure that all compute nodes in production report it.
host = Column(String(255), nullable=True)
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
metrics = Column(Text)
# Note(yongli): json string PCI Stats
# '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
# data about additional resources.
extra_resources = Column(Text)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
# json-encoded dict that contains NUMA topology as generated by
    # objects.NUMATopology._to_json()
numa_topology = Column(Text)
# allocation ratios provided by the RT
ram_allocation_ratio = Column(Float, nullable=True)
cpu_allocation_ratio = Column(Float, nullable=True)
class Certificate(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a x509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
schema.UniqueConstraint('uuid', name='uniq_instances0uuid'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(orm.object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
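    # Illustrative examples (the template values are assumptions, not defaults
    # asserted by this module): with instance_name_template = 'instance-%08x'
    # the name is formatted directly from the integer id, while a template such
    # as 'instance-%(uuid)s' raises TypeError above and falls through to the
    # dict-based formatting.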
@property
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
ephemeral_key_uuid = Column(String(36))
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
# To identify the "ComputeNode" which the instance resides in.
# This equals to ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
# NOTE(sbiswas7): 'scheduled_at' is still in the database
# and can be removed in the future release.
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
# This always refers to the availability_zone kwarg passed in /servers and
# provided as an API option, not at all related to the host AZ the instance
# belongs to.
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36), nullable=False)
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance,
backref=orm.backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceExtra(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'instance_extra'
__table_args__ = (
Index('instance_extra_idx', 'instance_uuid'),)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
numa_topology = orm.deferred(Column(Text))
pci_requests = orm.deferred(Column(Text))
flavor = orm.deferred(Column(Text))
vcpu_model = orm.deferred(Column(Text))
migration_context = orm.deferred(Column(Text))
instance = orm.relationship(Instance,
backref=orm.backref('extra',
uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = "instance_types"
__table_args__ = (
schema.UniqueConstraint("flavorid", "deleted",
name="uniq_instance_types0flavorid0deleted"),
schema.UniqueConstraint("name", "deleted",
name="uniq_instance_types0name0deleted")
)
# Internal only primary key/id
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# Public facing id will be renamed public_id
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
class Quota(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource", "deleted",
name="uniq_quotas0project_id0resource0deleted"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
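# Illustrative sketch only (not part of the schema): the fallback described in
# the Quota docstring above could be resolved roughly like this, assuming
# hypothetical dicts keyed by (project_id, resource) and by resource:
#
#     def resolve_limit(project_overrides, class_defaults, deployment_default,
#                       project_id, resource):
#         if (project_id, resource) in project_overrides:
#             return project_overrides[(project_id, resource)]
#         if resource in class_defaults:
#             return class_defaults[resource]
#         return deployment_default  # None means the resource is unlimited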
class ProjectUserQuota(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
name=uniq_name),
Index('project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
Index('project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('ix_quota_classes_class_name', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('ix_quota_usages_project_id', 'project_id'),
Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
class Reservation(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
__table_args__ = ()
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36), nullable=False)
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
scheduled_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
__table_args__ = (
Index('snapshot_id', 'snapshot_id'),
Index('volume_id', 'volume_id'),
Index('block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance,
backref=orm.backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'0)')
source_type = Column(String(255))
destination_type = Column(String(255))
guest_format = Column(String(255))
device_type = Column(String(255))
disk_bus = Column(String(255))
boot_index = Column(Integer)
device_name = Column(String(255))
    # default=False for compatibility with existing code.
    # With the EC2 API, the default is True for an AMI-specified device and
    # False for devices created in other ways.
# TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
volume_id = Column(String(36))
volume_size = Column(Integer)
image_id = Column(String(36))
    # Set no_device to suppress the device at this mapping.
no_device = Column(Boolean)
connection_info = Column(MediumText())
class SecurityGroupInstanceAssociation(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
Index('security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a security group."""
__tablename__ = 'security_groups'
__table_args__ = (
schema.UniqueConstraint('project_id', 'name', 'deleted',
name='uniq_security_groups0project_id0'
'name0deleted'),
)
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = orm.relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
'SecurityGroup.deleted == 0)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == 0)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = orm.relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
protocol = Column(String(255))
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It is the SecurityGroup we
    # are granting access to.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = orm.relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_default_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp" or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name", "deleted",
name="uniq_key_pairs0user_id0name0deleted"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(MediumText())
type = Column(Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
class Migration(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'deleted',
'instance_uuid', 'status'),
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status'),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# nodes are equivalent to a compute node's 'hypervisor_hostname'
source_node = Column(String(255))
dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# TODO(_cerberus_): enum
status = Column(String(255))
migration_type = Column(Enum('migration', 'resize', 'live-migration',
'evacuation'),
nullable=True)
hidden = Column(Boolean, default=False)
instance = orm.relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
class Network(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (
schema.UniqueConstraint("vlan", "deleted",
name="uniq_networks0vlan0deleted"),
Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
Index('networks_host_idx', 'host'),
Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
Index('networks_uuid_project_id_deleted_idx', 'uuid',
'project_id', 'deleted'),
Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
Index('networks_cidr_v6_idx', 'cidr_v6')
)
id = Column(Integer, primary_key=True, nullable=False)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(types.CIDR())
cidr_v6 = Column(types.CIDR())
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
netmask_v6 = Column(types.IPAddress())
netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(types.IPAddress())
broadcast = Column(types.IPAddress())
dns1 = Column(types.IPAddress())
dns2 = Column(types.IPAddress())
vlan = Column(Integer)
vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
vpn_private_address = Column(types.IPAddress())
dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
mtu = Column(Integer)
dhcp_server = Column(types.IPAddress())
enable_dhcp = Column(Boolean, default=True)
share_address = Column(Boolean, default=False)
class VirtualInterface(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_virtual_interfaces0address0deleted"),
Index('virtual_interfaces_network_id_idx', 'network_id'),
Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
Index('virtual_interfaces_uuid_idx', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
address = Column(String(255))
network_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a fixed ip for an instance."""
__tablename__ = 'fixed_ips'
__table_args__ = (
schema.UniqueConstraint(
"address", "deleted", name="uniq_fixed_ips0address0deleted"),
Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
Index('network_id', 'network_id'),
Index('address', 'address'),
Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
Index('fixed_ips_host_idx', 'host'),
Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
'deleted'),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved', 'network_id', 'deleted'),
Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
'allocated'),
Index('fixed_ips_deleted_allocated_updated_at_idx', 'deleted',
'allocated', 'updated_at')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
network_id = Column(Integer)
virtual_interface_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# associated means that a fixed_ip has its instance_id column set
# allocated means that a fixed_ip has its virtual_interface_id column set
# TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
# TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
# TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
network = orm.relationship(Network,
backref=orm.backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
instance = orm.relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
'FixedIp.deleted == 0,'
'Instance.deleted == 0)')
virtual_interface = orm.relationship(VirtualInterface,
backref=orm.backref('fixed_ips'),
foreign_keys=virtual_interface_id,
primaryjoin='and_('
'FixedIp.virtual_interface_id == '
'VirtualInterface.id,'
'FixedIp.deleted == 0,'
'VirtualInterface.deleted == 0)')
class FloatingIp(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a floating ip that dynamically forwards to a fixed ip."""
__tablename__ = 'floating_ips'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_floating_ips0address0deleted"),
Index('fixed_ip_id', 'fixed_ip_id'),
Index('floating_ips_host_idx', 'host'),
Index('floating_ips_project_id_idx', 'project_id'),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
fixed_ip_id = Column(Integer)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
# TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
fixed_ip = orm.relationship(FixedIp,
backref=orm.backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == 0,'
'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
__table_args__ = (
Index('dns_domains_project_id_idx', 'project_id'),
Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
)
deleted = Column(Boolean, default=False)
domain = Column(String(255), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
class ConsolePool(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
__table_args__ = (
schema.UniqueConstraint(
"host", "console_type", "compute_host", "deleted",
name="uniq_console_pools0host0console_type0compute_host0deleted"),
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
__table_args__ = (
Index('consoles_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = orm.relationship(ConsolePool, backref=orm.backref('consoles'))
class InstanceMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
__table_args__ = (
Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
'Instance.uuid,'
'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
__table_args__ = (
Index('instance_uuid', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid)
class InstanceTypeProjects(BASE, NovaBase, models.SoftDeleteMixin):
"""Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
__table_args__ = (schema.UniqueConstraint(
"instance_type_id", "project_id", "deleted",
name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
),
)
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
project_id = Column(String(255))
instance_type = orm.relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
"instance_type_id", "key", "deleted",
name=("uniq_instance_type_extra_specs0"
"instance_type_id0key0deleted")
),
{'mysql_collate': 'utf8_bin'},
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = orm.relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents parent and child cells of this cell. Cells can
have multiple parents and children, so there could be any number
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
__table_args__ = (schema.UniqueConstraint(
"name", "deleted", name="uniq_cells0name0deleted"
),
)
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
api_url = Column(String(255))
transport_url = Column(String(255), nullable=False)
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
class AggregateHost(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id", "deleted",
name="uniq_aggregate_hosts0host0aggregate_id0deleted"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key", "deleted",
name="uniq_aggregate_metadata0aggregate_id0key0deleted"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
_metadata = orm.relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
'Aggregate.deleted == 0)')
@property
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return {m.key: m.value for m in self._metadata}
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
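# NOTE: illustrative only. Given the properties above, an aggregate whose
# metadata rows include the key 'availability_zone' reports that value as
# Aggregate.availability_zone (e.g. {'availability_zone': 'az1'} -> 'az1');
# without that key the property returns None.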
class AgentBuild(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
__table_args__ = (
Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
'architecture'),
schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
)
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
__table_args__ = (
Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
'start_period'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
mac = Column(String(255))
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
last_ctr_in = Column(BigInteger)
last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
instance_uuid = Column(String(36))
project_id = Column(String(36))
user_id = Column(String(64))
availability_zone = Column(String(255))
tot_last_refreshed = Column(DateTime)
tot_reads = Column(BigInteger, default=0)
tot_read_bytes = Column(BigInteger, default=0)
tot_writes = Column(BigInteger, default=0)
tot_write_bytes = Column(BigInteger, default=0)
curr_last_refreshed = Column(DateTime)
curr_reads = Column(BigInteger, default=0)
curr_read_bytes = Column(BigInteger, default=0)
curr_writes = Column(BigInteger, default=0)
curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'instance_faults'
__table_args__ = (
Index('instance_faults_host_idx', 'host'),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at')
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(MediumText())
host = Column(String(255))
class InstanceAction(BASE, NovaBase, models.SoftDeleteMixin):
"""Track client actions on an instance.
The intention is that there will only be one of these per user request. A
lookup by (instance_uuid, request_id) should always return a single result.
"""
__tablename__ = 'instance_actions'
__table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'),
Index('request_id_idx', 'request_id')
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
request_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
message = Column(String(255))
class InstanceActionEvent(BASE, NovaBase, models.SoftDeleteMixin):
"""Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
action_id = Column(Integer, ForeignKey('instance_actions.id'))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
result = Column(String(255))
traceback = Column(Text)
host = Column(String(255))
details = Column(Text)
class InstanceIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
__table_args__ = (
Index('ix_instance_id_mappings_uuid', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase, models.SoftDeleteMixin):
"""Audit log for background periodic tasks."""
__tablename__ = 'task_log'
__table_args__ = (
schema.UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
Index('ix_task_log_period_beginning', 'period_beginning'),
Index('ix_task_log_host', 'host'),
Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
period_beginning = Column(DateTime, default=timeutils.utcnow,
nullable=False)
period_ending = Column(DateTime, default=timeutils.utcnow,
nullable=False)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_id = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroup(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint("uuid", "deleted",
name="uniq_instance_groups0uuid0deleted"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
_members = orm.relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@property
def policies(self):
return [p.policy for p in self._policies]
@property
def members(self):
return [m.instance_id for m in self._members]
class PciDevice(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a PCI host device that can be passed through to instances.
"""
__tablename__ = 'pci_devices'
__table_args__ = (
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
schema.UniqueConstraint(
"compute_node_id", "address", "deleted",
name="uniq_pci_devices0compute_node_id0address0deleted")
)
id = Column(Integer, primary_key=True)
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
# physical address of device domain:bus:slot.func (0000:09:01.1)
address = Column(String(12), nullable=False)
vendor_id = Column(String(4), nullable=False)
product_id = Column(String(4), nullable=False)
dev_type = Column(String(8), nullable=False)
dev_id = Column(String(255))
    # label is an abstract device name used to unify devices that provide the
    # same functionality but have different addresses or hosts.
label = Column(String(255), nullable=False)
status = Column(String(36), nullable=False)
# the request_id is used to identify a device that is allocated for a
# particular request
request_id = Column(String(36), nullable=True)
extra_info = Column(Text)
instance_uuid = Column(String(36))
numa_node = Column(Integer, nullable=True)
instance = orm.relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
'PciDevice.deleted == 0)')
class Tag(BASE, models.ModelBase):
"""Represents the tag for a resource."""
__tablename__ = "tags"
__table_args__ = (
Index('tags_tag_idx', 'tag'),
)
resource_id = Column(String(36), primary_key=True, nullable=False)
tag = Column(Unicode(80), primary_key=True, nullable=False)
instance = orm.relationship(
"Instance",
backref='tags',
primaryjoin='and_(Tag.resource_id == Instance.uuid,'
'Instance.deleted == 0)',
foreign_keys=resource_id
)
|
|
from __future__ import unicode_literals
import base64
import hmac
import time
import uuid
from django.conf import settings
from django.contrib.auth import authenticate
from django.core.exceptions import ImproperlyConfigured
from django.middleware.csrf import _sanitize_token, constant_time_compare
from django.utils.http import same_origin
from django.utils.translation import ugettext as _
from tastypie.http import HttpUnauthorized
from tastypie.compat import get_user_model, get_username_field
try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.sha
try:
import python_digest
except ImportError:
python_digest = None
try:
import oauth2
except ImportError:
oauth2 = None
try:
import oauth_provider
except ImportError:
oauth_provider = None
class Authentication(object):
"""
A simple base class to establish the protocol for auth.
By default, this indicates the user is always authenticated.
"""
def __init__(self, require_active=True):
self.require_active = require_active
def is_authenticated(self, request, **kwargs):
"""
Identifies if the user is authenticated to continue or not.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
return True
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
This implementation returns a combination of IP address and hostname.
"""
return "%s_%s" % (request.META.get('REMOTE_ADDR', 'noaddr'), request.META.get('REMOTE_HOST', 'nohost'))
def check_active(self, user):
"""
Ensures the user has an active account.
Optimized for the ``django.contrib.auth.models.User`` case.
"""
if not self.require_active:
# Ignore & move on.
return True
return user.is_active
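# Illustrative sketch only: an ``Authentication`` subclass is normally wired
# into a resource through its ``Meta`` options, e.g. (``NoteResource`` and
# ``Note`` are hypothetical placeholders):
#
#     class NoteResource(ModelResource):
#         class Meta:
#             queryset = Note.objects.all()
#             authentication = BasicAuthentication()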
class BasicAuthentication(Authentication):
"""
Handles HTTP Basic auth against a specific auth backend if provided,
or against all configured authentication backends using the
``authenticate`` method from ``django.contrib.auth``.
Optional keyword arguments:
``backend``
If specified, use a specific ``django.contrib.auth`` backend instead
of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
setting.
``realm``
The realm to use in the ``HttpUnauthorized`` response. Default:
``django-tastypie``.
"""
def __init__(self, backend=None, realm='django-tastypie', **kwargs):
super(BasicAuthentication, self).__init__(**kwargs)
self.backend = backend
self.realm = realm
def _unauthorized(self):
response = HttpUnauthorized()
# FIXME: Sanitize realm.
response['WWW-Authenticate'] = 'Basic Realm="%s"' % self.realm
return response
def is_authenticated(self, request, **kwargs):
"""
Checks a user's basic auth credentials against the current
Django auth backend.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
if not request.META.get('HTTP_AUTHORIZATION'):
return self._unauthorized()
try:
(auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
if auth_type.lower() != 'basic':
return self._unauthorized()
user_pass = base64.b64decode(data).decode('utf-8')
        except Exception:
return self._unauthorized()
bits = user_pass.split(':', 1)
if len(bits) != 2:
return self._unauthorized()
if self.backend:
user = self.backend.authenticate(username=bits[0], password=bits[1])
else:
user = authenticate(username=bits[0], password=bits[1])
if user is None:
return self._unauthorized()
if not self.check_active(user):
return False
request.user = user
return True
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
        This implementation returns the ``REMOTE_USER`` value provided by the
        web server (rather than the parsed basic auth username), falling back
        to 'nouser'.
"""
return request.META.get('REMOTE_USER', 'nouser')
class ApiKeyAuthentication(Authentication):
"""
Handles API key auth, in which a user provides a username & API key.
Uses the ``ApiKey`` model that ships with tastypie. If you wish to use
a different model, override the ``get_key`` method to perform the key check
as suits your needs.
"""
def _unauthorized(self):
return HttpUnauthorized()
def extract_credentials(self, request):
authorization = request.META.get('HTTP_AUTHORIZATION', '')
if authorization and authorization.lower().startswith('apikey '):
auth_type, data = authorization.split()
username, api_key = data.split(':', 1)
else:
username = request.GET.get('username') or request.POST.get('username')
api_key = request.GET.get('api_key') or request.POST.get('api_key')
return username, api_key
def is_authenticated(self, request, **kwargs):
"""
Finds the user and checks their API key.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
try:
username, api_key = self.extract_credentials(request)
except ValueError:
return self._unauthorized()
if not username or not api_key:
return self._unauthorized()
username_field = get_username_field()
User = get_user_model()
try:
lookup_kwargs = {username_field: username}
user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, User.MultipleObjectsReturned):
return self._unauthorized()
if not self.check_active(user):
return False
key_auth_check = self.get_key(user, api_key)
if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
request.user = user
return key_auth_check
def get_key(self, user, api_key):
"""
Attempts to find the API key for the user. Uses ``ApiKey`` by default
but can be overridden.
"""
from tastypie.models import ApiKey
try:
ApiKey.objects.get(user=user, key=api_key)
except ApiKey.DoesNotExist:
return self._unauthorized()
return True
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
This implementation returns the user's username.
"""
username, api_key = self.extract_credentials(request)
return username or 'nouser'
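# Illustrative only: ``extract_credentials`` above accepts either an
#
#     Authorization: ApiKey <username>:<api_key>
#
# header or ``?username=<username>&api_key=<api_key>`` request parameters.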
class SessionAuthentication(Authentication):
"""
An authentication mechanism that piggy-backs on Django sessions.
This is useful when the API is talking to Javascript on the same site.
Relies on the user being logged in through the standard Django login
setup.
Requires a valid CSRF token.
"""
def is_authenticated(self, request, **kwargs):
"""
Checks to make sure the user is logged in & has a Django session.
"""
# Cargo-culted from Django 1.3/1.4's ``django/middleware/csrf.py``.
# We can't just use what's there, since the return values will be
# wrong.
# We also can't risk accessing ``request.POST``, which will break with
# the serialized bodies.
if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
return request.user.is_authenticated()
if getattr(request, '_dont_enforce_csrf_checks', False):
return request.user.is_authenticated()
csrf_token = _sanitize_token(request.COOKIES.get(settings.CSRF_COOKIE_NAME, ''))
if request.is_secure():
referer = request.META.get('HTTP_REFERER')
if referer is None:
return False
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
return False
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
return False
return request.user.is_authenticated()
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
This implementation returns the user's username.
"""
return getattr(request.user, get_username_field())
class DigestAuthentication(Authentication):
"""
Handles HTTP Digest auth against a specific auth backend if provided,
or against all configured authentication backends using the
``authenticate`` method from ``django.contrib.auth``. However, instead of
the user's password, their API key should be used.
Optional keyword arguments:
``backend``
If specified, use a specific ``django.contrib.auth`` backend instead
of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
setting.
``realm``
The realm to use in the ``HttpUnauthorized`` response. Default:
``django-tastypie``.
"""
def __init__(self, backend=None, realm='django-tastypie', **kwargs):
super(DigestAuthentication, self).__init__(**kwargs)
self.backend = backend
self.realm = realm
if python_digest is None:
raise ImproperlyConfigured("The 'python_digest' package could not be imported. It is required for use with the 'DigestAuthentication' class.")
def _unauthorized(self):
response = HttpUnauthorized()
new_uuid = uuid.uuid4()
opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest()
response['WWW-Authenticate'] = python_digest.build_digest_challenge(
timestamp=time.time(),
secret=getattr(settings, 'SECRET_KEY', ''),
realm=self.realm,
opaque=opaque,
stale=False
)
return response
def is_authenticated(self, request, **kwargs):
"""
Finds the user and checks their API key.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
if not request.META.get('HTTP_AUTHORIZATION'):
return self._unauthorized()
try:
(auth_type, data) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if auth_type.lower() != 'digest':
return self._unauthorized()
        except Exception:
return self._unauthorized()
digest_response = python_digest.parse_digest_credentials(request.META['HTTP_AUTHORIZATION'])
# FIXME: Should the nonce be per-user?
if not python_digest.validate_nonce(digest_response.nonce, getattr(settings, 'SECRET_KEY', '')):
return self._unauthorized()
user = self.get_user(digest_response.username)
api_key = self.get_key(user)
if user is False or api_key is False:
return self._unauthorized()
expected = python_digest.calculate_request_digest(
request.method,
python_digest.calculate_partial_digest(digest_response.username, self.realm, api_key),
digest_response)
if not digest_response.response == expected:
return self._unauthorized()
if not self.check_active(user):
return False
request.user = user
return True
def get_user(self, username):
username_field = get_username_field()
User = get_user_model()
try:
lookup_kwargs = {username_field: username}
user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, User.MultipleObjectsReturned):
return False
return user
def get_key(self, user):
"""
Attempts to find the API key for the user. Uses ``ApiKey`` by default
but can be overridden.
Note that this behaves differently than the ``ApiKeyAuthentication``
method of the same name.
"""
from tastypie.models import ApiKey
try:
key = ApiKey.objects.get(user=user)
except ApiKey.DoesNotExist:
return False
return key.key
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
This implementation returns the user's username.
"""
if hasattr(request, 'user'):
if hasattr(request.user, 'username'):
return request.user.username
return 'nouser'
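# Illustrative only: with ``DigestAuthentication`` the shared secret used to
# compute the digest response is the user's API key rather than their
# password, matching calculate_partial_digest(username, realm, api_key) above.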
class OAuthAuthentication(Authentication):
"""
Handles OAuth, which checks a user's credentials against a separate service.
Currently verifies against OAuth 1.0a services.
This does *NOT* provide OAuth authentication in your API, strictly
consumption.
"""
def __init__(self, **kwargs):
super(OAuthAuthentication, self).__init__(**kwargs)
if oauth2 is None:
raise ImproperlyConfigured("The 'python-oauth2' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")
if oauth_provider is None:
raise ImproperlyConfigured("The 'django-oauth-plus' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")
def is_authenticated(self, request, **kwargs):
from oauth_provider.store import store, InvalidTokenError
if self.is_valid_request(request):
oauth_request = oauth_provider.utils.get_oauth_request(request)
consumer = store.get_consumer(request, oauth_request, oauth_request.get_parameter('oauth_consumer_key'))
try:
token = store.get_access_token(request, oauth_request, consumer, oauth_request.get_parameter('oauth_token'))
except oauth_provider.store.InvalidTokenError:
return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid access token: %s') % oauth_request.get_parameter('oauth_token')))
try:
self.validate_token(request, consumer, token)
except oauth2.Error as e:
return oauth_provider.utils.send_oauth_error(e)
if consumer and token:
if not self.check_active(token.user):
return False
request.user = token.user
return True
return oauth_provider.utils.send_oauth_error(oauth2.Error(_('You are not allowed to access this resource.')))
return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid request parameters.')))
def is_in(self, params):
"""
Checks to ensure that all the OAuth parameter names are in the
provided ``params``.
"""
from oauth_provider.consts import OAUTH_PARAMETERS_NAMES
for param_name in OAUTH_PARAMETERS_NAMES:
if param_name not in params:
return False
return True
def is_valid_request(self, request):
"""
Checks whether the required parameters are either in the HTTP
``Authorization`` header sent by some clients (the preferred method
according to OAuth spec) or fall back to ``GET/POST``.
"""
auth_params = request.META.get("HTTP_AUTHORIZATION", [])
return self.is_in(auth_params) or self.is_in(request.REQUEST)
def validate_token(self, request, consumer, token):
oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)
return oauth_server.verify_request(oauth_request, consumer, token)
class MultiAuthentication(object):
"""
An authentication backend that tries a number of backends in order.
"""
def __init__(self, *backends, **kwargs):
super(MultiAuthentication, self).__init__(**kwargs)
self.backends = backends
def is_authenticated(self, request, **kwargs):
"""
Identifies if the user is authenticated to continue or not.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
unauthorized = False
for backend in self.backends:
check = backend.is_authenticated(request, **kwargs)
if check:
if isinstance(check, HttpUnauthorized):
unauthorized = unauthorized or check
else:
request._authentication_backend = backend
return check
return unauthorized
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
        This implementation delegates to whichever backend successfully
        authenticated the request, falling back to 'nouser'.
"""
try:
return request._authentication_backend.get_identifier(request)
except AttributeError:
return 'nouser'
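# Illustrative sketch only: backends are tried in order and the first one that
# returns a truthy, non-``HttpUnauthorized`` value wins, e.g.
#
#     authentication = MultiAuthentication(SessionAuthentication(),
#                                          ApiKeyAuthentication())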
|
|
# -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
# not original copied from various experimental scripts
# version control history is there
import numpy as np
import scipy.fftpack as fft
from scipy import signal
try:
from scipy.signal._signaltools import _centered as trim_centered
except ImportError:
    # Must be using SciPy < 1.8.0, where this helper still lives in
    # scipy.signal.signaltools (it's not a public SciPy function, but we
    # need it here)
from scipy.signal.signaltools import _centered as trim_centered
from statsmodels.tools.validation import array_like, PandasWrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# do not do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
# previous location in sandbox.tsa.try_var_convolve
def fftconvolveinv(in1, in2, mode="full"):
"""
Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy.signal.signaltools, but here used to try out inverse
filter. does not work or I cannot get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1+s2-1
# Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO does not seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
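# Illustrative only: fftconvolveinv divides the FFT of ``in1`` by the FFT of
# the zero-padded ``in2`` elementwise, i.e. it applies the inverse filter
# 1/in2(L) in the frequency domain; because this is circular, only the padded
# 1-d case mentioned in the docstring behaves like the time-domain recursion.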
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""
Convolve two N-dimensional arrays using FFT. See convolve.
For use with arma (old version: in1=num in2=den in3=data
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
copied from scipy.signal.signaltools, but here used to try out inverse
filter does not work or I cannot get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it does not work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if in2 is not None:
s2 = np.array(in2.shape)
else:
s2 = 0
if in3 is not None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      (in2 is not None and
                       np.issubdtype(in2.dtype, np.complexfloating)) or
                      (in3 is not None and
                       np.issubdtype(in3.dtype, np.complexfloating)))
size = s1+s2-1
# Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    # convolve shorter ones first, not sure if it matters
    # the numerator polynomial defaults to 1 when in2 is not given
    IN1 = np.ones(1) if in2 is None else fft.fftn(in2, fsize)
    if in3 is not None:
        IN1 = IN1 / fft.fftn(in3, fsize)  # use inverse filter
        # note the inverse is elementwise not matrix inverse
        # is this correct, NO does not seem to work for VARMA
    IN1 = IN1 * fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
"""
Autoregressive, or recursive, filtering.
Parameters
----------
x : array_like
Time-series data. Should be 1d or n x 1.
ar_coeff : array_like
AR coefficients in reverse time order. See Notes for details.
init : array_like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
array_like
Filtered array, number of columns determined by x and ar_coeff. If x
        is a pandas object then a Series is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
    where n_coeff = len(ar_coeff).
"""
pw = PandasWrapper(x)
x = array_like(x, 'x')
ar_coeff = array_like(ar_coeff, 'ar_coeff')
if init is not None: # integer init are treated differently in lfiltic
init = array_like(init, 'init')
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
return pw.wrap(result)
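# Example (illustrative, values assumed): with ar_coeff=[0.5] the recursion is
# y[n] = 0.5*y[n-1] + x[n], so
#
#     recursive_filter([1., 1., 1.], [0.5])   # -> [1.0, 1.5, 1.75]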
def convolution_filter(x, filt, nsides=2):
"""
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
    If nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, uses loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
"""
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = int(np.ceil(len(filt)/2.) - 1) or None
trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
pw = PandasWrapper(x)
x = array_like(x, 'x', maxdim=2)
filt = array_like(filt, 'filt', ndim=x.ndim)
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
else: # filt.ndim == 2
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
# could also use np.convolve, but easier for switching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
return pw.wrap(result)
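# Illustrative usage sketch (added for exposition): a centered 3-point moving
# average; the trimmed head and tail are padded with NaN so the output stays
# aligned with the input index.
#
#     >>> import numpy as np
#     >>> convolution_filter(np.arange(5.), np.ones(3) / 3)   # -> [nan, 1., 2., 3., nan]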
# previously located in sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False):
"""
Filter multiple time series into a single time series.
Uses a convolution to merge inputs, and then lfilter to produce output.
Parameters
----------
ar : array_like
The coefficients of autoregressive lag polynomial including lag zero,
ar(L) in the expression ar(L)y_t.
ma : array_like, same ndim as x, currently 2d
The coefficient of the moving average lag polynomial, ma(L) in
ma(L)x_t.
x : array_like
The 2-d input data series, time in rows, variables in columns.
useic : bool
Flag indicating whether to use initial conditions.
Returns
-------
y : ndarray
The filtered output series.
inp : ndarray, 1d
The combined input series.
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter find array y such that:
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs, nvars), ar (narlags,), and
ma (narlags, nvars).
"""
ma = array_like(ma, 'ma')
ar = array_like(ar, 'ar')
inp = signal.correlate(x, ma[::-1, :])[:, (x.shape[1] + 1) // 2]
# for testing 2d equivalence between convolve and correlate
# inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
# np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut off extra values at end
# TODO: initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
zi=signal.lfiltic(np.array([1., 0.]), ar,
useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
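# Illustrative usage sketch (added for exposition): with ar=[1.] and equal MA
# weights the combined input is simply the row sum of x, and no autoregressive
# filtering is applied, so y equals inp.
#
#     >>> import numpy as np
#     >>> x = np.array([[1., 2.], [3., 4.]])
#     >>> y, inp = miso_lfilter([1.], np.array([[1., 1.]]), x)
#     >>> inp   # -> [3., 7.]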
|
|
import datetime
import fileinput
import neovim
import os
import Queue
import subprocess
import sys
import tempfile
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from actions import election_actions
from filters import election_filters
from nominations import election_nominations
from positions import election_positions
class InitKilled(BaseException):
def __init__(self):
pass
class Election():
def __init__(self, main, position, options, election_id):
self.log = open(os.path.expanduser('~/.election_log'), 'w')
self.main = main
self.position = election_positions[position]()
self.nomination = election_nominations[position](main.vim, options, self.log)
self.election_id = election_id
self.candidates = []
filter_name = options.get('filter', 'match')
if election_filters.has_key(filter_name):
self.filter = election_filters[filter_name]()
else:
# Fall back to the default 'match' filter so self.filter is always defined
self.filter = election_filters['match']()
self.ballot_file = None
self.process = None
self.input_queue = Queue.Queue()
if not self.nomination.initialize():
raise InitKilled
self.main.vim.command('enew')
self.main.vim.command('let b:election_id = "' + self.election_id + '"')
self.main.vim.command('set buftype=nofile')
self.main.vim.command('set nonumber')
self.main.vim.command('setlocal statusline=Election:\ ' + position)
self.visibleBallotLength = self.main.vim.eval('winheight(0)') - 1
for ascii_num in range(127):
letter = None
if ascii_num == 8:
letter = '<BS>'
elif ascii_num == 32:
letter = '<Space>'
elif ascii_num == 124:
letter = '<Bar>'
elif (ascii_num >= 33) and (ascii_num <= 126):
letter = chr(ascii_num)
if letter:
self.main.vim.command('nnoremap <silent> <buffer> <nowait> ' + letter + ' :call ElectionAddInput(' + str(ascii_num) + ')<CR>')
for name, value in election_actions.items():
action = value['class'](self.main.vim, name)
action.addMapping()
# Add backup mapping in case we can't access python
self.main.vim.command('nnoremap <buffer> <C-c> :quit<CR>')
self.startup()
def startup(self):
self.input = ''
# Add <NUL> char to input_queue to allow nominateCandidates() to pass on first run.
self.input_queue.put(0)
self.selected_line = 1
self.nominateCandidates()
def compareSorts(self, first, second, sort_index=0):
comparison = 0
if sort_index != len(first) and sort_index != len(second):
diff = first[sort_index] - second[sort_index]
if diff:
comparison = diff
else:
comparison = self.compareSorts(first, second, sort_index + 1)
return comparison
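# Illustrative note (added for exposition): compareSorts performs a
# lexicographic comparison of the candidates' sort keys, e.g.
# compareSorts([1, 2], [1, 3]) recurses past the equal first elements and
# returns -1, so the first candidate sorts earlier.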
def shortenedQuicksort(self, candidates, max_length):
if self.is_sorting and len(candidates) > 1:
pivot_index = len(candidates) / 2
first_candidates = []
last_candidates = []
for index, value in enumerate(candidates):
if index != pivot_index:
if self.compareSorts(value['sort'], candidates[pivot_index]['sort']) < 0:
first_candidates.append(value)
else:
last_candidates.append(value)
self.shortenedQuicksort(first_candidates, max_length)
sorted_length = len(first_candidates)
if sorted_length < max_length:
self.shortenedQuicksort(last_candidates, max_length - sorted_length)
candidates[:] = first_candidates + [candidates[pivot_index]] + last_candidates
def sort(self):
ballot = self.main.vim.current.buffer
candidates = self.position.buildCandidates(ballot[1:], self.filter.getRegExp(self.input))
self.is_sorting = True
self.shortenedQuicksort(candidates, self.visibleBallotLength)
for ii in range(min(self.visibleBallotLength, len(candidates))):
try:
ballot[ii + 1] = candidates[ii]['name']
except:
break
self.candidates = candidates[:self.visibleBallotLength]
def nominateCandidates(self):
if self.updateInput():
self.ballot_file = tempfile.NamedTemporaryFile()
command = self.nomination.getCommand(self.filter.getGlob(self.input))
self.process = subprocess.Popen(command, cwd=self.main.vim.eval('getcwd()'), stdout=self.ballot_file)
self.process.wait()
if self.process.returncode == 0:
self.process = None
self.showBallot()
self.sort()
def moveSelection(self, movement):
self.selected_line += movement
self.drawSelectedLine()
def updateInput(self):
was_updated = False
is_empty = False
while not is_empty:
try:
input_ascii = self.input_queue.get(block=False)
# <NUL>
if input_ascii == 0:
self.input = self.input
# <BS>
elif input_ascii == 8:
self.input = self.input[:-1]
else:
self.input = self.input + chr(input_ascii)
was_updated = True
except Queue.Empty:
is_empty = True
self.main.vim.current.buffer[0] = self.input
return was_updated
def addInput(self, input_ascii):
self.input_queue.put(input_ascii)
def showBallot(self):
self.main.vim.command('%delete')
try:
self.main.vim.command('keepalt read ' + self.ballot_file.name)
except:
pass
self.main.vim.current.buffer[0] = self.input
self.drawSelectedLine()
if self.ballot_file:
self.ballot_file.close()
self.ballot_file = None
def drawSelectedLine(self):
self.main.vim.command('match CursorLine /\%%%dl/' % (self.selected_line + 1))
def getSelectedCandidate(self):
return self.candidates[self.selected_line - 1]
def stopProcess(self):
self.is_sorting = False
if self.process:
self.process.poll()
if self.process.returncode is None:
self.process.kill()
self.process = None
if self.ballot_file:
try:
self.showBallot()
except:
pass
def close(self):
self.log.close()
self.main.vim.command('match none')
self.stopProcess()
class ElectionBuilder:
def __init__(self, main, position, options):
self.error_message = None
if not election_positions.has_key(position):
self.error_message = 'Position \'' + position + '\' does not exist.'
if not election_nominations.has_key(position):
self.error_message = 'Position \'' + position + '\' does not have a nomination.'
self.main = main
self.position = position
self.options = options
def isValid(self):
return self.error_message is None
def buildElection(self):
election_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
try:
self.main.elections[election_id] = Election(self.main, self.position, self.options, election_id)
except InitKilled:
pass
def printErrorMessage(self):
self.main.vim.command('echomsg "ERROR: ' + self.error_message + '"')
@neovim.plugin
class Main(object):
def __init__(self, vim):
self.vim = vim
self.elections = {}
def getElectionId(self):
return self.vim.eval('get(b:, "election_id", "")')
@neovim.function('ElectionStart')
def electionStart(self, args):
position = args[0]
options = args[1]
builder = ElectionBuilder(self, position, options)
if builder.isValid():
builder.buildElection()
else:
builder.printErrorMessage()
@neovim.function('ElectionAddInput')
def electionAddInput(self, args):
input_ascii = args[0]
election = self.elections.get(self.getElectionId(), None)
if election:
election.stopProcess()
election.addInput(input_ascii)
election.nominateCandidates()
@neovim.function('ElectionAct')
def electionAct(self, args):
action_name = args[0]
election = self.elections.get(self.getElectionId(), None)
if election and election_actions.has_key(action_name):
action = election_actions[action_name]['class'](self.vim, action_name)
action.run(election)
|
|
from django.db.models.sql import compiler
from datetime import datetime
REV_ODIR = {
'ASC': 'DESC',
'DESC': 'ASC'
}
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
class SQLCompiler(compiler.SQLCompiler):
def resolve_columns(self, row, fields=()):
index_start = len(self.query.extra_select.keys())
values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.query.convert_values(value, field, connection=self.connection))
return tuple(values)
def modify_query(self, strategy, ordering, out_cols):
"""
Helper method, called from _as_sql()
Sets the value of the self._ord and self.default_reverse_ordering
attributes.
Can modify the values of the out_cols list argument and the
self.query.ordering_aliases attribute.
"""
self.default_reverse_ordering = False
self._ord = []
cnt = 0
extra_select_aliases = [k.strip('[]') for k in self.query.extra_select.keys()]
for ord_spec_item in ordering:
if ord_spec_item.endswith(' ASC') or ord_spec_item.endswith(' DESC'):
parts = ord_spec_item.split()
col, odir = ' '.join(parts[:-1]), parts[-1]
if col not in self.query.ordering_aliases and col.strip('[]') not in extra_select_aliases:
if col.isdigit():
cnt += 1
n = int(col)-1
alias = 'OrdAlias%d' % cnt
out_cols[n] = '%s AS [%s]' % (out_cols[n], alias)
self._ord.append((alias, odir))
elif col in out_cols:
if strategy == USE_TOP_HMARK:
cnt += 1
n = out_cols.index(col)
alias = 'OrdAlias%d' % cnt
out_cols[n] = '%s AS %s' % (col, alias)
self._ord.append((alias, odir))
else:
self._ord.append((col, odir))
elif strategy == USE_TOP_HMARK:
# Special case: '_order' column created by Django
# when Meta.order_with_respect_to is used
if col.split('.')[-1] == '[_order]' and odir == 'DESC':
self.default_reverse_ordering = True
cnt += 1
alias = 'OrdAlias%d' % cnt
self._ord.append((alias, odir))
self.query.ordering_aliases.append('%s AS [%s]' % (col, alias))
else:
self._ord.append((col, odir))
else:
self._ord.append((col, odir))
if strategy == USE_ROW_NUMBER and not self._ord and 'RAND()' in ordering:
self._ord.append(('RAND()',''))
if strategy == USE_TOP_HMARK and not self._ord:
# XXX:
#meta = self.get_meta()
meta = self.query.model._meta
qn = self.quote_name_unless_alias
pk_col = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
if pk_col not in out_cols:
out_cols.append(pk_col)
def _as_sql(self, strategy):
"""
Helper method, called from as_sql()
Similar to django/db/models/sql/query.py:Query.as_sql() but without
the ordering and limits code.
Returns SQL that hasn't an order-by clause.
"""
# get_columns needs to be called before get_ordering to populate
# _select_alias.
out_cols = self.get_columns(True)
ordering, ordering_group_by = self.get_ordering()
if strategy == USE_ROW_NUMBER:
if not ordering:
meta = self.query.get_meta()
qn = self.quote_name_unless_alias
# Special case: pk not in out_cols, use random ordering.
#
if '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column)) not in self.get_columns():
ordering = ['RAND()']
# XXX: Maybe use group_by field for ordering?
#if self.group_by:
#ordering = ['%s.%s ASC' % (qn(self.group_by[0][0]),qn(self.group_by[0][1]))]
else:
ordering = ['%s.%s ASC' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))]
if strategy in (USE_TOP_HMARK, USE_ROW_NUMBER):
self.modify_query(strategy, ordering, out_cols)
if strategy == USE_ROW_NUMBER:
ord = ', '.join(['%s %s' % pair for pair in self._ord])
self.query.ordering_aliases.append('(ROW_NUMBER() OVER (ORDER BY %s)) AS [rn]' % ord)
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn, self.connection)
having, h_params = self.query.having.as_sql(qn, self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
if strategy == USE_TOP_LMARK:
# XXX:
#meta = self.get_meta()
meta = self.query.model._meta
result.append('TOP %s %s' % (self.query.low_mark, self.quote_name_unless_alias(meta.pk.db_column or meta.pk.column)))
else:
if strategy == USE_TOP_HMARK and self.query.high_mark is not None:
result.append('TOP %s' % self.query.high_mark)
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
# ordering clause needs to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
return ' '.join(result), tuple(params)
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# The do_offset flag indicates whether we need to construct
# the SQL needed to use limit/offset w/SQL Server.
do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
# If no offsets, just return the result of the base class
# `as_sql`.
if not do_offset:
return super(SQLCompiler, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
# Shortcut for the corner case when high_mark value is 0:
if self.query.high_mark == 0:
return "", ()
self.pre_sql_setup()
# XXX:
#meta = self.get_meta()
meta = self.query.model._meta
qn = self.quote_name_unless_alias
fallback_ordering = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
# SQL Server 2000, offset+limit case
if self.connection.ops.sql_server_ver < 2005 and self.query.high_mark is not None:
orig_sql, params = self._as_sql(USE_TOP_HMARK)
if self._ord:
ord = ', '.join(['%s %s' % pair for pair in self._ord])
rev_ord = ', '.join(['%s %s' % (col, REV_ODIR[odir]) for col, odir in self._ord])
else:
if not self.default_reverse_ordering:
ord = '%s ASC' % fallback_ordering
rev_ord = '%s DESC' % fallback_ordering
else:
ord = '%s DESC' % fallback_ordering
rev_ord = '%s ASC' % fallback_ordering
sql = SQL_SERVER_8_LIMIT_QUERY % {
'limit': self.query.high_mark - self.query.low_mark,
'orig_sql': orig_sql,
'ord': ord,
'rev_ord': rev_ord,
# XXX:
'table': qn(meta.db_table),
}
return sql, params
# SQL Server 2005
if self.connection.ops.sql_server_ver >= 2005:
sql, params = self._as_sql(USE_ROW_NUMBER)
# Construct the final SQL clause, using the initial select SQL
# obtained above.
result = ['SELECT * FROM (%s) AS X' % sql]
# Place WHERE condition on `rn` for the desired range.
if self.query.high_mark is None:
self.query.high_mark = 9223372036854775807
result.append('WHERE X.rn BETWEEN %d AND %d' % (self.query.low_mark+1, self.query.high_mark))
return ' '.join(result), params
# SQL Server 2000, offset without limit case
# get_columns needs to be called before get_ordering to populate
# select_alias.
self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
if ordering:
ord = ', '.join(ordering)
else:
# We need to define an ordering clause since none was provided
ord = fallback_ordering
orig_sql, params = self._as_sql(USE_TOP_LMARK)
sql = SQL_SERVER_8_NO_LIMIT_QUERY % {
'orig_sql': orig_sql,
'ord': ord,
'table': qn(meta.db_table),
'key': qn(meta.pk.db_column or meta.pk.column),
}
return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
def as_sql_legacy(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
if returns_id:
result.append(';\nSELECT SCOPE_IDENTITY()')
params = self.query.params
sql = ' '.join(result)
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
if auto_field_column in self.query.columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
if returns_id:
sql = "SET NOCOUNT ON"
else:
sql = ""
if len(self.query.columns) == 1 and not params:
sql += "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql += "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
if returns_id:
sql += '\n;SELECT SCOPE_IDENTITY()'
return sql, params
def as_sql(self):
if self.connection._DJANGO_VERSION < 14:
return self.as_sql_legacy()
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
returns_id = bool(self.return_id and
self.connection.features.can_return_id_from_insert)
if returns_id:
result = ['SET NOCOUNT ON']
else:
result = []
result.append('INSERT INTO %s' % qn(opts.db_table))
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
columns = [f.column for f in fields]
result.append('(%s)' % ', '.join([qn(c) for c in columns]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
if returns_id:
params = params[0]
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
result.append('\n;SELECT SCOPE_IDENTITY()')
return [(" ".join(result), tuple(params))]
items = [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
# This section deals with specifically setting the primary key,
# or using default values if necessary
meta = self.query.get_meta()
if meta.has_auto_field:
# db_column is None if not explicitly specified by model field
auto_field_column = meta.auto_field.db_column or meta.auto_field.column
out = []
for item in items:
sql, params = item
if auto_field_column in columns:
quoted_table = self.connection.ops.quote_name(meta.db_table)
# If there are no fields specified in the insert..
if not has_fields:
sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql = "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
(quoted_table, sql, quoted_table)
out.append([sql, params])
items = out
return items
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
pass
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import functools
import mako
import mimetypes
import os
import posixpath
import six
import girder.events
from girder import constants, logprint, __version__, logStdoutStderr
from girder.utility import plugin_utilities, model_importer, config
from . import webroot
with open(os.path.join(os.path.dirname(__file__), 'error.mako')) as f:
_errorTemplate = f.read()
def _errorDefault(status, message, *args, **kwargs):
"""
This is used to render error pages outside of the normal Girder app, such as
404's. This overrides the default cherrypy error pages.
"""
return mako.template.Template(_errorTemplate).render(status=status, message=message)
def _configureStaticRoutes(webroot, plugins, event=None):
"""
Configures static routes for a given webroot.
This function is also run when the route table setting is modified
to allow for dynamically changing static routes at runtime.
"""
# This was triggered by some unrelated setting changing
if event is not None and event.info['key'] != constants.SettingKey.ROUTE_TABLE:
return
routeTable = loadRouteTable()
# If the static route is a URL, leave it alone
if '://' in routeTable[constants.GIRDER_STATIC_ROUTE_ID]:
apiStaticRoot = routeTable[constants.GIRDER_STATIC_ROUTE_ID]
staticRoot = routeTable[constants.GIRDER_STATIC_ROUTE_ID]
else:
# Make the staticRoot relative to the api_root, if possible. The api_root
# could be relative or absolute, but it needs to be in an absolute form for
# relpath to behave as expected. We always expect the api_root to
# contain at least two components, but the reference from static needs to
# be from only the first component.
apiRootBase = posixpath.split(posixpath.join('/',
config.getConfig()['server']['api_root']))[0]
apiStaticRoot = posixpath.relpath(routeTable[constants.GIRDER_STATIC_ROUTE_ID],
apiRootBase)
staticRoot = posixpath.relpath(routeTable[constants.GIRDER_STATIC_ROUTE_ID],
routeTable[constants.GIRDER_ROUTE_ID])
webroot.updateHtmlVars({
'apiRoot': config.getConfig()['server']['api_root'],
'staticRoot': staticRoot,
'plugins': plugins
})
webroot.api.v1.updateHtmlVars({
'apiRoot': config.getConfig()['server']['api_root'],
'staticRoot': apiStaticRoot
})
def configureServer(test=False, plugins=None, curConfig=None):
"""
Function to setup the cherrypy server. It configures it, but does
not actually start it.
:param test: Set to True when running in the tests.
:type test: bool
:param plugins: If you wish to start the server with a custom set of
plugins, pass this as a list of plugins to load. Otherwise,
will use the PLUGINS_ENABLED setting value from the db.
:param curConfig: The configuration dictionary to update.
"""
if curConfig is None:
curConfig = config.getConfig()
appconf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'request.show_tracebacks': test,
'request.methods_with_bodies': ('POST', 'PUT', 'PATCH'),
'response.headers.server': 'Girder %s' % __version__,
'error_page.default': _errorDefault
}
}
# Add MIME types for serving Fontello files from staticdir;
# these may be missing or incorrect in the OS
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
if test:
appconf['/src'] = {
'tools.staticdir.on': True,
'tools.staticdir.root': constants.STATIC_ROOT_DIR,
'tools.staticdir.dir': 'clients/web/src',
}
appconf['/test'] = {
'tools.staticdir.on': True,
'tools.staticdir.root': constants.STATIC_ROOT_DIR,
'tools.staticdir.dir': 'clients/web/test',
}
appconf['/clients'] = {
'tools.staticdir.on': True,
'tools.staticdir.root': constants.STATIC_ROOT_DIR,
'tools.staticdir.dir': 'clients'
}
appconf['/plugins'] = {
'tools.staticdir.on': True,
'tools.staticdir.root': constants.STATIC_ROOT_DIR,
'tools.staticdir.dir': 'plugins',
}
curConfig.update(appconf)
if test:
# Force some config params in testing mode
curConfig.update({'server': {
'mode': 'testing',
'api_root': 'api/v1',
'static_root': 'static',
'api_static_root': '../static',
'cherrypy_server': True
}})
mode = curConfig['server']['mode'].lower()
logprint.info('Running in mode: ' + mode)
cherrypy.config['engine.autoreload.on'] = mode == 'development'
# Don't import this until after the configs have been read; some module
# initialization code requires the configuration to be set up.
from girder.api import api_main
root = webroot.Webroot()
api_main.addApiToNode(root)
cherrypy.engine.subscribe('start', girder.events.daemon.start)
cherrypy.engine.subscribe('stop', girder.events.daemon.stop)
if plugins is None:
settings = model_importer.ModelImporter().model('setting')
plugins = settings.get(constants.SettingKey.PLUGINS_ENABLED, default=())
plugins = list(plugin_utilities.getToposortedPlugins(plugins, ignoreMissing=True))
_configureStaticRoutes(root, plugins)
girder.events.bind('model.setting.save.after', '_updateStaticRoutesIfModified',
functools.partial(_configureStaticRoutes, root, plugins))
root, appconf, _ = plugin_utilities.loadPlugins(
plugins, root, appconf, root.api.v1, buildDag=False)
return root, appconf
def loadRouteTable(reconcileRoutes=False):
"""
Retrieves the route table from Girder and reconciles the state of it with the current
application state.
Reconciliation ensures that every enabled plugin has a route by assigning default routes for
plugins that have none, such as newly-enabled plugins.
:returns: The non-empty routes (as a dict of name -> route) to be mounted by CherryPy
during Girder's setup phase.
"""
pluginWebroots = plugin_utilities.getPluginWebroots()
setting = model_importer.ModelImporter().model('setting')
routeTable = setting.get(constants.SettingKey.ROUTE_TABLE)
def reconcileRouteTable(routeTable):
hasChanged = False
for name in pluginWebroots.keys():
if name not in routeTable:
routeTable[name] = os.path.join('/', name)
hasChanged = True
if hasChanged:
setting.set(constants.SettingKey.ROUTE_TABLE, routeTable)
return routeTable
if reconcileRoutes:
routeTable = reconcileRouteTable(routeTable)
return {name: route for (name, route) in six.viewitems(routeTable) if route}
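# Illustrative sketch (added for exposition, route values are hypothetical): a
# reconciled route table might look like
#     {constants.GIRDER_ROUTE_ID: '/',
#      constants.GIRDER_STATIC_ROUTE_ID: '/static',
#      'my_plugin': '/my_plugin'}
# where 'my_plugin' was assigned a default route because it had none.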
def setup(test=False, plugins=None, curConfig=None):
"""
Configure and mount the Girder server and plugins under the
appropriate routes.
See ROUTE_TABLE setting.
:param test: Whether to start in test mode.
:param plugins: List of plugins to enable.
:param curConfig: The config object to update.
"""
logStdoutStderr()
pluginWebroots = plugin_utilities.getPluginWebroots()
girderWebroot, appconf = configureServer(test, plugins, curConfig)
routeTable = loadRouteTable(reconcileRoutes=True)
# Mount Girder
application = cherrypy.tree.mount(girderWebroot,
str(routeTable[constants.GIRDER_ROUTE_ID]), appconf)
# Mount static files
cherrypy.tree.mount(None, routeTable[constants.GIRDER_STATIC_ROUTE_ID],
{'/':
{'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(constants.STATIC_ROOT_DIR,
'clients/web/static'),
'request.show_tracebacks': appconf['/']['request.show_tracebacks'],
'response.headers.server': 'Girder %s' % __version__,
'error_page.default': _errorDefault}})
# Mount API (special case)
# The API is always mounted at /api AND at api relative to the Girder root
cherrypy.tree.mount(girderWebroot.api, '/api', appconf)
# Mount everything else in the routeTable
for (name, route) in six.viewitems(routeTable):
if name != constants.GIRDER_ROUTE_ID and name in pluginWebroots:
cherrypy.tree.mount(pluginWebroots[name], route, appconf)
if test:
application.merge({'server': {'mode': 'testing'}})
return application
class _StaticFileRoute(object):
exposed = True
def __init__(self, path, contentType=None):
self.path = os.path.abspath(path)
self.contentType = contentType
def GET(self):
return cherrypy.lib.static.serve_file(self.path,
content_type=self.contentType)
def staticFile(path, contentType=None):
"""
Helper function to serve a static file. This should be bound as the route
object, i.e. info['serverRoot'].route_name = staticFile('...')
:param path: The path of the static file to serve from this route.
:type path: str
:param contentType: The MIME type of the static file. If set to None, the
content type will be guessed by the file extension of
the 'path' argument.
"""
return _StaticFileRoute(path, contentType)
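# Illustrative usage sketch (added for exposition, the path and attribute name
# are hypothetical): a plugin's load() hook could expose a static HTML page via
#     info['serverRoot'].my_page = staticFile('plugins/my_plugin/page.html',
#                                             'text/html')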
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IPv4 Whois data collection and analysis tool
Usage:
./whois.py collect <elastic_search_url> <index_name> <doc_name>
[--sleep_min=<n>] [--sleep_max=<n>] [--threads=<n>]
./whois.py stats <elastic_search_url> <index_name>
./whois.py test
./whois.py (-h | --help)
Options:
-h, --help Show this screen and exit.
--sleep_min=<n> Least number of seconds to sleep for [Default: 1]
--sleep_max=<n> Most number of seconds to sleep for [Default: 5]
--threads=<n> Number of threads [Default: 8]
Examples:
./whois.py collect http://127.0.0.1:9200/ netblocks netblock
./whois.py stats http://127.0.0.1:9200/ netblocks
License:
The MIT License (MIT)
Copyright (c) 2014 Mark Litwintschik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
from random import randint
import socket
import struct
import sys
from docopt import docopt
import ipcalc
from ipwhois import IPWhois
import gevent
from pyelasticsearch import ElasticSearch
from pyelasticsearch.exceptions import \
ElasticHttpError, ElasticHttpNotFoundError
import requests
def ip2long(ip):
"""
Convert IPv4 address in string format into an integer
:param str ip: ipv4 address
:return: ipv4 address
:rtype: integer
"""
packed_ip = socket.inet_aton(ip)
return struct.unpack("!L", packed_ip)[0]
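# Illustrative doctest-style examples (added for exposition):
#     >>> ip2long('1.0.0.0')
#     16777216
#     >>> ip2long('255.255.255.255')
#     4294967295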
def get_next_ip(ip_address):
"""
:param str ip_address: ipv4 address
:return: next ipv4 address
:rtype: str
>>> get_next_ip('0.0.0.0')
'0.0.0.1'
>>> get_next_ip('24.24.24.24')
'24.24.24.25'
>>> get_next_ip('24.24.255.255')
'24.25.0.0'
>>> get_next_ip('255.255.255.255') is None
True
"""
assert ip_address.count('.') == 3, \
'Must be an IPv4 address in str representation'
if ip_address == '255.255.255.255':
return None
try:
return socket.inet_ntoa(struct.pack('!L', ip2long(ip_address) + 1))
except Exception, error:
print 'Unable to get next IP for %s' % ip_address
raise error
def get_netrange_end(asn_cidr):
"""
:param str asn_cidr: ASN CIDR
:return: ipv4 address of last IP in netrange
:rtype: str
"""
try:
last_in_netrange = \
ip2long(str(ipcalc.Network(asn_cidr).host_first())) + \
ipcalc.Network(asn_cidr).size() - 2
except ValueError, error:
print 'Issue calculating size of %s network' % asn_cidr
raise error
return socket.inet_ntoa(struct.pack('!L', last_in_netrange))
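# Illustrative example (added for exposition): for a /24 the last address in
# the netrange is the broadcast address, e.g.
#     >>> get_netrange_end('24.24.24.0/24')
#     '24.24.24.255'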
def get_next_undefined_address(ip):
"""
Get the next non-private IPv4 address if the address sent is private
:param str ip: IPv4 address
:return: ipv4 address of next non-private address
:rtype: str
>>> get_next_undefined_address('0.0.0.0')
'1.0.0.0'
>>> get_next_undefined_address('24.24.24.24')
'24.24.24.24'
>>> get_next_undefined_address('127.0.0.1')
'128.0.0.0'
>>> get_next_undefined_address('255.255.255.256') is None
True
"""
try:
# Should weed out many invalid IP addresses
ipcalc.Network(ip)
except ValueError, error:
return None
defined_networks = (
'0.0.0.0/8',
'10.0.0.0/8',
'127.0.0.0/8',
'169.254.0.0/16',
'192.0.0.0/24',
'192.0.2.0/24',
'192.88.99.0/24',
'192.168.0.0/16',
'198.18.0.0/15',
'198.51.100.0/24',
'203.0.113.0/24',
'224.0.0.0/4',
'240.0.0.0/4',
'255.255.255.255/32',
)
for network_cidr in defined_networks:
if ip in ipcalc.Network(network_cidr):
return get_next_ip(get_netrange_end(network_cidr))
return ip
def break_up_ipv4_address_space(num_threads=8):
"""
>>> break_up_ipv4_address_space() == \
[('0.0.0.0', '31.255.255.255'), ('32.0.0.0', '63.255.255.255'),\
('64.0.0.0', '95.255.255.255'), ('96.0.0.0', '127.255.255.255'),\
('128.0.0.0', '159.255.255.255'), ('160.0.0.0', '191.255.255.255'),\
('192.0.0.0', '223.255.255.255'), ('224.0.0.0', '255.255.255.255')]
True
"""
ranges = []
multiplier = 256 / num_threads
for marker in range(0, num_threads):
starting_class_a = (marker * multiplier)
ending_class_a = ((marker + 1) * multiplier) - 1
ranges.append(('%d.0.0.0' % starting_class_a,
'%d.255.255.255' % ending_class_a))
return ranges
def get_netranges(starting_ip='1.0.0.0',
last_ip='2.0.0.0',
elastic_search_url='http://127.0.0.1:9200/',
index_name='netblocks',
doc_name='netblock', sleep_min=1, sleep_max=5):
connection = ElasticSearch(elastic_search_url)
current_ip = starting_ip
while True:
# See if we've finished the range of work
if ip2long(current_ip) > ip2long(last_ip):
return
current_ip = get_next_undefined_address(current_ip)
if current_ip is None: # No more undefined ip addresses
return
print current_ip
try:
whois_resp = IPWhois(current_ip).lookup_rws()
except Exception as error:
"""
If a message like: 'STDERR: getaddrinfo(whois.apnic.net): Name or
service not known' appears' then print it out and try the next
IP address.
"""
print type(error), error
current_ip = get_next_ip(current_ip)
if current_ip is None:
return # No more undefined ip addresses
gevent.sleep(randint(sleep_min, sleep_max))
continue
if 'asn_cidr' in whois_resp and \
whois_resp['asn_cidr'] is not None and \
whois_resp['asn_cidr'].count('.') == 3:
last_netrange_ip = get_netrange_end(whois_resp['asn_cidr'])
else:
try:
last_netrange_ip = \
whois_resp['nets'][0]['range'].split('-')[-1].strip()
assert last_netrange_ip.count('.') == 3
except:
# No match found for n + 192.0.1.0.
print 'Missing ASN CIDR in whois resp: %s' % whois_resp
current_ip = get_next_ip(current_ip)
if current_ip is None:
return # No more undefined ip addresses
gevent.sleep(randint(sleep_min, sleep_max))
continue
assert last_netrange_ip is not None and \
last_netrange_ip.count('.') == 3, \
'Unable to find last netrange ip for %s: %s' % (current_ip,
whois_resp)
# Save current_ip and whois_resp
entry = {
'netblock_start': current_ip,
'netblock_end': last_netrange_ip,
'block_size': ip2long(last_netrange_ip) - ip2long(current_ip) + 1,
'whois': json.dumps(whois_resp),
}
keys = ('cidr', 'name', 'handle', 'range', 'description',
'country', 'state', 'city', 'address', 'postal_code',
'abuse_emails', 'tech_emails', 'misc_emails', 'created',
'updated')
for _key in keys:
entry[_key] = str(whois_resp['nets'][0][_key]) \
if _key in whois_resp['nets'][0] and \
whois_resp['nets'][0][_key] else None
if _key == 'city' and entry[_key] and ' ' in entry[_key]:
entry[_key] = entry[_key].replace(' ', '_')
try:
connection.index(index_name, doc_name, entry)
except ElasticHttpError, error:
print 'At %s. Unable to save record: %s' % (current_ip, entry)
raise error
current_ip = get_next_ip(last_netrange_ip)
if current_ip is None:
return # No more undefined ip addresses
gevent.sleep(randint(sleep_min, sleep_max))
def stats(elastic_search_url, index_name, doc_name):
fields = ('country', 'city')
url = '%s/%s/_search?fields=aggregations' % (elastic_search_url, index_name)
for field in fields:
data = {
"aggs": {
field: {
"terms": {
"field": field,
"order": {"total_ips": "desc"}
},
"aggs": {
"total_ips": {"sum": {"field": "block_size"}}
}
}
}
}
resp = requests.get(url, data=json.dumps(data))
assert resp.status_code == 200, \
'Did not get HTTP 200 back: %s' % resp.status_code
_stats = json.loads(resp.content)["aggregations"][field]["buckets"]
_stats = {stat['key']: int(stat['total_ips']['value'])
for stat in _stats}
print 'Top 10 netblock locations by %s' % field
for _key in sorted(_stats, key=_stats.get, reverse=True):
print "{:14,d}".format(_stats[_key]), _key.replace('_', ' ')
print
def main(argv):
"""
:param dict argv: command line arguments
"""
opt = docopt(__doc__, argv)
if opt['collect']:
sleep_min = int(opt['--sleep_min']) \
if opt['--sleep_min'] is not None else randint(1, 5)
sleep_max = int(opt['--sleep_max']) \
if opt['--sleep_max'] is not None else randint(1, 5)
num_threads = int(opt['--threads'])
if sleep_min > sleep_max:
sleep_min, sleep_max = sleep_max, sleep_min
threads = [gevent.spawn(get_netranges, starting_ip, ending_ip,
opt['<elastic_search_url>'], opt['<index_name>'],
opt['<doc_name>'], sleep_min, sleep_max)
for starting_ip, ending_ip in
break_up_ipv4_address_space(num_threads)]
gevent.joinall(threads)
if opt['stats']:
stats(opt['<elastic_search_url>'],
opt['<index_name>'],
opt['<doc_name>'])
if opt['test']:
import doctest
doctest.testmod()
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt:
pass
|
|
# -*- coding=utf-8 -*-
"""
Shared utility functions which are not specific to any particular module.
"""
from __future__ import absolute_import
import contextlib
import copy
import inspect
import sys
from functools import wraps
import packaging.version
import six
from .environment import MYPY_RUNNING
# format: off
six.add_move(
six.MovedAttribute("Callable", "collections", "collections.abc")
) # type: ignore # noqa
from six.moves import Callable # type: ignore # isort:skip # noqa
# format: on
if MYPY_RUNNING:
from types import ModuleType
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
TShimmedPath = TypeVar("TShimmedPath")
TShimmedPathCollection = TypeVar("TShimmedPathCollection")
TShim = Union[TShimmedPath, TShimmedPathCollection]
TShimmedFunc = Union[TShimmedPath, TShimmedPathCollection, Callable, Type]
STRING_TYPES = (str,)
if sys.version_info < (3, 0):
STRING_TYPES = STRING_TYPES + (unicode,) # noqa:F821
class BaseMethod(Callable):
def __init__(self, func_base, name, *args, **kwargs):
# type: (Callable, str, Any, Any) -> None
self.func = func_base
self.__name__ = self.__qualname__ = name
def __call__(self, *args, **kwargs):
# type: (Any, Any) -> Any
return self.func(*args, **kwargs)
class BaseClassMethod(Callable):
def __init__(self, func_base, name, *args, **kwargs):
# type: (Callable, str, Any, Any) -> None
self.func = func_base
self.__name__ = self.__qualname__ = name
def __call__(self, cls, *args, **kwargs):
# type: (Type, Any, Any) -> Any
return self.func(*args, **kwargs)
def make_method(fn):
# type: (Callable) -> Callable
@wraps(fn)
def method_creator(*args, **kwargs):
# type: (Any, Any) -> Callable
return BaseMethod(fn, *args, **kwargs)
return method_creator
def make_classmethod(fn):
# type: (Callable) -> Callable
@wraps(fn)
def classmethod_creator(*args, **kwargs):
# type: (Any, Any) -> Callable
return classmethod(BaseClassMethod(fn, *args, **kwargs))
return classmethod_creator
def memoize(obj):
# type: (Any) -> Callable
cache = obj.cache = {}
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
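# Illustrative usage sketch (added for exposition): the cache key is the
# stringified args/kwargs, so repeated calls with equal arguments reuse the
# first result.
#
#     >>> @memoize
#     ... def add(a, b):
#     ...     return a + b
#     >>> add(1, 2)   # computed once, then served from add.cache
#     3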
@memoize
def _parse(version):
# type: (str) -> Tuple[int, ...]
if isinstance(version, STRING_TYPES):
return tuple((int(i) for i in version.split(".")))
return version
@memoize
def parse_version(version):
# type: (str) -> packaging.version._BaseVersion
if not isinstance(version, STRING_TYPES):
raise TypeError("Can only derive versions from string, got {0!r}".format(version))
return packaging.version.parse(version)
@memoize
def split_package(module, subimport=None):
# type: (str, Optional[str]) -> Tuple[str, str]
"""
Used to determine what target to import.
Either splits off the final segment or uses the provided sub-import to return a
2-tuple of the import path and the target module or sub-path.
:param str module: A package to import from
:param Optional[str] subimport: A class, function, or subpackage to import
:return: A 2-tuple of the corresponding import package and sub-import path
:rtype: Tuple[str, str]
:Example:
>>> from pip_shims.utils import split_package
>>> split_package("pip._internal.req.req_install", subimport="InstallRequirement")
("pip._internal.req.req_install", "InstallRequirement")
>>> split_package("pip._internal.cli.base_command")
("pip._internal.cli", "base_command")
"""
package = None
if subimport:
package = subimport
else:
module, _, package = module.rpartition(".")
return module, package
def get_method_args(target_method):
# type: (Callable) -> Tuple[Callable, Optional[inspect.Arguments]]
"""
Returns the arguments for a callable.
:param Callable target_method: A callable to retrieve arguments for
:return: A 2-tuple of the original callable and its resulting arguments
:rtype: Tuple[Callable, Optional[inspect.Arguments]]
"""
inspected_args = None
try:
inspected_args = inspect.getargs(target_method.__code__)
except AttributeError:
target_func = getattr(target_method, "__func__", None)
if target_func is not None:
inspected_args = inspect.getargs(target_func.__code__)
else:
target_func = target_method
return target_func, inspected_args
def set_default_kwargs(basecls, method, *args, **default_kwargs):
# type: (Union[Type, ModuleType], Callable, Any, Any) -> Union[Type, ModuleType] # noqa
target_method = getattr(basecls, method, None)
if target_method is None:
return basecls
target_func, inspected_args = get_method_args(target_method)
if inspected_args is not None:
pos_args = inspected_args.args
else:
pos_args = []
# Spit back the base class if we can't find matching arguments
# to put defaults in place of
if not any(arg in pos_args for arg in list(default_kwargs.keys())):
return basecls
prepended_defaults = tuple() # type: Tuple[Any, ...]
# iterate from the function's argument order to make sure we fill this
# out in the correct order
for arg in args:
prepended_defaults += (arg,)
for arg in pos_args:
if arg in default_kwargs:
prepended_defaults = prepended_defaults + (default_kwargs[arg],)
if not prepended_defaults:
return basecls
if six.PY2 and inspect.ismethod(target_method):
new_defaults = prepended_defaults + target_func.__defaults__
target_method.__func__.__defaults__ = new_defaults
else:
new_defaults = prepended_defaults + target_method.__defaults__
target_method.__defaults__ = new_defaults
setattr(basecls, method, target_method)
return basecls
def ensure_function(parent, funcname, func):
# type: (Union[ModuleType, Type, Callable, Any], str, Callable) -> Callable
"""Given a module, a function name, and a function object, attaches the given
function to the module and ensures it is named properly according to the provided
argument
:param Any parent: The parent to attach the function to
:param str funcname: The name to give the function
:param Callable func: The function to rename and attach to **parent**
:returns: The function with its name, qualname, and module set to mirror **funcname** and **parent**
:rtype: Callable
"""
qualname = funcname
if parent is None:
parent = __module__ # type: ignore # noqa:F821
parent_is_module = inspect.ismodule(parent)
parent_is_class = inspect.isclass(parent)
module = None
if parent_is_module:
module = parent.__name__
elif parent_is_class:
qualname = "{0}.{1}".format(parent.__name__, qualname)
module = getattr(parent, "__module__", None)
else:
module = getattr(parent, "__module__", None)
try:
func.__name__ = funcname
except AttributeError:
if getattr(func, "__func__", None) is not None:
func = func.__func__
func.__name__ = funcname
func.__qualname__ = qualname
func.__module__ = module
return func
def add_mixin_to_class(basecls, mixins):
# type: (Type, List[Type]) -> Type
"""
Given a class, adds the provided mixin classes as base classes and gives a new class
:param Type basecls: An initial class to generate a new class from
:param List[Type] mixins: A list of mixins to add as base classes
:return: A new class with the provided mixins as base classes
:rtype: Type[basecls, *mixins]
"""
if not any(mixins):
return basecls
base_dict = basecls.__dict__.copy()
class_tuple = (basecls,) # type: Tuple[Type, ...]
for mixin in mixins:
if not mixin:
continue
mixin_dict = mixin.__dict__.copy()
base_dict.update(mixin_dict)
class_tuple = class_tuple + (mixin,)
base_dict.update(basecls.__dict__)
return type(basecls.__name__, class_tuple, base_dict)
def fallback_is_file_url(link):
# type: (Any) -> bool
return link.url.lower().startswith("file:")
def fallback_is_artifact(self):
# type: (Any) -> bool
return not getattr(self, "is_vcs", False)
def fallback_is_vcs(self):
# type: (Any) -> bool
return not getattr(self, "is_artifact", True)
def resolve_possible_shim(target):
# type: (TShimmedFunc) -> Optional[Union[Type, Callable]]
if target is None:
return target
if getattr(target, "shim", None):
return target.shim()
return target
@contextlib.contextmanager
def nullcontext(*args, **kwargs):
# type: (Any, Any) -> Iterator
try:
yield
finally:
pass
def has_property(target, name):
# type: (Any, str) -> bool
if getattr(target, name, None) is not None:
return True
return False
def apply_alias(imported, target, *aliases):
# type: (Union[ModuleType, Type, None], Any, Any) -> Any
"""
Given a target with attributes, point non-existent aliases at the first existing one
:param Union[ModuleType, Type] imported: A Module or Class base
:param Any target: The target which is a member of **imported** and will have aliases
:param str aliases: A list of aliases, the first found attribute will be the basis
for all non-existent names which will be created as pointers
:return: The original target
:rtype: Any
"""
base_value = None # type: Optional[Any]
applied_aliases = set()
unapplied_aliases = set()
for alias in aliases:
if has_property(target, alias):
base_value = getattr(target, alias)
applied_aliases.add(alias)
else:
unapplied_aliases.add(alias)
is_callable = inspect.ismethod(base_value) or inspect.isfunction(base_value)
for alias in unapplied_aliases:
if is_callable:
func_copy = copy.deepcopy(base_value)
alias_value = ensure_function(imported, alias, func_copy)
else:
alias_value = base_value
setattr(target, alias, alias_value)
return target
def suppress_setattr(obj, attr, value, filter_none=False):
"""
Set an attribute, suppressing any exceptions and skipping the attempt on failure.
:param Any obj: Object to set the attribute on
:param str attr: The attribute name to set
:param Any value: The value to set the attribute to
:param bool filter_none: If True, skip setting the attribute when ``value`` is None; defaults to False
:return: Nothing
:rtype: None
:Example:
>>> class MyClass(object):
... def __init__(self, name):
... self.name = name
... self.parent = None
... def __repr__(self):
... return "<{0!r} instance (name={1!r}, parent={2!r})>".format(
... self.__class__.__name__, self.name, self.parent
... )
... def __str__(self):
... return self.name
>>> me = MyClass("Dan")
>>> dad = MyClass("John")
>>> grandfather = MyClass("Joe")
>>> suppress_setattr(dad, "parent", grandfather)
>>> dad
<'MyClass' instance (name='John', parent=<'MyClass' instance (name='Joe', parent=None
)>)>
>>> suppress_setattr(me, "parent", dad)
>>> me
<'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My
Class' instance (name='Joe', parent=None)>)>)>
>>> suppress_setattr(me, "grandparent", grandfather)
>>> me
<'MyClass' instance (name='Dan', parent=<'MyClass' instance (name='John', parent=<'My
Class' instance (name='Joe', parent=None)>)>)>
"""
if filter_none and value is None:
# Skip the assignment entirely when None values are being filtered out
return
try:
setattr(obj, attr, value)
except Exception: # noqa
pass
def get_allowed_args(fn_or_class):
# type: (Union[Callable, Type]) -> Tuple[List[str], Dict[str, Any]]
"""
Given a callable or a class, returns the arguments and default kwargs passed in.
:param Union[Callable, Type] fn_or_class: A function, method or class to inspect.
:return: A 2-tuple with a list of arguments and a dictionary of keywords mapped to
default values.
:rtype: Tuple[List[str], Dict[str, Any]]
"""
try:
signature = inspect.signature(fn_or_class)
except AttributeError:
import funcsigs
signature = funcsigs.signature(fn_or_class)
args = []
kwargs = {}
for arg, param in signature.parameters.items():
if (
param.kind in (param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY)
) and param.default is param.empty:
args.append(arg)
else:
kwargs[arg] = param.default if param.default is not param.empty else None
return args, kwargs
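# Illustrative example (added for exposition): positional parameters without
# defaults land in the args list, everything else in the kwargs mapping.
#
#     >>> get_allowed_args(lambda a, b=1: None)
#     (['a'], {'b': 1})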
def call_function_with_correct_args(fn, **provided_kwargs):
# type: (Callable, Dict[str, Any]) -> Any
"""
Determines which arguments from **provided_kwargs** to call **fn** and calls it.
Consumes a list of allowed arguments (e.g. from :func:`~inspect.getargs()`) and
uses it to determine which of the arguments in the provided kwargs should be passed
through to the given callable.
:param Callable fn: A callable which has some dynamic arguments
:param Dict[str, Any] provided_kwargs: A mapping of candidate keyword arguments,
of which only those accepted by the callable are passed through
:return: The result of calling the function
:rtype: Any
"""
# signature = inspect.signature(fn)
args = []
kwargs = {}
func_args, func_kwargs = get_allowed_args(fn)
for arg in func_args:
args.append(provided_kwargs[arg])
for arg in func_kwargs:
if not provided_kwargs.get(arg):
continue
kwargs[arg] = provided_kwargs[arg]
return fn(*args, **kwargs)
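# Illustrative example (added for exposition): only keywords the callable
# accepts are forwarded; note that falsy provided values fall back to the
# callable's own defaults because of the truthiness check above.
#
#     >>> call_function_with_correct_args(lambda a, b=1: (a, b), a=2, b=5, c=9)
#     (2, 5)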
def filter_allowed_args(fn, **provided_kwargs):
# type: (Callable, Dict[str, Any]) -> Tuple[List[Any], Dict[str, Any]]
"""
Given a function and a kwarg mapping, return only those kwargs used in the function.
:param Callable fn: A function to inspect
:param Dict[str, Any] kwargs: A mapping of kwargs to filter
:return: A new, filtered kwarg mapping
:rtype: Tuple[List[Any], Dict[str, Any]]
"""
args = []
kwargs = {}
func_args, func_kwargs = get_allowed_args(fn)
for arg in func_args:
if arg in provided_kwargs:
args.append(provided_kwargs[arg])
for arg in func_kwargs:
if arg not in provided_kwargs:
continue
kwargs[arg] = provided_kwargs[arg]
return args, kwargs
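# Illustrative example (added for exposition): unlike the call helper above,
# this only filters, returning the matched positional and keyword values.
#
#     >>> filter_allowed_args(lambda a, b=1: None, a=10, c=3)
#     ([10], {})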
|
|
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _, _LE
from nova.openstack.common import log as logging
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class MigrationOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._imagecache = imagecache.ImageCache()
def _migrate_disk_files(self, instance_name, disk_files, dest):
# TODO(mikal): it would be nice if this method took a full instance,
# because it could then be passed to the log messages below.
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug("Migration target is the source host")
else:
LOG.debug("Migration target host: %s", dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name, remove_dir=True)
dest_path = None
try:
if same_host:
# Since source and target are the same, we copy the files to
# a temporary location before moving them into place
dest_path = '%s_tmp' % instance_path
if self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
self._pathutils.makedirs(dest_path)
else:
dest_path = self._pathutils.get_instance_dir(
instance_name, dest, remove_dir=True)
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"',
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
self._pathutils.rename(instance_path, revert_path)
if same_host:
self._pathutils.rename(dest_path, instance_path)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_failed_disk_migration(instance_path, revert_path,
dest_path)
def _cleanup_failed_disk_migration(self, instance_path,
revert_path, dest_path):
try:
if dest_path and self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
if self._pathutils.exists(revert_path):
self._pathutils.rename(revert_path, instance_path)
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_LE("Cannot cleanup migration files"))
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor['root_gb']
curr_root_gb = instance['root_gb']
if new_root_gb < curr_root_gb:
raise exception.InstanceFaultRollback(
vmutils.VHDResizeException(
_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested size: "
"%(new_root_gb)s GB") %
{'curr_root_gb': curr_root_gb,
'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None, timeout=0,
retry_interval=0):
LOG.debug("migrate_disk_and_power_off called", instance=instance)
self._check_target_flavor(instance, flavor)
self._vmops.power_off(instance, timeout, retry_interval)
instance_name = instance["name"]
(disk_files,
volume_drives) = self._vmutils.get_vm_storage_paths(instance_name)
if disk_files:
self._migrate_disk_files(instance_name, disk_files, dest)
self._vmops.destroy(instance, destroy_disks=False)
# disk_info is not used
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug("confirm_migration called", instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
def _revert_migration_files(self, instance_name):
instance_path = self._pathutils.get_instance_dir(
instance_name, create_dir=False, remove_dir=True)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name)
self._pathutils.rename(revert_path, instance_path)
def _check_and_attach_config_drive(self, instance, vm_gen):
if configdrive.required_by(instance):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path:
self._vmops.attach_config_drive(instance, configdrive_path,
vm_gen)
else:
raise vmutils.HyperVException(
_("Config drive is required by instance: %s, "
"but it does not exist.") % instance.name)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("finish_revert_migration called", instance=instance)
instance_name = instance['name']
self._revert_migration_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
image_meta = self._imagecache.get_image_details(context, instance)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
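        """Copies the base VHD next to the differential disk, reconnects the
        differential disk to the copy and merges the two, leaving a single
        flattened VHD at ``diff_vhd_path``.
        """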
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s',
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
# Replace the differential VHD with the merged one
self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_copy_path):
self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['MaxInternalSize']
if new_size < curr_size:
raise vmutils.VHDResizeException(_("Cannot resize a VHD "
"to a smaller size"))
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
def _check_base_disk(self, context, instance, diff_vhd_path,
src_base_disk_path):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
        # If the location of the base disk differs between the source
        # and target hosts we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug("finish_migration called", instance=instance)
instance_name = instance['name']
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
if not root_vhd_path:
raise vmutils.HyperVException(_("Cannot find boot VHD "
"file for instance: %s") %
instance_name)
root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
src_base_disk_path = root_vhd_info.get("ParentPath")
if src_base_disk_path:
self._check_base_disk(context, instance, root_vhd_path,
src_base_disk_path)
if resize_instance:
new_size = instance['root_gb'] * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
else:
eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
|
|
'''
Timeline
========
The :class:`Timeline` is a widget specialized for displaying time
information on a continuous, and perhaps infinite, scale.
It inherits from :class:`kivy.garden.tickline.Tickline` and thus allows
zooming in and out, and panning across time. Since :class:`Timeline` works
very much like :class:`kivy.garden.tickline.Tickline`, most of how
:class:`Timeline` works is already covered by the :mod:`kivy.garden.tickline`
documentation.
Dependencies
------------
1. kivy garden package :mod:`kivy.garden.tickline`. Use
``garden install tickline`` to install it, just like installing any
other garden package.
2. the ``pytz`` module for handling timezones. Use ``easy_install pytz`` or
``pip install pytz`` to install it for your python distribution.
3. for getting the local timezone, Windows or Unix-based systems need to get
the ``tzlocal`` python module. ``easy_install`` or ``pip install`` should
suffice here.
Platforms
---------
Most of the code here works regardless of platform. The only exception is
getting the local time zone.
With the above dependencies installed, this is not a problem for
desktop/laptop operating systems (Windows, Linux, OS X, etc.). It should also
work on Android, where the local timezone is obtained through ``pyjnius``'s
``autoclass``.
However, it likely does NOT work on iOS. Currently this is not a priority,
but if you'd like to have this feature, you are welcome to submit patches.
Usage
-----
A simple timeline can be obtained by just calling::
timeline = Timeline()
runTouchApp(timeline)
By default, the timeline will feature ticks with intervals of
1 day, 4 hours, 1 hour, 15 minutes, 5 minutes, 1 minute, 15 seconds,
5 seconds, and 1 second, which fill the timeline adequately without
overwhelming it. It will center around the current time in the current timezone.
:class:`Timeline` and :class:`TimeTick` *are* timezone aware and able to
handle timezones by themselves in most cases. By default they use the local
timezone in the computation of times.
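For example, to display times in another timezone (a rough sketch; assumes
``pytz`` is installed and the timezone name is valid)::
    from pytz import timezone
    timeline = Timeline()
    timeline.tz = timezone('UTC')   # ticks now compute their times in UTC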
Most of the customizable settings in :class:`Timeline` are the same as
:class:`~kivy.garden.tickline.Tickline`. These include
:attr:`~Timeline.orientation`, :attr:`~Timeline.backward`,
:attr:`~Timeline.line_offset`, :attr:`~Timeline.line_pos`, and
:attr:`~Timeline.min_scale`, :attr:`~Timeline.max_scale`.
In addition, the attributes :attr:`Timeline.min_time`, :attr:`Timeline.max_time`,
:attr:`Timeline.time_0`, and :attr:`Timeline.time_1` are given as the time
versions of :attr:`~Timeline.min_index`, :attr:`~Timeline.max_index`,
:attr:`~Timeline.index_0`, and :attr:`~Timeline.index_1`.
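For example, to keep the timeline within a fixed window (a sketch; any
timezone-aware datetimes should work here)::
    from datetime import datetime
    from pytz import UTC
    timeline = Timeline()
    timeline.min_time = datetime(2014, 1, 1, tzinfo=UTC)
    timeline.max_time = datetime(2014, 1, 2, tzinfo=UTC)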
The centerpiece of :class:`Timeline`, though, is really :class:`TimeTick`.
There are many available options for the intervals tracked, from 1 second to
1 day (the default ticks offer a sample of them), listed in
:attr:`TimeTick.mode_options`. You can change a :class:`TimeTick`'s interval
by changing its :attr:`~TimeTick.mode`. For example::
# interval of 1 second
tick = TimeTick(mode='second')
# interval of 15 seconds
tick = TimeTick(mode='15 seconds')
# interval of 1 minute
tick = TimeTick(mode='minute')
# interval of 30 minutes
tick = TimeTick(mode='30 minutes')
Most other attributes are inherited from :class:`kivy.garden.tickline.Tick`.
These include :attr:`~TimeTick.tick_size`, :attr:`~TimeTick.label_global`,
:attr:`~TimeTick.halign`, :attr:`~TimeTick.valign`, etc.
You may use :func:`selected_time_ticks` to get a list of :class:`TimeTick`s
with the intervals mentioned above. Or you can call :func:`all_time_ticks` to
get a list of :class:`TimeTick`s, one for each of the available modes.
A more complex working example is::
if __name__ == '__main__':
acc = Accordion(orientation='vertical')
simple = AccordionItem(title='simple')
simple.add_widget(Timeline())
complex_ = AccordionItem(title='complex')
complex_.add_widget(
Timeline(backward=True,
orientation='horizontal',
ticks=(selected_time_ticks() +
[TimeTick(valign='top',
mode='12 hours'),
TimeTick(valign='line_bottom',
mode='2 hours')]),
line_offset=dp(130)
))
acc.add_widget(simple)
acc.add_widget(complex_)
runTouchApp(acc)
Extending
---------
The :class:`TimeTick` overrides :meth:`Tick.tick_iter`
to yield datetimes instead of local indices, and :meth:`Tick.draw` similarly
expects datetimes instead of indices. Hence for most graphics customization,
overriding :meth:`TimeTick.draw` may be enough. For example, a time series
grapher can override :meth:`TimeTick.draw` to draw a dot at the height
corresponding to the given datetime.
Of course, the labeller :class:`TimeLabeller` can also be subclassed or
ducktyped to provide the necessary functionality.
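A very rough sketch of such an override (the ``series`` mapping below is
hypothetical and not part of this module) could look like::
    class TimeSeriesTick(TimeTick):
        series = DictProperty({})
        def draw(self, tickline, time):
            super(TimeSeriesTick, self).draw(tickline, time)
            pos = self.pos_of(tickline, time)
            # instructions could be added to ``tickline.canvas`` here, e.g. a
            # small Rectangle at ``pos`` offset by ``self.series.get(time)``.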
'''
from bisect import bisect, bisect_left
from datetime import datetime, timedelta
from decimal import DivisionByZero
from itertools import chain
from kivy.base import runTouchApp
from kivy.core.text import Label as CoreLabel
from kivy.event import EventDispatcher
from kivy.garden.tickline import TickLabeller, Tick, Tickline
from kivy.graphics.context_instructions import Color
from kivy.graphics.vertex_instructions import Rectangle, Line
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ListProperty, NumericProperty, OptionProperty, \
DictProperty, ObjectProperty, BoundedNumericProperty, BooleanProperty, \
AliasProperty
from kivy.uix.accordion import AccordionItem, Accordion
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from math import ceil, floor
from numbers import Number
from pytz import UTC
try:
from tzlocal import get_localzone
except ImportError:
from jnius import autoclass
from pytz import timezone
TimeZone = autoclass('java.util.TimeZone')
def get_localzone():
return timezone(TimeZone.getDefault().getID())
def local_now():
return get_localzone().localize(datetime.now())
Builder.load_string('''
<AutoSizeLabel>:
size: self.texture_size
size_hint: None, None
''')
class AutoSizeLabel(Label):
pass
class TimeLabeller(TickLabeller):
'''default labeller of :class:`Timeline`. For an example of its graphics,
see example images or run the example in the module documentation.
'''
date_halign = OptionProperty('left', options=['left', 'right'])
'''specifies whether the date labels are on the left or right side of
the :class:`Timeline` when the timeline is vertical. Has no effect when
the timeline is horizontal.'''
date_valign = OptionProperty('bottom', options=['top', 'bottom'])
'''specifies whether the date labels are on top or bottom of
the :class:`Timeline` when the timeline is horizontal. Has no effect when
the timeline is vertical.'''
time_halign = OptionProperty('left', options=['left', 'right'])
'''specifies whether the time labels are on the left or right side of
the :class:`Timeline` when the timeline is vertical. Has no effect when
the timeline is horizontal.'''
time_valign = OptionProperty('bottom', options=['top', 'bottom'])
'''specifies whether the time labels are on top or bottom of
the :class:`Timeline` when the timeline is horizontal. Has no effect when
the timeline is vertical.'''
date_dist_from_edge = NumericProperty('55dp')
'''distance of the date labels from the edge of the :class:`Timeline`.'''
time_dist_from_edge = NumericProperty('22dp')
'''distance of the time labels from the edge of the :class:`Timeline`.'''
date_font_size = NumericProperty('12sp')
'''font size of the date labels.'''
time_font_size = NumericProperty('7sp')
'''font size of the time labels.'''
def __init__(self, tickline, **kw):
super(TimeLabeller, self).__init__(tickline, **kw)
self.labels = []
self.seconds_registrar = {}
self.have_time = False
self.instructions = {}
def re_init(self, *args):
self.labels = []
self.seconds_registrar = {}
self.have_time = False
super(TimeLabeller, self).re_init(*args)
def register(self, tick, tick_index, tick_info):
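        # Skip ticks whose labels would be too close together, and keep at
        # most one label per time instant: a tick only registers a label for
        # a given second if no tick with a larger on-screen spacing has
        # already claimed it.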
assert isinstance(tick_index, Number)
tick_sc = tick.scale(self.tickline.scale)
if tick_sc < tick.min_label_space:
return
seconds = tick.to_seconds(tick_index)
if self.seconds_registrar.get(seconds, 0) < tick_sc:
self.have_time |= tick.mode != 'day'
self.registrar.setdefault(tick, {})[tick_index] = tick_info
self.seconds_registrar[seconds] = tick_sc
def _get_texture_pos(self, tick, index, succinct=True, which='time',
texture=None):
tl = self.tickline
# tick_info should be (x, y, width, height) of tick
tick_info = self.registrar[tick][index]
if not texture:
label_kw = tick.get_label_texture(index, succinct, return_kw=True)
if not label_kw:
return
label_kw['font_size'] = self.time_font_size if which == 'time' else \
self.date_font_size
label_kw['halign'] = 'left' if tl.is_vertical() else 'center'
label = CoreLabel(**label_kw)
label.refresh()
texture = label.texture
if tl.is_vertical():
y = tick_info[1] + tick_info[3] / 2 - texture.height / 2
if which == 'time':
dist = self.time_dist_from_edge
else:
dist = self.date_dist_from_edge
dist = max(dist, tick.tick_size[1] + tl.tick_label_padding)
halign = tick.halign
if halign == 'left':
x = tl.x + dist
elif halign == 'line_left':
x = tl.line_pos - dist - texture.width
elif halign == 'line_right':
x = tl.line_pos + dist
else:
x = tl.right - dist - texture.width
else:
# TODO horizontal is gonna get crowded with text
x = tick_info[0] + tick_info[2] / 2 - texture.width / 2
if which == 'time':
dist = self.time_dist_from_edge
else:
dist = self.date_dist_from_edge
dist = max(dist, tick.tick_size[1] + tl.tick_label_padding)
valign = tick.valign
if valign == 'top':
y = tl.top - dist - texture.height
elif valign == 'line_top':
y = tl.line_pos + dist
elif valign == 'line_bottom':
y = tl.line_pos - dist - texture.height
else:
y = tl.y + dist
return (texture, [x, y])
def make_labels(self):
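        # Rebuild the label rectangles from the current registrar: rectangles
        # for times still on screen are repositioned and reused, new ones are
        # added to the canvas, and whatever remains in ``to_pop`` afterwards
        # is removed. Day labels are handled specially (the "push" behaviour):
        # the label for the last visible day is kept pinned near the edge of
        # the window so the current date stays readable while scrolling.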
r = self.registrar
instructions = self.instructions
setdefault = instructions.setdefault
to_pop = set((tick, index) for tick in instructions
for index in instructions[tick])
tl = self.tickline
succinct = not any('second' in tick.mode for tick in r)
get_texture_pos = self._get_texture_pos
canvas = tl.canvas
for tick in r:
instrs = setdefault(tick, {})
if tick.mode != 'day':
for index in r[tick]:
self._update_rect(tick, index, instrs, get_texture_pos,
to_pop, succinct, canvas)
else:
a = tl.is_vertical()
b = 1 - a
bottom_up = sorted(r[tick], reverse=tl.backward)
if self.have_time:
last_rect = [None, None]
for index in bottom_up:
rect = \
self._update_rect(tick, index, instrs, get_texture_pos,
to_pop, succinct, canvas, which='date')
last_rect[0] = last_rect[1]
last_rect[1] = rect
max_ = tl.top if a else tl.right
if len(bottom_up) > 1:
_2ndlast, last = last_rect
last_coord = max(_2ndlast.pos[a] + _2ndlast.size[a],
max_ - last.size[a])
_2ndlast_coord = min(_2ndlast.pos[a] + _2ndlast.size[a],
max_) - _2ndlast.size[a]
last.pos = (last.pos[b], last_coord) if a else \
(last_coord, last.pos[b])
_2ndlast.pos = (_2ndlast.pos[b], _2ndlast_coord) if a \
else (_2ndlast_coord, _2ndlast.pos[b])
else:
new_coord = max_ - last_rect[1].size[a]
last_rect[1].pos = (last_rect[1].pos[b], new_coord) \
if a else \
(new_coord, last_rect[1].pos[b])
else:
for index in bottom_up[:-1]:
self._update_rect(tick, index, instrs, get_texture_pos,
to_pop, succinct, canvas, which='date')
for tick, index in to_pop:
rect = instructions[tick].pop(index)
canvas.remove(rect)
def _update_rect(self, tick, index, instrs, get_texture_pos, to_pop,
succinct, canvas, which='time'):
if index in instrs:
# old label: change position
old_rect = instrs[index]
t_p = get_texture_pos(tick, index, succinct,
texture=old_rect.texture, which=which)
old_rect.pos = t_p[1]
to_pop.remove((tick, index))
return old_rect
else:
# new label
t_p = get_texture_pos(tick, index, succinct, which=which)
if t_p:
texture, pos = t_p
rect = Rectangle(texture=texture, pos=pos,
size=texture.size)
instrs[index] = rect
canvas.add(rect)
return rect
unixepoch = datetime(1970, 1, 1, tzinfo=UTC)
_tail_names = ['microsecond', 'second', 'minute', 'hour', 'day']
_tail_res = {'microsecond': 10 ** -6, 'second': 1, 'minute': 60, 'hour': 3600,
'day': 3600 * 24}
def time_tail(dt, length=2, tail_name=None, strict=False):
'''given a datetime ``dt``, gives its time tail specified by ``length``
or ``tail_name``::
    >>> assert(
        time_tail(datetime(2010, 10, 4, 13, 25, 5, 330000)) ==
        timedelta(seconds=5, microseconds=330000))
    >>> assert(
        time_tail(datetime(2010, 10, 4, 13, 25, 5, 330000), 3) ==
        timedelta(minutes=25, seconds=5, microseconds=330000))
    >>> assert(
        time_tail(datetime(2010, 10, 4, 13, 25, 5, 330000),
                  tail_name='hour') ==
        timedelta(hours=13, minutes=25, seconds=5, microseconds=330000))
    >>> assert(
        time_tail(datetime(2010, 10, 4, 13, 25, 5, 330000),
                  tail_name='hour', strict=True) ==
        timedelta(minutes=25, seconds=5, microseconds=330000))
'''
if tail_name:
length = _tail_names.index(tail_name) + 1 - strict
timedelta_kw = {}
for name in _tail_names[:length]:
timedelta_kw[name + 's'] = getattr(dt, name)
return timedelta(**timedelta_kw)
def set_time_tail(dt, time_vector=[0]):
    '''return a copy of ``dt`` with its smallest time fields (microsecond
    first) replaced by the values in ``time_vector``. Note that datetimes
    are immutable, so the adjusted datetime is returned rather than changed
    in place.'''
    return dt.replace(**dict(zip(_tail_names, time_vector)))
def round_time(dt, grain='second', mode='nearest'):
'''round datetime ``dt`` to the nearest, next, or previous second,
15 seconds, minutes, etc::
>>> round_time(datetime(2013, 2, 3, 5, 23, 56), 'minute', 'nearest')
datetime.datetime(2013, 2, 3, 5, 24, 0)
>>> round_time(datetime(2013, 2, 3, 5, 23, 56), 'minute', 'down')
datetime.datetime(2013, 2, 3, 5, 23, 0)
>>> round_time(datetime(2013, 2, 3, 5, 23, 56), 'day', 'up')
datetime.datetime(2013, 2, 4, 0, 0, 0)
:param dt: datetime object to round
:param grain: the smallest granularity to round toward. Can be any of
:attr:`TimeTick.mode`. Defaults to 'second'
:param mode: the rounding mode. Can be any one of 'nearest', 'up', or 'down'.
Defaults to 'nearest'
'''
if mode == 'nearest':
round_func = round
elif mode == 'up':
round_func = ceil
else:
round_func = floor
res = TimeTick.granularity(grain)
if grain in _tail_names:
tail = time_tail(dt, _tail_names.index(grain))
else:
mult, gr = grain.split(' ')
tail = time_tail(dt, _tail_names.index(gr[:-1]) + 1)
trunced = dt - tail
return timedelta(seconds=res * round_func(tail.total_seconds() / res)) \
+ trunced
class TimeTick(Tick):
size_dict = \
{'day': [dp(5), dp(48)],
'12 hours': [dp(4.5), dp(25)],
'6 hours': [dp(4.5), dp(25)],
'4 hours': [dp(4), dp(20)],
'2 hours': [dp(4), dp(20)],
'hour': [dp(4), dp(20)],
'30 minutes': [dp(3), dp(12)],
'15 minutes': [dp(3), dp(12)],
'10 minutes': [dp(2), dp(8)],
'5 minutes': [dp(2), dp(8)],
'minute': [dp(2), dp(8)],
'30 seconds': [dp(1.5), dp(7)],
'15 seconds': [dp(1.5), dp(7)],
'10 seconds': [dp(1), dp(4)],
'5 seconds': [dp(1), dp(4)],
'second': [dp(1), dp(4)]}
scale_factor_dict = \
{'day': 1,
'12 hours': 2,
'6 hours': 4,
'4 hours': 6,
'2 hours': 12,
'hour': 24,
'30 minutes': 48,
'15 minutes': 96,
'10 minutes': 48 * 3,
'5 minutes': 3 * 96,
'minute': 24 * 60,
'30 seconds': 24 * 120,
'15 seconds': 24 * 240,
'10 seconds': 24 * 360,
'5 seconds': 24 * 720,
'second': 24 * 3600}
mode_options = ['day',
'12 hours',
'6 hours',
'4 hours',
'2 hours',
'hour',
'30 minutes',
'15 minutes',
'10 minutes',
'5 minutes',
'minute',
'30 seconds',
'15 seconds',
'10 seconds',
'5 seconds',
'second']
mode = OptionProperty('day', options=mode_options)
    # a width of 188 is enough for an entire date header
_tick_size = ListProperty(None)
def get_tick_size(self, *args):
return self._tick_size or self.size_dict[self.mode]
def set_tick_size(self, val):
self._tick_size = val
tick_size = AliasProperty(get_tick_size, set_tick_size,
bind=['_tick_size', 'mode'])
tz = ObjectProperty(get_localzone())
def __init__(self, *args, **kw):
super(TimeTick, self).__init__(*args, **kw)
@classmethod
def granularity(cls, mode):
        '''gives the length of this mode's tick interval, in seconds.'''
return cls.scale_factor_dict['second'] / cls.scale_factor_dict[mode]
def time_min_max(self, tl, extended=False):
        '''gives either (:attr:`Timeline.time_0`, :attr:`Timeline.time_1`) or
        (:attr:`Timeline.time_1`, :attr:`Timeline.time_0`) applied to ``tl``,
        so that the first time is not later than the second. In essence, this
        provides the minimal and maximal times that can be shown on the screen
        at the time of the method call.
        If ``extended``, the returned times are pushed out by one
        ``tl.densest_tick`` interval below and above the current window.
:param tl: :class:`Tickline` instance.
:param extended: If True, gives a slightly larger window, as discussed
above. Defaults to False.
'''
min_, max_ = (tl.time_1, tl.time_0) if tl.backward else \
(tl.time_0, tl.time_1)
if extended:
interval = TimeTick.granularity(tl.densest_tick.mode)
min_ -= timedelta(seconds=interval)
max_ += timedelta(seconds=interval)
return min_, max_
def tick_iter(self, tl):
'''Overrides :meth:`Tick.tick_iter`.
Provides an iterator of the times that correspond to ticks that
should be drawn on screen, depending on :attr:`mode`. Note that
for the "day" mode, the day past the last day shown on screen is also
given, for the push graphics (see :class:`TimeLabeller` for details).
'''
if self.scale(tl.scale) < self.min_space:
            return
time_min, time_max = self.time_min_max(tl, extended=True)
time = round_time(time_min, self.mode, 'up')
delta = timedelta(seconds=self.granularity(self.mode))
if self.mode == 'day' and tl.backward:
yield time - timedelta(days=1)
while time <= time_max:
yield time
time += delta
if self.mode == 'day' and not tl.backward:
yield time
def draw(self, tickline, time):
'''Override :meth:`Tick.draw`.
Instead of taking a pair (pos, index) of the tick to be drawn, takes
        the time of such a tick, and internally converts it to (pos, index)
using :meth:`index_of` and :meth:`index2pos`.
'''
super(TimeTick, self).draw(tickline, self.pos_index_of(tickline, time))
def pos_index_of(self, tickline, time):
tick_index = self.index_of(time)
tick_pos = tickline.index2pos(self.globalize(tick_index))
return tick_pos, tick_index
def pos_of(self, tickline, time):
return self.pos_index_of(tickline, time)[0]
def on_mode(self, *args):
self.scale_factor = self.scale_factor_dict[self.mode]
def datetime_of(self, tick_index):
if self.mode in ('day', 'hour', 'minute', 'second'):
t = timedelta(**{self.mode + 's': tick_index}) + unixepoch
else:
mult, mode = self.mode.split(' ')
t = timedelta(**{mode: int(mult) * tick_index}) + unixepoch
t = t.astimezone(self.tz)
return t
def to_seconds(self, tick_index):
'''converts the ``tick_index`` to the number of seconds since
unix epoch. Always returns the nearest integer.'''
return round(tick_index * self.scale_factor_dict['second']
/ self.scale_factor)
def pos2time(self, pos, tl):
return self.datetime_of(self.localize(tl.pos2index(pos)))
def index_of(self, dt, global_=False):
'''return a local index corresponding to a datetime. If ``global_``
is true, then return the global index (the index of the owning
:class:`Tickline`).
:param dt: a datetime to be converted to index.
:param global_: flag that indicates the index returned should be global.
Defaults to False.
'''
secs = (dt - unixepoch).total_seconds()
global_idx = secs / self.scale_factor_dict['second']
if global_:
return global_idx
return self.localize(global_idx)
def get_label_texture(self, index, succinct=True, return_kw=False,
return_label=False, **kw):
if isinstance(index, Number):
t = self.datetime_of(index)
else:
t = index
if self.mode == 'second':
return None
if self.mode == 'day':
# need to get the datetime of the previous day
text = (t - timedelta(seconds=1)).strftime('%a\n%m-%d-%y')
kw.setdefault('height', 50)
elif 'second' not in self.mode and succinct:
text = str(t.time())[:-3]
else:
text = str(t.time())
kw.setdefault('height', 20)
kw['text'] = text
if return_kw:
return kw
if not return_label:
return CoreLabel(**kw).texture
label = AutoSizeLabel(**kw)
label.texture_update()
return label
def all_time_ticks():
'''returns a list of :class:`TimeTick`s, one for each of the available
:attr:`~TimeTick.mode`s, specified in :attr:`TimeTick.mode_options`.
'''
return [TimeTick(mode=m) for m in TimeTick.mode.options]
def selected_time_ticks():
'''returns a list of :class:`TimeTick`s with intervals of
1 day, 4 hours, 1 hour, 15 minutes, 5 minutes, 1 minute, 15 seconds,
5 seconds, and 1 second.'''
return [TimeTick(mode=TimeTick.mode.options[i]) for i in
[0, 3, 5, 7, 9, 10, 12, 14, 15]]
class Timeline(Tickline):
'''subclass of :class:`Tickline` specialized for displaying time
information. See module documentation for more details.'''
labeller_cls = ObjectProperty(TimeLabeller)
tz = ObjectProperty(get_localzone())
def get_min_time(self, *args):
return self.datetime_of(self.min_index)
def set_min_time(self, val):
self.min_index = self.index_of(val)
min_time = AliasProperty(get_min_time, set_min_time, cache=True,
bind=['min_index'])
'''the minimal time beyond which this :class:`Timeline` cannot go.
This is a time version of :attr:`Tickline.min_index`.'''
def get_max_time(self, *args):
return self.datetime_of(self.max_index)
def set_max_time(self, val):
self.max_index = self.index_of(val)
max_time = AliasProperty(get_max_time, set_max_time, cache=True,
bind=['max_index'])
'''the maximal time beyond which this :class:`Timeline` cannot go.
This is a time version of :attr:`Tickline.max_index`.'''
def get_time_0(self, *args):
return self.datetime_of(self.index_0)
def set_time_0(self, val):
        self.index_0 = self.index_of(val)
time_0 = AliasProperty(get_time_0, set_time_0,
bind=['index_0'])
    '''gives the time that sits on top of
``self.x`` if :attr:`orientation` is 'vertical', or ``self.y``
if :attr:`orientation` is 'horizontal'. Note that this doesn't
depend on :attr:`Tickline.backward`.
This is the time version of :class:`Tickline.index_0`.'''
def get_time_1(self, *args):
return self.datetime_of(self.index_1)
def set_time_1(self, val):
        self.index_1 = self.index_of(val)
time_1 = AliasProperty(get_time_1, set_time_1,
bind=['index_1'])
    '''gives the time that sits on top of
``self.right`` if :attr:`orientation` is 'vertical', or ``self.top``
if :attr:`orientation` is 'horizontal'. Note that this doesn't
depend on :attr:`Tickline.backward`.
This is the time version of :class:`Tickline.index_1`.'''
def __init__(self, **kw):
now = local_now().astimezone(UTC)
self.center_on_timeframe(now - timedelta(days=1),
now + timedelta(days=1))
self.ticks = selected_time_ticks()
super(Timeline, self).__init__(**kw)
def on_tz(self, *args):
for tick in self.ticks:
tick.tz = self.tz
def pos2time(self, pos):
return self.datetime_of(self.pos2index(pos))
def datetime_of(self, index):
return (timedelta(days=index) + unixepoch).astimezone(self.tz)
def index_of(self, dt):
'''return a global index corresponding to a datetime. '''
secs = (dt - unixepoch).total_seconds()
global_idx = secs / TimeTick.scale_factor_dict['second']
return global_idx
def pos_of_time(self, time):
return self.index2pos(self.index_of(time))
    def timedelta2dist(self, td):
        # the index unit of a Timeline is one day, so convert the timedelta
        # to (fractional) days before applying the scale
        return td.total_seconds() / 86400.0 * self.scale
def center_on_timeframe(self, start, end):
self.index_0 = self.index_of(start)
self.index_1 = self.index_of(end)
if __name__ == '__main__':
acc = Accordion(orientation='vertical')
simple = AccordionItem(title='simple')
simple.add_widget(Timeline())
complex_ = AccordionItem(title='complex')
complex_.add_widget(
Timeline(backward=True,
orientation='horizontal',
ticks=selected_time_ticks() + [TimeTick(valign='top',
mode='12 hours'),
TimeTick(valign='line_bottom',
mode='2 hours')],
line_offset=dp(130)
))
acc.add_widget(simple)
acc.add_widget(complex_)
runTouchApp(acc)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import shlex
import subprocess
import unittest
from typing import Any, Dict
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.apache.beam.hooks.beam import BeamCommandRunner, BeamHook
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
DataflowJobStatus,
DataflowJobType,
_DataflowJobsController,
_fallback_to_project_id_from_variables,
process_line_and_extract_dataflow_job_id_callback,
)
DEFAULT_RUNNER = "DirectRunner"
BEAM_STRING = 'airflow.providers.apache.beam.hooks.beam.{}'
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
MOCK_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
MOCK_UUID_PREFIX = str(MOCK_UUID)[:8]
UNIQUE_JOB_NAME = f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}'
TEST_TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'apache_beam.examples.wordcount'
JAR_FILE = 'unitest.jar'
JOB_CLASS = 'com.example.UnitTest'
PY_OPTIONS = ['-m']
DATAFLOW_VARIABLES_PY = {'project': 'test', 'staging_location': 'gs://test/staging', 'labels': {'foo': 'bar'}}
DATAFLOW_VARIABLES_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'labels': {'foo': 'bar'},
}
RUNTIME_ENV = {
'additionalExperiments': ['exp_flag1', 'exp_flag2'],
'additionalUserLabels': {'name': 'wrench', 'mass': '1.3kg', 'count': '3'},
'bypassTempDirValidation': {},
'ipConfiguration': 'WORKER_IP_PRIVATE',
'kmsKeyName': (
'projects/TEST_PROJECT_ID/locations/TEST_LOCATIONS/keyRings/TEST_KEYRING/cryptoKeys/TEST_CRYPTOKEYS'
),
'maxWorkers': 10,
'network': 'default',
'numWorkers': 2,
'serviceAccountEmail': 'test@apache.airflow',
'subnetwork': 'regions/REGION/subnetworks/SUBNETWORK',
'tempLocation': 'gs://test/temp',
'workerRegion': "test-region",
'workerZone': 'test-zone',
'zone': 'us-central1-f',
'machineType': 'n1-standard-1',
}
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
DATAFLOW_STRING = 'airflow.providers.google.cloud.hooks.dataflow.{}'
TEST_PROJECT = 'test-project'
TEST_JOB_ID = 'test-job-id'
TEST_LOCATION = 'custom-location'
DEFAULT_PY_INTERPRETER = 'python3'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subscription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_PROJECT_ID = 'test-project-id'
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB_ID = 'test-job-id'
DEFAULT_CANCEL_TIMEOUT = 5 * 60
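# _fallback_to_project_id_from_variables lets a hook method take the project
# id either as an explicit ``project_id`` argument or as the ``project`` key
# of its ``variables`` dict; the tests below exercise both paths as well as
# the conflict and positional-argument error cases.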
class TestFallbackToVariables(unittest.TestCase):
def test_support_project_id_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(project_id="TEST")
mock_instance.assert_called_once_with(project_id="TEST")
def test_support_project_id_from_variable_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(variables={'project': "TEST"})
mock_instance.assert_called_once_with(project_id='TEST', variables={})
def test_raise_exception_on_conflict(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException,
match="The mutually exclusive parameter `project_id` and `project` key in `variables` parameter "
"are both present\\. Please remove one\\.",
):
FixtureFallback().test_fn(variables={'project': "TEST"}, project_id="TEST2")
def test_raise_exception_on_positional_argument(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException, match="You must use keyword arguments in this methods rather than positional"
):
FixtureFallback().test_fn({'project': "TEST"}, "TEST2")
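# Stand-in for GoogleBaseHook.__init__ so that DataflowHook can be created in
# the tests below without real GCP credentials or connections.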
def mock_init(
self,
gcp_conn_id,
delegate_to=None,
impersonation_chain=None,
):
pass
class TestDataflowHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
self.dataflow_hook.beam_hook = MagicMock()
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.DataflowHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.build")
def test_dataflow_client_creation(self, mock_build, mock_authorize):
result = self.dataflow_hook.get_conn()
mock_build.assert_called_once_with(
'dataflow', 'v1b3', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables["region"] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_multiple_extra_packages(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables['extra-package'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables['extra-package'] = ['a.whl', 'b.whl']
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
('python3',),
('python2',),
('python3.6',),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_interpreter(
self, py_interpreter, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=py_interpreter,
py_requirements=None,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=py_interpreter,
py_options=PY_OPTIONS,
py_requirements=None,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
(['foo-bar'], False),
(['foo-bar'], True),
([], True),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_non_empty_py_requirements_and_without_system_packages(
self,
current_py_requirements,
current_py_system_site_packages,
mock_callback_on_job_id,
mock_dataflow_wait_for_done,
mock_uuid,
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
def test_start_python_dataflow_with_empty_py_requirements_and_without_system_packages(
self, mock_dataflow_wait_for_done, mock_uuid
):
self.dataflow_hook.beam_hook = BeamHook(runner="DataflowRunner")
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"), self.assertRaisesRegex(
AirflowException, "Invalid method invocation."
):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=[],
on_new_job_id_callback=on_new_job_id_callback,
)
mock_dataflow_wait_for_done.assert_not_called()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_multiple_values_in_variables(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['mock-option'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(passed_variables)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['region'] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@parameterized.expand(
[
(JOB_NAME, JOB_NAME, False),
('test-example', 'test_example', False),
(f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}', JOB_NAME, True),
(f'test-example-{MOCK_UUID_PREFIX}', 'test_example', True),
('df-job-1', 'df-job-1', False),
('df-job', 'df-job', False),
('dfjob', 'dfjob', False),
('dfjob1', 'dfjob1', False),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
def test_valid_dataflow_job_name(self, expected_result, job_name, append_job_name, mock_uuid4):
job_name = self.dataflow_hook.build_dataflow_job_name(
job_name=job_name, append_job_name=append_job_name
)
self.assertEqual(expected_result, job_name)
@parameterized.expand([("1dfjob@",), ("dfjob@",), ("df^jo",)])
def test_build_dataflow_job_name_with_invalid_value(self, job_name):
self.assertRaises(
ValueError, self.dataflow_hook.build_dataflow_job_name, job_name=job_name, append_job_name=False
)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_get_job(self, mock_conn, mock_dataflowjob):
method_fetch_job_by_id = mock_dataflowjob.return_value.fetch_job_by_id
self.dataflow_hook.get_job(job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_metrics_by_id = mock_dataflowjob.return_value.fetch_job_metrics_by_id
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_metrics_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id_controller(self, mock_conn):
method_get_metrics = (
mock_conn.return_value.projects.return_value.locations.return_value.jobs.return_value.getMetrics
)
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
method_get_metrics.return_value.execute.assert_called_once_with(num_retries=0)
method_get_metrics.assert_called_once_with(
jobId=TEST_JOB_ID, projectId=TEST_PROJECT_ID, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_messages_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_messages_by_id = mock_dataflowjob.return_value.fetch_job_messages_by_id
self.dataflow_hook.fetch_job_messages_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_messages_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_autoscaling_events_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_autoscaling_events_by_id = (
mock_dataflowjob.return_value.fetch_job_autoscaling_events_by_id
)
self.dataflow_hook.fetch_job_autoscaling_events_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_autoscaling_events_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_wait_for_done(self, mock_conn, mock_dataflowjob):
method_wait_for_done = mock_dataflowjob.return_value.wait_for_done
self.dataflow_hook.wait_for_done(
job_name="JOB_NAME",
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
multiple_jobs=False,
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
name="JOB_NAME",
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
job_id=TEST_JOB_ID,
num_retries=self.dataflow_hook.num_retries,
multiple_jobs=False,
drain_pipeline=self.dataflow_hook.drain_pipeline,
cancel_timeout=self.dataflow_hook.cancel_timeout,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
method_wait_for_done.assert_called_once_with()
class TestDataflowTemplateHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow(self, mock_conn, mock_controller, mock_uuid):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
variables = {'zone': 'us-central1-f', 'tempLocation': 'gs://test/temp'}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=copy.deepcopy(variables),
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={
'jobName': f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
'parameters': PARAMETERS,
'environment': variables,
},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id='test-job-id',
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_variable(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables={'region': TEST_LOCATION},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
projectId=TEST_PROJECT,
location=TEST_LOCATION,
gcsPath=TEST_TEMPLATE,
body=mock.ANY,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_parameter(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables={},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={'jobName': UNIQUE_JOB_NAME, 'parameters': PARAMETERS, 'environment': {}},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment={"numWorkers": 17},
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": RUNTIME_ENV}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_update_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
del options_with_runtime_env["numWorkers"]
runtime_env = {"numWorkers": 17}
expected_runtime_env = copy.deepcopy(RUNTIME_ENV)
expected_runtime_env.update(runtime_env)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment=runtime_env,
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": expected_runtime_env}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_flex_template(self, mock_conn, mock_controller):
expected_job = {"id": TEST_JOB_ID}
mock_locations = mock_conn.return_value.projects.return_value.locations
launch_method = mock_locations.return_value.flexTemplates.return_value.launch
launch_method.return_value.execute.return_value = {"job": expected_job}
mock_controller.return_value.get_jobs.return_value = [{"id": TEST_JOB_ID}]
on_new_job_callback = mock.MagicMock()
result = self.dataflow_hook.start_flex_template(
body={"launchParameter": TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
on_new_job_callback=on_new_job_callback,
)
on_new_job_callback.assert_called_once_with(expected_job)
launch_method.assert_called_once_with(
projectId='test-project-id',
body={'launchParameter': TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
num_retries=self.dataflow_hook.num_retries,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
        mock_controller.return_value.get_jobs.wait_for_done.assert_called_once_with()
        mock_controller.return_value.get_jobs.assert_called_once_with()
assert result == {"id": TEST_JOB_ID}
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_cancel_job(self, mock_get_conn, jobs_controller):
self.dataflow_hook.cancel_job(
job_name=UNIQUE_JOB_NAME, job_id=TEST_JOB_ID, project_id=TEST_PROJECT, location=TEST_LOCATION
)
jobs_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
name=UNIQUE_JOB_NAME,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
        jobs_controller.return_value.cancel.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job(
self, mock_run, mock_get_conn, mock_provide_authorized_gcloud, mock_controller
):
test_job = {'id': "TEST_JOB_ID"}
mock_controller.return_value.get_jobs.return_value = [test_job]
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=0
)
on_new_job_callback = mock.MagicMock()
result = self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_callback=on_new_job_callback,
)
mock_run.assert_called_once_with(
[
'gcloud',
'dataflow',
'sql',
'query',
TEST_SQL_QUERY,
'--project=test-project',
'--format=value(job.id)',
'--job-name=test-sql-job-name',
'--region=custom-location',
'--bigquery-project=test-project',
'--bigquery-dataset=test-dataset',
'--bigquery-table=beam_output',
'--bigquery-write-disposition=write-truncate',
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
mock_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
assert result == test_job
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job_failed_to_run(self, mock_run, mock_provide_authorized_gcloud, mock_get_conn):
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=1
)
with pytest.raises(AirflowException):
self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_callback=mock.MagicMock(),
)
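# Usage sketch (not part of the test suite above): how the template-launch path that
# TestDataflowTemplateHook mocks out could be invoked directly. The connection id, bucket,
# template path and parameter values below are illustrative placeholders only.
def _example_start_template_job():
    hook = DataflowHook(gcp_conn_id='google_cloud_default')
    # Mirrors the keyword arguments exercised by the tests above.
    return hook.start_template_dataflow(
        job_name='example-wordcount',
        variables={'tempLocation': 'gs://example-bucket/temp'},
        parameters={'inputFile': 'gs://example-bucket/input.txt'},
        dataflow_template='gs://dataflow-templates/latest/Word_Count',
        project_id='example-project',
        location='us-central1',
    )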
class TestDataflowJob(unittest.TestCase):
def setUp(self):
self.mock_dataflow = MagicMock()
def test_dataflow_job_init_with_job_id(self):
mock_jobs = MagicMock()
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value = mock_jobs
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME, TEST_JOB_ID
).get_jobs()
mock_jobs.get.assert_called_once_with(
projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID
)
def test_dataflow_job_init_without_job_id(self):
job = {"id": TEST_JOB_ID, "name": UNIQUE_JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE}
mock_list = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list
(mock_list.return_value.execute.return_value) = {'jobs': [job]}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME
).get_jobs()
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION)
def test_dataflow_job_wait_for_multiple_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [job, job]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.\
list.assert_called_once_with(location=TEST_LOCATION, projectId=TEST_PROJECT)
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list\
.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job, job]
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_FAILED, "Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobStatus.JOB_STATE_CANCELLED, "Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobStatus.JOB_STATE_DRAINED, "Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobStatus.JOB_STATE_UPDATED, "Google Cloud Dataflow job name-2 was updated\\."),
(
DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN",
),
]
)
def test_dataflow_job_wait_for_multiple_jobs_and_one_in_terminal_state(self, state, exception_regex):
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [
{
"id": "id-1", "name": "name-1",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE
},
{
"id": "id-2", "name": "name-2",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": state
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job.wait_for_done()
def test_dataflow_job_wait_for_multiple_jobs_and_streaming_jobs(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": [
{
"id": "id-2",
"name": "name-2",
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": DataflowJobType.JOB_TYPE_STREAMING
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
assert 1 == mock_jobs_list.call_count
def test_dataflow_job_wait_for_single_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.return_value = job
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.assert_called_once_with(
jobId=TEST_JOB_ID,
location=TEST_LOCATION,
projectId=TEST_PROJECT
)
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job]
def test_dataflow_job_is_job_running_with_no_job(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": []
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
result = dataflow_job.is_job_running()
assert result is False
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, None, True),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_wait_until_finished(
self, job_type, job_state, wait_until_finished, expected_result
):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_without_job_type(self, job_state, wait_until_finished, expected_result):
job = {"id": "id-2", "name": "name-2", "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
])
# fmt: on
def test_check_dataflow_job_state_terminal_state(self, job_type, job_state, exception_regex):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job._check_dataflow_job_state(job)
def test_dataflow_job_cancel_job(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_RUNNING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_PENDING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_QUEUED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_STOPPED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.timeout")
@mock.patch("time.sleep")
def test_dataflow_job_cancel_job_cancel_timeout(self, mock_sleep, mock_timeout):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=4,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
cancel_timeout=10,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_sleep.assert_has_calls([mock.call(4), mock.call(4), mock.call(4)])
mock_timeout.assert_called_once_with(
seconds=10, error_message='Canceling jobs failed due to timeout (10s): test-job-id'
)
@parameterized.expand(
[
(False, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(False, "JOB_TYPE_STREAMING", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_STREAMING", "JOB_STATE_DRAINED"),
]
)
def test_dataflow_job_cancel_or_drain_job(self, drain_pipeline, job_type, requested_state):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": job_type,
}
get_method = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.get
get_method.return_value.execute.return_value = job
# fmt: off
job_list_nest_method = (self.mock_dataflow
.projects.return_value.
locations.return_value.
jobs.return_value.list_next)
job_list_nest_method.return_value = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
drain_pipeline=drain_pipeline,
cancel_timeout=None,
)
dataflow_job.cancel()
get_method.assert_called_once_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_once_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': requested_state},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_batch.execute.assert_called_once()
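    # Minimal sketch (not a test) of the cancel-vs-drain decision that the parameterized
    # cases above exercise; the real logic lives in _DataflowJobsController.cancel() and
    # may differ in detail.
    @staticmethod
    def _example_requested_state(drain_pipeline, job_type):
        # Only streaming jobs are drained; every other combination is cancelled.
        if drain_pipeline and job_type == "JOB_TYPE_STREAMING":
            return "JOB_STATE_DRAINED"
        return "JOB_STATE_CANCELLED"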
def test_dataflow_job_cancel_job_no_running_jobs(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_UPDATED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_FAILED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_not_called()
mock_jobs.return_value.update.assert_not_called()
def test_fetch_list_job_messages_responses(self):
# fmt: off
mock_list = (
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.messages.return_value
.list
)
mock_list_next = (
self.mock_dataflow.
projects.return_value.
locations.return_value.
jobs.return_value
.messages.return_value
.list_next
)
# fmt: on
mock_list.return_value.execute.return_value = "response_1"
mock_list_next.return_value = None
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = list(jobs_controller._fetch_list_job_messages_responses(TEST_JOB_ID))
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID)
mock_list_next.assert_called_once_with(
previous_request=mock_list.return_value, previous_response="response_1"
)
assert result == ["response_1"]
def test_fetch_all_jobs_when_no_jobs_returned(self):
# fmt: off
(
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.list.return_value
.execute.return_value
) = {}
# fmt: on
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller._fetch_all_jobs()
assert result == []
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_messages_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"jobMessages": ["message_1"]},
{"jobMessages": ["message_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_messages_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['message_1', 'message_2']
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_autoscaling_events_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"autoscalingEvents": ["event_1"]},
{"autoscalingEvents": ["event_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_autoscaling_events_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['event_1', 'event_2']
APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG = f"""\
Dataflow SDK version: 2.14.0
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobsDetail/locations/europe-west3/jobs/{TEST_JOB_ID}?project=XXX
Submitted job: {TEST_JOB_ID}
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG = f"""\
INFO: Dataflow SDK version: 2.22.0
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobs/europe-west3/{TEST_JOB_ID}?project=XXXX
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: Submitted job: {TEST_JOB_ID}
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG = f"""\
INFO:root:Completed GCS upload to gs://test-dataflow-example/staging/start-python-job-local-5bcf3d71.\
1592286375.000962/apache_beam-2.14.0-cp37-cp37m-manylinux1_x86_64.whl in 0 seconds.
INFO:root:Create job: <Job
createTime: '2020-06-16T05:46:20.911857Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:46:20.911857Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:root:Created job with id: [{TEST_JOB_ID}]
INFO:root:To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/\
dataflow/jobsDetail/locations/us-central1/jobs/{TEST_JOB_ID}?project=XXX
"""
APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG = f"""\
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://test-dataflow-example/\
staging/start-python-job-local-5bcf3d71.1592286719.303624/apache_beam-2.22.0-cp37-cp37m-manylinux1_x86_64.whl\
in 1 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Create job: <Job
createTime: '2020-06-16T05:52:04.095216Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:52:04.095216Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:apache_beam.runners.dataflow.internal.apiclient:Created job with id: [{TEST_JOB_ID}]
INFO:apache_beam.runners.dataflow.internal.apiclient:Submitted job: {TEST_JOB_ID}
INFO:apache_beam.runners.dataflow.internal.apiclient:To access the Dataflow monitoring console, please \
navigate to https://console.cloud.google.com/dataflow/jobs/us-central1/{TEST_JOB_ID}?project=XXX
"""
class TestDataflow(unittest.TestCase):
@parameterized.expand(
[
(APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG,),
],
name_func=lambda func, num, p: f"{func.__name__}_{num}",
)
def test_data_flow_valid_job_id(self, log):
echos = ";".join(f"echo {shlex.quote(line)}" for line in log.split("\n"))
cmd = ["bash", "-c", echos]
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, TEST_JOB_ID)
def test_data_flow_missing_job_id(self):
cmd = ['echo', 'unit testing']
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, None)
@mock.patch('airflow.providers.apache.beam.hooks.beam.BeamCommandRunner.log')
@mock.patch('subprocess.Popen')
@mock.patch('select.select')
def test_dataflow_wait_for_done_logging(self, mock_select, mock_popen, mock_logging):
mock_logging.info = MagicMock()
mock_logging.warning = MagicMock()
mock_proc = MagicMock()
mock_proc.stderr = MagicMock()
mock_proc.stderr.readlines = MagicMock(return_value=['test\n', 'error\n'])
mock_stderr_fd = MagicMock()
mock_proc.stderr.fileno = MagicMock(return_value=mock_stderr_fd)
mock_proc_poll = MagicMock()
mock_select.return_value = [[mock_stderr_fd]]
def poll_resp_error():
mock_proc.return_code = 1
return True
mock_proc_poll.side_effect = [None, poll_resp_error]
mock_proc.poll = mock_proc_poll
mock_popen.return_value = mock_proc
dataflow = BeamCommandRunner(['test', 'cmd'])
mock_logging.info.assert_called_once_with('Running command: %s', 'test cmd')
self.assertRaises(Exception, dataflow.wait_for_done)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DailyList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the DailyList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyList
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyList
"""
super(DailyList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Records/Daily.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Streams DailyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Lists DailyInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List that will contain up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
limit=limit,
page_size=page_size,
))
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of DailyInstance records from the API.
Request is executed immediately
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
data = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return DailyPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of DailyInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return DailyPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyList>'
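# Usage sketch (not part of the generated resource classes): fetching daily usage records
# through the top-level twilio-python client. The account SID, auth token and category are
# placeholders; the client path shown assumes the standard Client helper.
def _example_list_daily_usage():
    from twilio.rest import Client
    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    # list() eagerly loads at most `limit` records; stream() would fetch them lazily instead.
    records = client.usage.records.daily.list(category='sms', limit=20)
    for record in records:
        print(record.start_date, record.category, record.usage, record.usage_unit)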
class DailyPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the DailyPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
super(DailyPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of DailyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
return DailyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyPage>'
class DailyInstance(InstanceResource):
class Category(object):
A2P_REGISTRATION_FEES = "a2p-registration-fees"
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CALLS_TRANSFERS = "calls-transfers"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FLEX_USAGE = "flex-usage"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IMP_V1_USAGE = "imp-v1-usage"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_USERS = "pchat-users"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PSTNCONNECTIVITY = "pstnconnectivity"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VERIFY_PUSH = "verify-push"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
def __init__(self, version, payload, account_sid):
"""
Initialize the DailyInstance
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
super(DailyInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'as_of': payload.get('as_of'),
'category': payload.get('category'),
'count': payload.get('count'),
'count_unit': payload.get('count_unit'),
'description': payload.get('description'),
'end_date': deserialize.iso8601_date(payload.get('end_date')),
'price': deserialize.decimal(payload.get('price')),
'price_unit': payload.get('price_unit'),
'start_date': deserialize.iso8601_date(payload.get('start_date')),
'subresource_uris': payload.get('subresource_uris'),
'uri': payload.get('uri'),
'usage': payload.get('usage'),
'usage_unit': payload.get('usage_unit'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, }
@property
def account_sid(self):
"""
:returns: The SID of the Account that accrued the usage
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the resource
:rtype: unicode
"""
return self._properties['api_version']
@property
def as_of(self):
"""
:returns: Usage records up to date as of this timestamp
:rtype: unicode
"""
return self._properties['as_of']
@property
def category(self):
"""
:returns: The category of usage
:rtype: DailyInstance.Category
"""
return self._properties['category']
@property
def count(self):
"""
:returns: The number of usage events
:rtype: unicode
"""
return self._properties['count']
@property
def count_unit(self):
"""
:returns: The units in which count is measured
:rtype: unicode
"""
return self._properties['count_unit']
@property
def description(self):
"""
:returns: A plain-language description of the usage category
:rtype: unicode
"""
return self._properties['description']
@property
def end_date(self):
"""
:returns: The last date for which usage is included in the UsageRecord
:rtype: date
"""
return self._properties['end_date']
@property
def price(self):
"""
:returns: The total price of the usage
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which `price` is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def start_date(self):
"""
:returns: The first date for which usage is included in this UsageRecord
:rtype: date
"""
return self._properties['start_date']
@property
def subresource_uris(self):
"""
:returns: A list of related resources identified by their relative URIs
:rtype: unicode
"""
return self._properties['subresource_uris']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
@property
def usage(self):
"""
:returns: The amount of usage
:rtype: unicode
"""
return self._properties['usage']
@property
def usage_unit(self):
"""
:returns: The units in which usage is measured
:rtype: unicode
"""
return self._properties['usage_unit']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyInstance>'
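# A minimal, hedged usage sketch (not part of the generated resource above): it
# assumes the standard twilio-python Client wiring, and the account SID / auth
# token values are placeholders. It shows how DailyInstance records for one
# usage category might be listed and read through the properties defined above.
if __name__ == '__main__':
    from twilio.rest import Client
    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    # Each returned record is a DailyInstance for a single day and category.
    for record in client.usage.records.daily.list(category='sms', limit=5):
        print(record.start_date, record.category, record.usage, record.usage_unit)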
|
|
"""
Test the sensitivity of simulation results to parameters
This is a very small package for testing the sensitivity of simulation
results to some parameters. Its main functions include
.. autosummary::
:toctree:
run_simuls
get_results
run_func
"""
import os
import subprocess
import collections
from jinja2 import Environment, FileSystemLoader, TemplateNotFound
#
# Public functions
# ----------------
#
def run_simuls(params, files, acts):
"""Performs the simulations to tests the sensitivity
This is the driver function for running the simulations before gathering
the results.
:param params: The sequence of parameters whose sensitivity is to be tested.
Each entry needs to be a triple of the parameter name, the parameter value,
and a differentiation step size for perturbing the parameter.
:param files: An iterable of the names of the input files of the simulation.
All the files should be present in the current working directory and
are going to be written into the subdirectories for the simulations.
:param acts: An iterable of actions that are going to be performed in each
of the subdirectories to initiate the simulations. Each action needs to be
given as a string holding the command that is to be run.
:returns: 0 for success.
"""
# Set the Jinja template engine up and get all the templates.
orig_cwd = os.getcwd()
env = Environment(loader=FileSystemLoader(orig_cwd))
templs = []
for i in files:
try:
templs.append(env.get_template(i))
except TemplateNotFound:
raise ValueError(
'Input file template {} cannot be found'.format(i)
)
continue
# Do the job for each of the given parameters.
for idx, param in enumerate(params):
# First we need to get all the parameter values and sub-directory names.
param_vals = _get_param_vals(param)
dir_names = _get_dir_names(param)
# Render the templates and run the requested actions in each of the
# sub-directories.
for param_val, dir_name in zip(param_vals, dir_names):
# Make and switch to the subdirectory.
os.makedirs(dir_name, exist_ok=True)
os.chdir(os.path.join(orig_cwd, dir_name))
print(
'Parameter {p_name} value {val} in directory {dir_name}'.format(
p_name=param[0], val=param_val, dir_name=dir_name
)
)
# Make the template rendering context.
ctx = {name: val for name, val, _ in params}
ctx[param[0]] = param_val
ctx['dir_name'] = dir_name
# Generate all the templates.
for templ, name in zip(templs, files):
with open(name, 'w') as input_fp:
input_fp.write(templ.render(ctx))
continue
# Make all the requested actions.
for i in acts:
subprocess.Popen(i.split())
continue
# Switch back to the original working directory.
os.chdir(orig_cwd)
# Continue to the next value of the parameter
continue
# Continue to the next parameter.
continue
return 0
ParamSensit = collections.namedtuple(
'ParamSensit', [
'value',
'deriv',
'cond',
]
)
def get_results(params, acts, if_print=True):
"""Gets the results about the sensitivity
:param params: An iterable of the parameters whose sensitivity are to be
tested, given as triples of name, value, and difference.
:param acts: The actions to be performed for the results. It should be a
mapping with names of the results as keys and callables for getting
the results as values. The callables are going to be called with no
arguments in the subdirectories for the simulations.
:param bool if_print: If the results are going to be printed.
:returns: A dictionary giving the results for each of the parameters on
each of the results, with the pair of the parameter name and the result
name as keys and ``ParamSensit`` objects as values.
:rtype: dict
"""
orig_cwd = os.getcwd()
res = {}
# Make the processing for each of the parameters one-by-one.
for param in params:
# The directory names and the values.
dir_names = _get_dir_names(param)
# Get the simulation results one-by-one in the order.
res_vals = {i: [] for i in acts.keys()}
for dir_name in dir_names:
# Switch the directory.
os.chdir(os.path.join(orig_cwd, dir_name))
for k, v in acts.items():
res_vals[k].append(
v()
)
continue
# Continue to the next parameter value/subdirectory.
os.chdir(orig_cwd)
continue
# Compute the results for the current parameter.
res.update(
_compute_results(param, res_vals)
)
# Continue to the next parameter.
continue
# Print the results on the stdout before returning.
if if_print:
_print_res(res)
return res
def run_func(params, func):
"""Run the given function in all the simulation directories
This is a utility function for automatically performing the same action in
all the simulation subdirectories.
:param params: The parameters whose sensitivity is to be tested.
:param function func: The callable that is going to be called in each of
the subdirectories.
:returns: None
"""
# Wrap the given function as a dummy action for the results machinery.
def dummy_func():
func()
return 1
acts = {'act': dummy_func}
# Call the get results function.
get_results(params, acts, if_print=False)
return None
#
# Private functions
# -----------------
#
# Directory and values generation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
def _get_param_vals(param):
"""Gets the value of the parameters
:param param: The parameter triple whose values are to be generated.
:returns: The value of the parameter on a five-point stencil.
"""
return [
param[1] + i * param[2]
for i in range(-2, 3)
]
def _get_dir_names(param):
"""Gets the names of the directories for testing the given parameter
:param param: The parameter triple to be tested.
:returns: A list of directory names to be used to test the given
parameter.
"""
return [
'-'.join([param[0], str(i)])
for i in range(0, 5)
]
#
# Result computation
# ^^^^^^^^^^^^^^^^^^
#
def _compute_results(param, res_vals):
"""Computes the results for the sensitivity of a parameter
:param param: The parameter whose sensitivity is to be tested.
:param res_vals: The dictionary giving the results of the simulation at
different values of the parameter. The names of the results are keys
and the five values of the results are the values.
:returns: A dictionary giving the sensitivity of the results to the given
parameter.
"""
res = {}
# Treat the results one-by-one.
for name, vals in res_vals.items():
res[(param[0], name)] = _compute_result(param, vals)
continue
return res
def _compute_result(param, vals):
"""Computes the sensitivity result for a single parameter on a single result
:param param: The parameter to be tested.
:param vals: A list of five points for the results.
:returns: A parameter sensitivity for this pair.
"""
# The simulation result at the given parameter value.
value = vals[2]
# The five-point stencil derivative formula.
deriv = (
-vals[4] + 8 * vals[3] - 8 * vals[1] + vals[0]
) / (12 * param[2])
# The condition number.
cond = (param[1] * deriv) / value
return ParamSensit(
value=value, deriv=deriv, cond=cond
)
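# A small worked example (hypothetical numbers) of what _compute_result yields:
# with param = ('k', 2.0, 0.1) and vals = [3.8, 3.9, 4.0, 4.1, 4.2], the
# five-point stencil gives deriv = (-4.2 + 8*4.1 - 8*3.9 + 3.8) / (12*0.1) = 1.0,
# value = vals[2] = 4.0, and cond = 2.0 * 1.0 / 4.0 = 0.5.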
#
# Result printing
# ^^^^^^^^^^^^^^^
#
def _print_res(res):
"""Prints the results on the screen
:param res: The dictionary of the results.
:returns: 0 for success.
"""
tot_length = 120
n_fields = 5
fmt = ''.join(['{:^', str(tot_length // n_fields), '}']) * n_fields
print(fmt.format(
'Parameter', 'Result', 'Value', 'Derivative', 'Condition N'
))
for k, v in res.items():
print(
fmt.format(*(k + tuple(v)))
)
continue
return 0
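# A hedged end-to-end sketch of how this module might be driven; the parameter
# name 'temperature', the template file 'input.tmpl', the echo command, and the
# result reader below are all hypothetical placeholders, not part of the package.
if __name__ == '__main__':
    params = [('temperature', 300.0, 1.0)]   # (name, value, step)
    files = ['input.tmpl']                   # Jinja templates expected in the cwd
    acts = ['echo run-simulation']           # commands launched in each directory
    run_simuls(params, files, acts)
    # After the launched simulations finish, collect one result per subdirectory.
    def read_energy():
        # Placeholder: a real reader would parse the simulation output here.
        return 1.0
    results = get_results(params, {'energy': read_energy})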
|
|
from __future__ import division
from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp,
factorial, factorial2, Function, GoldenRatio, I, Integer, Integral,
Interval, Lambda, Limit, Matrix, nan, O, oo, pi, Pow, Rational, Float, Rel,
S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild,
WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet, factor,
subfactorial, true, false, Equivalent, Xor, Complement, SymmetricDifference,
AccumBounds, UnevaluatedExpr)
from sympy.core import Expr
from sympy.physics.units import second, joule
from sympy.polys import Poly, rootof, RootSum, groebner, ring, field, ZZ, QQ, lex, grlex
from sympy.geometry import Point, Circle
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.core.trace import Tr
x, y, z, w = symbols('x,y,z,w')
d = Dummy('d')
def test_printmethod():
class R(Abs):
def _sympystr(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert sstr(R(x)) == "foo(x)"
class R(Abs):
def _sympystr(self, printer):
return "foo"
assert sstr(R(x)) == "foo"
def test_Abs():
assert str(Abs(x)) == "Abs(x)"
assert str(Abs(Rational(1, 6))) == "1/6"
assert str(Abs(Rational(-1, 6))) == "1/6"
def test_Add():
assert str(x + y) == "x + y"
assert str(x + 1) == "x + 1"
assert str(x + x**2) == "x**2 + x"
assert str(5 + x + y + x*y + x**2 + y**2) == "x**2 + x*y + x + y**2 + y + 5"
assert str(1 + x + x**2/2 + x**3/3) == "x**3/3 + x**2/2 + x + 1"
assert str(2*x - 7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
assert str(x - y) == "x - y"
assert str(2 - x) == "-x + 2"
assert str(x - 2) == "x - 2"
assert str(x - y - z - w) == "-w + x - y - z"
assert str(x - z*y**2*z*w) == "-w*y**2*z**2 + x"
assert str(x - 1*y*x*y) == "-x*y**2 + x"
assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
assert str(zoo) == "zoo"
def test_Derivative():
assert str(Derivative(x, y)) == "Derivative(x, y)"
assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
assert str(Derivative(
x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
assert str({1: 1 + x}) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
assert str(Dict({1: 1 + x})) == sstr({1: 1 + x}) == "{1: x + 1}"
assert str(Dict({1: x**2, 2: y*x})) in (
"{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
assert str(d) == "_d"
assert str(d + x) == "_d + x"
def test_EulerGamma():
assert str(EulerGamma) == "EulerGamma"
def test_Exp():
assert str(E) == "E"
def test_factorial():
n = Symbol('n', integer=True)
assert str(factorial(-2)) == "zoo"
assert str(factorial(0)) == "1"
assert str(factorial(7)) == "5040"
assert str(factorial(n)) == "factorial(n)"
assert str(factorial(2*n)) == "factorial(2*n)"
assert str(factorial(factorial(n))) == 'factorial(factorial(n))'
assert str(factorial(factorial2(n))) == 'factorial(factorial2(n))'
assert str(factorial2(factorial(n))) == 'factorial2(factorial(n))'
assert str(factorial2(factorial2(n))) == 'factorial2(factorial2(n))'
assert str(subfactorial(3)) == "2"
assert str(subfactorial(n)) == "subfactorial(n)"
assert str(subfactorial(2*n)) == "subfactorial(2*n)"
def test_Function():
f = Function('f')
fx = f(x)
w = WildFunction('w')
assert str(f) == "f"
assert str(fx) == "f(x)"
assert str(w) == "w_"
def test_Geometry():
assert sstr(Point(0, 0)) == 'Point2D(0, 0)'
assert sstr(Circle(Point(0, 0), 3)) == 'Circle(Point2D(0, 0), 3)'
# TODO test other Geometry entities
def test_GoldenRatio():
assert str(GoldenRatio) == "GoldenRatio"
def test_ImaginaryUnit():
assert str(I) == "I"
def test_Infinity():
assert str(oo) == "oo"
assert str(oo*I) == "oo*I"
def test_Integer():
assert str(Integer(-1)) == "-1"
assert str(Integer(1)) == "1"
assert str(Integer(-3)) == "-3"
assert str(Integer(0)) == "0"
assert str(Integer(25)) == "25"
def test_Integral():
assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
a = Symbol('a', real=True)
assert str(Interval(0, a)) == "[0, a]"
assert str(Interval(0, a, False, False)) == "[0, a]"
assert str(Interval(0, a, True, False)) == "(0, a]"
assert str(Interval(0, a, False, True)) == "[0, a)"
assert str(Interval(0, a, True, True)) == "(0, a)"
def test_AccumBounds():
a = Symbol('a', real=True)
assert str(AccumBounds(0, a)) == "<0, a>"
assert str(AccumBounds(0, 1)) == "<0, 1>"
def test_Lambda():
assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
# issue 2908
assert str(Lambda((), 1)) == "Lambda((), 1)"
assert str(Lambda((), x)) == "Lambda((), x)"
def test_Limit():
assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
assert str(
Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
assert str([x]) == sstr([x]) == "[x]"
assert str([x**2, x*y + 1]) == sstr([x**2, x*y + 1]) == "[x**2, x*y + 1]"
assert str([x**2, [y + x]]) == sstr([x**2, [y + x]]) == "[x**2, [x + y]]"
def test_Matrix_str():
M = Matrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
M = Matrix([[1]])
assert str(M) == sstr(M) == "Matrix([[1]])"
M = Matrix([[1, 2]])
assert str(M) == sstr(M) == "Matrix([[1, 2]])"
M = Matrix()
assert str(M) == sstr(M) == "Matrix(0, 0, [])"
M = Matrix(0, 1, lambda i, j: 0)
assert str(M) == sstr(M) == "Matrix(0, 1, [])"
def test_Mul():
assert str(x/y) == "x/y"
assert str(y/x) == "y/x"
assert str(x/y/z) == "x/(y*z)"
assert str((x + 1)/(y + 2)) == "(x + 1)/(y + 2)"
assert str(2*x/3) == '2*x/3'
assert str(-2*x/3) == '-2*x/3'
assert str(-1.0*x) == '-1.0*x'
assert str(1.0*x) == '1.0*x'
class CustomClass1(Expr):
is_commutative = True
class CustomClass2(Expr):
is_commutative = True
cc1 = CustomClass1()
cc2 = CustomClass2()
assert str(Rational(2)*cc1) == '2*CustomClass1()'
assert str(cc1*Rational(2)) == '2*CustomClass1()'
assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
assert str(cc2*Rational(2)) == '2*CustomClass2()'
assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
assert str(nan) == "nan"
def test_NegativeInfinity():
assert str(-oo) == "-oo"
def test_Order():
assert str(O(x)) == "O(x)"
assert str(O(x**2)) == "O(x**2)"
assert str(O(x*y)) == "O(x*y, x, y)"
assert str(O(x, x)) == "O(x)"
assert str(O(x, (x, 0))) == "O(x)"
assert str(O(x, (x, oo))) == "O(x, (x, oo))"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, x, y)) == "O(x, x, y)"
assert str(O(x, (x, oo), (y, oo))) == "O(x, (x, oo), (y, oo))"
def test_Permutation_Cycle():
from sympy.combinatorics import Permutation, Cycle
# general principle: economically, canonically show all moved elements
# and the size of the permutation.
for p, s in [
(Cycle(),
'()'),
(Cycle(2),
'(2)'),
(Cycle(2, 1),
'(1 2)'),
(Cycle(1, 2)(5)(6, 7)(10),
'(1 2)(6 7)(10)'),
(Cycle(3, 4)(1, 2)(3, 4),
'(1 2)(4)'),
]:
assert str(p) == s
Permutation.print_cyclic = False
for p, s in [
(Permutation([]),
'Permutation([])'),
(Permutation([], size=1),
'Permutation([0])'),
(Permutation([], size=2),
'Permutation([0, 1])'),
(Permutation([], size=10),
'Permutation([], size=10)'),
(Permutation([1, 0, 2]),
'Permutation([1, 0, 2])'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation([1, 0], size=6)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation([1, 0], size=10)'),
]:
assert str(p) == s
Permutation.print_cyclic = True
for p, s in [
(Permutation([]),
'()'),
(Permutation([], size=1),
'(0)'),
(Permutation([], size=2),
'(1)'),
(Permutation([], size=10),
'(9)'),
(Permutation([1, 0, 2]),
'(2)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5]),
'(5)(0 1)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'(9)(0 1)'),
(Permutation([0, 1, 3, 2, 4, 5], size=10),
'(9)(2 3)'),
]:
assert str(p) == s
def test_Pi():
assert str(pi) == "pi"
def test_Poly():
assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
assert str(Poly(2*x + x**5, x)) == "Poly(x**5 + 2*x, x, domain='ZZ')"
assert str(Poly(3**(2*x), 3**x)) == "Poly((3**x)**2, 3**x, domain='ZZ')"
assert str(Poly((x**2)**x)) == "Poly(((x**2)**x), (x**2)**x, domain='ZZ')"
assert str(Poly((x + y)**3, (x + y), expand=False)
) == "Poly((x + y)**3, x + y, domain='ZZ')"
assert str(Poly((x - 1)**2, (x - 1), expand=False)
) == "Poly((x - 1)**2, x - 1, domain='ZZ')"
assert str(
Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
assert str(
Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')"
assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')"
assert str(Poly(-x*y*z + x*y - 1, x, y, z)
) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
"Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_PolyRing():
assert str(ring("x", ZZ, lex)[0]) == "Polynomial ring in x over ZZ with lex order"
assert str(ring("x,y", QQ, grlex)[0]) == "Polynomial ring in x, y over QQ with grlex order"
assert str(ring("x,y,z", ZZ["t"], lex)[0]) == "Polynomial ring in x, y, z over ZZ[t] with lex order"
def test_FracField():
assert str(field("x", ZZ, lex)[0]) == "Rational function field in x over ZZ with lex order"
assert str(field("x,y", QQ, grlex)[0]) == "Rational function field in x, y over QQ with grlex order"
assert str(field("x,y,z", ZZ["t"], lex)[0]) == "Rational function field in x, y, z over ZZ[t] with lex order"
def test_PolyElement():
Ruv, u,v = ring("u,v", ZZ)
Rxyz, x,y,z = ring("x,y,z", Ruv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x**2) == "x**2"
assert str(x**(-2)) == "x**(-2)"
assert str(x**QQ(1, 2)) == "x**(1/2)"
assert str((u**2 + 3*u*v + 1)*x**2*y + u + 1) == "(u**2 + 3*u*v + 1)*x**2*y + u + 1"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x"
assert str((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == "(u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1"
assert str((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == "-(u**2 - 3*u*v + 1)*x**2*y - (u + 1)*x - 1"
assert str(-(v**2 + v + 1)*x + 3*u*v + 1) == "-(v**2 + v + 1)*x + 3*u*v + 1"
assert str(-(v**2 + v + 1)*x - 3*u*v + 1) == "-(v**2 + v + 1)*x - 3*u*v + 1"
def test_FracElement():
Fuv, u,v = field("u,v", ZZ)
Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
assert str(x - x) == "0"
assert str(x - 1) == "x - 1"
assert str(x + 1) == "x + 1"
assert str(x/3) == "x/3"
assert str(x/z) == "x/z"
assert str(x*y/z) == "x*y/z"
assert str(x/(z*t)) == "x/(z*t)"
assert str(x*y/(z*t)) == "x*y/(z*t)"
assert str((x - 1)/y) == "(x - 1)/y"
assert str((x + 1)/y) == "(x + 1)/y"
assert str((-x - 1)/y) == "(-x - 1)/y"
assert str((x + 1)/(y*z)) == "(x + 1)/(y*z)"
assert str(-y/(x + 1)) == "-y/(x + 1)"
assert str(y*z/(x + 1)) == "y*z/(x + 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - 1)"
assert str(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == "((u + 1)*x*y + 1)/((v - 1)*z - u*v*t - 1)"
def test_Pow():
assert str(x**-1) == "1/x"
assert str(x**-2) == "x**(-2)"
assert str(x**2) == "x**2"
assert str((x + y)**-1) == "1/(x + y)"
assert str((x + y)**-2) == "(x + y)**(-2)"
assert str((x + y)**2) == "(x + y)**2"
assert str((x + y)**(1 + x)) == "(x + y)**(x + 1)"
assert str(x**Rational(1, 3)) == "x**(1/3)"
assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
assert str(sqrt(sqrt(x))) == "x**(1/4)"
# not the same as x**-1
assert str(x**-1.0) == 'x**(-1.0)'
# see issue #2860
assert str(Pow(S(2), -1.0, evaluate=False)) == '2**(-1.0)'
def test_sqrt():
assert str(sqrt(x)) == "sqrt(x)"
assert str(sqrt(x**2)) == "sqrt(x**2)"
assert str(1/sqrt(x)) == "1/sqrt(x)"
assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
assert str(y/sqrt(x)) == "y/sqrt(x)"
assert str(x**(1/2)) == "x**0.5"
assert str(1/x**(1/2)) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1 + n3) == "3/4"
assert str(n1 + n2) == "7/12"
assert str(n1 + n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4 + n2) == "-1/6"
assert str(n4 + n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3 + n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1, 4))) == "1/2"
assert str(sqrt(Rational(1, 36))) == "1/6"
assert str((123**25) ** Rational(1, 25)) == "123"
assert str((123**25 + 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "123"
assert str((123**25 - 1)**Rational(1, 25)) != "122"
assert str(sqrt(Rational(81, 36))**3) == "27/8"
assert str(1/sqrt(Rational(81, 36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1, 10**10)) == "2**(1/10000000000)"
def test_Float():
# NOTE prec is the whole number of decimal digits
assert str(Float('1.23', prec=1 + 2)) == '1.23'
assert str(Float('1.23456789', prec=1 + 8)) == '1.23456789'
assert str(
Float('1.234567890123456789', prec=1 + 18)) == '1.234567890123456789'
assert str(pi.evalf(1 + 2)) == '3.14'
assert str(pi.evalf(1 + 14)) == '3.14159265358979'
assert str(pi.evalf(1 + 64)) == ('3.141592653589793238462643383279'
'5028841971693993751058209749445923')
assert str(pi.round(-1)) == '0.'
assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
def test_Relational():
assert str(Rel(x, y, "<")) == "x < y"
assert str(Rel(x + y, y, "==")) == "Eq(x + y, y)"
assert str(Rel(x, y, "!=")) == "Ne(x, y)"
assert str(Rel(x, y, ':=')) == "Assignment(x, y)"
def test_CRootOf():
assert str(rootof(x**5 + 2*x - 1, 0)) == "CRootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
f = x**5 + 2*x - 1
assert str(
RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
assert str(RootSum(f, Lambda(
z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(z, z**2))"
def test_GroebnerBasis():
assert str(groebner(
[], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
assert str(groebner(F, order='grlex')) == \
"GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
assert str(groebner(F, order='lex')) == \
"GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_set():
assert sstr(set()) == 'set()'
assert sstr(frozenset()) == 'frozenset()'
assert sstr(set([1])) == '{1}'
assert sstr(frozenset([1])) == 'frozenset({1})'
assert sstr(set([1, 2, 3])) == '{1, 2, 3}'
assert sstr(frozenset([1, 2, 3])) == 'frozenset({1, 2, 3})'
assert sstr(
set([1, x, x**2, x**3, x**4])) == '{1, x, x**2, x**3, x**4}'
assert sstr(
frozenset([1, x, x**2, x**3, x**4])) == 'frozenset({1, x, x**2, x**3, x**4})'
def test_SparseMatrix():
M = SparseMatrix([[x**+1, 1], [y, x + y]])
assert str(M) == "Matrix([[x, 1], [y, x + y]])"
assert sstr(M) == "Matrix([\n[x, 1],\n[y, x + y]])"
def test_Sum():
assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
"Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
assert str(y) == "y"
assert str(x) == "x"
e = x
assert str(e) == "x"
def test_tuple():
assert str((x,)) == sstr((x,)) == "(x,)"
assert str((x + y, 1 + x)) == sstr((x + y, 1 + x)) == "(x + y, x + 1)"
assert str((x + y, (
1 + x, x**2))) == sstr((x + y, (1 + x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Unit():
assert str(second) == "s"
assert str(joule) == "kg*m**2/s**2" # issue 5560
def test_wild_str():
# Check expressions containing Wild not causing infinite recursion
w = Wild('x')
assert str(w + 1) == 'x_ + 1'
assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
assert str(3*w + 1) == '3*x_ + 1'
assert str(1/w + 1) == '1 + 1/x_'
assert str(w**2 + 1) == 'x_**2 + 1'
assert str(1/(1 - w)) == '1/(-x_ + 1)'
def test_zeta():
assert str(zeta(3)) == "zeta(3)"
def test_issue_3101():
e = x - y
a = str(e)
b = str(e)
assert a == b
def test_issue_3103():
e = -2*sqrt(x) - y/sqrt(x)/2
assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
"-2*x**1/2(-1/2)*x**(-1/2)*y", "-2*x**1/2-1/2*x**-1/2*w"]
assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue_4021():
e = Integral(x, x) + 1
assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
assert sstr('abc') == 'abc'
assert sstrrepr('abc') == "'abc'"
e = ['a', 'b', 'c', x]
assert sstr(e) == "[a, b, c, x]"
assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
assert sstr(oo*I) == "oo*I"
def test_full_prec():
assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
assert sstr(S("0.3"), full_prec=False) == "0.3"
assert sstr(S("0.3")*x, full_prec=True) in [
"0.300000000000000*x",
"x*0.300000000000000"
]
assert sstr(S("0.3")*x, full_prec="auto") in [
"0.3*x",
"x*0.3"
]
assert sstr(S("0.3")*x, full_prec=False) in [
"0.3*x",
"x*0.3"
]
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert sstr(A*B*C**-1) == "A*B*C**(-1)"
assert sstr(C**-1*A*B) == "C**(-1)*A*B"
assert sstr(A*C**-1*B) == "A*C**(-1)*B"
assert sstr(sqrt(A)) == "sqrt(A)"
assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
str_printer = StrPrinter()
assert str_printer.emptyPrinter("foo") == "foo"
assert str_printer.emptyPrinter(x*y) == "x*y"
assert str_printer.emptyPrinter(32) == "32"
def test_settings():
raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert str(where(X > 0)) == "Domain: And(0 < x1, x1 < oo)"
D = Die('d1', 6)
assert str(where(D > 4)) == "Domain: Or(Eq(d1, 5), Eq(d1, 6))"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert str(pspace(Tuple(A, B)).domain) == "Domain: And(0 <= a, 0 <= b, a < oo, b < oo)"
def test_FiniteSet():
assert str(FiniteSet(*range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}'
assert str(FiniteSet(*range(1, 6))) == '{1, 2, 3, 4, 5}'
def test_PrettyPoly():
from sympy.polys.domains import QQ
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
from sympy.categories import (Object, NamedMorphism,
IdentityMorphism, Category)
A = Object("A")
B = Object("B")
f = NamedMorphism(A, B, "f")
id_A = IdentityMorphism(A)
K = Category("K")
assert str(A) == 'Object("A")'
assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
assert str(id_A) == 'IdentityMorphism(Object("A"))'
assert str(K) == 'Category("K")'
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert str(t) == 'Tr(A*B)'
def test_issue_6387():
assert str(factor(-3.0*z + 3)) == '-3.0*(1.0*z - 1.0)'
def test_MatMul_MatAdd():
from sympy import MatrixSymbol
assert str(2*(MatrixSymbol("X", 2, 2) + MatrixSymbol("Y", 2, 2))) == \
"2*(X + Y)"
def test_MatrixSlice():
from sympy.matrices.expressions import MatrixSymbol
assert str(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == 'X[:5, 1:9:2]'
assert str(MatrixSymbol('X', 10, 10)[5, :5:2]) == 'X[5, :5:2]'
def test_true_false():
assert str(true) == repr(true) == sstr(true) == "True"
assert str(false) == repr(false) == sstr(false) == "False"
def test_Equivalent():
assert str(Equivalent(y, x)) == "Equivalent(x, y)"
def test_Xor():
assert str(Xor(y, x, evaluate=False)) == "Xor(x, y)"
def test_Complement():
assert str(Complement(S.Reals, S.Naturals)) == '(-oo, oo) \ Naturals()'
def test_SymmetricDifference():
assert str(SymmetricDifference(Interval(2,3), Interval(3,4),evaluate=False)) == \
'SymmetricDifference([2, 3], [3, 4])'
def test_UnevaluatedExpr():
a, b = symbols("a b")
expr1 = 2*UnevaluatedExpr(a+b)
assert str(expr1) == "2*(a + b)"
|
|
from __future__ import division
from __future__ import unicode_literals
import time
import numpy as np
import tensorflow as tf
import collections
from deepchem.utils.save import log
from deepchem.metrics import to_one_hot
from deepchem.metrics import from_one_hot
from deepchem.models.tensorgraph.tensor_graph import TensorGraph, TFWrapper
from deepchem.models.tensorgraph.layers import Layer, Feature, Label, Weights, \
WeightedError, Dense, Dropout, WeightDecay, Reshape, SparseSoftMaxCrossEntropy, \
L2Loss, ReduceSum, Concat, Stack, TensorWrapper, ReLU, Squeeze, SoftMax, Cast
from deepchem.models.tensorgraph.IRV import Slice
class ProgressiveMultitaskRegressor(TensorGraph):
"""Implements a progressive multitask neural network for regression.
Progressive Networks: https://arxiv.org/pdf/1606.04671v3.pdf
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no catastrophic
forgetting where previous tasks are ignored.
"""
def __init__(self,
n_tasks,
n_features,
alpha_init_stddevs=0.02,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
n_outputs=1,
**kwargs):
"""Creates a progressive network.
Only listing parameters specific to progressive networks here.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of input features
alpha_init_stddevs: list
List of standard-deviations for alpha in adapter layers.
layer_sizes: list
the size of each dense layer in the network. The length of this list determines the number of layers.
weight_init_stddevs: list or float
the standard deviation of the distribution to use for weight initialization of each layer. The length
of this list should equal len(layer_sizes)+1. The final element corresponds to the output layer.
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
bias_init_consts: list or float
the value to initialize the biases in each layer to. The length of this list should equal len(layer_sizes)+1.
The final element corresponds to the output layer. Alternatively this may be a single value instead of a list,
in which case the same value is used for every layer.
weight_decay_penalty: float
the magnitude of the weight decay penalty to use
weight_decay_penalty_type: str
the type of penalty to use for weight decay, either 'l1' or 'l2'
dropouts: list or float
the dropout probability to use for each layer. The length of this list should equal len(layer_sizes).
Alternatively this may be a single value instead of a list, in which case the same value is used for every layer.
activation_fns: list or object
the Tensorflow activation function to apply to each layer. The length of this list should equal
len(layer_sizes). Alternatively this may be a single value instead of a list, in which case the
same value is used for every layer.
"""
super(ProgressiveMultitaskRegressor, self).__init__(**kwargs)
self.n_tasks = n_tasks
self.n_features = n_features
self.layer_sizes = layer_sizes
self.alpha_init_stddevs = alpha_init_stddevs
self.weight_init_stddevs = weight_init_stddevs
self.bias_init_consts = bias_init_consts
self.dropouts = dropouts
self.activation_fns = activation_fns
self.n_outputs = n_outputs
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, collections.Sequence):
self.weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(alpha_init_stddevs, collections.Sequence):
self.alpha_init_stddevs = [alpha_init_stddevs] * n_layers
if not isinstance(bias_init_consts, collections.Sequence):
self.bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, collections.Sequence):
self.dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, collections.Sequence):
self.activation_fns = [activation_fns] * n_layers
# Add the input features.
self.mol_features = Feature(shape=(None, n_features))
self._task_labels = Label(shape=(None, n_tasks))
self._task_weights = Weights(shape=(None, n_tasks))
all_layers = {}
outputs = []
for task in range(self.n_tasks):
task_layers = []
for i in range(n_layers):
if i == 0:
prev_layer = self.mol_features
else:
prev_layer = all_layers[(i - 1, task)]
if task > 0:
lateral_contrib, trainables = self.add_adapter(all_layers, task, i)
task_layers.extend(trainables)
layer = Dense(
in_layers=[prev_layer],
out_channels=layer_sizes[i],
activation_fn=None,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer,
stddev=self.weight_init_stddevs[i]),
biases_initializer=TFWrapper(
tf.constant_initializer, value=self.bias_init_consts[i]))
task_layers.append(layer)
if i > 0 and task > 0:
layer = layer + lateral_contrib
assert self.activation_fns[i] is tf.nn.relu, "Only ReLU is supported"
layer = ReLU(in_layers=[layer])
if self.dropouts[i] > 0.0:
layer = Dropout(self.dropouts[i], in_layers=[layer])
all_layers[(i, task)] = layer
prev_layer = all_layers[(n_layers - 1, task)]
layer = Dense(
in_layers=[prev_layer],
out_channels=n_outputs,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer,
stddev=self.weight_init_stddevs[-1]),
biases_initializer=TFWrapper(
tf.constant_initializer, value=self.bias_init_consts[-1]))
task_layers.append(layer)
if task > 0:
lateral_contrib, trainables = self.add_adapter(all_layers, task,
n_layers)
task_layers.extend(trainables)
layer = layer + lateral_contrib
output_layer = self.create_output(layer)
outputs.append(output_layer)
label = Slice(task, axis=1, in_layers=[self._task_labels])
weight = Slice(task, axis=1, in_layers=[self._task_weights])
task_loss = self.create_loss(layer, label, weight)
self.create_submodel(layers=task_layers, loss=task_loss, optimizer=None)
outputs = Stack(axis=1, in_layers=outputs)
self.add_output(outputs)
# Weight decay not activated
"""
if weight_decay_penalty != 0.0:
weighted_loss = WeightDecay(
weight_decay_penalty,
weight_decay_penalty_type,
in_layers=[weighted_loss])
"""
def create_loss(self, layer, label, weight):
weighted_loss = ReduceSum(L2Loss(in_layers=[label, layer, weight]))
return weighted_loss
def create_output(self, layer):
return layer
def add_adapter(self, all_layers, task, layer_num):
"""Add an adapter connection for given task/layer combo"""
i = layer_num
prev_layers = []
trainable_layers = []
# Handle output layer
if i < len(self.layer_sizes):
layer_sizes = self.layer_sizes
alpha_init_stddev = self.alpha_init_stddevs[i]
weight_init_stddev = self.weight_init_stddevs[i]
bias_init_const = self.bias_init_consts[i]
elif i == len(self.layer_sizes):
layer_sizes = self.layer_sizes + [self.n_outputs]
alpha_init_stddev = self.alpha_init_stddevs[-1]
weight_init_stddev = self.weight_init_stddevs[-1]
bias_init_const = self.bias_init_consts[-1]
else:
raise ValueError("layer_num too large for add_adapter.")
# Iterate over all previous tasks.
for prev_task in range(task):
prev_layers.append(all_layers[(i - 1, prev_task)])
# prev_layers is a list with elements of size
# (batch_size, layer_sizes[i-1])
prev_layer = Concat(axis=1, in_layers=prev_layers)
with self._get_tf("Graph").as_default():
alpha = TensorWrapper(
tf.Variable(
tf.truncated_normal((1,), stddev=alpha_init_stddev),
name="alpha_layer_%d_task%d" % (i, task)))
trainable_layers.append(alpha)
prev_layer = prev_layer * alpha
dense1 = Dense(
in_layers=[prev_layer],
out_channels=layer_sizes[i - 1],
activation_fn=None,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_init_stddev),
biases_initializer=TFWrapper(
tf.constant_initializer, value=bias_init_const))
trainable_layers.append(dense1)
dense2 = Dense(
in_layers=[dense1],
out_channels=layer_sizes[i],
activation_fn=None,
weights_initializer=TFWrapper(
tf.truncated_normal_initializer, stddev=weight_init_stddev),
biases_initializer=None)
trainable_layers.append(dense2)
return dense2, trainable_layers
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
deterministic=False,
restore=False,
**kwargs):
for task in range(self.n_tasks):
self.fit_task(
dataset,
nb_epoch=nb_epoch,
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=checkpoint_interval,
deterministic=deterministic,
restore=restore,
submodel=task,
**kwargs)
def fit_task(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
checkpoint_interval=1000,
deterministic=False,
restore=False,
submodel=None,
**kwargs):
"""Fit one task."""
generator = self.default_generator(
dataset, epochs=nb_epoch, deterministic=deterministic)
self.fit_generator(generator, max_checkpoints_to_keep, checkpoint_interval,
restore, self.submodels[submodel])
class ProgressiveMultitaskClassifier(ProgressiveMultitaskRegressor):
"""Implements a progressive multitask neural network for classification.
Progressive Networks: https://arxiv.org/pdf/1606.04671v3.pdf
Progressive networks allow for multitask learning where each task
gets a new column of weights. As a result, there is no catastrophic
forgetting where previous tasks are ignored.
"""
def __init__(self,
n_tasks,
n_features,
alpha_init_stddevs=0.02,
layer_sizes=[1000],
weight_init_stddevs=0.02,
bias_init_consts=1.0,
weight_decay_penalty=0.0,
weight_decay_penalty_type="l2",
dropouts=0.5,
activation_fns=tf.nn.relu,
**kwargs):
n_outputs = 2
super(ProgressiveMultitaskClassifier, self).__init__(
n_tasks,
n_features,
alpha_init_stddevs=alpha_init_stddevs,
layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts,
weight_decay_penalty=weight_decay_penalty,
weight_decay_penalty_type=weight_decay_penalty_type,
dropouts=dropouts,
activation_fns=activation_fns,
n_outputs=n_outputs,
**kwargs)
def create_loss(self, layer, label, weight):
task_label = Squeeze(squeeze_dims=1, in_layers=[label])
task_label = Cast(dtype=tf.int32, in_layers=[task_label])
task_weight = Squeeze(squeeze_dims=1, in_layers=[weight])
loss = SparseSoftMaxCrossEntropy(in_layers=[task_label, layer])
weighted_loss = WeightedError(in_layers=[loss, task_weight])
return weighted_loss
def create_output(self, layer):
output = SoftMax(in_layers=[layer])
return output
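# A hedged usage sketch, not part of DeepChem itself: the random arrays below
# stand in for real featurized data, and dc.data.NumpyDataset is assumed to be
# the usual in-memory dataset wrapper.
if __name__ == '__main__':
    import deepchem as dc
    n_samples, n_features, n_tasks = 100, 75, 3
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples, n_tasks)
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w)
    model = ProgressiveMultitaskRegressor(n_tasks, n_features, layer_sizes=[50])
    model.fit(dataset, nb_epoch=1)   # fits one column of weights per task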
|
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy.testing as npt
import numpy as np
import nose
from scipy import stats
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distribution so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
TODO:
* make functioning test for skew and kurtosis
still known failures - skip for now
"""
#currently not used
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
DECIMAL_kurt = 0
distcont = [
['alpha', (3.5704770516650459,)],
['anglit', ()],
['arcsine', ()],
['beta', (2.3098496451481823, 0.62687954300963677)],
['betaprime', (5, 6)], # avoid unbound error in entropy with (100, 86)],
['bradford', (0.29891359763170633,)],
['burr', (10.5, 4.3)], #incorrect mean and var for(0.94839838075366045, 4.3820284068855795)],
['cauchy', ()],
['chi', (78,)],
['chi2', (55,)],
['cosine', ()],
['dgamma', (1.1023326088288166,)],
['dweibull', (2.0685080649914673,)],
['erlang', (20,)], #correction numargs = 1
['expon', ()],
['exponpow', (2.697119160358469,)],
['exponweib', (2.8923945291034436, 1.9505288745913174)],
['f', (29, 18)],
['fatiguelife', (29,)], #correction numargs = 1
['fisk', (3.0857548622253179,)],
['foldcauchy', (4.7164673455831894,)],
['foldnorm', (1.9521253373555869,)],
['frechet_l', (3.6279911255583239,)],
['frechet_r', (1.8928171603534227,)],
['gamma', (1.9932305483800778,)],
['gausshyper', (13.763771604130699, 3.1189636648681431,
2.5145980350183019, 5.1811649903971615)], #veryslow
['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
['genextreme', (-0.1,)], # sample mean test fails for (3.3184017469423535,)],
['gengamma', (4.4162385429431925, 3.1193091679242761)],
['genhalflogistic', (0.77274727809929322,)],
['genlogistic', (0.41192440799679475,)],
['genpareto', (0.1,)], # use case with finite moments
['gilbrat', ()],
['gompertz', (0.94743713075105251,)],
['gumbel_l', ()],
['gumbel_r', ()],
['halfcauchy', ()],
['halflogistic', ()],
['halfnorm', ()],
['hypsecant', ()],
['invgamma', (2.0668996136993067,)],
['invgauss', (0.14546264555347513,)],
['invweibull', (10.58,)], # sample mean test fails at(0.58847112119264788,)]
['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
['johnsonsu', (2.554395574161155, 2.2482281679651965)],
['ksone', (1000,)], #replace 22 by 100 to avoid failing range, ticket 956
['kstwobign', ()],
['laplace', ()],
['levy', ()],
['levy_l', ()],
# ['levy_stable', (0.35667405469844993,
# -0.67450531578494011)], #NotImplementedError
# rvs not tested
['loggamma', (0.41411931826052117,)],
['logistic', ()],
['loglaplace', (3.2505926592051435,)],
['lognorm', (0.95368226960575331,)],
['lomax', (1.8771398388773268,)],
['maxwell', ()],
['mielke', (10.4, 3.6)], # sample mean test fails for (4.6420495492121487, 0.59707419545516938)],
# mielke: good results if 2nd parameter >2, weird mean or var below
['nakagami', (4.9673794866666237,)],
['ncf', (27, 27, 0.41578441799226107)],
['nct', (14, 0.24045031331198066)],
['ncx2', (21, 1.0560465975116415)],
['norm', ()],
['pareto', (2.621716532144454,)],
['pearson3', (0.1,)],
['powerlaw', (1.6591133289905851,)],
['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
['powernorm', (4.4453652254590779,)],
['rayleigh', ()],
['rdist', (0.9,)], # feels also slow
# ['rdist', (3.8266985793976525,)], #veryslow, especially rvs
#['rdist', (541.0,)], # from ticket #758 #veryslow
['recipinvgauss', (0.63004267809369119,)],
['reciprocal', (0.0062309367010521255, 1.0062309367010522)],
['rice', (0.7749725210111873,)],
['semicircular', ()],
['t', (2.7433514990818093,)],
['triang', (0.15785029824528218,)],
['truncexpon', (4.6907725456810478,)],
['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
['tukeylambda', (3.1321477856738267,)],
['uniform', ()],
['vonmises', (3.9939042581071398,)],
['wald', ()],
['weibull_max', (2.8687961709100187,)],
['weibull_min', (1.7866166930421596,)],
['wrapcauchy', (0.031071279018614728,)]]
# for testing only specific functions
##distcont = [
## ['erlang', (20,)], #correction numargs = 1
## ['fatiguelife', (29,)], #correction numargs = 1
## ['loggamma', (0.41411931826052117,)]]
# for testing ticket:767
##distcont = [
## ['genextreme', (3.3184017469423535,)],
## ['genextreme', (0.01,)],
## ['genextreme', (0.00001,)],
## ['genextreme', (0.0,)],
## ['genextreme', (-0.01,)]
## ]
##distcont = [['gumbel_l', ()],
## ['gumbel_r', ()],
## ['norm', ()]
## ]
##distcont = [['norm', ()]]
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone',
'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy',
'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy',
'johnsonsb', 'truncexpon', 'rice', 'invgauss', 'invgamma',
'powerlognorm']
distmiss = [[dist,args] for dist,args in distcont if dist in distmissing]
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'rice', 'mielke', 'semicircular', 'cosine', 'invweibull',
'powerlognorm', 'johnsonsu', 'kstwobign']
#distslow are sorted by speed (very slow to slow)
def _silence_fp_errors(func):
def wrap(*a, **kw):
olderr = np.seterr(all='ignore')
try:
return func(*a, **kw)
finally:
np.seterr(**olderr)
wrap.__name__ = func.__name__
return wrap
@_silence_fp_errors
def test_cont_basic():
# this test skips slow distributions
for distname, arg in distcont[:]:
if distname in distslow:
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 1000
rvs = distfn.rvs(size=sn,*arg)
sm = rvs.mean()
sv = rvs.var()
skurt = stats.kurtosis(rvs)
sskew = stats.skew(rvs)
m,v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, distname + \
'sample mean test'
# the sample skew kurtosis test has known failures, not very good distance measure
#yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname
yield check_moment, distfn, arg, m, v, distname
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
if distname in ['wald']:
continue
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
@npt.dec.slow
def test_cont_basic_slow():
# same as above for slow distributions
for distname, arg in distcont[:]:
if distname not in distslow: continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 1000
rvs = distfn.rvs(size=sn,*arg)
sm = rvs.mean()
sv = rvs.var()
skurt = stats.kurtosis(rvs)
sskew = stats.skew(rvs)
m,v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, distname + \
'sample mean test'
# the sample skew kurtosis test has known failures, not very good distance measure
#yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname
yield check_moment, distfn, arg, m, v, distname
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
#yield check_oth, distfn, arg # is still missing
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
@_silence_fp_errors
def check_moment(distfn, arg, m, v, msg):
m1 = distfn.moment(1,*arg)
m2 = distfn.moment(2,*arg)
if not np.isinf(m):
npt.assert_almost_equal(m1, m, decimal=10, err_msg= msg + \
' - 1st moment')
else: # or np.isnan(m1),
npt.assert_(np.isinf(m1),
msg + ' - 1st moment -infinite, m1=%s' % str(m1))
#np.isnan(m1) temporary special treatment for loggamma
if not np.isinf(v):
npt.assert_almost_equal(m2-m1*m1, v, decimal=10, err_msg= msg + \
' - 2nd moment')
else: #or np.isnan(m2),
npt.assert_(np.isinf(m2),
msg + ' - 2nd moment -infinite, m2=%s' % str(m2))
#np.isnan(m2) temporary special treatment for loggamma
@_silence_fp_errors
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
#this did not work, skipped silently by nose
#check_sample_meanvar, sm, m, msg + 'sample mean test'
#check_sample_meanvar, sv, v, msg + 'sample var test'
if not np.isinf(m):
check_sample_mean(sm, sv, sn, m)
if not np.isinf(v):
check_sample_var(sv, sn, v)
## check_sample_meanvar( sm, m, msg + 'sample mean test')
## check_sample_meanvar( sv, v, msg + 'sample var test')
def check_sample_mean(sm,v,n, popmean):
"""
from stats.stats.ttest_1samp(a, popmean):
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean.
Returns: t-value, two-tailed prob
"""
## a = asarray(a)
## x = np.mean(a)
## v = np.var(a, ddof=1)
## n = len(a)
df = n-1
svar = ((n-1)*v) / float(df) #looks redundant
t = (sm-popmean)/np.sqrt(svar*(1.0/n))
prob = stats.betai(0.5*df,0.5,df/(df+t*t))
#return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m,sm=%f,%f' % (t,prob,popmean,sm))
def check_sample_var(sv,n, popvar):
'''
two-sided chisquare test for sample variance equal to hypothesized variance
'''
df = n-1
chi2 = (n-1)*sv/float(popvar)  # chi-square statistic for the sample variance
pval = stats.chisqprob(chi2,df)*2
npt.assert_(pval > 0.01, 'var fail, t,pval = %f, %f, v,sv=%f,%f' % (chi2,pval,popvar,sv))
def check_sample_skew_kurt(distfn, arg, ss, sk, msg):
skew,kurt = distfn.stats(moments='sk',*arg)
## skew = distfn.stats(moment='s',*arg)[()]
## kurt = distfn.stats(moment='k',*arg)[()]
check_sample_meanvar( sk, kurt, msg + 'sample kurtosis test')
check_sample_meanvar( ss, skew, msg + 'sample skew test')
def check_sample_meanvar(sm,m,msg):
if not np.isinf(m) and not np.isnan(m):
npt.assert_almost_equal(sm, m, decimal=DECIMAL, err_msg= msg + \
' - finite moment')
## else:
## npt.assert_(abs(sm) > 10000), msg='infinite moment, sm = ' + str(sm))
@_silence_fp_errors
def check_cdf_ppf(distfn,arg,msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg= msg + \
' - cdf-ppf roundtrip')
@_silence_fp_errors
def check_sf_isf(distfn,arg,msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1,0.5,0.9], *arg), *arg),
[0.1,0.5,0.9], decimal=DECIMAL, err_msg= msg + \
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1,0.9], *arg),
1.0-distfn.sf([0.1,0.9], *arg),
decimal=DECIMAL, err_msg= msg + \
' - cdf-sf relationship')
@_silence_fp_errors
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
#replace with better diff and better test (more points),
#actually, this works pretty well
npt.assert_almost_equal(pdfv, cdfdiff,
decimal=DECIMAL, err_msg= msg + ' - cdf-pdf relationship')
@_silence_fp_errors
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship")
@_silence_fp_errors
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship")
@_silence_fp_errors
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship")
@_silence_fp_errors
def check_distribution_rvs(dist, args, alpha, rvs):
#test from scipy.stats.tests
#this version reuses existing random variables
D,pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
D,pval = stats.kstest(dist,'',args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
if __name__ == "__main__":
#nose.run(argv=['', __file__])
nose.runmodule(argv=[__file__,'-s'], exit=False)
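# For reference, a single check above can also be exercised directly, e.g. (a
# hedged sketch, standard normal only): check_cdf_ppf(stats.norm, (), 'norm')
# round-trips the probabilities [0.001, 0.5, 0.999] through norm.ppf and
# norm.cdf and asserts the round trip agrees to DECIMAL decimal places.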
|
|
import os
import sys
import textwrap
import pytest
from _pytest.monkeypatch import MonkeyPatch
@pytest.fixture
def mp(request):
cwd = os.getcwd()
sys_path = list(sys.path)
def cleanup():
sys.path[:] = sys_path
os.chdir(cwd)
request.addfinalizer(cleanup)
return MonkeyPatch()
def test_setattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
monkeypatch.setattr(A, 'y', 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, 'y')
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, 'x', 2)
assert A.x == 2
monkeypatch.setattr(A, 'x', 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath:
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
def test_unicode_string(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(ImportError,
lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(AttributeError,
lambda: monkeypatch.setattr("os.path.qweqwe", None))
def test_unknown_attr_non_raising(self, monkeypatch):
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr('os.path.qweqwe', 42, raising=False)
assert os.path.qweqwe == 42
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
assert not hasattr(A, 'x')
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
monkeypatch.delattr(A, 'y', raising=False)
monkeypatch.setattr(A, 'x', 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
monkeypatch.setitem(d, 'y', 1700)
monkeypatch.setitem(d, 'y', 1700)
assert d['x'] == 2
assert d['y'] == 1700
monkeypatch.setitem(d, 'x', 3)
assert d['x'] == 3
monkeypatch.undo()
assert d['x'] == 1
assert 'y' not in d
d['x'] = 5
monkeypatch.undo()
assert d['x'] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
del d['x']
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, 'hello')
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, 'x')
assert 'x' not in d
monkeypatch.delitem(d, 'y', raising=False)
pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
assert not d
monkeypatch.setitem(d, 'y', 1700)
assert d['y'] == 1700
d['hello'] = 'world'
monkeypatch.setitem(d, 'x', 1500)
assert d['x'] == 1500
monkeypatch.undo()
assert d == {'hello': 'world', 'x': 1}
def test_setenv():
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2)
import os
assert os.environ['XYZ123'] == "2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_delenv():
name = 'xyz1234'
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2, prepend="-")
assert os.environ['XYZ123'] == "2"
monkeypatch.setenv('XYZ123', 3, prepend="-")
assert os.environ['XYZ123'] == "3-2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource("""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend('world')
mp.syspath_prepend('hello')
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
mp.syspath_prepend('hello world')
mp.undo()
sys.path.append('more hello world')
mp.undo()
assert sys.path[-1] == 'more hello world'
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile("""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*
""")
def test_importerror(testdir):
p = testdir.mkpydir("package")
p.join("a.py").write(textwrap.dedent("""\
import doesnotexist
x = 1
"""))
testdir.tmpdir.join("test_importerror.py").write(textwrap.dedent("""\
def test_importerror(monkeypatch):
monkeypatch.setattr('package.a.x', 2)
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*import error in package.a: No module named {0}doesnotexist{0}*
""".format("'" if sys.version_info > (3, 0) else ""))
class SampleNew(object):
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld:
# oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize('Sample', [
SampleNew, SampleNewInherit,
SampleOld, SampleOldInherit,
], ids=['new', 'new-inherit', 'old', 'old-inherit'])
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, 'hello', None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
def test_issue1338_name_resolving():
pytest.importorskip('requests')
monkeypatch = MonkeyPatch()
try:
monkeypatch.delattr('requests.sessions.Session.request')
finally:
monkeypatch.undo()
|
|
from bisect import bisect_left, bisect_right
class SortedCollection(object):
"""Sequence sorted by a key function.
SortedCollection() is much easier to work with than using bisect() directly.
    It supports key functions like those used in sorted(), min(), and max().
The result of the key function call is saved so that keys can be searched
efficiently.
Instead of returning an insertion-point which can be hard to interpret, the
five find-methods return a specific item in the sequence. They can scan for
exact matches, the last item less-than-or-equal to a key, or the first item
greater-than-or-equal to a key.
Once found, an item's ordinal position can be located with the index() method.
New items can be added with the insert() and insert_right() methods.
Old items can be deleted with the remove() method.
The usual sequence methods are provided to support indexing, slicing,
length lookup, clearing, copying, forward and reverse iteration, contains
checking, item counts, item removal, and a nice looking repr.
Finding and indexing are O(log n) operations while iteration and insertion
are O(n). The initial sort is O(n log n).
    The key function is stored in the 'key' attribute for easy introspection or
so that you can assign a new key function (triggering an automatic re-sort).
In short, the class was designed to handle all of the common use cases for
bisect but with a simpler API and support for key functions.
>>> from pprint import pprint
>>> from operator import itemgetter
>>> s = SortedCollection(key=itemgetter(2))
>>> for record in [
... ('roger', 'young', 30),
... ('angela', 'jones', 28),
... ('bill', 'smith', 22),
... ('david', 'thomas', 32)]:
... s.insert(record)
>>> pprint(list(s)) # show records sorted by age
[('bill', 'smith', 22),
('angela', 'jones', 28),
('roger', 'young', 30),
('david', 'thomas', 32)]
>>> s.find_le(29) # find oldest person aged 29 or younger
('angela', 'jones', 28)
>>> s.find_lt(28) # find oldest person under 28
('bill', 'smith', 22)
>>> s.find_gt(28) # find youngest person over 28
('roger', 'young', 30)
>>> r = s.find_ge(32) # find youngest person aged 32 or older
>>> s.index(r) # get the index of their record
3
>>> s[3] # fetch the record at that index
('david', 'thomas', 32)
>>> s.key = itemgetter(0) # now sort by first name
>>> pprint(list(s))
[('angela', 'jones', 28),
('bill', 'smith', 22),
('david', 'thomas', 32),
('roger', 'young', 30)]
"""
def __init__(self, iterable=(), key=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
'Find the position of an item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].index(item) + i
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
        'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def find(self, k):
'Return first item with a key == k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_le(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, k):
'Return last item with a key < k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key below: %r' % (k,))
def find_ge(self, k):
        'Return first item with a key >= k. Raise ValueError if not found'
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_gt(self, k):
'Return first item with a key > k. Raise ValueError if not found'
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
def pop(self):
'Returns and removes the last (rightmost) element from the list. Raise ValueError if list is empty.'
if len(self) == 0:
raise ValueError('Cannot pop from empty list.')
i = len(self) - 1
value = self[i]
del self._items[i]
del self._keys[i]
return value
# --------------------------- Simple demo and tests -------------------------
if __name__ == '__main__':
def ve2no(f, *args):
'Convert ValueError result to -1'
try:
return f(*args)
except ValueError:
return -1
def slow_index(seq, k):
'Location of match or -1 if not found'
for i, item in enumerate(seq):
if item == k:
return i
return -1
def slow_find(seq, k):
'First item with a key equal to k. -1 if not found'
for item in seq:
if item == k:
return item
return -1
def slow_find_le(seq, k):
'Last item with a key less-than or equal to k.'
for item in reversed(seq):
if item <= k:
return item
return -1
def slow_find_lt(seq, k):
'Last item with a key less-than k.'
for item in reversed(seq):
if item < k:
return item
return -1
def slow_find_ge(seq, k):
'First item with a key-value greater-than or equal to k.'
for item in seq:
if item >= k:
return item
return -1
def slow_find_gt(seq, k):
        'First item with a key-value greater-than k.'
for item in seq:
if item > k:
return item
return -1
from random import choice
pool = [1.5, 2, 2.0, 3, 3.0, 3.5, 4, 4.0, 4.5]
for i in range(500):
for n in range(6):
s = [choice(pool) for i in range(n)]
sc = SortedCollection(s)
s.sort()
for probe in pool:
assert repr(ve2no(sc.index, probe)) == repr(slow_index(s, probe))
assert repr(ve2no(sc.find, probe)) == repr(slow_find(s, probe))
assert repr(ve2no(sc.find_le, probe)) == repr(slow_find_le(s, probe))
assert repr(ve2no(sc.find_lt, probe)) == repr(slow_find_lt(s, probe))
assert repr(ve2no(sc.find_ge, probe)) == repr(slow_find_ge(s, probe))
assert repr(ve2no(sc.find_gt, probe)) == repr(slow_find_gt(s, probe))
for i, item in enumerate(s):
assert repr(item) == repr(sc[i]) # test __getitem__
assert item in sc # test __contains__ and __iter__
assert s.count(item) == sc.count(item) # test count()
assert len(sc) == n # test __len__
assert list(map(repr, reversed(sc))) == list(map(repr, reversed(s))) # test __reversed__
assert list(sc.copy()) == list(sc) # test copy()
sc.clear() # test clear()
assert len(sc) == 0
sd = SortedCollection('The quick Brown Fox jumped'.split(), key=str.lower)
assert sd._keys == ['brown', 'fox', 'jumped', 'quick', 'the']
assert sd._items == ['Brown', 'Fox', 'jumped', 'quick', 'The']
assert sd._key == str.lower
assert repr(sd) == "SortedCollection(['Brown', 'Fox', 'jumped', 'quick', 'The'], key=lower)"
sd.key = str.upper
assert sd._key == str.upper
assert len(sd) == 5
assert list(reversed(sd)) == ['The', 'quick', 'jumped', 'Fox', 'Brown']
for item in sd:
assert item in sd
for i, item in enumerate(sd):
assert item == sd[i]
sd.insert('jUmPeD')
sd.insert_right('QuIcK')
assert sd._keys ==['BROWN', 'FOX', 'JUMPED', 'JUMPED', 'QUICK', 'QUICK', 'THE']
assert sd._items == ['Brown', 'Fox', 'jUmPeD', 'jumped', 'quick', 'QuIcK', 'The']
assert sd.find_le('JUMPED') == 'jumped', sd.find_le('JUMPED')
assert sd.find_ge('JUMPED') == 'jUmPeD'
assert sd.find_le('GOAT') == 'Fox'
assert sd.find_ge('GOAT') == 'jUmPeD'
assert sd.find('FOX') == 'Fox'
assert sd[3] == 'jumped'
assert sd[3:5] ==['jumped', 'quick']
assert sd[-2] == 'QuIcK'
assert sd[-4:-2] == ['jumped', 'quick']
for i, item in enumerate(sd):
assert sd.index(item) == i
try:
sd.index('xyzpdq')
except ValueError:
pass
else:
assert 0, 'Oops, failed to notify of missing value'
sd.remove('jumped')
assert list(sd) == ['Brown', 'Fox', 'jUmPeD', 'quick', 'QuIcK', 'The']
import doctest
from operator import itemgetter
print((doctest.testmod()))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GMM estimator class
-------------------
"""
from __future__ import print_function, division
import warnings
import numpy as np
import numdifftools as nd
from scipy.linalg import pinv
from scipy.optimize import minimize
from .hac_function import hac
from .results import Results
__all__ = ['GMM']
class GMM(object):
"""GMM estimation class.
Attributes
----------
momcond
Moment function
Methods
-------
gmmest
Multiple step GMM estimation procedure
"""
def __init__(self, momcond):
"""Initialize the class.
Parameters
----------
momcond : function
Moment function. Should return:
- array (nobs x nmoms)
moment function values
- (optionally) array (nmoms x nparams)
derivative of moment function average across observations.
"""
# Moment conditions
self.momcond = momcond
def gmmest(self, theta_start, bounds=None, constraints=(),
iter=2, method='BFGS', kernel='Bartlett',
band=None, names=None, **kwargs):
"""Multiple step GMM estimation procedure.
Parameters
----------
theta_start : array
Initial parameters
bounds : list of tuples
Bounds on parameters
constraints : dict or sequence of dict
Equality and inequality constraints. See scipy.optimize.minimize
iter : int
Number of GMM steps
method : str
Optimization method
kernel : str
Type of kernel for HAC.
            Currently implemented: SU, Bartlett, Parzen, Quadratic
band : int
Truncation parameter for HAC
names : list of str
Parameter names
Returns
-------
instance of Results
Estimation results
"""
# Initialize theta to hold estimator
theta = theta_start.copy()
# First step GMM
for i in range(iter):
moment = self.momcond(theta, **kwargs)[0]
nmoms = moment.shape[1]
if nmoms - theta.size <= 0:
warnings.warn("Not enough degrees of freedom!")
# Compute optimal weighting matrix
# Only after the first step
if i == 0:
weight_mat = np.eye(nmoms)
else:
weight_mat = self.__weights(moment, kernel=kernel, band=band)
opt_out = minimize(self.__gmmobjective, theta,
args=(weight_mat, kwargs),
method=method,
jac=True, bounds=bounds,
constraints=constraints,
callback=self.callback)
# Update parameter for the next step
theta = opt_out.x
var_theta = self.varest(theta, **kwargs)
return Results(opt_out=opt_out, var_theta=var_theta,
nmoms=nmoms, names=names)
def callback(self, theta):
"""Callback function. Prints at each optimization iteration.
"""
pass
def __gmmobjective(self, theta, weight_mat, kwargs):
"""GMM objective function and its gradient.
Parameters
----------
theta : (nparams,) array
Parameters
weight_mat : (nmoms, nmoms) array
Weighting matrix
Returns
-------
value : float
Value of objective function, see Hansen (2012, p.241)
dvalue : (nparams,) array
            Derivative of the objective function with respect to theta
"""
# moment - nobs x nmoms
# dmoment - nmoms x nparams
moment, dmoment = self.momcond(theta, **kwargs)
nobs = moment.shape[0]
moment = moment.mean(0)
gdotw = moment.dot(weight_mat)
# Objective function
value = gdotw.dot(moment.T) * nobs
if value <= 0:
value = 1e10
# assert value >= 0, 'Objective function should be non-negative'
if dmoment is None:
dmoment = self.__approx_dmoment(theta, **kwargs)
# 1 x nparams
dvalue = 2 * gdotw.dot(dmoment) * nobs
return value, dvalue
def __approx_dmoment(self, theta, **kwargs):
"""Approxiamte derivative of the moment function numerically.
Parameters
----------
theta : (nparams,) array
Parameters
Returns
-------
(nmoms, nparams) array
Derivative of the moment function
"""
with np.errstate(divide='ignore'):
return nd.Jacobian(lambda x:
self.momcond(x, **kwargs)[0].mean(0))(theta)
def __weights(self, moment, **kwargs):
"""
Optimal weighting matrix
Parameters
----------
moment : (nobs, nmoms) array
Moment restrictions
Returns
-------
(nmoms, nmoms) array
            Inverse of the covariance matrix of the moment conditions
"""
return pinv(hac(moment, **kwargs))
def varest(self, theta, **kwargs):
"""Estimate variance matrix of parameters.
Parameters
----------
theta : (nparams,)
Parameters
Returns
-------
(nparams, nparams) array
Variance matrix of parameters
"""
        # moment - nobs x q, observations x number of moment conditions
        # dmoment - q x k, number of moment conditions x number of parameters
moment, dmoment = self.momcond(theta, **kwargs)
if dmoment is None:
dmoment = self.__approx_dmoment(theta, **kwargs)
var_moment = self.__weights(moment, **kwargs)
# TODO : What if k = 1?
return pinv(dmoment.T.dot(var_moment).dot(dmoment)) / moment.shape[0]
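# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it illustrates how a
# moment-condition function might be passed to GMM.gmmest. The linear
# instrumental-variables model and the simulated arrays below are invented
# for illustration only, and the sketch assumes the package's .hac_function
# and .results modules are importable with their default behavior, since
# gmmest relies on them internally.
if __name__ == '__main__':

    np.random.seed(0)
    nobs = 500
    exog = np.random.normal(size=(nobs, 2))
    instr = np.hstack([exog, np.random.normal(size=(nobs, 1))])
    endog = exog.dot(np.array([1., -.5])) + np.random.normal(size=nobs)

    def linear_iv_moments(theta):
        """Moments z_i * (y_i - x_i' theta) and their average derivative."""
        error = endog - exog.dot(theta)
        moment = instr * error[:, np.newaxis]   # nobs x nmoms
        dmoment = -instr.T.dot(exog) / nobs     # nmoms x nparams
        return moment, dmoment

    estimator = GMM(linear_iv_moments)
    results = estimator.gmmest(np.zeros(2), iter=2, kernel='Bartlett')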
|
|
#!/usr/bin/env python
import ast, base64, json
from secret_manager import sec_manager
from Crypto.PublicKey import RSA
from swiftclient import client
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v3 import client as kc
from keystoneclient.v3 import tokens
from config import *
from myLogger import *
class EncSwiftclient:
def __init__(self, auth_token, project_id):
auth_obj = v3.Token(auth_url=AUTH_URL, token=auth_token, project_domain_name="Default",project_id=project_id)
sess = session.Session(auth=auth_obj)
#Store auth_token
self.auth = auth_token
#Retrieve tenant id
self.idtenant = project_id
storage_url = '%s/AUTH_%s' %(STORAGE_URL,str(self.idtenant))
self.kc_conn = kc.Client(session=sess)
self.swift_conn = client.Connection(preauthtoken=auth_token,preauthurl=storage_url, auth_version='3')
self.iduser = sess.get_user_id()
self.SWIFT_ID = self.getUserID(SWIFT_USER)
#Secret manager instance
self.sec_manager = sec_manager(sess,self.iduser)
def getUserID(self,username):
"""
Get the user ID from Keystone
param: username
"""
ret_id = filter(lambda x: x.name == username, self.kc_conn.users.list())[0]
return ret_id.id
def getUsername(self,userid):
"""
Get the username from Keystone
param: user ID
"""
username = self.kc_conn.users.get(userid).name
return username
def get_enc_object (self,container,obj):
"""
Get an object from a specific container
Decrypt the object with the DEK retrieved from the catalog
Args:
container: the name of the container
obj: the object name
"""
try:
#Obtain container headers and keys id
cont_header = self.head_container(container)
actual_acl = self.extractACL(cont_header)
#Control ACL on the client side on private containers
if actual_acl and (self.iduser not in actual_acl):
return
hdrs, content = self.swift_conn.get_object(container,obj)
except Exception,err:
logger.info("Error in get_enc_object")
return
container_sel_id = cont_header.get('x-container-meta-sel-id',None)
container_ref = cont_header.get('x-container-meta-container-ref',None)
object_bel_id = hdrs.get('x-object-meta-bel-id',None)
object_sel_id = hdrs.get('x-object-meta-sel-id',None)
if object_bel_id is None:
#Clear object stored
logger.debug('Clear content >>> %s' % content)
return hdrs, str(content)
# Decrypt object with SEL dek (if exists)
try:
if container_sel_id is not None:# and container_sel_id != hdrs.get('x-object-meta-sel-id',None):
sel_DEK = self.sec_manager.get_secret(self.iduser,container_ref, container_sel_id).get('KEK',None)
if sel_DEK is not None:
logger.debug('Content encrypted with SEL >>> %s\n' % content)
content = self.sec_manager.key_manager.decrypt_msg(str(content),sel_DEK)
else:
logger.info("You cannot obtain this object")
return
except:
logger.error('Decrypt msg (SEL)')
return
# Decrypt object with BEL dek
try:
bel_DEK = self.sec_manager.get_secret(self.iduser,container_ref,object_bel_id).get('KEK',None)
if bel_DEK is not None:
logger.debug('Content encrypted with BEL >>> %s\n' % content)
content = self.sec_manager.key_manager.decrypt_msg(str(content), bel_DEK)
logger.debug('Clear content >>> %s' % content)
return hdrs, str(content)
except:
logger.error('Decrypt msg (BEL)')
return
def add_user_to_acl(self,headers,meta,usrID):
"""
Add user usrID to acl included in headers
"""
acl = self.extractACL_param(headers,meta)
acl.append(unicode(usrID))
acl = list(set(acl))
return str({self.idtenant: acl})
def put_enc_container (self,container, headers= None):
"""
Create a container with a specific ACL and a DEK for that.
Args:
            container: the name of the new container
headers: the metadata for this container
"""
if headers is None or not self.extractACL(headers):
try:
#Put container without encryption option
self.swift_conn.put_container(container)
except:
logger.info("Error in Put container")
return
#Add self user id to acl
headers['x-container-read'] = self.add_user_to_acl(headers,'x-container-read' ,self.iduser)
headers['x-container-write']= self.add_user_to_acl(headers,'x-container-write',self.iduser)
listACL = self.extractACL(headers)
try:
#Return if container exists
self.head_container(container)
logger.info("Container already created")
except:
#Create catalog node (new BEL DEK associated to the container)
bel_DEK_id, obj = self.sec_manager.create_node(self.iduser, container)
# Store secrets (for updating the graph)
container_ref = self.sec_manager.store_secrets(None,listACL,obj,bel_DEK_id)
# Create container
try:
headers['x-container-meta-container-ref'] = container_ref
headers['x-container-meta-bel-id'] = bel_DEK_id
self.swift_conn.put_container(container, headers=headers)
logger.info("Container %s created" % container)
except:
logger.info('Error put new container')
def encrypt_obj(self,container_ref,bel_DEK_id, content):
"""
Retrieve BEL DEK from the catalog
Encrypt the object
"""
bel_DEK = self.sec_manager.get_secret(self.iduser,container_ref, bel_DEK_id).get('KEK',None)
if bel_DEK is None:
return None
return self.sec_manager.key_manager.encrypt_msg(str(content),bel_DEK)
def put_enc_object (self,container,obj_name,content):
"""
Put an object into a specific container
Encrypt the object with the DEK retrieved from the catalog
Args:
container: the name of the container
obj_name: the object name
content: object content
"""
try:
resp_header = self.head_container(container)
actual_acl = self.extractACL(resp_header)
#Permitted upload of clear objects
if not actual_acl:
self.swift_conn.put_object(container, obj_name, content)
return
except:
logger.info("Error in Put Object (container header)")
return
        #Put not allowed: user is not in the container ACL
if self.iduser not in actual_acl:
return
sel_DEK_id = resp_header.get('x-container-meta-sel-id', None)
version_sel_DEK = resp_header.get('x-container-meta-sel-version',0)
bel_DEK_id = resp_header.get('x-container-meta-bel-id', None)
container_ref = resp_header.get('x-container-meta-container-ref',None)
enc_content = self.encrypt_obj(container_ref, bel_DEK_id,content)
#Transient phase. No correct BEL key in the catalog
if enc_content is None:
logger.info("You have not the rights to access the container yet")
return
obj_headers = {}
#If SEL applied, update object headers
if sel_DEK_id is not None:
obj_headers['x-object-meta-sel-id'] = sel_DEK_id
obj_headers['x-object-meta-sel-version'] = version_sel_DEK
obj_headers['x-object-meta-bel-id'] = bel_DEK_id
try:
# Put object
self.swift_conn.put_object(container, obj_name, enc_content,headers=obj_headers)
logger.info("Object %s uploaded" % obj_name)
except Exception,err:
logger.info("Error in Put Object")
def postcontainer_public_to_private(self,container,headers,new_acl):
"""
Change a container visibility. A public container becomes private.
All the objects must be ciphered with a new BEL DEK
"""
#Add self.iduser to new_acl
new_acl.append(unicode(self.iduser))
new_acl = list(set(new_acl))
#Create a new BEL key
bel_id, obj_bel = self.sec_manager.create_node(self.iduser, container)
#Store secrets
container_ref = self.sec_manager.store_secrets(None,new_acl,obj_bel,bel_id)
cont_headers={}
cont_headers['x-container-meta-container-ref'] = container_ref
cont_headers['x-container-meta-bel-id'] = str(bel_id)
cont_headers['x-container-read'] = self.add_user_to_acl(headers,'X-Container-Read' ,self.iduser)
cont_headers['x-container-write']= self.add_user_to_acl(headers,'X-Container-Write',self.iduser)
try:
# Post header container
self.swift_conn.post_container(container,headers=cont_headers)
#Download the objects and upload them ciphered with BEL key
head, list_obj = self.get_container(container)
for obj in list_obj:
if obj['name'][-1] != '/' or obj['content_type'] != 'application/directory':
head, content = self.swift_conn.get_object(container, obj['name'])
self.put_enc_object(container,obj['name'],content)
except Exception,err:
logger.info("Error in post container (become private)")
def postcontainer_private_to_public(self,container,headers):
"""
Change a container visibility. A private container becomes public.
All the objects must be deciphered and uploaded clear
"""
#Remove acl and key information from container headers
headers[u'x-container-meta-sel-id']= u''
headers[u'x-container-meta-sel-acl']= u''
headers[u'x-container-meta-sel-version']= u''
headers[u'x-container-meta-bel-id']= u''
headers[u'X-Container-Read']= u''
headers[u'X-Container-Write']= u''
try:
self.swift_conn.post_container(container,headers=headers)
#Download the objects and upload them clear
head, list_obj = self.get_container(container)
for obj in list_obj:
if obj['name'][-1] != '/' or obj['content_type'] != 'application/directory':
head, content = self.get_enc_object(container, obj['name'])
self.swift_conn.put_object(container,obj['name'],content)
except:
logger.info("Error in post container (become public)")
def store_actual_BEL_DEKs(self,cont_secret_ref,container_name,added_users,actual_bel_id):
"""
Send the BEL DEKs, protecting all the objects included in the container, to all the added users
"""
#Retrieve all the BEL keys
dict_bel_DEKs = self.retrieve_bel_DEKs(container_name,cont_secret_ref)
#Add BEL container DEK if no object has been uploaded
dict_bel_DEKs[actual_bel_id] = self.sec_manager.get_secret(self.iduser,cont_secret_ref,actual_bel_id)
new_cont_secret_ref = cont_secret_ref
for bel_DEK_id,obj in dict_bel_DEKs.items():
#Store secrets
new_cont_secret_ref = self.sec_manager.store_secrets(cont_secret_ref,added_users,obj,bel_DEK_id)
return new_cont_secret_ref
def postcontainer_changepolicy(self, container_name, headers, actual_head, new_acl, actual_acl):
"""
Change policy (add or revoke users) on a container
Apply Over-encryption if config.OVER_ENCRYPTION is set True
"""
#Add user id to acl
headers['X-Container-Read'] = self.add_user_to_acl(headers,'X-Container-Read' ,self.iduser)
headers['X-Container-Write']= self.add_user_to_acl(headers,'X-Container-Write',self.iduser)
new_acl.append(unicode(self.iduser))
new_acl = list(set(new_acl))
#Retrieve SEL information
initial_acl_sel = self.extractACL_param(actual_head,'x-container-meta-sel-acl')
version_sel_DEK = actual_head.get("x-container-meta-sel-version",'0')
actual_sel_id = actual_head.get('x-container-meta-sel-id',None)
cont_secret_ref = actual_head.get('x-container-meta-container-ref',None)
new_cont_secret_ref = cont_secret_ref
removed_users = list(set(actual_acl).difference(new_acl))
added_users = list(set(new_acl).difference(actual_acl))
try:
if added_users:
#Send the BEL DEKs protecting the objects included in the container, to all the added users
new_cont_secret_ref = self.store_actual_BEL_DEKs(cont_secret_ref,container_name,added_users,actual_head['x-container-meta-bel-id'])
headers['x-container-meta-container-ref'] = new_cont_secret_ref
if not removed_users and OVER_ENCRYPTION:
if not set(new_acl).issuperset(set(initial_acl_sel)):
#No change to the actual protection layers
if initial_acl_sel:
new_list = list(set(initial_acl_sel + added_users))
headers['x-container-meta-sel-acl'] = str({self.idtenant : map(lambda x: "AUTH_" + str(x),new_list)})
new_cont_secret_ref = self.sec_manager.store_secrets(new_cont_secret_ref,added_users,self.sec_manager.get_secret(self.iduser,new_cont_secret_ref,actual_sel_id),actual_sel_id)
headers['x-container-meta-container-ref'] = new_cont_secret_ref
else:
#Remove SEL protection (if exists)
headers[u'x-container-meta-sel-id']= u''
headers[u'x-container-meta-sel-acl']= u''
headers[u'x-container-meta-sel-version']= u''
#self.send_message(actual_acl + [self.SWIFT_ID],{},actual_sel_id)
if removed_users:
bel_id, obj_bel = self.sec_manager.create_node(self.iduser, container_name)
headers['x-container-meta-bel-id'] = str(bel_id)
new_cont_secret_ref = self.sec_manager.store_secrets(new_cont_secret_ref,new_acl,obj_bel,bel_id)
headers['x-container-meta-container-ref'] = new_cont_secret_ref
if not OVER_ENCRYPTION:
#Only BEL option: download all the files, re-encrypt and upload them
self.swift_conn.post_container(container_name,headers=headers)
head, list_obj = self.get_container(container_name)
for obj in list_obj:
if obj['name'][-1] != '/' or obj['content_type'] != 'application/directory':
head, content = self.get_enc_object(container_name, obj['name'])
self.put_enc_object(container_name,obj['name'],content)
return
if OVER_ENCRYPTION:
#Apply a new Surface Encryption Layer
sel_id, obj_sel = self.sec_manager.create_node(self.iduser, container_name)
init_acl = list(set(initial_acl_sel + added_users)) if initial_acl_sel else list(set(new_acl + actual_acl))
headers['x-container-meta-sel-id'] = str(sel_id)
headers['x-container-meta-sel-acl'] = str({self.idtenant:map(lambda x:"AUTH_"+str(x), init_acl)})
headers['x-container-meta-sel-version'] = str(eval(version_sel_DEK)+1)
new_cont_secret_ref = self.sec_manager.store_secrets(new_cont_secret_ref, new_acl+ [self.SWIFT_ID],obj_sel,sel_id)
headers['x-container-meta-container-ref'] = new_cont_secret_ref
#new_cont_secret_ref = self.send_message(new_cont_secret_ref, actual_acl + [self.SWIFT_ID],{},actual_sel_id)!= 200:
self.swift_conn.post_container(container_name,headers=headers)
except Exception, err:
logger.debug("Error in Post container.")
def post_enc_container (self,container,headers):
"""
Change headers of a specific container
Args:
container: the name of the container
headers: the metadata for this container
"""
if type(headers) is not dict:
headers = {}
actual_head = self.head_container(container)
actual_acl = sorted(self.extractACL(actual_head))
new_acl = sorted(self.extractACL(headers))
if not actual_acl and not new_acl:
#Container not ciphered yet. It has to remain public
try:
# Change Swift not encrypted container headers:
self.swift_conn.post_container(container, headers)
return
except:
logger.error("Post container (not encrypted)")
if not actual_acl and new_acl:
#Container not ciphered yet. It has to become private
self.postcontainer_public_to_private(container, headers, new_acl)
if actual_acl and not new_acl:
#Container already ciphered. It has to become public
self.postcontainer_private_to_public(container,headers)
if actual_acl and new_acl:
#Container already ciphered. It has to remain private
self.postcontainer_changepolicy(container,headers, actual_head, new_acl,actual_acl)
def retrieve_bel_DEKs(self,container,cont_secret_ref):
"""
        Retrieve the DEKs used for the objects in the container
Args:
container: the name of the container
            cont_secret_ref: reference to the container secrets in the catalog
Returns:
            dic: A dictionary with all the DEKs used in the container
"""
dic = {}
#Obtain objects list
headers, list_obj = self.get_container(container)
for obj in list_obj:
if obj['name'][-1] != '/' or obj['content_type'] != 'application/directory':
#Obtain each BEL key from own catalog
header = self.head_object(container, obj['name'])
bel_DEK_id = header['x-object-meta-bel-id']
dic[bel_DEK_id] = self.sec_manager.get_secret(self.iduser,cont_secret_ref,bel_DEK_id)
return dic
def extractACL_param(self, headers,meta):
"""
Extract the ACL from the container headers with the meta parameter
"""
# Get ACLs from the headers
acl = ast.literal_eval(headers.get(meta, '{}'))
list_acl = reduce(lambda x, y: x + y, acl.values(), [])
# Remove duplicates:
list_acl = list(set(list_acl))
# Remove AUTH_ from names
list_clean = map(lambda x: x.replace('AUTH_', ''), list_acl)
return list_clean
def extractACL(self, headers):
"""
Extract the ACL from the container headers
"""
# Get ACLs from the headers
if headers.has_key('x-container-read'):
acl_read = ast.literal_eval(headers['x-container-read'])
elif headers.has_key('X-Container-Read'):
acl_read = ast.literal_eval(headers['X-Container-Read'])
else:
acl_read = {}
if headers.has_key('x-container-write'):
acl_write = ast.literal_eval(headers['x-container-write'])
elif headers.has_key('X-Container-Write'):
acl_write = ast.literal_eval(headers['X-Container-Write'])
else:
acl_write = {}
acl = reduce(lambda x, y: x + y, acl_read.values(), []) + reduce(lambda x, y: x + y, acl_write.values(), [])
# Remove duplicates:
acl = list(set(acl))
# Remove AUTH_ from names
acl_clean = map(lambda x: x.replace('AUTH_', ''), acl)
return acl_clean
def get_container (self, container, marker=None, delimiter=None, prefix=None):
"""
Get a specific container
Args:
container: the name of the container
"""
return self.swift_conn.get_container(container, marker=marker, delimiter=delimiter, prefix=prefix)
def get_account (self):
"""
Get all information about an account
"""
account, containers = self.swift_conn.get_account()
list_cont = []
for cont in containers:
cont_name = cont.get('name','')
headers = self.head_container(cont_name)
list_acl = self.extractACL(headers)
if list_acl == [] or (self.iduser in list_acl):
list_cont.append(cont)
return account, list_cont
def delete_object(self, container, obj):
return self.swift_conn.delete_object(container, obj)
def head_object(self, container, obj):
return self.swift_conn.head_object(container, obj)
def post_object(self, container, obj, headers):
return self.swift_conn.post_object(container, obj, headers)
def head_container(self, container):
return self.swift_conn.head_container(container)
def delete_container(self, container):
return self.swift_conn.delete_container(container)
def post_account(self, headers):
return self.swift_conn.post_account(headers)
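# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the client is driven
# with a Keystone token and project id obtained elsewhere. The token, project
# id, user id, container and object names below are placeholders invented for
# illustration, and a reachable Keystone/Swift deployment plus the config
# constants (AUTH_URL, STORAGE_URL, SWIFT_USER, OVER_ENCRYPTION) are assumed.
#
#     client = EncSwiftclient(auth_token='gAAAA...placeholder',
#                             project_id='0123456789abcdef')
#     # ACLs are serialized dicts mapping the tenant id to a list of user ids,
#     # as expected by extractACL()
#     acl = str({'0123456789abcdef': ['some-other-user-id']})
#     client.put_enc_container('secrets',
#                              headers={'x-container-read': acl,
#                                       'x-container-write': acl})
#     client.put_enc_object('secrets', 'report.txt', 'confidential body')
#     headers, body = client.get_enc_object('secrets', 'report.txt')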
|
|
# Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import httplib
import re
import mock
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER
from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON
POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4'
VOLUMEUUID = 'a000000000000000000000000000001'
INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa'
DATA_IN_VOLUME = {'id': VOLUMEUUID}
DATA_IN_CONNECTOR = {'initiator': INITIATOR}
DATA_SERVER_INFO = 0, {
'metadata': {'vendor': 'ProphetStor',
'version': '1.5'}}
DATA_POOLS = 0, {
'children': [POOLUUID]
}
DATA_POOLINFO = 0, {
'capabilitiesURI': '',
'children': [],
'childrenrange': '',
'completionStatus': 'Complete',
'metadata': {'available_capacity': 4294967296,
'ctime': 1390551362349,
'vendor': 'prophetstor',
'version': '1.5',
'display_description': 'Default Pool',
'display_name': 'default_pool',
'event_uuid': '4f7c4d679a664857afa4d51f282a516a',
'physical_device': {'cache': [],
'data': ['disk_uuid_0',
'disk_uuid_1',
'disk_uuid_2'],
'log': [],
'spare': []},
'pool_uuid': POOLUUID,
'properties': {'raid_level': 'raid0'},
'state': 'Online',
'used_capacity': 0,
'total_capacity': 4294967296,
'zpool_guid': '8173612007304181810'},
'objectType': 'application/cdmi-container',
'percentComplete': 100}
DATA_ASSIGNVDEV = 0, {
'children': [],
'childrenrange': '',
'completionStatus': 'Complete',
'domainURI': '',
'exports': {'Network/iSCSI': [
{'logical_unit_name': '',
'logical_unit_number': '101',
'permissions': [INITIATOR],
'portals': ['172.31.1.210:3260'],
'target_identifier':
'iqn.2013-09.com.prophetstor:hypervisor.886423051816'
}]},
'metadata': {'ctime': 0,
'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5',
'type': 'volume'},
'objectID': '',
'objectName': 'd827e23d403f4f12bb208a6fec208fd8',
'objectType': 'application/cdmi-container',
'parentID': '8daa374670af447e8efea27e16bf84cd',
'parentURI': '/dpl_volume',
'snapshots': []
}
DATA_OUTPUT = 0, None
MOD_OUTPUT = {'status': 'available'}
DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
'name': 'group123',
'description': 'des123',
'status': ''}
DATA_IN_VOLUME = {'id': 'abc123',
'display_name': 'abc123',
'display_description': '',
'size': 1,
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME_VG = {'id': 'abc123',
'display_name': 'abc123',
'display_description': '',
'size': 1,
'consistencygroup_id':
'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
'status': 'available',
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_VOLUME1 = {'id': 'abc456',
'display_name': 'abc456',
'display_description': '',
'size': 1,
'host': "hostname@backend#%s" % POOLUUID}
DATA_IN_CG_SNAPSHOT = {
'consistencygroup_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee',
'id': 'cgsnapshot1',
'name': 'cgsnapshot1',
'description': 'cgsnapshot1',
'status': ''}
DATA_IN_SNAPSHOT = {'id': 'snapshot1',
'volume_id': 'abc123',
'display_name': 'snapshot1',
'display_description': ''}
DATA_OUT_SNAPSHOT_CG = {
'id': 'snapshot1',
'volume_id': 'abc123',
'display_name': 'snapshot1',
'display_description': '',
'cgsnapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'}
class TestProphetStorDPLVolume(test.TestCase):
def _gen_snapshot_url(self, vdevid, snapshotid):
snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT,
snapshotid)
return snapshot_url
def setUp(self):
super(TestProphetStorDPLVolume, self).setUp()
self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password')
self.DPL_MOCK = mock.MagicMock()
self.dplcmd.objCmd = self.DPL_MOCK
self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT
def test_getserverinfo(self):
self.dplcmd.get_server_info()
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM),
None,
[httplib.OK, httplib.ACCEPTED])
def test_createvdev(self):
self.dplcmd.create_vdev(DATA_IN_VOLUME['id'],
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
POOLUUID,
int(DATA_IN_VOLUME['size']) * units.Gi)
metadata = {}
metadata['display_name'] = DATA_IN_VOLUME['display_name']
metadata['display_description'] = DATA_IN_VOLUME['display_description']
metadata['pool_uuid'] = POOLUUID
metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi
metadata['maximum_snapshot'] = 1024
metadata['properties'] = dict(thin_provision=True)
params = {}
params['metadata'] = metadata
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def test_extendvdev(self):
self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'],
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
int(DATA_IN_VOLUME['size']) * units.Gi)
metadata = {}
metadata['display_name'] = DATA_IN_VOLUME['display_name']
metadata['display_description'] = DATA_IN_VOLUME['display_description']
metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi
metadata['maximum_snapshot'] = 1024
params = {}
params['metadata'] = metadata
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def test_deletevdev(self):
self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True)
metadata = {}
params = {}
metadata['force'] = True
params['metadata'] = metadata
self.DPL_MOCK.send_cmd.assert_called_once_with(
'DELETE',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND,
httplib.NO_CONTENT])
def test_createvdevfromsnapshot(self):
self.dplcmd.create_vdev_from_snapshot(
DATA_IN_VOLUME['id'],
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
DATA_IN_SNAPSHOT['id'],
POOLUUID)
metadata = {}
params = {}
metadata['snapshot_operation'] = 'copy'
metadata['display_name'] = DATA_IN_VOLUME['display_name']
metadata['display_description'] = DATA_IN_VOLUME['display_description']
metadata['pool_uuid'] = POOLUUID
metadata['maximum_snapshot'] = 1024
metadata['properties'] = dict(thin_provision=True)
params['metadata'] = metadata
params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'],
DATA_IN_SNAPSHOT['id'])
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def test_getpool(self):
self.dplcmd.get_pool(POOLUUID)
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL,
POOLUUID),
None,
[httplib.OK, httplib.ACCEPTED])
def test_clonevdev(self):
self.dplcmd.clone_vdev(
DATA_IN_VOLUME['id'],
DATA_IN_VOLUME1['id'],
POOLUUID,
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
int(DATA_IN_VOLUME['size']) * units.Gi
)
metadata = {}
params = {}
metadata["snapshot_operation"] = "clone"
metadata["display_name"] = DATA_IN_VOLUME['display_name']
metadata["display_description"] = DATA_IN_VOLUME['display_description']
metadata["pool_uuid"] = POOLUUID
metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi
metadata['maximum_snapshot'] = 1024
metadata['properties'] = dict(thin_provision=True)
params["metadata"] = metadata
params["copy"] = DATA_IN_VOLUME['id']
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME1['id']),
params,
[httplib.OK, httplib.CREATED, httplib.ACCEPTED])
def test_createvdevsnapshot(self):
self.dplcmd.create_vdev_snapshot(
DATA_IN_VOLUME['id'],
DATA_IN_SNAPSHOT['id'],
DATA_IN_SNAPSHOT['display_name'],
DATA_IN_SNAPSHOT['display_description']
)
metadata = {}
params = {}
metadata['display_name'] = DATA_IN_SNAPSHOT['display_name']
metadata['display_description'] = \
DATA_IN_SNAPSHOT['display_description']
params['metadata'] = metadata
params['snapshot'] = DATA_IN_SNAPSHOT['id']
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.CREATED, httplib.ACCEPTED])
def test_getvdev(self):
self.dplcmd.get_vdev(DATA_IN_VOLUME['id'])
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
None,
[httplib.OK, httplib.ACCEPTED, httplib.NOT_FOUND])
def test_getvdevstatus(self):
self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456')
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id'],
'123456'),
None,
[httplib.OK, httplib.NOT_FOUND])
def test_getpoolstatus(self):
self.dplcmd.get_pool_status(POOLUUID, '123456')
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_POOL,
POOLUUID,
'123456'),
None,
[httplib.OK, httplib.NOT_FOUND])
def test_assignvdev(self):
self.dplcmd.assign_vdev(
DATA_IN_VOLUME['id'],
'iqn.1993-08.org.debian:01:test1',
'',
'1.1.1.1:3260',
0
)
params = {}
metadata = {}
exports = {}
metadata['export_operation'] = 'assign'
exports['Network/iSCSI'] = {}
target_info = {}
target_info['logical_unit_number'] = 0
target_info['logical_unit_name'] = ''
permissions = []
portals = []
portals.append('1.1.1.1:3260')
permissions.append('iqn.1993-08.org.debian:01:test1')
target_info['permissions'] = permissions
target_info['portals'] = portals
exports['Network/iSCSI'] = target_info
params['metadata'] = metadata
params['exports'] = exports
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED, httplib.CREATED])
def test_unassignvdev(self):
self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'],
'iqn.1993-08.org.debian:01:test1',
'')
params = {}
metadata = {}
exports = {}
metadata['export_operation'] = 'unassign'
params['metadata'] = metadata
exports['Network/iSCSI'] = {}
exports['Network/iSCSI']['target_identifier'] = ''
permissions = []
permissions.append('iqn.1993-08.org.debian:01:test1')
exports['Network/iSCSI']['permissions'] = permissions
params['exports'] = exports
self.DPL_MOCK.send_cmd.assert_called_once_with(
'PUT',
'/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id']),
params,
[httplib.OK, httplib.ACCEPTED,
httplib.NO_CONTENT, httplib.NOT_FOUND])
def test_deletevdevsnapshot(self):
self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'],
DATA_IN_SNAPSHOT['id'])
params = {}
params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'],
DATA_IN_SNAPSHOT['id'])
self.DPL_MOCK.send_cmd.assert_called_once_with(
'DELETE',
'/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id'],
DPLCOMMON.DPL_OBJ_SNAPSHOT,
DATA_IN_SNAPSHOT['id']),
None,
[httplib.OK, httplib.ACCEPTED, httplib.NO_CONTENT,
httplib.NOT_FOUND])
def test_listvdevsnapshots(self):
self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id'])
self.DPL_MOCK.send_cmd.assert_called_once_with(
'GET',
'/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1,
DPLCOMMON.DPL_OBJ_VOLUME,
DATA_IN_VOLUME['id'],
DPLCOMMON.DPL_OBJ_SNAPSHOT),
None,
[httplib.OK])
class TestProphetStorDPLDriver(test.TestCase):
def __init__(self, method):
super(TestProphetStorDPLDriver, self).__init__(method)
def _conver_uuid2hex(self, strID):
return strID.replace('-', '')
def setUp(self):
super(TestProphetStorDPLDriver, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.san_ip = '1.1.1.1'
self.configuration.dpl_port = 8356
self.configuration.san_login = 'admin'
self.configuration.san_password = 'password'
self.configuration.dpl_pool = POOLUUID
self.configuration.iscsi_port = 3260
self.configuration.san_is_local = False
self.configuration.san_thin_provision = True
self.context = ''
self.DPL_MOCK = mock.MagicMock()
self.DB_MOCK = mock.MagicMock()
self.dpldriver = DPLDRIVER.DPLISCSIDriver(
configuration=self.configuration)
self.dpldriver.dpl = self.DPL_MOCK
self.dpldriver.db = self.DB_MOCK
self.dpldriver.do_setup(self.context)
def test_get_volume_stats(self):
self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO
res = self.dpldriver.get_volume_stats(True)
self.assertEqual('ProphetStor', res['vendor_name'])
self.assertEqual('1.5', res['driver_version'])
pool = res["pools"][0]
self.assertEqual(4, pool['total_capacity_gb'])
self.assertEqual(4, pool['free_capacity_gb'])
self.assertEqual(0, pool['reserved_percentage'])
self.assertEqual(False, pool['QoS_support'])
def test_create_volume(self):
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.dpldriver.create_volume(DATA_IN_VOLUME)
self.DPL_MOCK.create_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
self.configuration.dpl_pool,
int(DATA_IN_VOLUME['size']) * units.Gi,
True)
def test_create_volume_without_pool(self):
fake_volume = copy.deepcopy(DATA_IN_VOLUME)
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.configuration.dpl_pool = ""
fake_volume['host'] = "host@backend" # missing pool
self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume,
volume=fake_volume)
def test_create_volume_with_configuration_pool(self):
fake_volume = copy.deepcopy(DATA_IN_VOLUME)
fake_volume['host'] = "host@backend" # missing pool
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.dpldriver.create_volume(fake_volume)
self.DPL_MOCK.create_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
self.configuration.dpl_pool,
int(DATA_IN_VOLUME['size']) * units.Gi,
True)
def test_create_volume_of_group(self):
self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT
self.dpldriver.create_volume(DATA_IN_VOLUME_VG)
self.DPL_MOCK.create_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
self.configuration.dpl_pool,
int(DATA_IN_VOLUME['size']) * units.Gi,
True)
self.DPL_MOCK.join_vg.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME_VG['id']),
self._conver_uuid2hex(
DATA_IN_VOLUME_VG['consistencygroup_id']))
def test_delete_volume(self):
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
self.dpldriver.delete_volume(DATA_IN_VOLUME)
self.DPL_MOCK.delete_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']))
def test_delete_volume_of_group(self):
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
        self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT
self.dpldriver.delete_volume(DATA_IN_VOLUME_VG)
self.DPL_MOCK.leave_vg.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME_VG['id']),
self._conver_uuid2hex(DATA_IN_GROUP['id'])
)
self.DPL_MOCK.delete_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']))
def test_create_volume_from_snapshot(self):
self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT
self.dpldriver.create_volume_from_snapshot(DATA_IN_VOLUME,
DATA_IN_SNAPSHOT)
self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_VOLUME['display_name'],
DATA_IN_VOLUME['display_description'],
self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']),
self.configuration.dpl_pool,
True)
def test_create_cloned_volume(self):
self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT
self.dpldriver.create_cloned_volume(DATA_IN_VOLUME1, DATA_IN_VOLUME)
self.DPL_MOCK.clone_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
self._conver_uuid2hex(DATA_IN_VOLUME1['id']),
self.configuration.dpl_pool,
DATA_IN_VOLUME1['display_name'],
DATA_IN_VOLUME1['display_description'],
int(DATA_IN_VOLUME1['size']) *
units.Gi,
True)
def test_create_snapshot(self):
self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT)
self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']),
self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']),
DATA_IN_SNAPSHOT['display_name'],
DATA_IN_SNAPSHOT['display_description'])
def test_delete_snapshot(self):
self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT
self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT)
self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']),
self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']))
def test_initialize_connection(self):
self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV
self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV
res = self.dpldriver.initialize_connection(DATA_IN_VOLUME,
DATA_IN_CONNECTOR)
self.assertEqual('iscsi', res['driver_volume_type'])
self.assertEqual('101', res['data']['target_lun'])
self.assertEqual(True, res['data']['target_discovered'])
self.assertEqual('172.31.1.210:3260', res['data']['target_portal'])
self.assertEqual(
'iqn.2013-09.com.prophetstor:hypervisor.886423051816',
res['data']['target_iqn'])
def test_terminate_connection(self):
self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT
self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR)
self.DPL_MOCK.unassign_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_CONNECTOR['initiator'])
def test_terminate_connection_volume_detached(self):
self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None
self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR)
self.DPL_MOCK.unassign_vdev.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_VOLUME['id']),
DATA_IN_CONNECTOR['initiator'])
def test_terminate_connection_failed(self):
self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None
ex = self.assertRaises(
exception.VolumeBackendAPIException,
self.dpldriver.terminate_connection,
volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR)
self.assertTrue(
re.match(r".*Flexvisor failed", ex.msg))
def test_get_pool_info(self):
self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO
_, res = self.dpldriver._get_pool_info(POOLUUID)
self.assertEqual(4294967296, res['metadata']['available_capacity'])
self.assertEqual(1390551362349, res['metadata']['ctime'])
self.assertEqual('Default Pool',
res['metadata']['display_description'])
self.assertEqual('default_pool',
res['metadata']['display_name'])
self.assertEqual('4f7c4d679a664857afa4d51f282a516a',
res['metadata']['event_uuid'])
self.assertEqual(
{'cache': [],
'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'],
'log': [],
'spare': []},
res['metadata']['physical_device'])
self.assertEqual(POOLUUID, res['metadata']['pool_uuid'])
self.assertEqual(
{'raid_level': 'raid0'},
res['metadata']['properties'])
self.assertEqual('Online', res['metadata']['state'])
self.assertEqual(4294967296, res['metadata']['total_capacity'])
self.assertEqual('8173612007304181810', res['metadata']['zpool_guid'])
def test_create_consistency_group(self):
self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT
model_update = self.dpldriver.create_consistencygroup(self.context,
DATA_IN_GROUP)
self.DPL_MOCK.create_vg.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_GROUP['id']), DATA_IN_GROUP['name'],
DATA_IN_GROUP['description'])
self.assertDictMatch({'status': 'available'}, model_update)
def test_delete_consistency_group(self):
self.DB_MOCK.volume_get_all_by_group.return_value = \
[DATA_IN_VOLUME_VG]
self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT
self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT
model_update, volumes = self.dpldriver.delete_consistencygroup(
self.context, DATA_IN_GROUP)
self.DPL_MOCK.delete_vg.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_GROUP['id']))
self.DPL_MOCK.delete_vdev.assert_called_once_with(
self._conver_uuid2hex((DATA_IN_VOLUME_VG['id'])))
        self.assertDictMatch({'status': 'deleted'}, model_update)
def test_create_consistency_group_snapshot(self):
self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
[DATA_OUT_SNAPSHOT_CG]
self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT
model_update, snapshots = self.dpldriver.create_cgsnapshot(
self.context, DATA_IN_CG_SNAPSHOT)
self.assertDictMatch({'status': 'available'}, model_update)
def test_delete_consistency_group_snapshot(self):
self.DB_MOCK.snapshot_get_all_for_cgsnapshot.return_value = \
[DATA_OUT_SNAPSHOT_CG]
self.DPL_MOCK.delete_cgsnapshot.return_value = DATA_OUTPUT
model_update, snapshots = self.dpldriver.delete_cgsnapshot(
self.context, DATA_IN_CG_SNAPSHOT)
self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with(
self._conver_uuid2hex(DATA_IN_CG_SNAPSHOT['consistencygroup_id']),
self._conver_uuid2hex(DATA_IN_CG_SNAPSHOT['id']),
True)
self.assertDictMatch({'status': 'deleted'}, model_update)
|
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
aggregator.projectcounts
~~~~~~~~~~~~~~~~~~~~~~~~
This module contains functions to aggregate Wikimedia's
projectcount files.
"""
import logging
import calendar
import datetime
import os
import glob
import util
PROJECTVIEWS_STRFTIME_PATTERN = ('%%Y%s%%Y-%%m%sprojectviews-%%Y%%m%%d-'
'%%H0000' % (os.sep, os.sep))
PROJECTCOUNTS_STRFTIME_PATTERN = ('%%Y%s%%Y-%%m%sprojectcounts-%%Y%%m%%d-'
'%%H0000' % (os.sep, os.sep))
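# Hedged illustration (not part of the original module): with os.sep == '/',
# the pattern above expands to '%Y/%Y-%m/projectcounts-%Y%m%d-%H0000', so
# datetime.datetime(2014, 10, 1, 1).strftime(PROJECTCOUNTS_STRFTIME_PATTERN)
# yields '2014/2014-10/projectcounts-20141001-010000', i.e. the hourly files
# are grouped into per-year and per-month directories.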
CSV_HEADER = 'Date,Total,Desktop site,Mobile site,Zero site'
DATE_MOBILE_ADDED = datetime.date(2014, 9, 23)
cache = {}
def clear_cache():
global cache
logging.debug("Clearing projectcounts cache")
cache = {}
def aggregate_for_date(
source_dir_abs, date,
allow_bad_data=False, output_projectviews=False):
"""Aggregates hourly projectcounts for a given day.
    This function does not attempt to cache the aggregated data. Either cache
    the result yourself, or use helper methods that cache, such as
    get_hourly_count_for_webstatscollector_abbreviation.
    If one of the required 24 hourly files does not exist, cannot be read, or
    some other issue occurs, a RuntimeError is raised.
    The returned dictionary is keyed by the lowercase webstatscollector
    abbreviation, and values are the total counts for this day.
:param source_dir_abs: Absolute directory to read the hourly projectcounts
files from.
:param date: The date to get the count for.
:param allow_bad_data: If True, do not bail out, if some data is
bad or missing. (Default: False)
:param output_projectviews: If True, name the output files projectviews
instead of projectcounts. (Default: False)
"""
daily_data = {}
if output_projectviews:
output_format = PROJECTVIEWS_STRFTIME_PATTERN
else:
output_format = PROJECTCOUNTS_STRFTIME_PATTERN
for hour in range(24):
# Initialize with the relevant hour start ...
hourly_file_datetime = datetime.datetime(date.year, date.month,
date.day, hour)
# and add another hour since webstatscollector uses the interval end in
# the file name :-(
hourly_file_datetime += datetime.timedelta(hours=1)
hourly_file_abs = os.path.join(
source_dir_abs,
hourly_file_datetime.strftime(output_format))
if not os.path.isfile(hourly_file_abs):
if allow_bad_data:
# The file does not exist, but bad data is explicitly
# allowed, so we continue aggregating
continue
else:
raise RuntimeError("'%s' is not an existing file" % (
hourly_file_abs))
logging.debug("Reading %s" % (hourly_file_abs))
with open(hourly_file_abs, 'r') as hourly_file:
for line in hourly_file:
fields = line.split(' ')
if len(fields) != 4:
logging.warn("File %s as an incorrect line: %s" % (
hourly_file_abs, line))
# Kept in case we want to get back to raising an error
# raise RuntimeError("Malformed line in '%s'" % (
# hourly_file))
else:
abbreviation = fields[0].lower()
count = int(fields[2])
daily_data[abbreviation] = daily_data.get(abbreviation, 0) \
+ count
return daily_data
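# Hedged usage sketch (not part of the original module): shows how
# aggregate_for_date() might be driven for a single day. The source directory
# is an assumed example path, and 'en' is assumed here to be the
# webstatscollector abbreviation for the English Wikipedia desktop site.
def _example_aggregate_single_day():
    date = datetime.date(2014, 10, 1)
    daily_data = aggregate_for_date(
        '/srv/aggregator/projectcounts', date, allow_bad_data=True)
    return daily_data.get('en', 0)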
def get_daily_count(source_dir_abs, webstatscollector_abbreviation, date,
allow_bad_data=False, output_projectviews=False):
"""Obtains the daily count for a webstatscollector abbreviation.
Data gets cached upon read. For a day, the data is <50KB, so having many
dates in cache is not resource intensive.
:param source_dir_abs: Absolute directory to read the hourly projectcounts
files from.
:param webstatscollector_abbreviation: The webstatscollector abbreviation
to get the count for
:param date: The date to get the count for.
:param allow_bad_data: If True, do not bail out, if some data is
bad or missing. (Default: False)
:param output_projectviews: If True, name the output files projectviews
instead of projectcounts. (Default: False)
"""
global cache
try:
source_dir_cache = cache[source_dir_abs]
except KeyError:
source_dir_cache = {}
cache[source_dir_abs] = source_dir_cache
try:
date_data = source_dir_cache[date]
except KeyError:
date_data = aggregate_for_date(
source_dir_abs, date, allow_bad_data, output_projectviews
)
source_dir_cache[date] = date_data
return date_data.get(webstatscollector_abbreviation, 0)
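# Hedged usage sketch (not part of the original module): repeated calls for the
# same source directory and date reuse the module-level cache, so only the
# first call aggregates the 24 hourly files. The path and the abbreviations
# 'en' and 'en.m' are assumptions for illustration.
def _example_cached_daily_counts():
    date = datetime.date(2014, 10, 1)
    desktop = get_daily_count('/srv/aggregator/projectcounts', 'en', date)
    mobile = get_daily_count('/srv/aggregator/projectcounts', 'en.m', date)
    return desktop, mobile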
def update_daily_csv(target_dir_abs, dbname, csv_data_input, first_date,
last_date, bad_dates=[], force_recomputation=False):
"""Updates daily per project CSVs from a csv data dictionary.
The existing per project CSV files in target_dir_abs/daily are updated from
first_date up to (and including) last_date.
    If the CSVs already have data for a given day, it is not recomputed, unless
force_recomputation is True. But if a day is in the set of days that are
considered, and it is also in bad_dates, the data for this day is removed
regardless of force_recomputation.
Upon any error, the function raises an exception.
:param target_dir_abs: Absolute directory. CSVs are getting written to the
'daily' subdirectory of target_dir_abs.
:param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
:param csv_data_input: The data dict to aggregate from
:param first_date: The first date to compute non-existing data for.
:param last_date: The last date to compute non-existing data for.
:param bad_dates: List of dates considered having bad data. (Default: [])
:param force_recomputation: If True, recompute data for the given days,
even if it is already in the CSV. (Default: False)
"""
csv_dir_abs = os.path.join(target_dir_abs, 'daily')
if not os.path.exists(csv_dir_abs):
os.mkdir(csv_dir_abs)
csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv')
csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)
for date in util.generate_dates(first_date, last_date):
date_str = date.isoformat()
logging.debug("Updating csv '%s' for date '%s'" % (
dbname, str(date)))
if date in bad_dates:
if date_str in csv_data:
del csv_data[date_str]
else:
if date_str not in csv_data or force_recomputation:
if date_str not in csv_data_input:
raise RuntimeError("No data for '%s' during daily "
"aggregation" % (date_str))
csv_data[date_str] = csv_data_input[date_str]
util.write_dict_values_sorted_to_csv(
csv_file_abs,
csv_data,
header=CSV_HEADER)
def rescale_counts(csv_data, dates, bad_dates, rescale_to):
"""Extracts relevant dates from CSV data, sums them up, and rescales them.
If the dates only cover bad dates, None is returned.
    Each column is rescaled separately.
Missing columns for good dates are not assumed to be 0.
The first column is ignored, and assumed to hold the date for the reading.
The second column is assumed to hold the sum of the remaining
    columns. This column is not rescaled, but recomputed by
    summing the other rescaled columns. Thereby, we can guarantee that
    the "total sum" is always the sum of the other columns.
Upon other errors, a RuntimeError is raised.
The rescaled counts are returned as list of integers.
    :param csv_data: The data dict to get data from
:param dates: The dates to sum up counts for
:param bad_dates: List of dates considered having bad data.
:param rescale_to: Rescale the good entries to this many entries.
"""
ret = None
aggregations = None
columns = 0
for date in dates:
if date in bad_dates:
continue
date_str = date.isoformat()
try:
csv_line_items = csv_data[date_str].split(',')
except KeyError:
raise RuntimeError("No data for '%s'" % (date_str))
        # Getting rid of the date column. No need to aggregate date columns.
del csv_line_items[0]
# Getting rid of the "total sum" column.
# We always want the "total sum" column to be the sum of the
# other columns in the row. Hence, we cannot simply rescale
# the "total sum" column from the other rows, as that would on
# the one hand give rounding artifacts, and on the other hand
# would not work if some row is missing values for some
# columns. Therefore, we don't rescale the "total sum" column,
# but recompute it after the other columns' rescaled value is
# known.
del csv_line_items[0]
if ret is None:
ret = []
aggregations = []
# Make sure we can fit csv_line_items's columns into the aggregations
while columns < len(csv_line_items):
ret.append(0)
aggregations.append(0)
columns += 1
for i in range(columns):
try:
ret[i] += int(csv_line_items[i].strip())
aggregations[i] += 1
except IndexError:
                # csv_line_items is shorter than ret.
pass
except ValueError:
# No valid reading. (E.g. the empty string)
pass
if ret is not None:
# Since we found readings, rescale.
ret = [(ret[i] * rescale_to) / aggregations[i] if aggregations[i]
else None
for i in range(columns)]
# Then recompute the "total sum" column and prepend it.
ret.insert(0, sum([0 if i is None else i for i in ret]))
return ret
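# Hedged worked example (not part of the original module): given two good dates
# whose CSV rows are '2014-10-01,30,20,10' and '2014-10-02,60,40,20', the date
# and "total sum" columns are dropped, the remaining columns sum to [60, 30]
# over 2 readings, rescaling to 7 entries gives [210, 105], and the recomputed
# total is prepended, so rescale_counts() returns [315, 210, 105].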
def update_weekly_csv(target_dir_abs, dbname, csv_data_input, first_date,
last_date, bad_dates=[], force_recomputation=False):
"""Updates weekly per project CSVs from a csv data dictionary.
The existing per project CSV files in target_dir_abs/weekly are updated for
    all weeks where Sunday is in the date interval from first_date up to (and
including) last_date.
For weekly aggregations, a week's total data is rescaled to 7 days.
If a week under consideration contains no good date, it is removed.
Upon any error, the function raises an exception.
:param target_dir_abs: Absolute directory. CSVs are getting written to the
'weekly_rescaled' subdirectory of target_dir_abs.
:param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
:param csv_data_input: The data dict to aggregate from
:param first_date: The first date to compute non-existing data for.
:param last_date: The last date to compute non-existing data for.
:param bad_dates: List of dates considered having bad data. (Default: [])
:param force_recomputation: If True, recompute data for the given days,
even if it is already in the CSV. (Default: False)
"""
csv_dir_abs = os.path.join(target_dir_abs, 'weekly_rescaled')
if not os.path.exists(csv_dir_abs):
os.mkdir(csv_dir_abs)
csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv')
csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)
for date in util.generate_dates(first_date, last_date):
if date.weekday() == 6: # Sunday. End of ISO week
date_str = date.strftime('%GW%V')
logging.debug("Updating csv '%s' for date '%s'" % (
dbname, str(date)))
week_dates = set(date + datetime.timedelta(days=offset)
for offset in range(-6, 1))
expected_good_dates = len(week_dates - set(bad_dates))
need_recomputation = force_recomputation
need_recomputation |= expected_good_dates != 7
need_recomputation |= date_str not in csv_data
if need_recomputation:
if expected_good_dates == 0:
try:
del csv_data[date_str]
except KeyError:
# No reading was there to remove. That's ok :-)
pass
else:
weekly_counts = rescale_counts(csv_data_input, week_dates,
bad_dates, 7)
util.update_csv_data_dict(
csv_data,
date_str,
*weekly_counts)
util.write_dict_values_sorted_to_csv(
csv_file_abs,
csv_data,
header=CSV_HEADER)
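# Hedged worked example (not part of the original module): Sunday 2014-10-05
# closes ISO week 40 of 2014, so its rescaled weekly row is written under the
# key '2014W40' (date.strftime('%GW%V')), and the seven dates 2014-09-29
# through 2014-10-05 are the ones summed and rescaled.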
def update_monthly_csv(target_dir_abs, dbname, csv_data_input, first_date,
last_date, bad_dates=[], force_recomputation=False):
"""Updates monthly per project CSVs from a csv data dictionary.
The existing per project CSV files in target_dir_abs/monthly_rescaled are
updated for all months where the last day of the month is in the date
interval from first_date up to (and including) last_date.
For monthly aggregations, a month's total data is rescaled to 30 days.
If a month under consideration contains no good date, it is removed.
Upon any error, the function raises an exception.
:param target_dir_abs: Absolute directory. CSVs are getting written to the
'monthly_rescaled' subdirectory of target_dir_abs.
:param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
:param csv_data_input: The data dict to aggregate from
:param first_date: The first date to compute non-existing data for.
:param last_date: The last date to compute non-existing data for.
:param bad_dates: List of dates considered having bad data. (Default: [])
:param force_recomputation: If True, recompute data for the given days,
even if it is already in the CSV. (Default: False)
"""
csv_dir_abs = os.path.join(target_dir_abs, 'monthly_rescaled')
if not os.path.exists(csv_dir_abs):
os.mkdir(csv_dir_abs)
csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv')
csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)
for date in util.generate_dates(first_date, last_date):
if (date + datetime.timedelta(days=1)).day == 1:
# date + 1 day is the first of a month, so date is the last of a
# month. Let's compute for this month
date_str = date.strftime('%Y-%m')
logging.debug("Updating csv '%s' for date '%s'" % (
dbname, date_str))
days_in_month = date.day
month_dates = set(datetime.date(date.year, date.month, day)
for day in range(1, days_in_month+1))
expected_good_dates = len(month_dates - set(bad_dates))
need_recomputation = force_recomputation
need_recomputation |= expected_good_dates != days_in_month
need_recomputation |= date_str not in csv_data
if need_recomputation:
if expected_good_dates == 0:
try:
del csv_data[date_str]
except KeyError:
# No reading was there to remove. That's ok :-)
pass
else:
monthly_counts = rescale_counts(csv_data_input,
month_dates, bad_dates, 30)
util.update_csv_data_dict(
csv_data,
date_str,
*monthly_counts)
util.write_dict_values_sorted_to_csv(
csv_file_abs,
csv_data,
header=CSV_HEADER)
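# Hedged worked example (not part of the original module): for October 2014 the
# row is keyed '2014-10' and rescaled from 31 days to 30, i.e. each column is
# the monthly sum multiplied by 30 and then integer-divided by 31 (assuming all
# 31 days are good).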
def update_yearly_csv(target_dir_abs, dbname, csv_data_input, first_date,
last_date, bad_dates=[], force_recomputation=False):
"""Updates yearly per project CSVs from a csv data dictionary.
The existing per project CSV files in target_dir_abs/yearly_rescaled are
updated for all years where the last day of the year is in the date
interval from first_date up to (and including) last_date.
For yearly aggregations, a year's total data is rescaled to 365 days.
If a year under consideration contains no good date, it is removed.
Upon any error, the function raises an exception.
:param target_dir_abs: Absolute directory. CSVs are getting written to the
'yearly_rescaled' subdirectory of target_dir_abs.
:param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
:param csv_data_input: The data dict to aggregate from
:param first_date: The first date to compute non-existing data for.
:param last_date: The last date to compute non-existing data for.
:param bad_dates: List of dates considered having bad data. (Default: [])
:param force_recomputation: If True, recompute data for the given days,
even if it is already in the CSV. (Default: False)
"""
csv_dir_abs = os.path.join(target_dir_abs, 'yearly_rescaled')
if not os.path.exists(csv_dir_abs):
os.mkdir(csv_dir_abs)
csv_file_abs = os.path.join(csv_dir_abs, dbname + '.csv')
csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)
for date in util.generate_dates(first_date, last_date):
if date.month == 12 and date.day == 31:
# date is the last day of a year. Let's compute for this year
date_str = date.strftime('%Y')
logging.debug("Updating csv '%s' for date '%s'" % (
dbname, date_str))
days_in_year = 366 if calendar.isleap(date.year) else 365
year_dates = set(date - datetime.timedelta(days=offset)
for offset in range(0, days_in_year))
expected_good_dates = len(year_dates - set(bad_dates))
need_recomputation = force_recomputation
need_recomputation |= expected_good_dates != days_in_year
need_recomputation |= date_str not in csv_data
if need_recomputation:
if expected_good_dates == 0:
try:
del csv_data[date_str]
except KeyError:
# No reading was there to remove. That's ok :-)
pass
else:
yearly_counts = rescale_counts(csv_data_input, year_dates,
bad_dates, 365)
util.update_csv_data_dict(
csv_data,
date_str,
*yearly_counts)
util.write_dict_values_sorted_to_csv(
csv_file_abs,
csv_data,
header=CSV_HEADER)
def update_per_project_csvs_for_dates(
source_dir_abs, target_dir_abs, first_date, last_date,
bad_dates=[], additional_aggregators=[], force_recomputation=False,
compute_all_projects=False, output_projectviews=False):
"""Updates per project CSVs from hourly projectcounts files.
The existing per project CSV files in the daily_raw subdirectory of
target_dir_abs are updated with daily data from first_date up to (and
including) last_date.
    If the CSVs already have data for a given day, it is not recomputed, unless
force_recomputation is True.
Upon any error, the function raises an exception without cleaning or
syncing up the CSVs. So if the first CSV could get updated, but there are
issues with the second, the data written to the first CSV survives. Hence,
the CSVs need not end with the same date upon error.
:param source_dir_abs: Absolute directory to read the hourly projectcounts
files from.
:param target_dir_abs: Absolute directory of the per project CSVs.
:param first_date: The first date to compute non-existing data for.
:param last_date: The last date to compute non-existing data for.
:param bad_dates: List of dates considered having bad data. (Default: [])
    :param additional_aggregators: List of functions to additionally
        aggregate with. Those functions need to take target_dir_abs,
        dbname, csv_data_input, first_date, last_date, bad_dates, and
        force_recomputation as parameters (in that order). dbname is
        the database name for the wiki to aggregate for, and
        csv_data_input is the CSV data dictionary for the daily_raw
        aggregation. The other parameters are just passed
        through. (Default: [])
:param force_recomputation: If True, recompute data for the given days,
even if it is already in the CSV. (Default: False)
:param compute_all_projects: If True, compute counts for all projects
into a file named 'all.csv'.
:param output_projectviews: If True, name the output files projectviews
instead of projectcounts. (Default: False)
"""
# Contains the aggregation of all data across projects indexed by date.
all_projects_data = {}
for csv_file_abs in sorted(glob.glob(os.path.join(
target_dir_abs, 'daily_raw', '*.csv'))):
dbname = os.path.basename(csv_file_abs)
dbname = dbname.rsplit('.csv', 1)[0]
if dbname == 'all':
# 'all.csv' is an aggregation across all projects
# and should not be processed.
continue
logging.info("Updating csv '%s'" % (csv_file_abs))
csv_data = util.parse_csv_to_first_column_dict(csv_file_abs)
for date in util.generate_dates(first_date, last_date):
date_str = date.isoformat()
logging.debug("Updating csv '%s' for date '%s'" % (
dbname, str(date)))
if date_str not in csv_data or force_recomputation:
# Check if to allow bad data for this day
allow_bad_data = date in bad_dates
# desktop site
abbreviation = util.dbname_to_webstatscollector_abbreviation(
dbname, 'desktop')
count_desktop = get_daily_count(
source_dir_abs, abbreviation, date,
allow_bad_data, output_projectviews,
)
# mobile site
abbreviation = util.dbname_to_webstatscollector_abbreviation(
dbname, 'mobile')
count_mobile = get_daily_count(
source_dir_abs, abbreviation, date,
allow_bad_data, output_projectviews,
)
# zero site
abbreviation = util.dbname_to_webstatscollector_abbreviation(
dbname, 'zero')
count_zero = get_daily_count(
source_dir_abs, abbreviation, date,
allow_bad_data, output_projectviews,
)
count_total = count_desktop
if date >= DATE_MOBILE_ADDED:
count_total += count_mobile + count_zero
# injecting obtained data
util.update_csv_data_dict(
csv_data,
date_str,
count_total,
count_desktop,
count_mobile if date >= DATE_MOBILE_ADDED else None,
count_zero if date >= DATE_MOBILE_ADDED else None)
_write_raw_and_aggregated_csv_data(
target_dir_abs,
dbname,
csv_data,
first_date,
last_date,
additional_aggregators,
bad_dates,
force_recomputation)
# Aggregates values across all projects
if compute_all_projects:
util.merge_sum_csv_data_dict(all_projects_data, csv_data)
# Writes aggregations across all projects
if compute_all_projects:
oldest_date = util.parse_string_to_date(min(all_projects_data.keys()))
newest_date = util.parse_string_to_date(max(all_projects_data.keys()))
_write_raw_and_aggregated_csv_data(
target_dir_abs,
'all',
all_projects_data,
oldest_date,
newest_date,
additional_aggregators,
bad_dates,
force_recomputation)
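# Hedged usage sketch (not part of the original module): wires the daily,
# weekly, monthly, and yearly aggregators into one per-project update run.
# The directory paths and the date range are assumptions for illustration.
def _example_full_aggregation_run():
    update_per_project_csvs_for_dates(
        '/srv/aggregator/projectcounts',
        '/srv/aggregator/projectcounts-csvs',
        datetime.date(2014, 10, 1),
        datetime.date(2014, 10, 31),
        additional_aggregators=[
            update_daily_csv,
            update_weekly_csv,
            update_monthly_csv,
            update_yearly_csv,
        ],
        compute_all_projects=True)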
def _write_raw_and_aggregated_csv_data(
target_dir_abs, dbname, csv_data, first_date, last_date,
additional_aggregators, bad_dates, force_recomputation):
"""
Writes the data passed in the csv_data dict to various destinations:
1. Writes the raw data (as it comes in the csv_data dict), to:
<target_dir_abs>/daily_raw/<dbname>.csv
2. Uses each aggregator in additional_aggregators to write the data
to the aggregator's specific location. Note: Some of this method's
parameters are just forwarded to these aggregators.
:param target_dir_abs: Absolute directory of the per project CSVs.
:param dbname: The database name of the wiki to consider (E.g.: 'enwiki')
:param csv_data: The dict containing the data to be written.
:param first_date: The first date to write non-existing data for.
:param last_date: The last date to write non-existing data for.
:param additional_aggregators: See update_per_project_csvs_for_dates.
:param bad_dates: List of dates considered having bad data.
:param force_recomputation: If True, recompute data for the given days.
"""
csv_file_abs = os.path.join(target_dir_abs, 'daily_raw', dbname + '.csv')
util.write_dict_values_sorted_to_csv(
csv_file_abs,
csv_data,
header=CSV_HEADER)
for additional_aggregator in additional_aggregators:
additional_aggregator(
target_dir_abs,
dbname,
csv_data,
first_date,
last_date,
bad_dates=bad_dates,
force_recomputation=force_recomputation)
def _get_validity_issues_for_aggregated_projectcounts_generic(
csv_dir_abs, total_expected, desktop_site_expected,
mobile_site_expected, zero_site_expected, current_date_strs):
"""Gets a list of obvious validity issues for a directory of CSVs
:param csv_dir_abs: Absolute directory of the per project CSVs.
    :param total_expected: Expect at least that many views overall on
        big wikis.
:param desktop_site_expected: Expect at least that many views to desktop
site on big wikis.
:param mobile_site_expected: Expect at least that many views to mobile site
on big wikis.
:param zero_site_expected: Expect at least that many views to zero site on
big wikis.
:param current_date_strs: Expect one of those as date string of the final
item in the CSVs.
"""
issues = []
dbnames = []
big_wikis = [
'enwiki',
'jawiki',
'dewiki',
'eswiki',
'frwiki',
'ruwiki',
'itwiki',
]
for csv_file_abs in sorted(glob.glob(os.path.join(csv_dir_abs, '*.csv'))):
logging.info("Checking csv '%s'" % (csv_file_abs))
dbname = os.path.basename(csv_file_abs)
dbname = dbname.rsplit('.csv', 1)[0]
dbnames.append(dbname)
with open(csv_file_abs, 'r') as file:
lines = file.readlines()
if len(lines):
# Analyze last line
last_line = (lines[-1]).rstrip('\n\r')
last_line_split = last_line.split(',')
if len(last_line_split) == 5:
# Check if last line is current.
try:
if last_line_split[0] not in current_date_strs:
issues.append("Last line of %s is too old "
"'%s'" % (csv_file_abs, last_line))
except ValueError:
issues.append("Last line of %s is too old "
"'%s'" % (csv_file_abs, last_line))
if dbname in big_wikis:
# Check total count
try:
if int(last_line_split[1]) < total_expected:
issues.append("Total count of last line of "
"%s is too low '%s'" % (
csv_file_abs, last_line))
except ValueError:
issues.append("Total count of last line of %s is"
"not an integer '%s'" % (
csv_file_abs, last_line))
# Check desktop count
try:
if int(last_line_split[2]) < desktop_site_expected:
issues.append("Desktop count of last line of "
"%s is too low '%s'" % (
csv_file_abs, last_line))
except ValueError:
issues.append("Desktop count of last line of %s is"
"not an integer '%s'" % (
csv_file_abs, last_line))
# Check mobile count
try:
if int(last_line_split[3]) < mobile_site_expected:
issues.append("Mobile count of last line of "
"%s is too low '%s'" % (
csv_file_abs, last_line))
except ValueError:
issues.append("Mobile count of last line of %s is"
"not an integer '%s'" % (
csv_file_abs, last_line))
# Check zero count
try:
if int(last_line_split[4]) < zero_site_expected:
issues.append("Zero count of last line of "
"%s is too low '%s'" % (
csv_file_abs, last_line))
except ValueError:
issues.append("Desktop count of last line of %s is"
"not an integer '%s'" % (
csv_file_abs, last_line))
                    # Check that the total is the sum of the other columns
try:
if int(last_line_split[1]) != \
int(last_line_split[2]) + \
int(last_line_split[3]) + \
int(last_line_split[4]):
issues.append(
"Total column is not the sum of "
"individual columns in '%s' for %s" % (
last_line, csv_file_abs))
except ValueError:
# Some column is not a number. This has already
# been reported above, so we just pass.
pass
else:
issues.append("Last line of %s does not have 5 columns: "
"'%s'" % (csv_file_abs, last_line))
else:
issues.append("No lines for %s" % csv_file_abs)
if not len(dbnames):
issues.append("Could not find any CSVs")
if set(big_wikis) - set(dbnames):
issues.append("Not all big wikis covered (Missing: %s)" % (
[x for x in (set(big_wikis) - set(dbnames))]))
if not (set(dbnames) - set(big_wikis)):
issues.append("No wikis beyond the big wikis")
return sorted(issues)
def get_validity_issues_for_aggregated_projectcounts(data_dir_abs):
"""Gets a list of obvious validity issues of aggregated projectcount CSVs
:param data_dir_abs: Absolute directory of the per project CSVs.
"""
issues = []
current_dates = [
datetime.date.today(),
util.parse_string_to_date('yesterday')
]
# daily_raw files
issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
os.path.join(data_dir_abs, 'daily_raw'),
1000000, 1000000, 10000, 100,
set(date.isoformat() for date in current_dates)
))
# daily files
issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
os.path.join(data_dir_abs, 'daily'),
1000000, 1000000, 10000, 100,
set(date.isoformat() for date in current_dates)
))
# weekly files
issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
os.path.join(data_dir_abs, 'weekly_rescaled'),
10000000, 10000000, 100000, 1000,
set((date - datetime.timedelta(days=6)).strftime('%GW%V')
for date in current_dates)
))
# monthly files
issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
os.path.join(data_dir_abs, 'monthly_rescaled'),
50000000, 50000000, 500000, 5000,
set((
datetime.date(date.year, date.month, 1)
- datetime.timedelta(days=1)
).strftime('%Y-%m')
for date in current_dates)
))
# yearly files
issues.extend(_get_validity_issues_for_aggregated_projectcounts_generic(
os.path.join(data_dir_abs, 'yearly_rescaled'),
700000000, 700000000, 7000000, 70000,
set((
datetime.date(date.year, 1, 1) - datetime.timedelta(days=1)
).strftime('%Y')
for date in current_dates)
))
return issues
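# Hedged usage sketch (not part of the original module): a monitoring job could
# run the validity check over the aggregated CSV tree and log any issues found.
# The data directory is an assumed example path.
def _example_validity_check():
    issues = get_validity_issues_for_aggregated_projectcounts(
        '/srv/aggregator/projectcounts-csvs')
    for issue in issues:
        logging.warning(issue)
    return not issues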
|
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from contextlib import contextmanager
import subprocess
from alembic.ddl import base as alembic_ddl
from alembic import script as alembic_script
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_db.sqlalchemy import test_migrations
from oslotest import base as oslotest_base
import six
import sqlalchemy
from sqlalchemy import event # noqa
from sqlalchemy.sql import ddl as sqla_ddl
from neutron.db import migration as migration_root
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import cli as migration
from neutron.db.migration.models import head as head_models
from neutron.tests import base as test_base
from neutron.tests.unit import testlib_api
cfg.CONF.import_opt('core_plugin', 'neutron.conf.common')
CREATION_OPERATIONS = {
'sqla': (sqla_ddl.CreateIndex,
sqla_ddl.CreateTable,
sqla_ddl.CreateColumn,
),
'alembic': (alembic_ddl.AddColumn,
)
}
DROP_OPERATIONS = {
'sqla': (sqla_ddl.DropConstraint,
sqla_ddl.DropIndex,
sqla_ddl.DropTable,
),
'alembic': (alembic_ddl.DropColumn,
)
}
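# Hedged illustration (not part of the original tests): these operation sets
# are matched against the clause elements seen by a 'before_execute' listener
# in test_branches() below, so that, for example, a sqla_ddl.DropTable emitted
# while upgrading the expand branch fails the test unless the migration script
# excepts it via an expand_drop_exceptions() method.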
def upgrade(engine, alembic_config, branch_name='heads'):
cfg.CONF.set_override('connection', engine.url, group='database')
migration.do_alembic_command(alembic_config, 'upgrade',
branch_name)
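# Hedged illustration (not part of the original tests): branch_name accepts the
# usual alembic revision targets, e.g. 'heads' for everything or
# '%s@head' % migration.EXPAND_BRANCH to stop at the tip of the expand branch,
# as exercised by the branch-specific tests below.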
class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
    '''Test for checking equality of models state and migrations.
For the opportunistic testing you need to set up a db named
'openstack_citest' with user 'openstack_citest' and password
'openstack_citest' on localhost.
The test will then use that db and user/password combo to run the tests.
For PostgreSQL on Ubuntu this can be done with the following commands::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner
openstack_citest;
For MySQL on Ubuntu this can be done with the following commands::
mysql -u root
>create database openstack_citest;
>grant all privileges on openstack_citest.* to
openstack_citest@localhost identified by 'openstack_citest';
Output is a list that contains information about differences between db and
models. Output example::
[('add_table',
Table('bat', MetaData(bind=None),
Column('info', String(), table=<bat>), schema=None)),
('remove_table',
Table(u'bar', MetaData(bind=None),
Column(u'data', VARCHAR(), table=<bar>), schema=None)),
('add_column',
None,
'foo',
Column('data', Integer(), table=<foo>)),
('remove_column',
None,
'foo',
Column(u'old_data', VARCHAR(), table=None)),
[('modify_nullable',
None,
'foo',
u'x',
{'existing_server_default': None,
'existing_type': INTEGER()},
True,
False)]]
* ``remove_*`` means that there is extra table/column/constraint in db;
* ``add_*`` means that it is missing in db;
    * ``modify_*`` means that a column in db has a wrong
      type/nullable/server_default set. Element contains information:
- what should be modified,
- schema,
- table,
- column,
- existing correct column parameters,
- right value,
- wrong value.
    This class also contains tests for branches, e.g. that the correct
    operations are used in the contract and expand branches.
'''
BUILD_SCHEMA = False
TIMEOUT_SCALING_FACTOR = 4
def setUp(self):
super(_TestModelsMigrations, self).setUp()
self.cfg = self.useFixture(config_fixture.Config())
self.cfg.config(core_plugin='ml2')
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
# Migration tests can take a long time
self.useFixture(test_base.Timeout(scaling=self.TIMEOUT_SCALING_FACTOR))
def db_sync(self, engine):
upgrade(engine, self.alembic_config)
def get_engine(self):
return self.engine
def get_metadata(self):
return head_models.get_metadata()
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table' and (name == 'alembic_version' or
name in external.TABLES):
return False
return super(_TestModelsMigrations, self).include_object(
object_, name, type_, reflected, compare_to)
def filter_metadata_diff(self, diff):
return list(filter(self.remove_unrelated_errors, diff))
    # Remove some differences that are not mistakes, just specific to
    # dialects, etc.
def remove_unrelated_errors(self, element):
insp = sqlalchemy.engine.reflection.Inspector.from_engine(
self.get_engine())
dialect = self.get_engine().dialect.name
if isinstance(element, tuple):
if dialect == 'mysql' and element[0] == 'remove_index':
table_name = element[1].table.name
for fk in insp.get_foreign_keys(table_name):
if fk['name'] == element[1].name:
return False
cols = [c.name for c in element[1].expressions]
for col in cols:
if col in insp.get_pk_constraint(
table_name)['constrained_columns']:
return False
else:
for modified, _, table, column, _, _, new in element:
if modified == 'modify_default' and dialect == 'mysql':
constrained = insp.get_pk_constraint(table)
if column in constrained['constrained_columns']:
return False
return True
def test_upgrade_expand_branch(self):
# Verify that "command neutron-db-manage upgrade --expand" works
# without errors. Check this for both MySQL and PostgreSQL.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.EXPAND_BRANCH)
def test_upgrade_contract_branch(self):
# Verify that "command neutron-db-manage upgrade --contract" works
# without errors. Check this for both MySQL and PostgreSQL.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.CONTRACT_BRANCH)
@contextmanager
def _listener(self, engine, listener_func):
try:
event.listen(engine, 'before_execute', listener_func)
yield
finally:
event.remove(engine, 'before_execute',
listener_func)
def test_branches(self):
drop_exceptions = collections.defaultdict(list)
creation_exceptions = collections.defaultdict(list)
def find_migration_exceptions():
# Due to some misunderstandings and some conscious decisions,
# there may be some expand migrations which drop elements and
# some contract migrations which create elements. These excepted
# elements must be returned by a method in the script itself.
# The names of the method must be 'contract_creation_exceptions'
# or 'expand_drop_exceptions'. The methods must have a docstring
# explaining the reason for the exception.
#
# Here we build lists of the excepted elements and verify that
# they are documented.
script = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
for m in list(script.walk_revisions(base='base', head='heads')):
branches = m.branch_labels or []
if migration.CONTRACT_BRANCH in branches:
method_name = 'contract_creation_exceptions'
exceptions_dict = creation_exceptions
elif migration.EXPAND_BRANCH in branches:
method_name = 'expand_drop_exceptions'
exceptions_dict = drop_exceptions
else:
continue
get_excepted_elements = getattr(m.module, method_name, None)
if not get_excepted_elements:
continue
                explanation = get_excepted_elements.__doc__ or ""
if len(explanation) < 1:
self.fail("%s() requires docstring with explanation" %
'.'.join([m.module.__name__,
get_excepted_elements.__name__]))
for sa_type, elements in get_excepted_elements().items():
exceptions_dict[sa_type].extend(elements)
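        # Hedged illustration (not part of the original tests): a contract
        # migration script that legitimately creates an index could document
        # the exception roughly like
        #
        #     def contract_creation_exceptions():
        #         """Explain why this index must be created in contract."""
        #         return {sqlalchemy.Index: ['some_table']}
        #
        # which find_migration_exceptions() above collects into
        # creation_exceptions.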
def is_excepted_sqla(clauseelement, exceptions):
"""Identify excepted operations that are allowed for the branch."""
element = clauseelement.element
element_name = element.name
if isinstance(element, sqlalchemy.Index):
element_name = element.table.name
for sa_type_, excepted_names in exceptions.items():
if isinstance(element, sa_type_):
if element_name in excepted_names:
return True
def is_excepted_alembic(clauseelement, exceptions):
"""Identify excepted operations that are allowed for the branch."""
# For alembic the clause is AddColumn or DropColumn
column = clauseelement.column.name
table = clauseelement.column.table.name
element_name = '.'.join([table, column])
for alembic_type, excepted_names in exceptions.items():
if alembic_type == sqlalchemy.Column:
if element_name in excepted_names:
return True
def is_allowed(clauseelement, exceptions, disallowed_ops):
if (isinstance(clauseelement, disallowed_ops['sqla']) and
hasattr(clauseelement, 'element')):
return is_excepted_sqla(clauseelement, exceptions)
if isinstance(clauseelement, disallowed_ops['alembic']):
return is_excepted_alembic(clauseelement, exceptions)
return True
def check_expand_branch(conn, clauseelement, multiparams, params):
if not is_allowed(clauseelement, drop_exceptions, DROP_OPERATIONS):
self.fail("Migration in expand branch contains drop command")
def check_contract_branch(conn, clauseelement, multiparams, params):
if not is_allowed(clauseelement, creation_exceptions,
CREATION_OPERATIONS):
self.fail("Migration in contract branch contains create "
"command")
find_migration_exceptions()
engine = self.engine
cfg.CONF.set_override('connection', engine.url, group='database')
with engine.begin() as connection:
self.alembic_config.attributes['connection'] = connection
# upgrade to latest release first; --expand users are expected to
# apply all alembic scripts from previous releases before applying
# the new ones
for release in migration_root.NEUTRON_MILESTONES:
release_revisions = migration._find_milestone_revisions(
self.alembic_config, release)
for rev in release_revisions:
migration.do_alembic_command(
self.alembic_config, 'upgrade', rev[0])
with self._listener(engine, check_expand_branch):
migration.do_alembic_command(
self.alembic_config, 'upgrade',
'%s@head' % migration.EXPAND_BRANCH)
with self._listener(engine, check_contract_branch):
migration.do_alembic_command(
self.alembic_config, 'upgrade',
'%s@head' % migration.CONTRACT_BRANCH)
def _test_has_offline_migrations(self, revision, expected):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
migration.do_alembic_command(self.alembic_config, 'upgrade', revision)
self.assertEqual(expected,
migration.has_offline_migrations(self.alembic_config,
'unused'))
def test_has_offline_migrations_pending_contract_scripts(self):
self._test_has_offline_migrations('kilo', True)
def test_has_offline_migrations_all_heads_upgraded(self):
self._test_has_offline_migrations('heads', False)
# NOTE(ihrachys): if this test fails for you, it probably means that you
# attempt to add an unsafe contract migration script, that is in
# contradiction to blueprint online-upgrades
# TODO(ihrachys): revisit later in Pike+ where some contract scripts may be
# safe again
def test_forbid_offline_migrations_starting_newton(self):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
# the following revisions are Newton heads
for revision in ('5cd92597d11d', '5c85685d616d'):
migration.do_alembic_command(
self.alembic_config, 'upgrade', revision)
self.assertFalse(migration.has_offline_migrations(
self.alembic_config, 'unused'),
msg='Offline contract migration scripts are forbidden for Ocata+')
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight):
def test_check_mysql_engine(self):
engine = self.get_engine()
cfg.CONF.set_override('connection', engine.url, group='database')
with engine.begin() as connection:
self.alembic_config.attributes['connection'] = connection
migration.do_alembic_command(self.alembic_config, 'upgrade',
'heads')
insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
# Test that table creation on MySQL only builds InnoDB tables
tables = insp.get_table_names()
self.assertGreater(len(tables), 0,
"No tables found. Wrong schema?")
res = [table for table in tables if
insp.get_table_options(table)['mysql_engine'] !=
'InnoDB' and
table != 'alembic_version']
self.assertEqual(0, len(res), "%s non InnoDB tables created" % res)
class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight):
pass
class TestSanityCheck(testlib_api.SqlTestCaseLight):
BUILD_SCHEMA = False
def setUp(self):
super(TestSanityCheck, self).setUp()
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
def _drop_table(self, table):
with self.engine.begin() as conn:
table.drop(conn)
def test_check_sanity_1df244e556f5(self):
ha_router_agent_port_bindings = sqlalchemy.Table(
'ha_router_agent_port_bindings', sqlalchemy.MetaData(),
sqlalchemy.Column('port_id', sqlalchemy.String(36)),
sqlalchemy.Column('router_id', sqlalchemy.String(36)),
sqlalchemy.Column('l3_agent_id', sqlalchemy.String(36)))
with self.engine.connect() as conn:
ha_router_agent_port_bindings.create(conn)
self.addCleanup(self._drop_table, ha_router_agent_port_bindings)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
# pylint: disable=no-value-for-parameter
conn.execute(ha_router_agent_port_bindings.insert(), [
{'port_id': '1234', 'router_id': '12345',
'l3_agent_id': '123'},
{'port_id': '12343', 'router_id': '12345',
'l3_agent_id': '123'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("1df244e556f5").module
self.assertRaises(script.DuplicateL3HARouterAgentPortBinding,
script.check_sanity, conn)
def test_check_sanity_030a959ceafa(self):
routerports = sqlalchemy.Table(
'routerports', sqlalchemy.MetaData(),
sqlalchemy.Column('router_id', sqlalchemy.String(36)),
sqlalchemy.Column('port_id', sqlalchemy.String(36)),
sqlalchemy.Column('port_type', sqlalchemy.String(255)))
with self.engine.connect() as conn:
routerports.create(conn)
self.addCleanup(self._drop_table, routerports)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
# pylint: disable=no-value-for-parameter
conn.execute(routerports.insert(), [
{'router_id': '1234', 'port_id': '12345',
'port_type': '123'},
{'router_id': '12343', 'port_id': '12345',
'port_type': '1232'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("030a959ceafa").module
self.assertRaises(script.DuplicatePortRecordinRouterPortdatabase,
script.check_sanity, conn)
def test_check_sanity_6b461a21bcfc_dup_on_fixed_ip(self):
floatingips = sqlalchemy.Table(
'floatingips', sqlalchemy.MetaData(),
sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64)))
with self.engine.connect() as conn:
floatingips.create(conn)
self.addCleanup(self._drop_table, floatingips)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
# pylint: disable=no-value-for-parameter
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': '12345678'},
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': '12345678'}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("6b461a21bcfc").module
self.assertRaises(script.DuplicateFloatingIPforOneFixedIP,
script.check_sanity, conn)
def test_check_sanity_6b461a21bcfc_dup_on_no_fixed_ip(self):
floatingips = sqlalchemy.Table(
'floatingips', sqlalchemy.MetaData(),
sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)),
sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64)))
with self.engine.connect() as conn:
floatingips.create(conn)
self.addCleanup(self._drop_table, floatingips)
# NOTE(haleyb): without this disabled, pylint complains
# about a missing 'dml' argument.
# pylint: disable=no-value-for-parameter
conn.execute(floatingips.insert(), [
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': None},
{'floating_network_id': '12345',
'fixed_port_id': '1234567',
'fixed_ip_address': None}
])
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
script = script_dir.get_revision("6b461a21bcfc").module
self.assertIsNone(script.check_sanity(conn))
class TestWalkDowngrade(oslotest_base.BaseTestCase):
def setUp(self):
super(TestWalkDowngrade, self).setUp()
self.alembic_config = migration.get_neutron_config()
self.alembic_config.neutron_config = cfg.CONF
def test_no_downgrade(self):
script_dir = alembic_script.ScriptDirectory.from_config(
self.alembic_config)
versions = [v for v in script_dir.walk_revisions(base='base',
head='heads')]
failed_revisions = []
for version in versions:
if hasattr(version.module, 'downgrade'):
failed_revisions.append(version.revision)
if failed_revisions:
self.fail('Migrations %s have downgrade' % failed_revisions)
return True
class _TestWalkMigrations(object):
'''This will add framework for testing schema migration
for different backends.
'''
BUILD_SCHEMA = False
def execute_cmd(self, cmd=None):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
output = proc.communicate()[0]
self.assertEqual(0, proc.returncode, 'Command failed with '
'output:\n%s' % output)
def _get_alembic_config(self, uri):
db_config = migration.get_neutron_config()
self.script_dir = alembic_script.ScriptDirectory.from_config(db_config)
db_config.neutron_config = cfg.CONF
db_config.neutron_config.set_override('connection',
six.text_type(uri),
group='database')
return db_config
def _revisions(self):
"""Provides revisions and its parent revisions.
:return: List of tuples. Every tuple contains revision and its parent
revision.
"""
revisions = list(self.script_dir.walk_revisions("base", "heads"))
revisions = list(reversed(revisions))
for rev in revisions:
# Destination, current
yield rev.revision, rev.down_revision
def _migrate_up(self, config, engine, dest, curr, with_data=False):
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % dest, None)
if pre_upgrade:
data = pre_upgrade(engine)
migration.do_alembic_command(config, 'upgrade', dest)
if with_data:
check = getattr(self, "_check_%s" % dest, None)
if check and data:
check(engine, data)
def test_walk_versions(self):
"""Test migrations ability to upgrade and downgrade.
"""
engine = self.engine
config = self._get_alembic_config(engine.url)
revisions = self._revisions()
for dest, curr in revisions:
self._migrate_up(config, engine, dest, curr, with_data=True)
class TestWalkMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
pass
class TestWalkMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
pass
|
|
# -*- coding: utf-8 -*-
# Unittests for fixtures.
from __future__ import unicode_literals
import json
import os
import re
import warnings
from django.core.serializers.base import DeserializationError
from django.core import management
from django.core.management.base import CommandError
from django.core.management.commands.dumpdata import sort_dependencies
from django.db import transaction, IntegrityError
from django.db.models import signals
from django.test import (TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from django.test import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import PY3, StringIO
from .models import (Animal, Stuff, Absolute, Parent, Child, Article, Widget,
Store, Person, Book, NKChild, RefToNKChild, Circle1, Circle2, Circle3,
ExternalDependency, Thingy)
_cur_dir = os.path.dirname(os.path.abspath(upath(__file__)))
class TestFixtures(TestCase):
def animal_pre_save_check(self, signal, sender, instance, **kwargs):
self.pre_save_checks.append(
(
'Count = %s (%s)' % (instance.count, type(instance.count)),
'Weight = %s (%s)' % (instance.weight, type(instance.weight)),
)
)
def test_duplicate_pk(self):
"""
This is a regression test for ticket #3790.
"""
# Load a fixture that uses PK=1
management.call_command(
'loaddata',
'sequence',
verbosity=0,
)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2
)
animal.save()
self.assertGreater(animal.id, 1)
def test_loaddata_not_found_fields_not_ignore(self):
"""
Test for ticket #9279 -- Error is raised for entries in
the serialised data for fields that have been removed
from the database when not ignored.
"""
with self.assertRaises(DeserializationError):
management.call_command(
'loaddata',
'sequence_extra',
verbosity=0
)
def test_loaddata_not_found_fields_ignore(self):
"""
Test for ticket #9279 -- Ignores entries in
the serialised data for fields that have been removed
from the database.
"""
management.call_command(
'loaddata',
'sequence_extra',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Lion')
def test_loaddata_not_found_fields_ignore_xml(self):
"""
Test for ticket #19998 -- Ignore entries in the XML serialised data
for fields that have been removed from the model definition.
"""
management.call_command(
'loaddata',
'sequence_extra_xml',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Wolf')
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, None)
self.assertEqual(Stuff.objects.all()[0].owner, None)
@skipUnlessDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml_empty_strings(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, '')
self.assertEqual(Stuff.objects.all()[0].owner, None)
def test_absolute_path(self):
"""
Regression test for ticket #6436 --
os.path.join will throw away the initial parts of a path if it
encounters an absolute path.
This means that if a fixture is specified as an absolute path,
we need to make sure we don't discover the absolute path in every
fixture directory.
"""
load_absolute_path = os.path.join(
os.path.dirname(upath(__file__)),
'fixtures',
'absolute.json'
)
management.call_command(
'loaddata',
load_absolute_path,
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
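    # Hedged illustration (not part of the original tests): the behaviour the
    # docstring above refers to is that
    #     os.path.join('/some/fixture/dir', '/abs/path.json')
    # returns '/abs/path.json', discarding the earlier components.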
def test_relative_path(self, path=['fixtures', 'absolute.json']):
relative_path = os.path.join(*path)
cwd = os.getcwd()
try:
os.chdir(_cur_dir)
management.call_command(
'loaddata',
relative_path,
verbosity=0,
)
finally:
os.chdir(cwd)
self.assertEqual(Absolute.objects.count(), 1)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1')])
def test_relative_path_in_fixture_dirs(self):
self.test_relative_path(path=['inner', 'absolute.json'])
def test_path_containing_dots(self):
management.call_command(
'loaddata',
'path.containing.dots.json',
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_unknown_format(self):
"""
        Test for ticket #4371 -- Loading data of an unknown format should fail.
        Validate that error conditions are caught correctly.
"""
with six.assertRaisesRegex(self, management.CommandError,
"Problem installing fixture 'bad_fixture1': "
"unkn is not a known serialization format."):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
@override_settings(SERIALIZATION_MODULES={'unkn': 'unexistent.path'})
def test_unimportable_serializer(self):
"""
Test that failing serializer import raises the proper error
"""
with six.assertRaisesRegex(self, ImportError,
r"No module named.*unexistent"):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
def test_invalid_data(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
using explicit filename.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2.xml',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_invalid_data_no_ext(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
without file extension.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_empty(self):
"""
        Test for ticket #18213 -- Loading a fixture file with no data outputs a warning.
        Previously an empty fixture raised an error exception, see ticket #4371.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'empty',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'empty'. (File format may be invalid.)")
def test_error_message(self):
"""
Regression for #9011 - error message is correct.
Change from error to warning for ticket #18213.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
'animal',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_pg_sequence_resetting_checks(self):
"""
Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
ascend to parent models when inheritance is used
(since they are treated individually).
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
self.assertEqual(Parent.objects.all()[0].id, 1)
self.assertEqual(Child.objects.all()[0].id, 1)
def test_close_connection_after_loaddata(self):
"""
Test for ticket #7572 -- MySQL has a problem if the same connection is
used to create tables, load data, and then query over that data.
To compensate, we close the connection after running loaddata.
This ensures that a new connection is opened when test queries are
issued.
"""
management.call_command(
'loaddata',
'big-fixture.json',
verbosity=0,
)
articles = Article.objects.exclude(id=9)
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
# Just for good measure, run the same query again.
# Under the influence of ticket #7572, this will
# give a different result to the previous call.
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
def test_field_value_coerce(self):
"""
Test for tickets #8298, #9942 - Field values should be coerced into the
correct type by the deserializer, not as part of the database write.
"""
self.pre_save_checks = []
signals.pre_save.connect(self.animal_pre_save_check)
try:
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
self.assertEqual(
self.pre_save_checks,
[
("Count = 42 (<%s 'int'>)" % ('class' if PY3 else 'type'),
"Weight = 1.2 (<%s 'float'>)" % ('class' if PY3 else 'type'))
]
)
finally:
signals.pre_save.disconnect(self.animal_pre_save_check)
def test_dumpdata_uses_default_manager(self):
"""
Regression for #11286
Ensure that dumpdata honors the default manager
Dump the current contents of the database as a JSON fixture
"""
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
management.call_command(
'loaddata',
'sequence.json',
verbosity=0,
)
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2
)
animal.save()
stdout = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.animal',
format='json',
stdout=stdout
)
# Output order isn't guaranteed, so check for parts
data = stdout.getvalue()
# Get rid of artifacts like '000000002' to eliminate the differences
# between different Python versions.
        data = re.sub(r'0{6,}\d', '', data)
animals_data = sorted([
{"pk": 1, "model": "fixtures_regress.animal", "fields": {"count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo"}},
{"pk": 10, "model": "fixtures_regress.animal", "fields": {"count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae"}},
{"pk": animal.pk, "model": "fixtures_regress.animal", "fields": {"count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus"}},
], key=lambda x: x["pk"])
data = sorted(json.loads(data), key=lambda x: x["pk"])
self.maxDiff = 1024
self.assertEqual(data, animals_data)
def test_proxy_model_included(self):
"""
Regression for #11428 - Proxy models aren't included when you dumpdata
"""
stdout = StringIO()
# Create an instance of the concrete class
widget = Widget.objects.create(name='grommet')
management.call_command(
'dumpdata',
'fixtures_regress.widget',
'fixtures_regress.widgetproxy',
format='json',
stdout=stdout
)
self.assertJSONEqual(
stdout.getvalue(),
"""[{"pk": %d, "model": "fixtures_regress.widget", "fields": {"name": "grommet"}}]"""
% widget.pk
)
def test_loaddata_works_when_fixture_has_forward_refs(self):
"""
Regression for #3615 - Forward references cause fixtures not to load in MySQL (InnoDB)
"""
management.call_command(
'loaddata',
'forward_ref.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
"""
Regression for #3615 - Ensure data with nonexistent child key references raises error
"""
with six.assertRaisesRegex(self, IntegrityError,
"Problem installing fixture"):
management.call_command(
'loaddata',
'forward_ref_bad_data.json',
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_2')])
def test_loaddata_forward_refs_split_fixtures(self):
"""
Regression for #17530 - should be able to cope with forward references
when the fixtures are not in the same files or directories.
"""
management.call_command(
'loaddata',
'forward_ref_1.json',
'forward_ref_2.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_no_fixture_specified(self):
"""
Regression for #7043 - An error is quickly reported when no fixtures are provided on the command line.
"""
with six.assertRaisesRegex(self, management.CommandError,
"No database fixture specified. Please provide the path of "
"at least one fixture in the command line."):
management.call_command(
'loaddata',
verbosity=0,
)
def test_loaddata_nonexistent_fixture_file(self):
stdout_output = StringIO()
with warnings.catch_warnings(record=True):
management.call_command(
'loaddata',
'this_fixture_doesnt_exist',
verbosity=2,
stdout=stdout_output,
)
self.assertTrue("No fixture 'this_fixture_doesnt_exist' in" in
force_text(stdout_output.getvalue()))
def test_ticket_20820(self):
"""
Regression for ticket #20820 -- loaddata on a model that inherits
from a model with a M2M shouldn't blow up.
"""
management.call_command(
'loaddata',
'special-article.json',
verbosity=0,
)
class NaturalKeyFixtureTests(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 - Python-based parser version:
natural keys deserialize with a FK to an inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=1).data,
'apple'
)
self.assertEqual(
RefToNKChild.objects.get(pk=1).nk_fk.data,
'apple'
)
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 - XML version:
natural keys deserialize with a FK to an inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance2.xml',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=2).data,
'banana'
)
self.assertEqual(
RefToNKChild.objects.get(pk=2).nk_fk.data,
'apple'
)
def test_nk_on_serialize(self):
"""
Check that natural key requirements are taken into account
when serializing models
"""
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
stdout = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.book',
'fixtures_regress.person',
'fixtures_regress.store',
verbosity=0,
format='json',
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
stdout=stdout,
)
self.assertJSONEqual(
stdout.getvalue(),
"""[{"fields": {"main": null, "name": "Amazon"}, "model": "fixtures_regress.store"}, {"fields": {"main": null, "name": "Borders"}, "model": "fixtures_regress.store"}, {"fields": {"name": "Neal Stephenson"}, "model": "fixtures_regress.person"}, {"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]], "name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]"""
)
def test_dependency_sorting(self):
"""
Now let's check the dependency sorting explicitly.
It doesn't matter in what order you mention the models:
Store *must* be serialized before Person, and both
must be serialized before Book.
"""
sorted_deps = sort_dependencies(
[('fixtures_regress', [Book, Person, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_2(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Book, Store, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_3(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Store, Book, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_4(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Store, Person, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_5(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Book, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_6(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_dangling(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Circle1, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Circle1, Store, Person, Book]
)
def test_dependency_sorting_tight_circular(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Store, Book])],
)
def test_dependency_sorting_tight_circular_2(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Circle1, Book, Circle2])],
)
def test_dependency_self_referential(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle3 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Book, Circle3])],
)
def test_dependency_sorting_long(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Circle3, Store, Book])],
)
def test_dependency_sorting_normal(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, ExternalDependency, Book])]
)
self.assertEqual(
sorted_deps,
[Person, Book, ExternalDependency]
)
def test_normal_pk(self):
"""
Check that normal primary keys still work
on a model with natural key capabilities
"""
management.call_command(
'loaddata',
'non_natural_1.json',
verbosity=0,
)
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
management.call_command(
'loaddata',
'non_natural_2.xml',
verbosity=0,
)
books = Book.objects.all()
self.assertEqual(
books.__repr__(),
"""[<Book: Cryptonomicon by Neal Stephenson (available at Amazon, Borders)>, <Book: Ender's Game by Orson Scott Card (available at Collins Bookstore)>, <Book: Permutation City by Greg Egan (available at Angus and Robertson)>]"""
)
class TestTicket11101(TransactionTestCase):
available_apps = [
'fixtures_regress',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@skipUnlessDBFeature('supports_transactions')
def test_ticket_11101(self):
"""Test that fixtures can be rolled back (ticket #11101)."""
with transaction.atomic():
management.call_command(
'loaddata',
'thingy.json',
verbosity=0,
)
self.assertEqual(Thingy.objects.count(), 1)
transaction.set_rollback(True)
self.assertEqual(Thingy.objects.count(), 0)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import sys
if sys.version > '3':
basestring = str
xrange = range
unicode = str
from abc import ABCMeta
import copy
import numpy as np
from py4j.java_gateway import JavaObject
from pyspark.ml.linalg import DenseVector, Vector
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params', 'TypeConverters']
class Param(object):
"""
A param with self-contained documentation.
.. versionadded:: 1.3.0
"""
def __init__(self, parent, name, doc, typeConverter=None):
if not isinstance(parent, Identifiable):
raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
self.parent = parent.uid
self.name = str(name)
self.doc = str(doc)
self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
def _copy_new_parent(self, parent):
"""Copy the current param to a new parent, must be a dummy param."""
if self.parent == "undefined":
param = copy.copy(self)
param.parent = parent.uid
return param
else:
raise ValueError("Cannot copy from non-dummy parent %s." % parent)
def __str__(self):
return str(self.parent) + "__" + self.name
def __repr__(self):
return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, Param):
return self.parent == other.parent and self.name == other.name
else:
return False
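# A minimal usage sketch (illustrative, not part of this module): params are
# commonly declared as class attributes on a Params subclass with a dummy
# parent, and get re-parented via _copy_new_parent() when an instance is
# created, e.g.:
#
#     maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0)",
#                     typeConverter=TypeConverters.toInt)
#
# str(param) then combines the parent uid and the name, e.g.
# "SomeEstimator_abc123__maxIter" (uid shown here is made up).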
class TypeConverters(object):
"""
.. note:: DeveloperApi
Factory methods for common type conversion functions for `Param.typeConverter`.
.. versionadded:: 2.0.0
"""
@staticmethod
def _is_numeric(value):
vtype = type(value)
return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
@staticmethod
def _is_integer(value):
return TypeConverters._is_numeric(value) and float(value).is_integer()
@staticmethod
def _can_convert_to_list(value):
vtype = type(value)
return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
@staticmethod
def _can_convert_to_string(value):
vtype = type(value)
return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
@staticmethod
def identity(value):
"""
Dummy converter that just returns value.
"""
return value
@staticmethod
def toList(value):
"""
Convert a value to a list, if possible.
"""
if type(value) == list:
return value
elif type(value) in [np.ndarray, tuple, xrange, array.array]:
return list(value)
elif isinstance(value, Vector):
return list(value.toArray())
else:
raise TypeError("Could not convert %s to list" % value)
@staticmethod
def toListFloat(value):
"""
Convert a value to list of floats, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return [float(v) for v in value]
raise TypeError("Could not convert %s to list of floats" % value)
@staticmethod
def toListInt(value):
"""
Convert a value to list of ints, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_integer(v), value)):
return [int(v) for v in value]
raise TypeError("Could not convert %s to list of ints" % value)
@staticmethod
def toListString(value):
"""
Convert a value to list of strings, if possible.
"""
if TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
return [TypeConverters.toString(v) for v in value]
raise TypeError("Could not convert %s to list of strings" % value)
@staticmethod
def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._is_numeric(v), value)):
return DenseVector(value)
raise TypeError("Could not convert %s to vector" % value)
@staticmethod
def toFloat(value):
"""
Convert a value to a float, if possible.
"""
if TypeConverters._is_numeric(value):
return float(value)
else:
raise TypeError("Could not convert %s to float" % value)
@staticmethod
def toInt(value):
"""
Convert a value to an int, if possible.
"""
if TypeConverters._is_integer(value):
return int(value)
else:
raise TypeError("Could not convert %s to int" % value)
@staticmethod
def toString(value):
"""
Convert a value to a string, if possible.
"""
if isinstance(value, basestring):
return value
elif type(value) in [np.string_, np.str_]:
return str(value)
elif type(value) == np.unicode_:
return unicode(value)
else:
raise TypeError("Could not convert %s to string type" % type(value))
@staticmethod
def toBoolean(value):
"""
Convert a value to a boolean, if possible.
"""
if type(value) == bool:
return value
else:
raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
class Params(Identifiable):
"""
Components that take parameters. This also provides an internal
param map to store parameter values attached to the instance.
.. versionadded:: 1.3.0
"""
__metaclass__ = ABCMeta
def __init__(self):
super(Params, self).__init__()
#: internal param map for user-supplied values
self._paramMap = {}
#: internal param map for default values
self._defaultParamMap = {}
#: value returned by :py:func:`params`
self._params = None
# Copy the params from the class to the object
self._copy_params()
def _copy_params(self):
"""
Copy all params defined on the class to current object.
"""
cls = type(self)
src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
for name, param in src_params:
setattr(self, name, param._copy_new_parent(self))
@property
def params(self):
"""
Returns all params ordered by name. The default implementation
uses :py:func:`dir` to get all attributes of type
:py:class:`Param`.
"""
if self._params is None:
self._params = list(filter(lambda attr: isinstance(attr, Param),
[getattr(self, x) for x in dir(self) if x != "params" and
not isinstance(getattr(type(self), x, None), property)]))
return self._params
def explainParam(self, param):
"""
Explains a single param and returns its name, doc, and optional
default value and user-supplied value in a string.
"""
param = self._resolveParam(param)
values = []
if self.isDefined(param):
if param in self._defaultParamMap:
values.append("default: %s" % self._defaultParamMap[param])
if param in self._paramMap:
values.append("current: %s" % self._paramMap[param])
else:
values.append("undefined")
valueStr = "(" + ", ".join(values) + ")"
return "%s: %s %s" % (param.name, param.doc, valueStr)
def explainParams(self):
"""
Returns the documentation of all params with their optional
default values and user-supplied values.
"""
return "\n".join([self.explainParam(param) for param in self.params])
def getParam(self, paramName):
"""
Gets a param by its name.
"""
param = getattr(self, paramName)
if isinstance(param, Param):
return param
else:
raise ValueError("Cannot find param with name %s." % paramName)
def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap
def hasDefault(self, param):
"""
Checks whether a param has a default value.
"""
param = self._resolveParam(param)
return param in self._defaultParamMap
def isDefined(self, param):
"""
Checks whether a param is explicitly set by user or has
a default value.
"""
return self.isSet(param) or self.hasDefault(param)
def hasParam(self, paramName):
"""
Tests whether this instance contains a param with a given
(string) name.
"""
if isinstance(paramName, str):
p = getattr(self, paramName, None)
return isinstance(p, Param)
else:
raise TypeError("hasParam(): paramName must be a string")
def getOrDefault(self, param):
"""
Gets the value of a param in the user-supplied param map or its
default value. Raises an error if neither is set.
"""
param = self._resolveParam(param)
if param in self._paramMap:
return self._paramMap[param]
else:
return self._defaultParamMap[param]
def extractParamMap(self, extra=None):
"""
Extracts the embedded default param values and user-supplied
values, and then merges them with extra values from input into
a flat param map, where the latter value is used if there exist
conflicts, i.e., with ordering: default param values <
user-supplied values < extra.
:param extra: extra param values
:return: merged param map
"""
if extra is None:
extra = dict()
paramMap = self._defaultParamMap.copy()
paramMap.update(self._paramMap)
paramMap.update(extra)
return paramMap
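# Precedence sketch for the merge above: given a default of 10, a user-set
# value of 20 and extra={param: 30}, extractParamMap(extra) returns 30 for
# that param, because extra overrides user-supplied values, which in turn
# override defaults.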
def copy(self, extra=None):
"""
Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses should override this method if the default approach
is not sufficient.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
that = copy.copy(self)
that._paramMap = {}
that._defaultParamMap = {}
return self._copyValues(that, extra)
def _shouldOwn(self, param):
"""
Validates that the input param belongs to this Params instance.
"""
if not (self.uid == param.parent and self.hasParam(param.name)):
raise ValueError("Param %r does not belong to %r." % (param, self))
def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
self._shouldOwn(param)
return param
elif isinstance(param, str):
return self.getParam(param)
else:
raise ValueError("Cannot resolve %r as a param." % param)
@staticmethod
def _dummy():
"""
Returns a dummy Params instance used as a placeholder to
generate docs.
"""
dummy = Params()
dummy.uid = "undefined"
return dummy
def _set(self, **kwargs):
"""
Sets user-supplied params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None:
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
self._paramMap[p] = value
return self
def _clear(self, param):
"""
Clears a param from the param map if it has been explicitly set.
"""
if self.isSet(param):
del self._paramMap[param]
def _setDefault(self, **kwargs):
"""
Sets default params.
"""
for param, value in kwargs.items():
p = getattr(self, param)
if value is not None and not isinstance(value, JavaObject):
try:
value = p.typeConverter(value)
except TypeError as e:
raise TypeError('Invalid default param value given for param "%s". %s'
% (p.name, e))
self._defaultParamMap[p] = value
return self
def _copyValues(self, to, extra=None):
"""
Copies param values from this instance to another instance for
params shared by them.
:param to: the target instance
:param extra: extra params to be copied
:return: the target instance with param values copied
"""
paramMap = self._paramMap.copy()
if extra is not None:
paramMap.update(extra)
for param in self.params:
# copy default params
if param in self._defaultParamMap and to.hasParam(param.name):
to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
# copy explicitly set params
if param in paramMap and to.hasParam(param.name):
to._set(**{param.name: paramMap[param]})
return to
def _resetUid(self, newUid):
"""
Changes the uid of this instance. This updates both
the stored uid and the parent uid of params and param maps.
This is used by persistence (loading).
:param newUid: new uid to use, which is converted to unicode
:return: same instance, but with the uid and Param.parent values
updated, including within param maps
"""
newUid = unicode(newUid)
self.uid = newUid
newDefaultParamMap = dict()
newParamMap = dict()
for param in self.params:
newParam = copy.copy(param)
newParam.parent = newUid
if param in self._defaultParamMap:
newDefaultParamMap[newParam] = self._defaultParamMap[param]
if param in self._paramMap:
newParamMap[newParam] = self._paramMap[param]
param.parent = newUid
self._defaultParamMap = newDefaultParamMap
self._paramMap = newParamMap
return self
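# A minimal subclass sketch (illustrative only; the class and param names below
# are hypothetical, and _set/_setDefault are the internal helpers defined above):
#
#     class MyParams(Params):
#         threshold = Param(Params._dummy(), "threshold", "decision threshold",
#                           typeConverter=TypeConverters.toFloat)
#
#         def __init__(self):
#             super(MyParams, self).__init__()
#             self._setDefault(threshold=0.5)
#
#     p = MyParams()
#     p.getOrDefault(p.threshold)   # -> 0.5 (default value)
#     p._set(threshold=0.7)
#     p.getOrDefault(p.threshold)   # -> 0.7 (user-supplied value wins)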
|
|
from __future__ import annotations
from typing import Any
from public import public
import ibis
import ibis.common.exceptions as com
from .. import datatypes as dt
from .core import Expr
@public
class ValueExpr(Expr):
"""
Base class for a data generating expression having a fixed and known type,
either a single value (scalar) or a column of values.
"""
def __init__(self, arg, dtype, name=None):
super().__init__(arg)
self._name = name
self._dtype = dtype
def equals(self, other, cache=None):
return (
isinstance(other, ValueExpr)
and self._name == other._name
and self._dtype == other._dtype
and super().equals(other, cache=cache)
)
def has_name(self):
if self._name is not None:
return True
return self.op().has_resolved_name()
def get_name(self):
if self._name is not None:
# This value has been explicitly named
return self._name
# In some but not all cases we can get a name from the node that
# produces the value
return self.op().resolve_name()
def name(self, name):
return self._factory(self._arg, name=name)
def type(self):
return self._dtype
@property
def _factory(self):
def factory(arg, name=None):
return type(self)(arg, dtype=self.type(), name=name)
return factory
@public
class ScalarExpr(ValueExpr):
def _type_display(self):
return str(self.type())
def to_projection(self):
"""
Promote this column expression to a table projection
"""
from .relations import TableExpr
roots = self.op().root_tables()
if len(roots) > 1:
raise com.RelationError(
'Cannot convert scalar expression '
'involving multiple base table references '
'to a projection'
)
table = TableExpr(roots[0])
return table.projection([self])
def _repr_html_(self) -> str | None:
return None
@public
class ColumnExpr(ValueExpr):
def _type_display(self):
return str(self.type())
def parent(self):
return self._arg
def to_projection(self):
"""
Promote this column expression to a table projection
"""
from .relations import TableExpr
roots = self.op().root_tables()
if len(roots) > 1:
raise com.RelationError(
'Cannot convert array expression '
'involving multiple base table references '
'to a projection'
)
table = TableExpr(roots[0])
return table.projection([self])
def _repr_html_(self) -> str | None:
if not ibis.options.interactive:
return None
return self.execute().to_frame()._repr_html_()
@public
class AnyValue(ValueExpr):
pass # noqa: E701,E302
@public
class AnyScalar(ScalarExpr, AnyValue):
pass # noqa: E701,E302
@public
class AnyColumn(ColumnExpr, AnyValue):
pass # noqa: E701,E302
@public
class NullValue(AnyValue):
pass # noqa: E701,E302
@public
class NullScalar(AnyScalar, NullValue):
pass # noqa: E701,E302
@public
class NullColumn(AnyColumn, NullValue):
pass # noqa: E701,E302
@public
class ListExpr(ColumnExpr, AnyValue):
@property
def values(self):
return self.op().values
def __iter__(self):
return iter(self.values)
def __getitem__(self, key):
return self.values[key]
def __add__(self, other):
other_values = tuple(getattr(other, 'values', other))
return type(self.op())(self.values + other_values).to_expr()
def __radd__(self, other):
other_values = tuple(getattr(other, 'values', other))
return type(self.op())(other_values + self.values).to_expr()
def __bool__(self):
return bool(self.values)
__nonzero__ = __bool__
def __len__(self):
return len(self.values)
_NULL = None
@public
def null():
"""Create a NULL/NA scalar"""
import ibis.expr.operations as ops
global _NULL
if _NULL is None:
_NULL = ops.NullLiteral().to_expr()
return _NULL
@public
def literal(value: Any, type: dt.DataType | str | None = None) -> ScalarExpr:
"""Create a scalar expression from a Python value.
!!! tip "Use specific functions for arrays, structs and maps"
Ibis supports literal construction of arrays using the following
functions:
1. [`ibis.array`][ibis.array]
1. [`ibis.struct`][ibis.struct]
1. [`ibis.map`][ibis.map]
Constructing these types using `literal` will be deprecated in a future
release.
Parameters
----------
value
A Python value
type
An instance of [`DataType`][ibis.expr.datatypes.DataType] or a string
indicating the ibis type of `value`. This parameter can be used
in cases where ibis's type inference isn't sufficient for discovering
the type of `value`.
Returns
-------
ScalarExpr
An expression representing a literal value
Examples
--------
Construct an integer literal
>>> import ibis
>>> x = ibis.literal(42)
>>> x.type()
int8
Construct a `float64` literal from an `int`
>>> y = ibis.literal(42, type='double')
>>> y.type()
float64
Ibis checks for invalid types
>>> ibis.literal('foobar', type='int64') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Value 'foobar' cannot be safely coerced to int64
"""
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
if hasattr(value, 'op') and isinstance(value.op(), ops.Literal):
return value
try:
inferred_dtype = dt.infer(value)
except com.InputTypeError:
has_inferred = False
else:
has_inferred = True
if type is None:
has_explicit = False
else:
has_explicit = True
explicit_dtype = dt.dtype(type)
if has_explicit and has_inferred:
try:
# ensure type correctness: check that the inferred dtype is
# implicitly castable to the explicitly given dtype and value
dtype = inferred_dtype.cast(explicit_dtype, value=value)
except com.IbisTypeError:
raise TypeError(
f'Value {value!r} cannot be safely coerced to {type}'
)
elif has_explicit:
dtype = explicit_dtype
elif has_inferred:
dtype = inferred_dtype
else:
raise TypeError(
'The datatype of value {!r} cannot be inferred, try '
'passing it explicitly with the `type` keyword.'.format(value)
)
if dtype is dt.null:
return null().cast(dtype)
else:
value = dt._normalize(dtype, value)
return ops.Literal(value, dtype=dtype).to_expr()
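# Summary of the dtype resolution above (a reading aid, not new behaviour):
#   - explicit `type` and successful inference: the inferred dtype must be
#     implicitly castable to the explicit one, otherwise TypeError is raised;
#   - only explicit `type`: it is used as-is;
#   - only inference: the inferred dtype is used;
#   - neither: TypeError asks the caller to pass `type` explicitly.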
|
|
import tensorflow as tf
import numpy as np
############################################################################################################
# Convolution layer Methods
def __conv2d_p(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
"""
Convolution 2D Wrapper
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, H, W, C).
:param w: (tf.tensor) pretrained weights (if None, it means no pretrained weights)
:param num_filters: (integer) No. of filters (This is the output depth)
:param kernel_size: (integer tuple) The size of the convolving kernel.
:param padding: (string) The padding scheme, 'SAME' or 'VALID'.
:param stride: (integer tuple) The stride required.
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias. (if not float, it means pretrained bias)
:return out: The output of the layer. (N, H', W', num_filters)
"""
with tf.variable_scope(name):
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters]
with tf.name_scope('layer_weights'):
if w is None:
w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
__variable_summaries(w)
with tf.name_scope('layer_biases'):
if isinstance(bias, float):
bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))
__variable_summaries(bias)
with tf.name_scope('layer_conv2d'):
conv = tf.nn.conv2d(x, w, stride, padding)
out = tf.nn.bias_add(conv, bias)
return out
def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
is_training=True):
"""
This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
Note that: "is_training" should be passed by a correct value based on being in either training or testing.
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, H, W, C).
:param num_filters: (integer) No. of filters (This is the output depth)
:param kernel_size: (integer tuple) The size of the convolving kernel.
:param padding: (string) The padding scheme, 'SAME' or 'VALID'.
:param stride: (integer tuple) The stride required.
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias.
:param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
:param batchnorm_enabled: (boolean) for enabling batch normalization.
:param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
:param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
:param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
:return: The output tensor of the layer (N, H', W', C').
"""
with tf.variable_scope(name) as scope:
conv_o_b = __conv2d_p('conv', x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
padding=padding,
initializer=initializer, l2_strength=l2_strength, bias=bias)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_o_b
else:
conv_a = activation(conv_o_b)
def dropout_with_keep():
return tf.nn.dropout(conv_a, dropout_keep_prob)
def dropout_no_keep():
return tf.nn.dropout(conv_a, 1.0)
if dropout_keep_prob != -1:
conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
else:
conv_o_dr = conv_a
conv_o = conv_o_dr
if max_pool_enabled:
conv_o = max_pool_2d(conv_o_dr)
return conv_o
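# A usage sketch (illustrative; `images` and `training_flag` are placeholders
# assumed to be defined elsewhere). Note that `is_training` must be a boolean
# tensor when dropout is enabled, because it is fed to tf.cond above:
#
#     out = conv2d('conv1', x=images, num_filters=32, kernel_size=(3, 3),
#                  stride=(1, 1), activation=tf.nn.relu,
#                  batchnorm_enabled=True, dropout_keep_prob=0.9,
#                  is_training=training_flag)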
def grouped_conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), num_groups=1, l2_strength=0.0, bias=0.0,
activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
is_training=True):
with tf.variable_scope(name) as scope:
sz = x.get_shape()[3].value // num_groups
conv_side_layers = [
conv2d(name + "_" + str(i), x[:, :, :, i * sz:i * sz + sz], w, num_filters // num_groups, kernel_size,
padding,
stride,
initializer,
l2_strength, bias, activation=None,
batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=dropout_keep_prob,
is_training=is_training) for i in
range(num_groups)]
conv_g = tf.concat(conv_side_layers, axis=-1)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_g, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_g
else:
conv_a = activation(conv_g)
return conv_a
def __depthwise_conv2d_p(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
with tf.variable_scope(name):
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], 1]
with tf.name_scope('layer_weights'):
if w is None:
w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
__variable_summaries(w)
with tf.name_scope('layer_biases'):
if isinstance(bias, float):
bias = tf.get_variable('biases', [x.shape[-1]], initializer=tf.constant_initializer(bias))
__variable_summaries(bias)
with tf.name_scope('layer_conv2d'):
conv = tf.nn.depthwise_conv2d(x, w, stride, padding)
out = tf.nn.bias_add(conv, bias)
return out
def depthwise_conv2d(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0, activation=None,
batchnorm_enabled=False, is_training=True):
with tf.variable_scope(name) as scope:
conv_o_b = __depthwise_conv2d_p(name='conv', x=x, w=w, kernel_size=kernel_size, padding=padding,
stride=stride, initializer=initializer, l2_strength=l2_strength, bias=bias)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_o_b
else:
conv_a = activation(conv_o_b)
return conv_a
############################################################################################################
# ShuffleNet unit methods
def shufflenet_unit(name, x, w=None, num_groups=1, group_conv_bottleneck=True, num_filters=16, stride=(1, 1),
l2_strength=0.0, bias=0.0, batchnorm_enabled=True, is_training=True, fusion='add'):
# Paper parameters. If you want to change them feel free to pass them as method parameters.
activation = tf.nn.relu
with tf.variable_scope(name) as scope:
residual = x
bottleneck_filters = (num_filters // 4) if fusion == 'add' else (num_filters - residual.get_shape()[
3].value) // 4
if group_conv_bottleneck:
bottleneck = grouped_conv2d('Gbottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1),
padding='VALID',
num_groups=num_groups, l2_strength=l2_strength, bias=bias,
activation=activation,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
shuffled = channel_shuffle('channel_shuffle', bottleneck, num_groups)
else:
bottleneck = conv2d('bottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1),
padding='VALID', l2_strength=l2_strength, bias=bias, activation=activation,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
shuffled = bottleneck
padded = tf.pad(shuffled, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
depthwise = depthwise_conv2d('depthwise', x=padded, w=None, stride=stride, l2_strength=l2_strength,
padding='VALID', bias=bias,
activation=None, batchnorm_enabled=batchnorm_enabled, is_training=is_training)
if stride == (2, 2):
residual_pooled = avg_pool_2d(residual, size=(3, 3), stride=stride, padding='SAME')
else:
residual_pooled = residual
if fusion == 'concat':
group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None,
num_filters=num_filters - residual.get_shape()[3].value,
kernel_size=(1, 1),
padding='VALID',
num_groups=num_groups, l2_strength=l2_strength, bias=bias,
activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
return activation(tf.concat([residual_pooled, group_conv1x1], axis=-1))
elif fusion == 'add':
group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None,
num_filters=num_filters,
kernel_size=(1, 1),
padding='VALID',
num_groups=num_groups, l2_strength=l2_strength, bias=bias,
activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
residual_match = residual_pooled
# This is used if the number of filters of the residual block is different from that
# of the group convolution.
if num_filters != residual_pooled.get_shape()[3].value:
residual_match = conv2d('residual_match', x=residual_pooled, w=None, num_filters=num_filters,
kernel_size=(1, 1),
padding='VALID', l2_strength=l2_strength, bias=bias, activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
return activation(group_conv1x1 + residual_match)
else:
raise ValueError("Specify whether the fusion is \'concat\' or \'add\'")
def channel_shuffle(name, x, num_groups):
with tf.variable_scope(name) as scope:
n, h, w, c = x.shape.as_list()
x_reshaped = tf.reshape(x, [-1, h, w, num_groups, c // num_groups])
x_transposed = tf.transpose(x_reshaped, [0, 1, 2, 4, 3])
output = tf.reshape(x_transposed, [-1, h, w, c])
return output
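# Shuffle sketch: with c = 6 channels and num_groups = 2, the reshape/transpose
# above turns the channel order [g0c0, g0c1, g0c2, g1c0, g1c1, g1c2] into
# [g0c0, g1c0, g0c1, g1c1, g0c2, g1c2], i.e. channels are interleaved across
# the groups so that the following grouped convolution mixes information.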
############################################################################################################
# Fully Connected layer Methods
def __dense_p(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
bias=0.0):
"""
Fully connected layer
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, D).
:param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias. (if not float, it means pretrained bias)
:return out: The output of the layer. (N, H)
"""
n_in = x.get_shape()[-1].value
with tf.variable_scope(name):
if w is None:
w = __variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)
__variable_summaries(w)
if isinstance(bias, float):
bias = tf.get_variable("layer_biases", [output_dim], tf.float32, tf.constant_initializer(bias))
__variable_summaries(bias)
output = tf.nn.bias_add(tf.matmul(x, w), bias)
return output
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
bias=0.0,
activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
is_training=True
):
"""
This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
Note that: "is_training" should be passed by a correct value based on being in either training or testing.
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, D).
:param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias.
:param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
:param batchnorm_enabled: (boolean) for enabling batch normalization.
:param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
:param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
:return out: The output of the layer. (N, H)
"""
with tf.variable_scope(name) as scope:
dense_o_b = __dense_p(name='dense', x=x, w=w, output_dim=output_dim, initializer=initializer,
l2_strength=l2_strength,
bias=bias)
if batchnorm_enabled:
dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training, epsilon=1e-5)
if not activation:
dense_a = dense_o_bn
else:
dense_a = activation(dense_o_bn)
else:
if not activation:
dense_a = dense_o_b
else:
dense_a = activation(dense_o_b)
def dropout_with_keep():
return tf.nn.dropout(dense_a, dropout_keep_prob)
def dropout_no_keep():
return tf.nn.dropout(dense_a, 1.0)
if dropout_keep_prob != -1:
dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
else:
dense_o_dr = dense_a
dense_o = dense_o_dr
return dense_o
def flatten(x):
"""
Flatten a (N,H,W,C) input into (N,D) output. Used for fully connected layers after convolution layers.
:param x: (tf.tensor) representing input
:return: flattened output
"""
all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])
o = tf.reshape(x, [-1, all_dims_exc_first])
return o
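# Example of the flattening above: an input of shape (N, 7, 7, 64) is reshaped
# to (N, 3136), since 7 * 7 * 64 = 3136.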
############################################################################################################
# Pooling Methods
def max_pool_2d(x, size=(2, 2), stride=(2, 2), name='pooling'):
"""
Max pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
:param size: (tuple) The size of the pooling window.
:param stride: (tuple) The stride of the pooling window.
:param name: (string) Scope name.
:return: The output with width and height halved for the default 2x2 window and stride (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.max_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID',
name=name)
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):
"""
Average pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
:param size: (tuple) The size of the pooling window.
:param stride: (tuple) The stride of the pooling window.
:param name: (string) Scope name.
:param padding: (string) The padding scheme, 'VALID' or 'SAME'.
:return: The output with width and height halved for the default 2x2 window and stride (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding,
name=name)
############################################################################################################
# Utilities for layers
def __variable_with_weight_decay(kernel_shape, initializer, wd):
"""
Create a variable with L2 Regularization (Weight Decay)
:param kernel_shape: the size of the convolving weight kernel.
:param initializer: The initialization scheme, He et al. normal or Xavier normal are recommended.
:param wd:(weight decay) L2 regularization parameter.
:return: The weights of the kernel initialized. The L2 loss is added to the loss collection.
"""
w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)
collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
if wd and (not tf.get_variable_scope().reuse):
weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')
tf.add_to_collection(collection_name, weight_decay)
return w
# Summaries for variables
def __variable_summaries(var):
"""
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
:param var: variable to be summarized
:return: None
"""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
|
|
import tensorflow as tf
from neupy.core.properties import (
ProperFractionProperty,
ParameterProperty,
TypedListProperty,
NumberProperty,
IntProperty,
)
from neupy.utils import asfloat
from neupy.exceptions import (
WeightInitializationError,
LayerConnectionError,
)
from .base import Identity
__all__ = ('BatchNorm', 'LocalResponseNorm', 'GroupNorm')
class BatchNorm(Identity):
"""
Batch normalization layer.
Parameters
----------
axes : tuple with ints or None
Axes along which normalization will be applied. The ``None``
value means that normalization will be applied over all axes
except the last one. In case of 4D tensor it will
be equal to ``(0, 1, 2)``. Defaults to ``None``.
epsilon : float
Epsilon is a small positive constant added to the estimated
variance to prevent division by zero.
Defaults to ``1e-5``.
alpha : float
Coefficient for the exponential moving average of
batch-wise means and standard deviations computed during
training; the closer to one, the more it will depend on
the last batches seen. Value needs to be between ``0`` and ``1``.
Defaults to ``0.1``.
gamma : array-like, TensorFlow variable, scalar or Initializer
Scale. Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=1)``.
beta : array-like, TensorFlow variable, scalar or Initializer
Offset. Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=0)``.
running_mean : array-like, TensorFlow variable, scalar or Initializer
Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=0)``.
running_inv_std : array-like, TensorFlow variable, scalar or Initializer
Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=1)``.
{Identity.name}
Methods
-------
{Identity.Methods}
Attributes
----------
{Identity.Attributes}
Examples
--------
Feedforward Neural Networks (FNN) with batch normalization after
activation function was applied.
>>> from neupy.layers import *
>>> network = join(
... Input(10),
... Relu(5) >> BatchNorm(),
... Relu(5) >> BatchNorm(),
... Sigmoid(1),
... )
Feedforward Neural Networks (FNN) with batch normalization before
activation function was applied.
>>> from neupy.layers import *
>>> network = join(
... Input(10),
... Linear(5) >> BatchNorm() >> Relu(),
... Linear(5) >> BatchNorm() >> Relu(),
... Sigmoid(1),
... )
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((28, 28, 1)),
... Convolution((3, 3, 16)) >> BatchNorm() >> Relu(),
... Convolution((3, 3, 16)) >> BatchNorm() >> Relu(),
... Reshape(),
... Softmax(10),
... )
References
----------
.. [1] Batch Normalization: Accelerating Deep Network Training
by Reducing Internal Covariate Shift,
http://arxiv.org/pdf/1502.03167v3.pdf
"""
axes = TypedListProperty(allow_none=True)
epsilon = NumberProperty(minval=0)
alpha = ProperFractionProperty()
beta = ParameterProperty()
gamma = ParameterProperty()
running_mean = ParameterProperty()
running_inv_std = ParameterProperty()
def __init__(self, axes=None, alpha=0.1, beta=0, gamma=1, epsilon=1e-5,
running_mean=0, running_inv_std=1, name=None):
super(BatchNorm, self).__init__(name=name)
self.axes = axes
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.epsilon = epsilon
self.running_mean = running_mean
self.running_inv_std = running_inv_std
if axes is not None and len(set(axes)) != len(axes):
raise ValueError(
"Specified axes have to contain only unique values")
def create_variables(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if input_shape.ndims is None:
raise WeightInitializationError(
"Cannot initialize variables for the batch normalization "
"layer, because input shape is undefined. Layer: {}"
"".format(self))
if self.axes is None:
# If ndims == 4 then axes = (0, 1, 2)
# If ndims == 2 then axes = (0,)
self.axes = tuple(range(input_shape.ndims - 1))
if any(axis >= input_shape.ndims for axis in self.axes):
raise LayerConnectionError(
"Batch normalization cannot be applied over one of "
"the axis, because input has only {} dimensions. Layer: {}"
"".format(input_shape.ndims, self))
parameter_shape = tuple([
input_shape[axis].value if axis not in self.axes else 1
for axis in range(input_shape.ndims)
])
if any(parameter is None for parameter in parameter_shape):
unknown_dim_index = parameter_shape.index(None)
raise WeightInitializationError(
"Cannot create variables for batch normalization, because "
"input has unknown dimension #{} (0-based indices). "
"Input shape: {}, Layer: {}".format(
unknown_dim_index, input_shape, self))
self.input_shape = input_shape
self.running_mean = self.variable(
value=self.running_mean, shape=parameter_shape,
name='running_mean', trainable=False)
self.running_inv_std = self.variable(
value=self.running_inv_std, shape=parameter_shape,
name='running_inv_std', trainable=False)
self.gamma = self.variable(
value=self.gamma, name='gamma',
shape=parameter_shape)
self.beta = self.variable(
value=self.beta, name='beta',
shape=parameter_shape)
def output(self, input, training=False):
input = tf.convert_to_tensor(input, dtype=tf.float32)
if not training:
mean = self.running_mean
inv_std = self.running_inv_std
else:
alpha = asfloat(self.alpha)
mean = tf.reduce_mean(
input, self.axes,
keepdims=True, name="mean",
)
variance = tf.reduce_mean(
tf.squared_difference(input, tf.stop_gradient(mean)),
self.axes,
keepdims=True,
name="variance",
)
inv_std = tf.rsqrt(variance + asfloat(self.epsilon))
tf.add_to_collection(
tf.GraphKeys.UPDATE_OPS,
self.running_inv_std.assign(
asfloat(1 - alpha) * self.running_inv_std + alpha * inv_std
)
)
tf.add_to_collection(
tf.GraphKeys.UPDATE_OPS,
self.running_mean.assign(
asfloat(1 - alpha) * self.running_mean + alpha * mean
)
)
normalized_value = (input - mean) * inv_std
return self.gamma * normalized_value + self.beta
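# Usage note (grounded in output() above): during training, call the layer with
# training=True so that batch statistics are used and the running mean and
# running inverse std are updated through the tf.GraphKeys.UPDATE_OPS
# collection; at inference time the stored running statistics are used instead.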
class LocalResponseNorm(Identity):
"""
Local Response Normalization Layer.
Aggregation is purely across channels, not within channels,
and performed "pixelwise".
If the value of the :math:`i` th channel is :math:`x_i`, the output is
.. math::
x_i = \\frac{{x_i}}{{ (k + ( \\alpha \\sum_j x_j^2 ))^\\beta }}
where the summation is performed over this position on :math:`n`
neighboring channels.
Parameters
----------
alpha : float
Coefficient, see equation above. Defaults to ``1e-4``.
beta : float
Exponent, see equation above. Defaults to ``0.75``.
k : float
Additive constant (passed as ``bias`` to the TensorFlow op), see equation above. Defaults to ``2``.
depth_radius : int
Number of adjacent channels to normalize over, must be odd.
Defaults to ``5``.
{Identity.name}
Methods
-------
{Identity.Methods}
Attributes
----------
{Identity.Attributes}
Examples
--------
>>> from neupy.layers import *
>>> network = Input((10, 10, 12)) >> LocalResponseNorm()
"""
alpha = NumberProperty()
beta = NumberProperty()
k = NumberProperty()
depth_radius = IntProperty()
def __init__(self, alpha=1e-4, beta=0.75, k=2, depth_radius=5, name=None):
super(LocalResponseNorm, self).__init__(name=name)
if depth_radius % 2 == 0:
raise ValueError("Only works with odd `depth_radius` values")
self.alpha = alpha
self.beta = beta
self.k = k
self.depth_radius = depth_radius
def get_output_shape(self, input_shape):
if input_shape and input_shape.ndims != 4:
raise LayerConnectionError(
"Layer `{}` expected input with 4 dimensions, got {} instead. "
"Shape: {}".format(self.name, input_shape.ndims, input_shape))
return super(LocalResponseNorm, self).get_output_shape(input_shape)
def output(self, input, **kwargs):
return tf.nn.local_response_normalization(
input,
depth_radius=self.depth_radius,
bias=self.k,
alpha=self.alpha,
beta=self.beta)
class GroupNorm(Identity):
"""
Group Normalization layer. This layer is a simple alternative to the
Batch Normalization layer for cases when batch size is small.
Parameters
----------
n_groups : int
During normalization all the channels will be broken down into
separate groups and the mean and variance will be estimated per group.
This parameter controls number of groups.
gamma : array-like, TensorFlow variable, scalar or Initializer
Scale. Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=1)``.
beta : array-like, TensorFlow variable, scalar or Initializer
Offset. Default initialization methods you can
find :ref:`here <init-methods>`.
Defaults to ``Constant(value=0)``.
epsilon : float
Epsilon ensures that the input rescaling procedure, which uses the
estimated variance, never causes division by zero. Defaults to ``1e-5``.
{Identity.name}
Methods
-------
{Identity.Methods}
Attributes
----------
{Identity.Attributes}
Examples
--------
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((28, 28, 1)),
... Convolution((3, 3, 16)) >> GroupNorm(4) >> Relu(),
... Convolution((3, 3, 16)) >> GroupNorm(4) >> Relu(),
... Reshape(),
... Softmax(10),
... )
References
----------
.. [1] Group Normalization, Yuxin Wu, Kaiming He,
https://arxiv.org/pdf/1803.08494.pdf
"""
n_groups = IntProperty(minval=1)
beta = ParameterProperty()
gamma = ParameterProperty()
epsilon = NumberProperty(minval=0)
def __init__(self, n_groups, beta=0, gamma=1, epsilon=1e-5, name=None):
super(GroupNorm, self).__init__(name=name)
self.n_groups = n_groups
self.beta = beta
self.gamma = gamma
self.epsilon = epsilon
def create_variables(self, input_shape):
n_channels = input_shape[3]
if n_channels.value is None:
raise WeightInitializationError(
"Cannot initialize variables when number of "
"channels is unknown. Input shape: {}, Layer: {}"
"".format(input_shape, self))
parameter_shape = (1, 1, 1, n_channels)
self.gamma = self.variable(
value=self.gamma, name='gamma',
shape=parameter_shape)
self.beta = self.variable(
value=self.beta, name='beta',
shape=parameter_shape)
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if input_shape and input_shape.ndims != 4:
raise LayerConnectionError(
"Group normalization layer expects 4 dimensional input, "
"got {} instead. Input shape: {}, Layer: {}"
"".format(input_shape.ndims, input_shape, self))
n_channels = input_shape[3]
if n_channels.value and n_channels % self.n_groups != 0:
raise LayerConnectionError(
"Cannot divide {} input channels into {} groups. "
"Input shape: {}, Layer: {}".format(
n_channels, self.n_groups, input_shape, self))
return super(GroupNorm, self).get_output_shape(input_shape)
def output(self, input):
input = tf.convert_to_tensor(input, dtype=tf.float32)
input_shape = tf.shape(input)
n_groups = self.n_groups
# We access dimensional information in form of tensors in case
# if some of the dimensions are undefined. In this way we make
# sure that reshape will work even if part of the input shape
# is undefined.
dims = [input_shape[i] for i in range(4)]
n_samples, height, width, n_channels = dims
input = tf.reshape(input, [
n_samples, height, width, n_groups, n_channels // n_groups])
mean, variance = tf.nn.moments(input, [1, 2, 4], keep_dims=True)
input = (input - mean) / tf.sqrt(variance + self.epsilon)
input = tf.reshape(input, input_shape)
return input * self.gamma + self.beta
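# Example of the grouping above: for an input with 16 channels and n_groups=4,
# the channels are split into 4 groups of 4; mean and variance are computed per
# sample over (height, width, channels-within-group) and each group is
# normalized independently before gamma and beta are applied.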
|
|
# -*- coding: utf-8 -*-
"""
Django settings for Budger project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (budger/config/settings/common.py - 3 = budger/)
APPS_DIR = ROOT_DIR.path('budger')
env = environ.Env()
env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'rest_framework', # REST API
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'budger.users.apps.UsersConfig',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'budger.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Denis Olehov""", 'denolehov@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///budger'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# API
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'budger.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'budger.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('budger.taskapp.celery.CeleryConfig',)
# If you are not using the Django database broker (i.e. you use RabbitMQ, Redis or memcached instead), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
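# A minimal sketch (not part of this settings module) of how an
# environment-specific settings file such as config/settings/local.py might
# extend this common module in a cookiecutter-style layout; the module path
# and the overridden values are illustrative assumptions.
#
# from .common import *  # noqa
#
# DEBUG = env.bool('DJANGO_DEBUG', default=True)
# EMAIL_BACKEND = env(
#     'DJANGO_EMAIL_BACKEND',
#     default='django.core.mail.backends.console.EmailBackend',
# )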
|
|
import re
from deltas import wikitext_split
from deltas.segmenters import ParagraphsSentencesAndWhitespace
from revscoring.datasources import Datasource
from revscoring.datasources.meta import filters, frequencies, mappers
class Revision:
def __init__(self, name, revision_datasources):
super().__init__(name, revision_datasources)
self.tokens = tokenized(revision_datasources.text)
"""
A list of all tokens
"""
self.paragraphs_sentences_and_whitespace = Datasource(
self._name + ".paragraphs_sentences_and_whitespace",
paragraphs_sentences_and_whitespace.segment,
depends_on=[self.tokens]
)
"""
A list of paragraphs, sentences, and whitespaces as segments. See
:class:`deltas.segmenters.Segment` and
:class:`deltas.segmenters.MatchableSegment`.
"""
self.token_frequency = frequencies.table(
self.tokens,
name=self._name + ".token_frequency"
)
"""
A frequency table of all tokens.
"""
self.numbers = self.tokens_in_types(
{'number'}, name=self._name + ".numbers"
)
"""
A list of numeric tokens
"""
self.number_frequency = frequencies.table(
self.numbers, name=self._name + ".number_frequency"
)
"""
A frequency table of number tokens.
"""
self.whitespaces = self.tokens_in_types(
{'whitespace'}, name=self._name + ".whitespaces"
)
"""
A list of whitespace tokens
"""
self.whitespace_frequency = frequencies.table(
self.whitespaces, name=self._name + ".whitespace_frequency"
)
"""
A frequency table of whitespace tokens.
"""
self.markups = self.tokens_in_types(
{'dbrack_open', 'dbrack_close', 'brack_open', 'brack_close',
'tab_open', 'tab_close', 'dcurly_open', 'dcurly_close',
'curly_open', 'curly_close', 'bold', 'italics', 'equals'},
name=self._name + ".markups"
)
"""
A list of markup tokens
"""
self.markup_frequency = frequencies.table(
self.markups, name=self._name + ".markup_frequency"
)
"""
A frequency table of markup tokens.
"""
self.cjks = self.tokens_in_types(
{'cjk'}, name=self._name + ".cjks"
)
"""
A list of Chinese/Japanese/Korean tokens
"""
self.cjk_frequency = frequencies.table(
self.cjks, name=self._name + ".cjk_frequency"
)
"""
A frequency table of cjk tokens.
"""
self.entities = self.tokens_in_types(
{'entity'}, name=self._name + ".entities"
)
"""
A list of HTML entity tokens
"""
self.entity_frequency = frequencies.table(
self.entities, name=self._name + ".entity_frequency"
)
"""
A frequency table of entity tokens.
"""
self.urls = self.tokens_in_types(
{'url'}, name=self._name + ".urls"
)
"""
A list of URL tokens
"""
self.url_frequency = frequencies.table(
self.urls, name=self._name + ".url_frequency"
)
"""
A frequency table of url tokens.
"""
self.words = self.tokens_in_types(
{'word'}, name=self._name + ".words"
)
"""
A list of word tokens
"""
self.word_frequency = frequencies.table(
mappers.lower_case(self.words),
name=self._name + ".word_frequency"
)
"""
A frequency table of lower-cased word tokens.
"""
self.uppercase_words = filters.filter(
is_uppercase_word, self.words,
name=self._name + ".uppercase_words"
)
"""
A list of uppercase word tokens that are at least two
characters long.
"""
self.uppercase_word_frequency = frequencies.table(
self.uppercase_words,
name=self._name + ".uppercase_word_frequency"
)
"""
A frequency table of uppercase word tokens that are at least two
characters long.
"""
self.punctuations = self.tokens_in_types(
{'period', 'qmark', 'epoint', 'comma', 'colon', 'scolon',
'japan_punct'},
name=self._name + ".punctuations"
)
"""
A list of punctuation tokens
"""
self.punctuation_frequency = frequencies.table(
self.punctuations, name=self._name + ".punctuation_frequency"
)
"""
A frequency table of punctuation tokens.
"""
self.breaks = self.tokens_in_types(
{'break'}, name=self._name + ".breaks"
)
"""
A list of break tokens
"""
self.break_frequency = frequencies.table(
self.breaks, name=self._name + ".break_frequency"
)
"""
A frequency table of break tokens.
"""
def tokens_in_types(self, types, name=None):
"""
Constructs a :class:`revscoring.Datasource` that returns all content
tokens that are within a set of types.
"""
token_is_in_types = TokenIsInTypes(types)
if name is None:
name = "{0}({1})" \
.format(self._name + ".tokens_in_types", types)
return filters.filter(token_is_in_types.filter,
self.tokens, name=name)
def tokens_matching(self, regex, name=None, regex_flags=re.I):
"""
Constructs a :class:`revscoring.Datasource` that returns all content
tokens that match a regular expression.
"""
if not hasattr(regex, "pattern"):
regex = re.compile(regex, regex_flags)
if name is None:
name = "{0}({1})" \
.format(self._name + ".tokens_matching", regex.pattern)
return filters.regex_matching(regex, self.tokens,
name=name)
class Diff():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token_delta = frequencies.delta(
self.revision.parent.token_frequency,
self.revision.token_frequency,
name=self._name + ".token_delta"
)
"""
A token frequency delta table
"""
self.token_prop_delta = frequencies.prop_delta(
self.revision.parent.token_frequency,
self.token_delta,
name=self._name + ".token_prop_delta"
)
"""
A token proportional frequency delta table
"""
self.number_delta = frequencies.delta(
self.revision.parent.number_frequency,
self.revision.number_frequency,
name=self._name + ".number_delta"
)
"""
A number frequency delta table
"""
self.number_prop_delta = frequencies.prop_delta(
self.revision.parent.number_frequency,
self.number_delta,
name=self._name + ".number_prop_delta"
)
"""
A number proportional frequency delta table
"""
self.whitespace_delta = frequencies.delta(
self.revision.parent.whitespace_frequency,
self.revision.whitespace_frequency,
name=self._name + ".whitespace_delta"
)
"""
A whitespace frequency delta table
"""
self.whitespace_prop_delta = frequencies.prop_delta(
self.revision.parent.whitespace_frequency,
self.whitespace_delta,
name=self._name + ".whitespace_prop_delta"
)
"""
A whitespace proportional frequency delta table
"""
self.markup_delta = frequencies.delta(
self.revision.parent.markup_frequency,
self.revision.markup_frequency,
name=self._name + ".markup_delta"
)
"""
A markup frequency delta table
"""
self.markup_prop_delta = frequencies.prop_delta(
self.revision.parent.markup_frequency,
self.markup_delta,
name=self._name + ".markup_prop_delta"
)
"""
A markup proportional frequency delta table
"""
self.cjk_delta = frequencies.delta(
self.revision.parent.cjk_frequency,
self.revision.cjk_frequency,
name=self._name + ".cjk_delta"
)
"""
A cjk frequency delta table
"""
self.cjk_prop_delta = frequencies.prop_delta(
self.revision.parent.cjk_frequency,
self.cjk_delta,
name=self._name + ".cjk_prop_delta"
)
"""
A cjk proportional frequency delta table
"""
self.entity_delta = frequencies.delta(
self.revision.parent.entity_frequency,
self.revision.entity_frequency,
name=self._name + ".entity_delta"
)
"""
An entity frequency delta table
"""
self.entity_prop_delta = frequencies.prop_delta(
self.revision.parent.entity_frequency,
self.entity_delta,
name=self._name + ".entity_prop_delta"
)
"""
An entity proportional frequency delta table
"""
self.url_delta = frequencies.delta(
self.revision.parent.url_frequency,
self.revision.url_frequency,
name=self._name + ".url_delta"
)
"""
A url frequency delta table
"""
self.url_prop_delta = frequencies.prop_delta(
self.revision.parent.url_frequency,
self.url_delta,
name=self._name + ".url_prop_delta"
)
"""
A url proportional frequency delta table
"""
self.word_delta = frequencies.delta(
self.revision.parent.word_frequency,
self.revision.word_frequency,
name=self._name + ".word_delta"
)
"""
A lower-cased word frequency delta table
"""
self.word_prop_delta = frequencies.prop_delta(
self.revision.parent.word_frequency,
self.word_delta,
name=self._name + ".word_prop_delta"
)
"""
A lower-cased word proportional frequency delta table
"""
self.uppercase_word_delta = frequencies.delta(
self.revision.parent.uppercase_word_frequency,
self.revision.uppercase_word_frequency,
name=self._name + ".uppercase_word_delta"
)
"""
An uppercase word frequency delta table
"""
self.uppercase_word_prop_delta = frequencies.prop_delta(
self.revision.parent.uppercase_word_frequency,
self.uppercase_word_delta,
name=self._name + ".uppercase_word_prop_delta"
)
"""
An uppercase word proportional frequency delta table
"""
self.punctuation_delta = frequencies.delta(
self.revision.parent.punctuation_frequency,
self.revision.punctuation_frequency,
name=self._name + ".punctuation_delta"
)
"""
A punctuation frequency delta table
"""
self.punctuation_prop_delta = frequencies.prop_delta(
self.revision.parent.punctuation_frequency,
self.punctuation_delta,
name=self._name + ".punctuation_prop_delta"
)
"""
A punctuation proportional frequency delta table
"""
self.break_delta = frequencies.delta(
self.revision.parent.break_frequency,
self.revision.break_frequency,
name=self._name + ".break_delta"
)
"""
A break frequency delta table
"""
self.break_prop_delta = frequencies.prop_delta(
self.revision.parent.break_frequency,
self.break_delta,
name=self._name + ".break_prop_delta"
)
"""
A break proportional frequency delta table
"""
def is_uppercase_word(word_token):
return len(word_token) > 1 and \
sum(c.lower() != c for c in word_token) == len(word_token)
class TokenIsInTypes:
def __init__(self, types):
self.types = set(types)
def filter(self, token):
return token.type in self.types
def _process_tokens(text):
return [t for t in wikitext_split.tokenize(text or "")]
def tokenized(text_datasource, name=None):
"""
Constructs a :class:`revscoring.Datasource` that generates a list of tokens
"""
if name is None:
name = "{0}({1})".format("tokenized", text_datasource)
return Datasource(
name, _process_tokens, depends_on=[text_datasource]
)
paragraphs_sentences_and_whitespace = ParagraphsSentencesAndWhitespace()
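# A minimal usage sketch (an assumption, not part of revscoring itself) of how
# the helpers above operate on raw text: `wikitext_split` yields tokens that
# carry a `type` attribute, which is what `TokenIsInTypes` filters on. The
# sample text is illustrative.
def _demo_token_types(text="''Hello'' [[world]]!"):
    tokens = _process_tokens(text)
    words = [str(t) for t in tokens if TokenIsInTypes({'word'}).filter(t)]
    markup_types = {'dbrack_open', 'dbrack_close', 'italics'}
    markup = [str(t) for t in tokens if TokenIsInTypes(markup_types).filter(t)]
    # words would typically be ['Hello', 'world']; markup collects the
    # italics and double-bracket tokens in document order.
    return words, markup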
|
|
import logging
from smtplib import SMTPException
from celery import shared_task
from django.conf import settings
from django.core import signing
from django.core.mail import send_mail
from django.template import Context, Template
from django.template.loader import get_template
from waldur_core.core import utils as core_utils
from . import backend, models
from .utils import get_feedback_link
logger = logging.getLogger(__name__)
@shared_task(name='waldur_mastermind.support.pull_support_users')
def pull_support_users():
"""
Pull support users from backend.
Note that support users are not deleted in JIRA.
Instead, they are marked as disabled.
Therefore, Waldur replicates the same behaviour.
"""
if not settings.WALDUR_SUPPORT['ENABLED']:
return
backend_users = backend.get_active_backend().get_users()
for backend_user in backend_users:
user, created = models.SupportUser.objects.get_or_create(
backend_id=backend_user.backend_id, defaults={'name': backend_user.name}
)
if not created and user.name != backend_user.name:
user.name = backend_user.name
user.save()
if not user.is_active:
user.is_active = True
user.save()
models.SupportUser.objects.exclude(
backend_id__in=[u.backend_id for u in backend_users]
).update(is_active=False)
@shared_task(name='waldur_mastermind.support.pull_priorities')
def pull_priorities():
if not settings.WALDUR_SUPPORT['ENABLED']:
return
backend.get_active_backend().pull_priorities()
@shared_task(name='waldur_mastermind.support.create_issue')
def create_issue(serialized_issue):
issue = core_utils.deserialize_instance(serialized_issue)
try:
backend.get_active_backend().create_issue(issue)
except Exception as e:
issue.error_message = str(e)
issue.save(update_fields=['error_message'])
else:
issue.error_message = ''
issue.save(update_fields=['error_message'])
@shared_task(name='waldur_mastermind.support.create_confirmation_comment')
def create_confirmation_comment(serialized_issue, comment_tmpl=''):
issue = core_utils.deserialize_instance(serialized_issue)
try:
backend.get_active_backend().create_confirmation_comment(issue, comment_tmpl)
except Exception as e:
issue.error_message = str(e)
issue.save(update_fields=['error_message'])
else:
issue.error_message = ''
issue.save(update_fields=['error_message'])
@shared_task(name='waldur_mastermind.support.send_issue_updated_notification')
def send_issue_updated_notification(serialized_issue, changed):
issue = core_utils.deserialize_instance(serialized_issue)
_send_issue_notification(
issue=issue, template='issue_updated', extra_context={'changed': changed},
)
@shared_task(name='waldur_mastermind.support.send_comment_added_notification')
def send_comment_added_notification(serialized_comment):
comment = core_utils.deserialize_instance(serialized_comment)
_send_issue_notification(
issue=comment.issue,
template='comment_added',
extra_context={'comment': comment},
)
@shared_task(name='waldur_mastermind.support.send_comment_updated_notification')
def send_comment_updated_notification(serialized_comment, old_description):
comment = core_utils.deserialize_instance(serialized_comment)
_send_issue_notification(
issue=comment.issue,
template='comment_updated',
extra_context={'comment': comment, 'old_description': old_description,},
)
def _send_email(
issue,
html_template,
text_template,
subject_template,
receiver=None,
extra_context=None,
):
if not settings.WALDUR_SUPPORT['ENABLED']:
return
if settings.SUPPRESS_NOTIFICATION_EMAILS:
message = (
'Issue notifications are suppressed. '
'Please set SUPPRESS_NOTIFICATION_EMAILS to False to send notifications.'
)
logger.info(message)
return
if not receiver:
receiver = issue.caller
context = {
'issue_url': core_utils.format_homeport_link(
'support/issue/{uuid}/', uuid=issue.uuid
),
'site_name': settings.WALDUR_CORE['SITE_NAME'],
'issue': issue,
}
if extra_context:
context.update(extra_context)
html_message = html_template.render(Context(context))
text_message = text_template.render(Context(context, autoescape=False))
subject = subject_template.render(Context(context, autoescape=False)).strip()
logger.debug('About to send an issue update notification to %s' % receiver.email)
try:
send_mail(
subject,
text_message,
settings.DEFAULT_FROM_EMAIL,
[receiver.email],
html_message=html_message,
)
except SMTPException as e:
message = (
'Failed to notify a user about an issue update. Issue uuid: %s. Error: %s'
% (issue.uuid.hex, e)
)
logger.warning(message)
def _send_issue_notification(issue, template, *args, **kwargs):
try:
notification_template = models.TemplateStatusNotification.objects.get(
status=issue.status
)
html_template = Template(notification_template.html)
text_template = Template(notification_template.text)
subject_template = Template(notification_template.subject)
except models.TemplateStatusNotification.DoesNotExist:
html_template = get_template('support/notification_%s.html' % template).template
text_template = get_template('support/notification_%s.txt' % template).template
subject_template = get_template(
'support/notification_%s_subject.txt' % template
).template
_send_email(issue, html_template, text_template, subject_template, *args, **kwargs)
def _send_issue_feedback(issue, template, *args, **kwargs):
html_template = get_template('support/notification_%s.html' % template).template
text_template = get_template('support/notification_%s.txt' % template).template
subject_template = get_template(
'support/notification_%s_subject.txt' % template
).template
_send_email(issue, html_template, text_template, subject_template, *args, **kwargs)
@shared_task(name='waldur_mastermind.support.send_issue_feedback_notification')
def send_issue_feedback_notification(serialized_issue):
issue = core_utils.deserialize_instance(serialized_issue)
signer = signing.TimestampSigner()
token = signer.sign(issue.uuid.hex)
extra_context = {
'feedback_link': get_feedback_link(token),
'feedback_links': [
{'label': value, 'link': get_feedback_link(token, key),}
for (key, value) in models.Feedback.Evaluation.CHOICES
],
}
_send_issue_feedback(
issue=issue, template='issue_feedback', extra_context=extra_context,
)
@shared_task(name='waldur_mastermind.support.sync_feedback')
def sync_feedback(serialized_feedback):
feedback = core_utils.deserialize_instance(serialized_feedback)
feedback.state = feedback.States.CREATING
feedback.save()
backend.get_active_backend().create_feedback(feedback)
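# A minimal sketch (not part of this tasks module) of how the feedback token
# created in send_issue_feedback_notification could be verified later, e.g.
# in the view handling the feedback link; the max_age value is an
# illustrative assumption.
def _verify_feedback_token(token, max_age=7 * 24 * 3600):
    signer = signing.TimestampSigner()
    try:
        # Returns the issue uuid hex that was signed above.
        return signer.unsign(token, max_age=max_age)
    except signing.BadSignature:
        # Covers both tampered and expired tokens.
        return None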
|
|
# tests.test_target.test_class_balance
# Tests for the ClassBalance visualizer
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Jul 19 10:21:49 2018 -0400
#
# ID: test_class_balance.py [] benjamin@bengfort.com $
"""
Tests for the ClassBalance visualizer
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
from yellowbrick.target.class_balance import *
from yellowbrick.exceptions import YellowbrickValueError
from tests.base import VisualTestCase
from tests.dataset import DatasetMixin, Dataset, Split
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split as tts
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Data Fixtures
##########################################################################
def make_fixture(binary=False, balanced=False, split=False):
"""
Make a dataset for testing ClassBalance based on the specified params.
"""
kwargs = {
"n_samples":100, "n_features":20, "n_informative":8, "n_redundant":2,
"n_clusters_per_class":1, "random_state":89092,
}
if binary:
kwargs['n_classes'] = 2
kwargs['weights'] = None if balanced else [0.3, 0.7]
else:
kwargs['n_classes'] = 5
kwargs['weights'] = None if balanced else [0.1, 0.2, 0.4, 0.2, .01]
X, y = make_classification(**kwargs)
if split:
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, random_state=101
)
return Dataset(Split(X_train, X_test), Split(y_train, y_test))
return Dataset(X, y)
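# A brief illustrative helper (an assumption based on the assertions in the
# tests below, not part of the upstream suite): fitting on a single target
# vector puts ClassBalance in balance mode, while fitting on train and test
# targets puts it in compare mode.
def _example_modes():
    data = make_fixture(binary=True, split=True)
    balance_oz = ClassBalance().fit(data.y.train)                # _mode == BALANCE
    compare_oz = ClassBalance().fit(data.y.train, data.y.test)   # _mode == COMPARE
    return balance_oz, compare_oz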
##########################################################################
## Tests
##########################################################################
class ClassBalanceTests(VisualTestCase, DatasetMixin):
"""
Test ClassBalance visualizer
"""
def test_signature_exception(self):
"""
An exception is raised if X and y are put into the visualizer
"""
oz = ClassBalance()
dataset = make_fixture(split=False)
message = "fit has changed to only require a 1D array, y"
with pytest.raises(YellowbrickValueError, match=message):
oz.fit(dataset.X, dataset.y)
def test_invalid_target(self):
"""
A value error should be raised on invalid train or test target
"""
y_valid = np.random.randint(2, size=100)
y_invalid = np.random.uniform(size=100)
oz = ClassBalance()
with pytest.raises(YellowbrickValueError):
oz.fit(y_invalid)
with pytest.raises(YellowbrickValueError):
oz.fit(y_valid, y_invalid)
def test_class_names_must_match(self):
"""
Assert error raised when more classes are in data than specified
"""
oz = ClassBalance(labels=["a", "b", "c"])
dataset = make_fixture(binary=False, split=False)
with pytest.raises(YellowbrickValueError):
oz.fit(dataset.y)
def test_binary_balance(self):
"""
Test binary classification in balance mode
"""
dataset = make_fixture(binary=True, split=False)
oz = ClassBalance()
assert oz.fit(dataset.y) is oz
assert oz._mode == BALANCE
#oz.finalize()
self.assert_images_similar(oz)
def test_binary_compare(self):
"""
Test binary classification in compare mode
"""
dataset = make_fixture(binary=True, split=True)
oz = ClassBalance()
assert oz.fit(dataset.y.train, dataset.y.test) is oz
assert oz._mode == COMPARE
#oz.finalize()
self.assert_images_similar(oz)
def test_multiclass_balance(self):
"""
Test multiclass classification in balance mode
"""
dataset = make_fixture(binary=False, split=False)
oz = ClassBalance()
assert oz.fit(dataset.y) is oz
assert oz._mode == BALANCE
#oz.finalize()
self.assert_images_similar(oz)
def test_multiclass_compare(self):
"""
Test multiclass classification in compare mode
"""
dataset = make_fixture(binary=False, split=True)
oz = ClassBalance()
assert oz.fit(dataset.y.train, dataset.y.test) is oz
assert oz._mode == COMPARE
#oz.finalize()
self.assert_images_similar(oz)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_occupancy_balance(self):
"""
Test pandas data frame with string target in balance mode
"""
data = self.load_data("occupancy")
y = pd.Series([
"occupied" if yi else "unoccupied" for yi in data['occupancy']
])
# Create and fit the visualizer
oz = ClassBalance()
assert oz.fit(y) is oz
#oz.finalize()
self.assert_images_similar(oz)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_occupancy_compare(self):
"""
Test pandas data frame with string target in compare mode
"""
data = self.load_data("occupancy")
features = [
"temperature", "relative_humidity", "light", "C02", "humidity"
]
X = pd.DataFrame(data[features])
y = pd.Series([
"occupied" if yi else "unoccupied" for yi in data['occupancy']
])
_, _, y_train, y_test = tts(X, y, test_size=0.4, random_state=2242)
# Create and fit the visualizer
oz = ClassBalance()
assert oz.fit(y_train, y_test) is oz
#oz.finalize()
self.assert_images_similar(oz)
def test_quick_method(self):
"""
Test the class_balance quick method with a multiclass dataset
"""
dataset = make_fixture(binary=False, split=False)
ax = class_balance(dataset.y)
self.assert_images_similar(ax=ax, tol=0.5)
|
|
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from unittest import TestResult
from flask_testing import TestCase, LiveServerTestCase
from flask_testing.utils import ContextVariableDoesNotExist
from .flask_app import create_app
class TestSetup(TestCase):
def create_app(self):
return create_app()
def test_setup(self):
self.assertTrue(self.app is not None)
self.assertTrue(self.client is not None)
self.assertTrue(self._ctx is not None)
class TestSetupFailure(TestCase):
def _pre_setup(self):
pass
def test_setup_failure(self):
'''Should not fail in _post_teardown if _pre_setup fails'''
assert True
class TestTeardownGraceful(TestCase):
def create_app(self):
return create_app()
def test_remove_testcase_attributes(self):
"""
There should be no exception after this test because teardown
is graceful.
"""
del self.app
del self._ctx
class TestClientUtils(TestCase):
def create_app(self):
return create_app()
def test_get_json(self):
response = self.client.get("/ajax/")
self.assertEqual(response.json, dict(name="test"))
def test_status_failure_message(self):
expected_message = 'my message'
try:
self.assertStatus(self.client.get('/'), 404, expected_message)
except AssertionError as e:
self.assertTrue(expected_message in e.args[0] or \
expected_message in e.message)
def test_default_status_failure_message(self):
expected_message = 'HTTP Status 404 expected but got 200'
try:
self.assertStatus(self.client.get('/'), 404)
except AssertionError as e:
self.assertTrue(expected_message in e.args[0] or \
expected_message in e.message)
def test_assert_200(self):
self.assert200(self.client.get("/"))
def test_assert_404(self):
self.assert404(self.client.get("/oops/"))
def test_assert_403(self):
self.assert403(self.client.get("/forbidden/"))
def test_assert_401(self):
self.assert401(self.client.get("/unauthorized/"))
def test_assert_405(self):
self.assert405(self.client.post("/"))
def test_assert_500(self):
self.assert500(self.client.get("/internal_server_error/"))
def test_assert_redirects(self):
response = self.client.get("/redirect/")
self.assertRedirects(response, "/")
def test_assert_template_used(self):
try:
self.client.get("/template/")
self.assert_template_used("index.html")
except RuntimeError:
pass
def test_assert_template_not_used(self):
self.client.get("/")
try:
self.assert_template_used("index.html")
assert False
except AssertionError:
pass
except RuntimeError:
pass
def test_get_context_variable(self):
try:
self.client.get("/template/")
self.assertEqual(self.get_context_variable("name"), "test")
except RuntimeError:
pass
def test_assert_context(self):
try:
self.client.get("/template/")
self.assert_context("name", "test")
except RuntimeError:
pass
def test_assert_bad_context(self):
try:
self.client.get("/template/")
self.assertRaises(AssertionError, self.assert_context,
"name", "foo")
self.assertRaises(AssertionError, self.assert_context,
"foo", "foo")
except RuntimeError:
pass
def test_assert_get_context_variable_not_exists(self):
try:
self.client.get("/template/")
self.assertRaises(ContextVariableDoesNotExist,
self.get_context_variable, "foo")
except RuntimeError:
pass
class TestLiveServer(LiveServerTestCase):
def create_app(self):
app = create_app()
app.config['LIVESERVER_PORT'] = 8943
return app
def test_server_process_is_spawned(self):
process = self._process
# Check the process is spawned
self.assertNotEqual(process, None)
# Check the process is alive
self.assertTrue(process.is_alive())
def test_server_listening(self):
response = urlopen(self.get_server_url())
self.assertTrue(b'OK' in response.read())
self.assertEqual(response.code, 200)
class TestNotRenderTemplates(TestCase):
render_templates = False
def create_app(self):
return create_app()
def test_assert_not_process_the_template(self):
response = self.client.get("/template/")
assert "" == response.data
def test_assert_template_rendered_signal_sent(self):
self.client.get("/template/")
self.assert_template_used('index.html')
class TestRenderTemplates(TestCase):
render_templates = True
def create_app(self):
return create_app()
def test_assert_not_process_the_template(self):
response = self.client.get("/template/")
assert "" != response.data
class TestRestoreTheRealRender(TestCase):
def create_app(self):
return create_app()
def test_assert_the_real_render_template_is_restored(self):
test = TestNotRenderTemplates('test_assert_not_process_the_template')
test_result = TestResult()
test(test_result)
assert test_result.wasSuccessful()
response = self.client.get("/template/")
assert "" != response.data
|
|
"""
@package mi.dataset.parser.presf_abc
@file marine-integrations/mi/dataset/parser/presf_abc.py
@author Christopher Fortin, Jeff Roy, Rene Gelinas
@brief Parser for the presf_abc dataset driver
This file contains code for the presf_abc parsers and code to produce data
particles. This parser only parses recovered data. There is one parser which
produces two types of data particles. The names of the output particle streams
are unique.
The input file is ASCII and contains five types of records.
The first record type is the header record of ASCII text preceded by '*'.
The next three record types are 18-digit hexadecimal values with specific
formats for the logging session data, tide data and wave burst metadata.
The last record type is a 12-digit hexadecimal list of wave burst data
values, two per line.
All records end with the newline regular expression.
Header data records: '*', text
Header End record: '*S>DD'
Logging Session start record: 'FFFFFFFFFBFFFFFFFF'
Logging Session data record: 2-18 digit hexadecimal
Logging Session end record: 'FFFFFFFFFCFFFFFFFF'
Tide Data record: 18 digit hexadecimal
Wave Metadata record: 2-18 digit hexadecimal
Wave Burst records: 12 digit pressure measurement (2 measurements per record)
Wave Burst End record: 'FFFFFFFFFFFFFFFFFF'
Only sensor data records produce particles if properly formed.
Mal-formed sensor data records and all status records produce no particles.
Release notes:
Initial Release
"""
import re
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import \
DataParticle, \
DataParticleKey, \
DataParticleValue
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser import utilities
from mi.dataset.parser.common_regexes import FLOAT_REGEX, \
SCIENTIFIC_REGEX, END_OF_LINE_REGEX, ASCII_HEX_CHAR_REGEX, \
ANY_CHARS_REGEX
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Rene Gelinas'
__license__ = 'Apache 2.0'
# Basic patterns
FLOAT = r'(' + FLOAT_REGEX + ')' # generic float
SCI_NOTATION = r'(' + SCIENTIFIC_REGEX + ')' # Generic scientific notation
WHITESPACE = r'(\s*)' # any whitespace
HEADER_RECORD_START = r'\*' + WHITESPACE
# Header records - text
HEADER_LINE_REGEX = HEADER_RECORD_START + ANY_CHARS_REGEX
HEADER_LINE_REGEX += END_OF_LINE_REGEX
HEADER_LINE_MATCHER = re.compile(HEADER_LINE_REGEX)
# Logging Session records - 18 digit hexadecimal
SESSION_START_REGEX = r'F{9}BF{8}' + END_OF_LINE_REGEX
SESSION_START_MATCHER = re.compile(SESSION_START_REGEX)
SESSION_TIME_REGEX = r'(' + ASCII_HEX_CHAR_REGEX + '{8})'
SESSION_TIME_REGEX += r'0{10}'
SESSION_TIME_REGEX += END_OF_LINE_REGEX
SESSION_TIME_MATCHER = re.compile(SESSION_TIME_REGEX)
SESSION_STATUS_REGEX = r'(' + ASCII_HEX_CHAR_REGEX + '{4})'
SESSION_STATUS_REGEX += r'(' + ASCII_HEX_CHAR_REGEX + '{4})'
SESSION_STATUS_REGEX += r'0{10}'
SESSION_STATUS_REGEX += END_OF_LINE_REGEX
SESSION_STATUS_MATCHER = re.compile(SESSION_STATUS_REGEX)
SESSION_END_REGEX = r'F{9}CF{8}' + END_OF_LINE_REGEX
SESSION_END_MATCHER = re.compile(SESSION_END_REGEX)
# Tide data records - 18 digit hexadecimal
TIDE_DATA_REGEX = r'(' + ASCII_HEX_CHAR_REGEX + '{6})'
TIDE_DATA_REGEX += r'(' + ASCII_HEX_CHAR_REGEX + '{4})'
TIDE_DATA_REGEX += r'(' + ASCII_HEX_CHAR_REGEX + '{8})'
TIDE_DATA_REGEX += END_OF_LINE_REGEX
TIDE_DATA_MATCHER = re.compile(TIDE_DATA_REGEX)
# Wave data records - 18 digit hexadecimal
WAVE_DATA_START_REGEX = r'0{18}' + END_OF_LINE_REGEX
WAVE_DATA_START_MATCHER = re.compile(WAVE_DATA_START_REGEX)
WAVE_DATA_REGEX = r'(' + ASCII_HEX_CHAR_REGEX + '{8})'
WAVE_DATA_REGEX += r'(' + ASCII_HEX_CHAR_REGEX + '{2})'
WAVE_DATA_REGEX += r'0{8}'
WAVE_DATA_REGEX += END_OF_LINE_REGEX
WAVE_DATA_MATCHER = re.compile(WAVE_DATA_REGEX)
WAVE_BURST_DATA_REGEX = r'(' + ASCII_HEX_CHAR_REGEX + '{6})'
WAVE_BURST_DATA_REGEX += r'(' + ASCII_HEX_CHAR_REGEX + '{6})'
WAVE_BURST_DATA_REGEX += END_OF_LINE_REGEX
WAVE_BURST_DATA_MATCHER = re.compile(WAVE_BURST_DATA_REGEX)
WAVE_DATA_END_REGEX = r'F{18}' + END_OF_LINE_REGEX
WAVE_DATA_END_MATCHER = re.compile(WAVE_DATA_END_REGEX)
# Data end pattern
FILE_DATA_END_REGEX = 'S>'
FILE_DATA_END_REGEX += END_OF_LINE_REGEX
FILE_DATA_END_MATCHER = re.compile(FILE_DATA_END_REGEX)
# SESSION_TIME_MATCHER produces the following groups:
SESSION_GROUP_SAMPLE_TIME = 1
# SESSION_SAMPLE_DATA_MATCHER produces the following groups:
SESSION_GROUP_TIDE_INTERVAL = 1
SESSION_GROUP_WAVE_PERIOD = 2
# TIDE_DATA_MATCHER produces the following groups:
TIDE_GROUP_PRESSURE_NUM = 1
TIDE_GROUP_TEMPERATURE_NUM = 2
TIDE_GROUP_START_TIME = 3
# WAVE_DATA_MATCHER produces the following groups:
WAVE_GROUP_START_TIME = 1
WAVE_GROUP_NUM_SAMPLES_MSB = 2
WAVE_GROUP_PRESS_TEMP_COMP_NUM = 1
WAVE_GROUP_NUM_SAMPLES_LSB = 2
# WAVE_BURST_DATA_MATCHER produces the following groups:
WAVE_BURST_GROUP_PRESSURE_NUM_1 = 1
WAVE_BURST_GROUP_PRESSURE_NUM_2 = 2
class PresfAbcSessionKey(BaseEnum):
TIDE_SAMPLE_START_TIME = 'tide_sample_start_timestamp'
TIDE_SAMPLE_INTERVAL = 'tide_sample_period'
WAVE_INTEGRATION_PERIOD = 'wave_integration_period'
class PresfAbcTideParticleKey(BaseEnum):
TM_START_TIME = 'presf_time'
TM_PRESSURE_NUM = 'presf_tide_pressure_number'
TM_TEMPERATURE_NUM = 'presf_tide_temperature_number'
class PresfAbcWaveParticleKey(BaseEnum):
WM_START_TIME = 'presf_time'
WM_PTCN_NUM = 'presf_wave_press_temp_comp_number'
WM_BURST_PRESSURE_NUM = 'presf_wave_burst_pressure_number'
WM_NUM_BURST_SAMPLES = 'wm_num_burst_samples'
class DataParticleType(BaseEnum):
TIDE_RECOVERED = 'presf_abc_tide_measurement_recovered'
WAVE_RECOVERED = 'presf_abc_wave_burst_recovered'
class DataSection(BaseEnum):
SESSION = 0
TIDE = 1
WAVE = 2
class PresfAbcTideDataParticle(DataParticle):
"""
Class for parsing data from the presf_abc_dcl tide data set
"""
_data_particle_type = DataParticleType.TIDE_RECOVERED
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(PresfAbcTideDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
result = list()
result.append(self._encode_value(
PresfAbcTideParticleKey.TM_START_TIME,
self.raw_data[PresfAbcTideParticleKey.TM_START_TIME], int))
result.append(self._encode_value(
PresfAbcTideParticleKey.TM_PRESSURE_NUM,
self.raw_data[PresfAbcTideParticleKey.TM_PRESSURE_NUM], int))
result.append(self._encode_value(
PresfAbcTideParticleKey.TM_TEMPERATURE_NUM,
self.raw_data[PresfAbcTideParticleKey.TM_TEMPERATURE_NUM], int))
# The particle timestamp is the time of the tide measurement.
tm_start_time = self.raw_data[PresfAbcTideParticleKey.TM_START_TIME]
ntp_time = utilities.time_2000_to_ntp(tm_start_time)
self.set_internal_timestamp(timestamp=ntp_time)
return result
class PresfAbcWaveDataParticle(DataParticle):
"""
Class for parsing data from the presf_abc_dcl wave data set
"""
_data_particle_type = DataParticleType.WAVE_RECOVERED
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(PresfAbcWaveDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
def _build_parsed_values(self):
"""
Take something in the data format and turn it into
an array of dictionaries defining the data in the particle
with the appropriate tag.
"""
result = list()
result.append(self._encode_value(
PresfAbcWaveParticleKey.WM_START_TIME,
self.raw_data[PresfAbcWaveParticleKey.WM_START_TIME], int))
result.append(self._encode_value(
PresfAbcWaveParticleKey.WM_PTCN_NUM,
self.raw_data[PresfAbcWaveParticleKey.WM_PTCN_NUM], int))
result.append(self._encode_value(
PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM,
self.raw_data[PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM],
lambda x: [int(y) for y in x]))
# The particle timestamp is the time of the start of the wave burst.
wm_start_time = self.raw_data[PresfAbcWaveParticleKey.WM_START_TIME]
ntp_time = utilities.time_2000_to_ntp(wm_start_time)
self.set_internal_timestamp(timestamp=ntp_time)
return result
class PresfAbcParser(SimpleParser):
"""
Class for parsing recovered data from the presf_abc instrument.
"""
def __init__(self,
stream_handle,
exception_callback,
produce_wave_data):
self._produce_wave_data = produce_wave_data
self._wave_particle_class = PresfAbcWaveDataParticle
self._tide_particle_class = PresfAbcTideDataParticle
self._current_data_section = DataSection.SESSION
super(PresfAbcParser, self).__init__({}, stream_handle,
exception_callback)
@staticmethod
def empty_session_data():
session_data = dict.fromkeys(
[PresfAbcSessionKey.TIDE_SAMPLE_START_TIME,
PresfAbcSessionKey.TIDE_SAMPLE_INTERVAL,
PresfAbcSessionKey.WAVE_INTEGRATION_PERIOD])
return session_data
@staticmethod
def empty_tide_data():
tide_data = dict.fromkeys(
[PresfAbcTideParticleKey.TM_START_TIME,
PresfAbcTideParticleKey.TM_PRESSURE_NUM,
PresfAbcTideParticleKey.TM_TEMPERATURE_NUM])
return tide_data
@staticmethod
def empty_wave_data():
wave_data = dict.fromkeys(
[PresfAbcWaveParticleKey.WM_START_TIME,
PresfAbcWaveParticleKey.WM_PTCN_NUM,
PresfAbcWaveParticleKey.WM_NUM_BURST_SAMPLES])
wave_data[PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM] = []
return wave_data
def parse_session_data(self, line, session_data):
"""
Description:
This function parses the logging session data of the presf_abc
hex file. There are two logging session data records. The first
record contains the time of the beginning of the first tide sample.
The second record contains the tide sample interval (in seconds)
and the wave integration period (as a number of 0.25-second
intervals).
Parameters:
line: logging session data line to parse
session_data: tide data structure for particle creation
"""
session_time = SESSION_TIME_MATCHER.match(line)
if session_time and \
session_data[PresfAbcSessionKey.TIDE_SAMPLE_START_TIME] is None:
time_first_tide_sample =\
int(session_time.group(SESSION_GROUP_SAMPLE_TIME), 16)
session_data[PresfAbcSessionKey.TIDE_SAMPLE_START_TIME] =\
time_first_tide_sample
else:
session_status = SESSION_STATUS_MATCHER.match(line)
if session_status:
tide_sample_interval =\
int(session_status.group(SESSION_GROUP_TIDE_INTERVAL), 16)
session_data[PresfAbcSessionKey.TIDE_SAMPLE_INTERVAL] =\
tide_sample_interval
wave_intr_period =\
int(session_status.group(SESSION_GROUP_WAVE_PERIOD), 16)
session_data[PresfAbcSessionKey.WAVE_INTEGRATION_PERIOD] =\
wave_intr_period
else:
# Expected format is incorrect.
log.debug("Unexpected logging session status data.")
self._exception_callback(RecoverableSampleException(
"Unexpected logging session data: %s" % line))
def parse_tide_data(self, line, tide_data):
"""
Description:
This function parses the tide data of the presf_abc hex file. The
tide data contains the pressure number (used to calculate the
pressure), the temperature number (used to calculate the
temperature), and the start time of the tide measurement (seconds
from 1-1-2000).
Parameters:
line: tide data line to parse
tide_data: tide data structure for particle creation
"""
tide_data_re = TIDE_DATA_MATCHER.match(line)
if tide_data_re:
# Parse the tide measurement start time
tm_start_time =\
int(tide_data_re.group(TIDE_GROUP_START_TIME), 16)
tide_data[PresfAbcTideParticleKey.TM_START_TIME] =\
tm_start_time
# Parse the tide measurement pressure count.
p_dec_tide =\
int(tide_data_re.group(TIDE_GROUP_PRESSURE_NUM), 16)
tide_data[PresfAbcTideParticleKey.TM_PRESSURE_NUM] =\
p_dec_tide
# Parse the tide measurement temperature count
t_dec_tide =\
int(tide_data_re.group(TIDE_GROUP_TEMPERATURE_NUM), 16)
tide_data[PresfAbcTideParticleKey.TM_TEMPERATURE_NUM] =\
t_dec_tide
particle = self._extract_sample(self._tide_particle_class,
None,
tide_data)
self._record_buffer.append(particle)
else:
log.debug("Unexpected format for tide data: %s" % line)
self._exception_callback(RecoverableSampleException(
"Unexpected format for tide data: %s" % line))
def parse_wave_data(self, line, wave_data):
"""
Description:
This function parses the wave data of the presf_abc hex file.
The wave data contains two pressure number measurements (used
to calculate the pressure).
Parameters:
line: wave data line to parse
wave_data: tide data structure for particle creation
"""
# Get the possible wave data record matches.
wave_data_re = WAVE_DATA_MATCHER.match(line)
wave_burst_data_re = WAVE_BURST_DATA_MATCHER.match(line)
wave_data_end_re = WAVE_DATA_END_MATCHER.match(line)
# Check if the record is one of the two wave metadata records.
if wave_data_re:
if wave_data[PresfAbcWaveParticleKey.WM_START_TIME] is None:
# Parse the Wave Burst start time
wb_start_time =\
int(wave_data_re.group(WAVE_GROUP_START_TIME), 16)
wave_data[PresfAbcWaveParticleKey.WM_START_TIME] =\
wb_start_time
# Parse the number of Wave Burst samples (MSB)
wb_samples_msb =\
int(wave_data_re.group(WAVE_GROUP_NUM_SAMPLES_MSB), 16) <<\
8
wave_data[PresfAbcWaveParticleKey.WM_NUM_BURST_SAMPLES] =\
wb_samples_msb
else:
# Parse the Pressure Temperature Compensation Number
ptcn =\
int(wave_data_re.group(WAVE_GROUP_PRESS_TEMP_COMP_NUM), 16)
wave_data[PresfAbcWaveParticleKey.WM_PTCN_NUM] = ptcn
# Parse the number of Wave Burst samples (LSB)
wb_samples_lsb =\
int(wave_data_re.group(WAVE_GROUP_NUM_SAMPLES_LSB), 16)
wave_data[PresfAbcWaveParticleKey.WM_NUM_BURST_SAMPLES] +=\
wb_samples_lsb
# Check if the record is a wave burst record.
elif wave_burst_data_re:
# Parse the first pressure measurement from the record
p_dec_wave = int(wave_burst_data_re.
group(WAVE_BURST_GROUP_PRESSURE_NUM_1), 16)
wave_data[PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM].\
append(p_dec_wave)
# Parse the second pressure measurement from the record
p_dec_wave = int(wave_burst_data_re.
group(WAVE_BURST_GROUP_PRESSURE_NUM_2), 16)
wave_data[PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM].\
append(p_dec_wave)
# Check if the record is the end wave burst record.
elif wave_data_end_re:
# Check we received the correct number of wave burst data records.
if len(wave_data[PresfAbcWaveParticleKey.WM_BURST_PRESSURE_NUM])\
== wave_data[PresfAbcWaveParticleKey.WM_NUM_BURST_SAMPLES]:
# Create the data particle and add it to the buffer.
particle = self._extract_sample(self._wave_particle_class,
None,
wave_data)
self._record_buffer.append(particle)
else:
log.debug("Unexcepted number of wave burst records: %s" % line)
self._exception_callback(RecoverableSampleException(
"Unexcepted number of wave burst records: %s" % line))
else:
log.debug("Unexpected format for wave data: %s" % line)
self._exception_callback(RecoverableSampleException(
"Unexpected format for wave data: %s" % line))
def parse_file(self):
"""
The main parsing function which loops over each line in the file and
extracts particles if the correct format is found.
"""
session_data = self.empty_session_data()
tide_data = self.empty_tide_data()
wave_data = self.empty_wave_data()
#
# # First, parse the header
# self.parse_header()
for line in self._stream_handle:
#####
# Check for a header line and ignore it.
#####
if HEADER_LINE_MATCHER.match(line):
continue # read next line
#####
# Check for a transition to another data section (logging, tide or
# wave) and set the current data section appropriately
#####
# Check for the start of a logging session data section.
if SESSION_START_MATCHER.match(line):
# Start of new logging session, clear the logging session data.
session_data = self.empty_session_data()
self._current_data_section = DataSection.SESSION
continue # read next line
# If this is the end of the session data, clear the tide and wave
# data and set the current data section to the tide data section.
if SESSION_END_MATCHER.match(line):
# End of the logging session, clear the tide and wave data.
tide_data = self.empty_tide_data()
wave_data = self.empty_wave_data()
self._current_data_section = DataSection.TIDE
continue # read next line
# Check for the start of a wave data section.
if WAVE_DATA_START_MATCHER.match(line):
self._current_data_section = DataSection.WAVE
continue # read next line
# Check for end of the data in the file and get out of the loop.
if FILE_DATA_END_MATCHER.match(line):
break
#####
# If we got here, the record isn't a flag to transition to another
# data section, so parse the data appropriately.
#####
if self._current_data_section == DataSection.SESSION:
self.parse_session_data(line, session_data)
continue # read next line
if self._current_data_section == DataSection.TIDE:
self.parse_tide_data(line, tide_data)
continue # read next line
if self._current_data_section == DataSection.WAVE:
if self._produce_wave_data:
self.parse_wave_data(line, wave_data)
# If this is the end of the wave data, clear the tide and wave
# data and set the current section to the tide data section.
if WAVE_DATA_END_MATCHER.match(line):
tide_data = self.empty_tide_data()
wave_data = self.empty_wave_data()
self._current_data_section = DataSection.TIDE
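# A minimal decoding sketch (not part of the original driver) showing how an
# 18-digit tide record breaks down per TIDE_DATA_MATCHER: 6 hex digits of
# pressure number, 4 hex digits of temperature number and 8 hex digits of
# start time (seconds since 2000-01-01). The sample record is illustrative.
def _decode_tide_record(record="0A1B2C3D4E12345678"):
    pressure_num = int(record[0:6], 16)
    temperature_num = int(record[6:10], 16)
    start_time = int(record[10:18], 16)
    return pressure_num, temperature_num, start_time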
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import UrlMapsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class UrlMapsRestInterceptor:
"""Interceptor for UrlMaps.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the UrlMapsRestTransport.
.. code-block:: python
class MyCustomUrlMapsInterceptor(UrlMapsRestInterceptor):
def pre_aggregated_list(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_aggregated_list(response):
logging.log(f"Received response: {response}")
def pre_delete(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete(response):
logging.log(f"Received response: {response}")
def pre_get(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get(response):
logging.log(f"Received response: {response}")
def pre_insert(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_insert(response):
logging.log(f"Received response: {response}")
def pre_invalidate_cache(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_invalidate_cache(response):
logging.log(f"Received response: {response}")
def pre_list(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list(response):
logging.log(f"Received response: {response}")
def pre_patch(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_patch(response):
logging.log(f"Received response: {response}")
def pre_update(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update(response):
logging.log(f"Received response: {response}")
def pre_validate(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_validate(response):
logging.log(f"Received response: {response}")
transport = UrlMapsRestTransport(interceptor=MyCustomUrlMapsInterceptor())
client = UrlMapsClient(transport=transport)
"""
def pre_aggregated_list(
self,
request: compute.AggregatedListUrlMapsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.AggregatedListUrlMapsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for aggregated_list
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_aggregated_list(
self, response: compute.UrlMapsAggregatedList
) -> compute.UrlMapsAggregatedList:
"""Post-rpc interceptor for aggregated_list
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_delete(
self, request: compute.DeleteUrlMapRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.DeleteUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_get(
self, request: compute.GetUrlMapRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.GetUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_get(self, response: compute.UrlMap) -> compute.UrlMap:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_insert(
self, request: compute.InsertUrlMapRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.InsertUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_invalidate_cache(
self,
request: compute.InvalidateCacheUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InvalidateCacheUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for invalidate_cache
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_invalidate_cache(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for invalidate_cache
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_list(
self, request: compute.ListUrlMapsRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.ListUrlMapsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_list(self, response: compute.UrlMapList) -> compute.UrlMapList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_patch(
self, request: compute.PatchUrlMapRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.PatchUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for patch
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_patch(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for patch
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_update(
self, request: compute.UpdateUrlMapRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.UpdateUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_update(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for update
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
def pre_validate(
self,
request: compute.ValidateUrlMapRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ValidateUrlMapRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for validate
Override in a subclass to manipulate the request or metadata
before they are sent to the UrlMaps server.
"""
return request, metadata
def post_validate(
self, response: compute.UrlMapsValidateResponse
) -> compute.UrlMapsValidateResponse:
"""Post-rpc interceptor for validate
Override in a subclass to manipulate the response
after it is returned by the UrlMaps server but before
it is returned to user code.
"""
return response
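# A minimal sketch of customizing RPC handling by subclassing the interceptor.
# The logging below is illustrative only; the `url_map` request field and the
# `name` response field are assumed from the standard compute messages.
class _LoggingUrlMapsRestInterceptor(UrlMapsRestInterceptor):
    """Example interceptor that logs the get RPC before and after the call."""
    def pre_get(self, request, metadata):
        # Inspect (or modify) the request and metadata before they are sent.
        print("GetUrlMapRequest for url_map=%r" % request.url_map)
        return request, metadata
    def post_get(self, response):
        # Inspect (or modify) the UrlMap before it is handed back to user code.
        print("Received UrlMap %r" % response.name)
        return response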
@dataclasses.dataclass
class UrlMapsRestStub:
_session: AuthorizedSession
_host: str
_interceptor: UrlMapsRestInterceptor
class UrlMapsRestTransport(UrlMapsTransport):
"""REST backend transport for UrlMaps.
The UrlMaps API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, UrlMapsRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[UrlMapsRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or UrlMapsRestInterceptor()
self._prep_wrapped_messages(client_info)
class _AggregatedList(UrlMapsRestStub):
def __hash__(self):
return hash("AggregatedList")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.AggregatedListUrlMapsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMapsAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListUrlMapsRequest):
The request object. A request message for
UrlMaps.AggregatedList. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMapsAggregatedList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/aggregated/urlMaps",
},
]
request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
request_kwargs = compute.AggregatedListUrlMapsRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.AggregatedListUrlMapsRequest.to_json(
compute.AggregatedListUrlMapsRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMapsAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_aggregated_list(resp)
return resp
class _Delete(UrlMapsRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteUrlMapRequest):
The request object. A request message for UrlMaps.Delete.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
request_kwargs = compute.DeleteUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.DeleteUrlMapRequest.to_json(
compute.DeleteUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(UrlMapsRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMap:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetUrlMapRequest):
The request object. A request message for UrlMaps.Get.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMap:
Represents a URL Map resource. Compute Engine has two
URL Map resources: \*
`Global </compute/docs/reference/rest/v1/urlMaps>`__ \*
`Regional </compute/docs/reference/rest/v1/regionUrlMaps>`__
A URL map resource is a component of certain types of
cloud load balancers and Traffic Director: \* urlMaps
are used by external HTTP(S) load balancers and Traffic
Director. \* regionUrlMaps are used by internal HTTP(S)
load balancers. For a list of supported URL map features
by the load balancer type, see the Load balancing
features: Routing and traffic management table. For a
list of supported URL map features for Traffic Director,
see the Traffic Director features: Routing and traffic
management table. This resource defines mappings from
hostnames and URL paths to either a backend service or a
backend bucket. To use the global urlMaps resource, the
backend service must have a loadBalancingScheme of
either EXTERNAL or INTERNAL_SELF_MANAGED. To use the
regionUrlMaps resource, the backend service must have a
loadBalancingScheme of INTERNAL_MANAGED. For more
information, read URL Map Concepts.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetUrlMapRequest.to_json(
compute.GetUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMap.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(UrlMapsRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertUrlMapRequest):
The request object. A request message for UrlMaps.Insert.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/global/urlMaps",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.UrlMap.to_json(
compute.UrlMap(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertUrlMapRequest.to_json(
compute.InsertUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _InvalidateCache(UrlMapsRestStub):
def __hash__(self):
return hash("InvalidateCache")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InvalidateCacheUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the invalidate cache method over HTTP.
Args:
request (~.compute.InvalidateCacheUrlMapRequest):
The request object. A request message for
UrlMaps.InvalidateCache. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}/invalidateCache",
"body": "cache_invalidation_rule_resource",
},
]
request, metadata = self._interceptor.pre_invalidate_cache(
request, metadata
)
request_kwargs = compute.InvalidateCacheUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.CacheInvalidationRule.to_json(
compute.CacheInvalidationRule(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InvalidateCacheUrlMapRequest.to_json(
compute.InvalidateCacheUrlMapRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_invalidate_cache(resp)
return resp
class _List(UrlMapsRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListUrlMapsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMapList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListUrlMapsRequest):
The request object. A request message for UrlMaps.List.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMapList:
Contains a list of UrlMap resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/global/urlMaps",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListUrlMapsRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListUrlMapsRequest.to_json(
compute.ListUrlMapsRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMapList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _Patch(UrlMapsRestStub):
def __hash__(self):
return hash("Patch")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.PatchUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the patch method over HTTP.
Args:
request (~.compute.PatchUrlMapRequest):
The request object. A request message for UrlMaps.Patch.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_patch(request, metadata)
request_kwargs = compute.PatchUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.UrlMap.to_json(
compute.UrlMap(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.PatchUrlMapRequest.to_json(
compute.PatchUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_patch(resp)
return resp
class _Update(UrlMapsRestStub):
def __hash__(self):
return hash("Update")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.UpdateUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the update method over HTTP.
Args:
request (~.compute.UpdateUrlMapRequest):
The request object. A request message for UrlMaps.Update.
See the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "put",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}",
"body": "url_map_resource",
},
]
request, metadata = self._interceptor.pre_update(request, metadata)
request_kwargs = compute.UpdateUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.UrlMap.to_json(
compute.UrlMap(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.UpdateUrlMapRequest.to_json(
compute.UpdateUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_update(resp)
return resp
class _Validate(UrlMapsRestStub):
def __hash__(self):
return hash("Validate")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ValidateUrlMapRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.UrlMapsValidateResponse:
r"""Call the validate method over HTTP.
Args:
request (~.compute.ValidateUrlMapRequest):
The request object. A request message for
UrlMaps.Validate. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.UrlMapsValidateResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/global/urlMaps/{url_map}/validate",
"body": "url_maps_validate_request_resource",
},
]
request, metadata = self._interceptor.pre_validate(request, metadata)
request_kwargs = compute.ValidateUrlMapRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.UrlMapsValidateRequest.to_json(
compute.UrlMapsValidateRequest(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ValidateUrlMapRequest.to_json(
compute.ValidateUrlMapRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.UrlMapsValidateResponse.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_validate(resp)
return resp
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListUrlMapsRequest], compute.UrlMapsAggregatedList
]:
stub = self._STUBS.get("aggregated_list")
if not stub:
stub = self._STUBS["aggregated_list"] = self._AggregatedList(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def delete(self) -> Callable[[compute.DeleteUrlMapRequest], compute.Operation]:
stub = self._STUBS.get("delete")
if not stub:
stub = self._STUBS["delete"] = self._Delete(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(self) -> Callable[[compute.GetUrlMapRequest], compute.UrlMap]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(self) -> Callable[[compute.InsertUrlMapRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def invalidate_cache(
self,
) -> Callable[[compute.InvalidateCacheUrlMapRequest], compute.Operation]:
stub = self._STUBS.get("invalidate_cache")
if not stub:
stub = self._STUBS["invalidate_cache"] = self._InvalidateCache(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(self) -> Callable[[compute.ListUrlMapsRequest], compute.UrlMapList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def patch(self) -> Callable[[compute.PatchUrlMapRequest], compute.Operation]:
stub = self._STUBS.get("patch")
if not stub:
stub = self._STUBS["patch"] = self._Patch(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def update(self) -> Callable[[compute.UpdateUrlMapRequest], compute.Operation]:
stub = self._STUBS.get("update")
if not stub:
stub = self._STUBS["update"] = self._Update(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def validate(
self,
) -> Callable[[compute.ValidateUrlMapRequest], compute.UrlMapsValidateResponse]:
stub = self._STUBS.get("validate")
if not stub:
stub = self._STUBS["validate"] = self._Validate(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
def close(self):
self._session.close()
__all__ = ("UrlMapsRestTransport",)
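# A minimal usage sketch, assuming Application Default Credentials are available
# and that "my-project" / "my-url-map" are placeholders; in most applications the
# high-level UrlMaps client would be used rather than the transport directly.
def _example_get_url_map():
    import google.auth
    credentials, _ = google.auth.default()
    transport = UrlMapsRestTransport(
        credentials=credentials,
        interceptor=UrlMapsRestInterceptor(),  # or a custom subclass
    )
    request = compute.GetUrlMapRequest(project="my-project", url_map="my-url-map")
    # Issues GET /compute/v1/projects/{project}/global/urlMaps/{url_map}
    return transport.get(request)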
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test litecoind with different proxy configuration.
Test plan:
- Start litecoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on litecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create litecoinds that connect to them
- Manipulate the litecoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that the credentials used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
|
import numpy as n
import os, glob, sys
import casautil
import logging, logging.config
# set up
tb = casautil.tools.table()
class casa_sol():
""" Container for CASA caltable(s).
Provides tools for applying to data of shape (nints, nbl, nch, npol).
Initialize class based on input file(s) and selection criteria.
    An optional flux-scale gain file can be given; it should be a gain table solved against a source with a setjy flux scale applied. (A short usage sketch follows this class definition.)
"""
def __init__(self, gainfile, flagants=True):
""" Initialize with a table of CASA gain solutions. Can later add BP.
"""
# custom log file
self.logger = logging.getLogger(__name__)
if os.path.exists(gainfile):
self.parsegain(gainfile)
self.flagants = flagants
else:
self.logger.warn('Gainfile not found.')
raise IOError
def parsegain(self, gainfile):
"""Takes .g1 CASA cal table and places values in numpy arrays.
"""
tb.open(gainfile)
mjd = tb.getcol('TIME')/(24*3600) # mjd days, as for telcal
field = tb.getcol('FIELD_ID')
spw = tb.getcol('SPECTRAL_WINDOW_ID')
gain = tb.getcol('CPARAM') # dimensions of (npol, 1?, ntimes*nants)
snr = tb.getcol('SNR')
flagged = tb.getcol('FLAG')
tb.close()
tb.open(os.path.join(gainfile, 'ANTENNA'))
antname = tb.getcol('NAME') # ant number in order written to gain file
antnum = n.array([int(antname[i][2:]) for i in range(len(antname))])
tb.close()
tb.open(os.path.join(gainfile, 'FIELD'))
caldir = tb.getcol('PHASE_DIR') # shape = (2 dirs, 1?, n sources)
calra = n.mod(caldir[0,0,:], 2*n.pi)
caldec = caldir[1,0,:]
self.radec = zip(calra, caldec)
tb.close()
# # need to find parent data MS to get some metadata
# mslist = glob.glob(gainfile[:-3] + '*.ms')
# try:
# msfile = mslist[0]
# self.logger.info('Found parent data MS %s' % msfile)
# except IndexError:
# self.logger.warn('Could not find parent data MS for metadata...')
# tb.open(msfile + '/ANTENNA')
# antname = tb.getcol('NAME') # one name per ant
# tb.close()
# tb.open(msfile + '/SPECTRAL_WINDOW')
# reffreq = 1e-6*(tb.getcol('REF_FREQUENCY')+tb.getcol('TOTAL_BANDWIDTH')/2) # similar to telcal "skyfreq"
# specname = tb.getcol('NAME')
# tb.close()
# tb.open(msfile + '/SOURCE')
# source = [name for name in tb.getcol('NAME') if 'J' in name][0] # should return single cal name **hack**
# tb.close()
# nsol = len(gain[0,0])
# ifid0R = specname[0][7] + '-' + specname[0][8] # one value
# ifid0L = specname[0][9] + '-' + specname[0][10] # one value
# ifid1R = specname[1][7] + '-' + specname[1][8] # one value
# ifid1L = specname[1][9] + '-' + specname[1][10] # one value
# # paste R,L end to end, so first loop over time, then spw, then pol
# mjd = n.concatenate( (time, time), axis=0)
# ifid = [ifid0R]*(nsol/2) + [ifid1R]*(nsol/2) + [ifid0L]*(nsol/2) + [ifid1L]*(nsol/2) # first quarter is spw0,pol0, then spw1,pol0, ...
# skyfreq = n.concatenate( (reffreq[0]*n.ones(nsol/2), reffreq[1]*n.ones(nsol/2), reffreq[0]*n.ones(nsol/2), reffreq[1]*n.ones(nsol/2)), axis=0)
# gain = n.concatenate( (gain[0,0],gain[1,0]), axis=0)
# amp = n.abs(gain)
# phase = n.degrees(n.angle(gain))
# source = [source]*nsol*2
# flagged = n.concatenate( (flag[0,0],flag[1,0]), axis=0)
nants = len(n.unique(antnum))
nspw = len(n.unique(spw))
self.spwlist = n.unique(spw)
npol = len(gain)
# merge times less than some threshold
nsol = 0
newmjd = [n.unique(mjd)[0]]
uniquefield = [field[n.where(newmjd[0] == mjd)][0]]
skip = []
for i in range(1, len(n.unique(mjd))):
if 24*3600*(n.unique(mjd)[i] - n.unique(mjd)[i-1]) < 30.:
skip.append(n.unique(mjd)[i])
continue
else:
newmjd.append(n.unique(mjd)[i])
uniquefield.append(field[n.where(n.unique(mjd)[i] == mjd)[0][0]])
self.uniquemjd = n.array(newmjd)
self.uniquefield = n.array(uniquefield)
nsol = len(self.uniquemjd)
self.logger.info('Parsed gain table solutions for %d solutions (skipping %d), %d ants, %d spw, and %d pols' % (nsol, len(skip), nants, nspw, npol))
self.logger.info('Unique solution fields/times: %s' % str(zip(self.uniquefield, self.uniquemjd)))
self.gain = n.zeros( (nsol, nants, nspw, npol), dtype='complex' )
flags = n.zeros( (nsol, nants, nspw, npol), dtype='complex' )
for sol in range(nsol):
for ant in range(nants):
for spw in range(nspw):
for pol in range(npol):
self.gain[sol, ant, spw, pol] = gain[pol,0,spw*nsol*nants+sol*nants+ant]
flags[sol, ant, spw, pol] = flagged[pol,0,spw*nsol*nants+sol*nants+ant]
self.gain = n.ma.masked_array(self.gain, flags)
# gain = n.concatenate( (n.concatenate( (gain[0,0,:nants*nsol].reshape(nsol,nants,1,1), gain[1,0,:nants*nsol].reshape(nsol,nants,1,1)), axis=3), n.concatenate( (gain[0,0,nants*nsol:].reshape(nsol,nants,1,1), gain[1,0,nants*nsol:].reshape(nsol,nants,1,1)), axis=3)), axis=2)
# flagged = n.concatenate( (n.concatenate( (flagged[0,0,:nants*nsol].reshape(nsol,nants,1,1), flagged[1,0,:nants*nsol].reshape(nsol,nants,1,1)), axis=3), n.concatenate( (flagged[0,0,nants*nsol:].reshape(nsol,nants,1,1), flagged[1,0,nants*nsol:].reshape(nsol,nants,1,1)), axis=3)), axis=2)
# self.gain = n.ma.masked_array(gain, flagged == True)
self.mjd = n.array(mjd); self.antnum = antnum
# make another version of ants array
# self.antnum = n.concatenate( (antnum, antnum), axis=0)
# self.amp = n.array(amp); self.phase = n.array(phase)
# self.antname = n.concatenate( (antname[antnum], antname[antnum]), axis=0)
# self.complete = n.arange(len(self.mjd))
# for consistency with telcal
#self.ifid = n.array(ifid); self.skyfreq = n.array(skyfreq); self.source = n.array(source)
def parsebp(self, bpfile, debug=False):
""" Takes bp CASA cal table and places values in numpy arrays.
Assumes two or fewer spw. :\
Assumes one bp solution per file.
"""
# bandpass. taking liberally from Corder et al's analysisutilities
([polyMode, polyType, nPolyAmp, nPolyPhase, scaleFactor, nRows, nSpws, nUniqueTimesBP, uniqueTimesBP,
nPolarizations, frequencyLimits, increments, frequenciesGHz, polynomialPhase,
polynomialAmplitude, timesBP, antennasBP, cal_desc_idBP, spwBP]) = openBpolyFile(bpfile, debug)
# index iterates over antennas, then times/sources (solution sets). each index has 2x npoly, which are 2 pols
polynomialAmplitude = n.array(polynomialAmplitude); polynomialPhase = n.array(polynomialPhase)
polynomialAmplitude[:,0] = 0.; polynomialAmplitude[:,nPolyAmp] = 0.
        polynomialPhase[:,0] = 0.; polynomialPhase[:,nPolyPhase] = 0.
ampSolR, ampSolL = calcChebyshev(polynomialAmplitude, frequencyLimits, n.array(frequenciesGHz)*1e+9)
phaseSolR, phaseSolL = calcChebyshev(polynomialPhase, frequencyLimits, n.array(frequenciesGHz)*1e+9)
nants = len(n.unique(antennasBP))
self.bptimes = n.array(timesBP)
ptsperspec = 1000
npol = 2
self.logger.info('Parsed bp solutions for %d solutions, %d ants, %d spw, and %d pols' % (nUniqueTimesBP, nants, nSpws, nPolarizations))
self.bandpass = n.zeros( (nants, nSpws*ptsperspec, npol), dtype='complex')
for spw in range(nSpws):
ampSolR[spw*nants:(spw+1)*nants] += 1 - ampSolR[spw*nants:(spw+1)*nants].mean() # renormalize mean over ants (per spw) == 1
ampSolL[spw*nants:(spw+1)*nants] += 1 - ampSolL[spw*nants:(spw+1)*nants].mean()
for ant in range(nants):
self.bandpass[ant, spw*ptsperspec:(spw+1)*ptsperspec, 0] = ampSolR[ant+spw*nants] * n.exp(1j*phaseSolR[ant+spw*nants])
self.bandpass[ant, spw*ptsperspec:(spw+1)*ptsperspec, 1] = ampSolL[ant+spw*nants] * n.exp(1j*phaseSolL[ant+spw*nants])
self.bpfreq = n.zeros( (nSpws*ptsperspec) )
for spw in range(nSpws):
self.bpfreq[spw*ptsperspec:(spw+1)*ptsperspec] = 1e9 * frequenciesGHz[nants*spw]
# bpSolR0 = ampSolR[:nants] * n.exp(1j*phaseSolR[:nants])
# bpSolR1 = ampSolR[nants:] * n.exp(1j*phaseSolR[nants:])
# bpSolL0 = ampSolL[:nants] * n.exp(1j*phaseSolL[:nants])
# bpSolL1 = ampSolL[nants:] * n.exp(1j*phaseSolL[nants:])
# structure close to tpipe data structure (nant, freq, pol). note that freq is oversampled to 1000 bins.
# self.bandpass = n.concatenate( (n.concatenate( (bpSolR0[:,:,None], bpSolR1[:,:,None]), axis=1), n.concatenate( (bpSolL0[:,:,None], bpSolL1[:,:,None]), axis=1)), axis=2)
# self.bpfreq = 1e9*n.concatenate( (frequenciesGHz[0], frequenciesGHz[nants]), axis=0) # freq values at bp bins
# self.logger.info('Parsed bp table solutions for %d solutions, %d ants, %d spw, and %d pols' % (nUniqueTimesBP, nants, nSpws, nPolarizations))
def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=1, spwind=[], pols=['XX','YY']):
""" Set select parameter that defines time, spw, and pol solutions to apply.
time defines the time to find solutions near in mjd.
freqs defines frequencies to select bandpass solution
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
radec (radian tuple) and dist (deg) define optional location of source for filtering solutions.
spwind is list of indices to be used (e.g., [0,2,4,10])
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed.
calname not used. here for uniformity with telcal_sol.
"""
self.spwind = spwind
if calname:
self.logger.warn('calname option not used for casa_sol. Applied based on radec.')
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY']
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL']
self.polind = [polord.index(pol) for pol in pols]
self.ant1ind = [n.where(ant1 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
self.ant2ind = [n.where(ant2 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
# select by smallest time distance for source within some angular region of target
if radec:
ra, dec = radec
calra = n.array(self.radec)[:,0]
caldec = n.array(self.radec)[:,1]
fields = n.where( (n.abs(calra - ra) < n.radians(dist)) & (n.abs(caldec - dec) < n.radians(dist)) )[0]
if len(fields) == 0:
self.logger.warn('Warning: no close calibrator found. Removing radec restriction.')
fields = n.unique(self.uniquefield)
else:
fields = n.unique(self.uniquefield)
sel = []
for field in fields:
sel += list(n.where(field == self.uniquefield)[0])
mjddist = n.abs(time - self.uniquemjd[sel])
closestgain = n.where(mjddist == mjddist.min())[0][0]
        solind = sel[closestgain]
        self.logger.info('Using gain solution for field %d at MJD %.5f, separated by %d min ' % (self.uniquefield[solind], self.uniquemjd[solind], mjddist[closestgain]*24*60))
        self.gain = self.gain.take(self.spwind, axis=2).take(self.polind, axis=3)[solind]
if hasattr(self, 'bandpass'):
bins = [n.where(n.min(n.abs(self.bpfreq-selfreq)) == n.abs(self.bpfreq-selfreq))[0][0] for selfreq in freqs]
self.bandpass = self.bandpass.take(bins, axis=1).take(self.polind, axis=2)
self.freqs = freqs
self.logger.debug('Using bandpass at BP bins (1000 bins per spw): %s', str(bins))
def calc_flag(self, sig=3.0):
""" Calculates antennas to flag, based on bad gain and bp solutions.
"""
if len(self.gain.shape) == 4:
gamp = n.abs(self.gain).mean(axis=0) # mean gain amp for each ant over time
elif len(self.gain.shape) == 3:
gamp = n.abs(self.gain) # gain amp for selected time
# badgain = n.where(gamp < gamp.mean() - sig*gamp.std())
badgain = n.where( (gamp < gamp.mean() - sig*gamp.std()) | gamp.mask)
self.logger.info('Flagging low/bad gains for ant/spw/pol: %s %s %s' % (str(self.antnum[badgain[0]]), str(badgain[1]), str(badgain[2])))
badants = badgain
return badants
def apply(self, data):
""" Applies calibration solution to data array. Assumes structure of (nint, nbl, nch, npol).
"""
# flag bad ants
if self.flagants:
badants = self.calc_flag()
else:
badants = n.array([[]])
# apply gain correction
if hasattr(self, 'bandpass'):
corr = n.ones_like(data)
flag = n.ones_like(data.real).astype('int')
chans_uncal = range(len(self.freqs))
for spwi in range(len(self.spwind)):
chsize = n.round(self.bpfreq[1]-self.bpfreq[0], 0)
ww = n.where( (self.freqs >= self.bpfreq[self.spwind[spwi]*1000]) & (self.freqs <= self.bpfreq[(self.spwind[spwi]+1)*1000-1]+chsize) )[0]
if len(ww) == 0:
                    self.logger.info('Gain solution frequencies not found in data for spw %d. Skipping.' % (self.spwind[spwi]))
                    continue
firstch = ww[0]
lastch = ww[-1]+1
for ch in ww:
chans_uncal.remove(ch)
self.logger.info('Combining gain sol from spw=%d with BW chans from %d-%d' % (self.spwind[spwi], firstch, lastch))
for badant in n.transpose(badants):
if badant[1] == spwi:
badbl = n.where((badant[0] == n.array(self.ant1ind)) | (badant[0] == n.array(self.ant2ind)))[0]
flag[:, badbl, firstch:lastch, badant[2]] = 0
corr1 = self.gain[self.ant1ind, spwi, :][None, :, None, :] * self.bandpass[self.ant1ind, firstch:lastch, :][None, :, :, :]
corr2 = (self.gain[self.ant2ind, spwi, :][None, :, None, :] * self.bandpass[self.ant2ind, firstch:lastch, :][None, :, :, :]).conj()
corr[:, :, firstch:lastch, :] = corr1 * corr2
if len(chans_uncal):
self.logger.info('Setting data without bp solution to zero for chans %s.' % (chans_uncal))
flag[:, :, chans_uncal,:] = 0
data[:] *= flag/corr
else:
for spw in range(len(self.gain[0,0])):
pass
def plot(self):
""" Quick visualization of calibration solution.
"""
import pylab as p
p.clf()
fig = p.figure(1)
nspw = len(self.gain[0])
        ext = n.ceil(n.sqrt(nspw))  # find best square plot (simplest)
for spw in range(len(self.gain[0])):
ax = fig.add_subplot(ext, ext, spw+1)
for pol in [0,1]:
ax.scatter(range(len(self.gain)), n.abs(self.gain.data[:,spw,pol]), color=n.array(['k','y']).take(self.gain.mask[:,spw,pol]), marker=['x','.'][pol])
fig.show()
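# A minimal usage sketch for casa_sol, assuming hypothetical table names
# ('cal.g1', 'cal.b1') and pipeline-supplied time/freqs/blarr; the spwind and
# pols values assume a 2-spw, dual-polarization dataset. data is an array of
# shape (nints, nbl, nch, npol) and is corrected in place by apply().
def _example_apply_casa_sol(data, time, freqs, blarr):
    sols = casa_sol('cal.g1', flagants=True)   # parse CASA gain table
    sols.parsebp('cal.b1')                     # optionally add a bandpass (BPOLY) table
    sols.set_selection(time, freqs, blarr, spwind=[0, 1], pols=['RR', 'LL'])
    sols.apply(data)
    return data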
def openBpolyFile(caltable, debug=False):
logger = logging.getLogger(__name__)
# mytb = au.createCasaTool(tbtool) # from analysisutilities by corder
tb.open(caltable)
desc = tb.getdesc()
if ('POLY_MODE' in desc):
polyMode = tb.getcol('POLY_MODE')
polyType = tb.getcol('POLY_TYPE')
scaleFactor = tb.getcol('SCALE_FACTOR')
antenna1 = tb.getcol('ANTENNA1')
times = tb.getcol('TIME')
cal_desc_id = tb.getcol('CAL_DESC_ID')
nRows = len(polyType)
for pType in polyType:
if (pType != 'CHEBYSHEV'):
logger.info("I do not recognized polynomial type = %s" % (pType))
return
# Here we assume that all spws have been solved with the same mode
uniqueTimesBP = n.unique(tb.getcol('TIME'))
nUniqueTimesBP = len(uniqueTimesBP)
if (nUniqueTimesBP >= 2):
logger.debug("Multiple BP sols found with times differing by %s seconds. Using first." % (str(uniqueTimesBP-uniqueTimesBP[0])))
nUniqueTimesBP = 1
uniqueTimesBP = uniqueTimesBP[0]
mystring = ''
nPolyAmp = tb.getcol('N_POLY_AMP')
nPolyPhase = tb.getcol('N_POLY_PHASE')
frequencyLimits = tb.getcol('VALID_DOMAIN')
increments = 0.001*(frequencyLimits[1,:]-frequencyLimits[0,:])
frequenciesGHz = []
for i in range(len(frequencyLimits[0])):
freqs = (1e-9)*n.arange(frequencyLimits[0,i],frequencyLimits[1,i],increments[i]) # **for some reason this is nch-1 long?**
frequenciesGHz.append(freqs)
polynomialAmplitude = []
polynomialPhase = []
for i in range(len(polyMode)):
polynomialAmplitude.append([1])
polynomialPhase.append([0])
if (polyMode[i] == 'A&P' or polyMode[i] == 'A'):
polynomialAmplitude[i] = tb.getcell('POLY_COEFF_AMP',i)[0][0][0]
if (polyMode[i] == 'A&P' or polyMode[i] == 'P'):
polynomialPhase[i] = tb.getcell('POLY_COEFF_PHASE',i)[0][0][0]
tb.close()
tb.open(caltable+'/CAL_DESC')
nSpws = len(tb.getcol('NUM_SPW'))
spws = tb.getcol('SPECTRAL_WINDOW_ID')
spwBP = []
for c in cal_desc_id:
spwBP.append(spws[0][c])
tb.close()
nPolarizations = len(polynomialAmplitude[0]) // nPolyAmp[0]
mystring += '%.3f, ' % (uniqueTimesBP/(24*3600))
logger.debug('BP solution has unique time(s) %s and %d pols' % (mystring, nPolarizations))
# This value is overridden by the new function doPolarizations in ValueMapping.
# logger.debug("Inferring %d polarizations from size of polynomial array" % (nPolarizations))
return([polyMode, polyType, nPolyAmp, nPolyPhase, scaleFactor, nRows, nSpws, nUniqueTimesBP,
uniqueTimesBP, nPolarizations, frequencyLimits, increments, frequenciesGHz,
polynomialPhase, polynomialAmplitude, times, antenna1, cal_desc_id, spwBP])
else:
tb.close()
return([])
# end of openBpolyFile()
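# Illustrative usage sketch (added here for clarity; not part of the original module).
# Assumes it runs inside CASA, where the global table tool 'tb' exists, and that
# 'bandpass.bcal' is a hypothetical BPOLY caltable path. It only demonstrates how the
# list returned by openBpolyFile() is typically unpacked.
def _openBpolyFile_usage_sketch(caltable='bandpass.bcal'):
    result = openBpolyFile(caltable)
    if not result:
        return None
    (polyMode, polyType, nPolyAmp, nPolyPhase, scaleFactor, nRows, nSpws,
     nUniqueTimesBP, uniqueTimesBP, nPolarizations, frequencyLimits, increments,
     frequenciesGHz, polynomialPhase, polynomialAmplitude, times, antenna1,
     cal_desc_id, spwBP) = result
    # report the basic dimensions of the bandpass solution
    return nRows, nSpws, nPolarizations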
def calcChebyshev(coeffs, validDomain, freqs):
"""
Given a set of coefficients,
this method evaluates a Chebyshev approximation.
Used for CASA bandpass reading.
input coeffs and freqs are numpy arrays
"""
logger = logging.getLogger(__name__)
domain = (validDomain[1] - validDomain[0])[0]
bins = -1 + 2* n.array([ (freqs[i]-validDomain[0,i])/domain for i in range(len(freqs))])
ncoeffs = len(coeffs[0]) // 2
rr = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i,:ncoeffs]) for i in range(len(coeffs))])
ll = n.array([n.polynomial.chebyshev.chebval(bins[i], coeffs[i,ncoeffs:]) for i in range(len(coeffs))])
return rr,ll
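# Minimal sketch of calling calcChebyshev (added for clarity; the toy coefficients,
# domains, and frequencies below are assumptions, not values from a real caltable).
# Each row of 'coeffs' holds two concatenated Chebyshev series that are evaluated
# over that row's valid frequency domain, matching what openBpolyFile() provides.
def _calcChebyshev_sketch():
    import numpy as np
    coeffs = np.array([[1.0, 0.1, 0.0, 0.05],      # row 0: 2 + 2 coefficients
                       [0.9, -0.05, 0.1, 0.0]])    # row 1
    validDomain = np.array([[1.0e9, 1.0e9],        # lower domain edge per row (Hz)
                            [2.0e9, 2.0e9]])       # upper domain edge per row (Hz)
    freqs = np.array([np.linspace(1.0e9, 2.0e9, 8),
                      np.linspace(1.0e9, 2.0e9, 8)])
    first_series, second_series = calcChebyshev(coeffs, validDomain, freqs)
    return first_series, second_series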
class telcal_sol():
""" Instantiated with on telcalfile.
Parses .GN file and provides tools for applying to data of shape (nints, nbl, nch, npol)
"""
def __init__(self, telcalfile, flagants=True):
self.logger = logging.getLogger(__name__)
if os.path.exists(telcalfile):
self.parseGN(telcalfile)
self.logger.info('Read telcalfile %s' % telcalfile)
if flagants:
self.flagants()
else:
self.logger.warn('Gainfile not found.')
raise IOError
def flagants(self, threshold=50):
""" Flags solutions with amplitude more than threshold larger than median.
"""
# identify very low gain amps not already flagged
badsols = n.where( (n.median(self.amp)/self.amp > threshold) & (self.flagged == False))[0]
if len(badsols):
self.logger.info('Solutions %s flagged (times %s, ants %s, freqs %s) for low gain amplitude.' % (str(badsols), self.mjd[badsols], self.antname[badsols], self.ifid[badsols]))
for sol in badsols:
self.flagged[sol] = True
def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=0, spwind=[], pols=['XX','YY']):
""" Set select parameter that defines spectral window, time, or any other selection.
time (in mjd) defines the time to find solutions near for given calname.
freqs (in Hz) is frequencies in data.
blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
calname defines the name of the calibrator to use. if blank, uses only the time selection.
pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. not yet implemented.
radec, dist, spwind not used. here for uniformity with casa_sol.
"""
self.freqs = freqs
self.chansize = freqs[1]-freqs[0]
self.select = self.complete # use only complete solution sets (set during parse)
self.blarr = blarr
if spwind:
self.logger.warn('spwind option not used for telcal_sol. Applied based on freqs.')
if radec:
self.logger.warn('radec option not used for telcal_sol. Applied based on calname.')
if dist:
self.logger.warn('dist option not used for telcal_sol. Applied based on calname.')
# define pol index
if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
polord = ['XX', 'YY']
elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
polord = ['RR', 'LL']
self.polind = [polord.index(pol) for pol in pols]
if calname:
nameselect = []
for ss in n.unique(self.source[self.select]):
if calname in ss:
nameselect = n.where(self.source[self.select] == ss) # define selection for name
self.select = self.select[nameselect] # update overall selection
self.logger.debug('Selection down to %d solutions with %s' % (len(self.select), calname))
if not nameselect:
self.logger.debug('Calibrator name %s not found. Ignoring.' % (calname))
# select freq
freqselect = n.where([ff in n.around(self.freqs, -6) for ff in n.around(1e6*self.skyfreq[self.select], -6)]) # takes solution if band center is in (rounded) array of chan freqs
if len(freqselect[0]) == 0:
raise RuntimeError('No complete set of telcal solutions at that frequency.')
self.select = self.select[freqselect[0]] # update overall selection
self.logger.info('Frequency selection cut down to %d solutions' % (len(self.select)))
# select pol
# ifids = self.ifid[self.select]
# if (polstr == 'RR') or (polstr == 'XX'):
# polselect = n.where(['A' in ifid or 'B' in ifid for ifid in ifids])
# elif (polstr == 'LL') or (polstr == 'YY'):
# polselect = n.where(['C' in ifid or 'D' in ifid for ifid in ifids])
# self.select = self.select[polselect] # update overall selection
self.polarization = n.empty(len(self.ifid))
for i in range(len(self.ifid)):
if ('A' in self.ifid[i]) or ('B' in self.ifid[i]):
self.polarization[i] = 0
elif ('C' in self.ifid[i]) or ('D' in self.ifid[i]):
self.polarization[i] = 1
# select by smallest time distance for source
mjddist = n.abs(time - n.unique(self.mjd[self.select]))
closest = n.where(mjddist == mjddist.min())
if len(closest[0]) > 1:
self.logger.info('Multiple closest solutions in time (%s). Taking first.' % (str(closest[0])))
closest = closest[0][0]
timeselect = n.where(self.mjd[self.select] == n.unique(self.mjd[self.select])[closest]) # define selection for time
self.select = self.select[timeselect[0]] # update overall selection
self.logger.info('Selection down to %d solutions separated from given time by %d minutes' % (len(self.select), mjddist[closest]*24*60))
self.logger.debug('Selected solutions: %s' % str(self.select))
self.logger.info('MJD: %s' % str(n.unique(self.mjd[self.select])))
self.logger.debug('Mid frequency (MHz): %s' % str(n.unique(self.skyfreq[self.select])))
self.logger.debug('IFID: %s' % str(n.unique(self.ifid[self.select])))
self.logger.info('Source: %s' % str(n.unique(self.source[self.select])))
self.logger.debug('Ants: %s' % str(n.unique(self.antname[self.select])))
def parseGN(self, telcalfile):
"""Takes .GN telcal file and places values in numpy arrays.
"""
skip = 3 # skip first three header lines
MJD = 0; UTC = 1; LSTD = 2; LSTS = 3; IFID = 4; SKYFREQ = 5; ANT = 6; AMP = 7; PHASE = 8
RESIDUAL = 9; DELAY = 10; FLAGGED = 11; ZEROED = 12; HA = 13; AZ = 14; EL = 15
SOURCE = 16
#FLAGREASON = 17
mjd = []; utc = []; lstd = []; lsts = []; ifid = []; skyfreq = [];
antname = []; amp = []; phase = []; residual = []; delay = [];
flagged = []; zeroed = []; ha = []; az = []; el = []; source = []
#flagreason = []
i = 0
for line in open(telcalfile,'r'):
fields = line.split()
if i < skip:
i += 1
continue
if ('NO_ANTSOL_SOLUTIONS_FOUND' in line):
# keep ERROR solutions now that flagging works
continue
try:
mjd.append(float(fields[MJD])); utc.append(fields[UTC]); lstd.append(float(fields[LSTD])); lsts.append(fields[LSTS])
ifid.append(fields[IFID]); skyfreq.append(float(fields[SKYFREQ])); antname.append(fields[ANT])
amp.append(float(fields[AMP])); phase.append(float(fields[PHASE])); residual.append(float(fields[RESIDUAL]))
delay.append(float(fields[DELAY])); flagged.append('true' == (fields[FLAGGED]))
zeroed.append('true' == (fields[ZEROED])); ha.append(float(fields[HA])); az.append(float(fields[AZ]))
el.append(float(fields[EL])); source.append(fields[SOURCE])
# flagreason.append('') # 18th field not yet implemented
except ValueError:
self.logger.warn('Trouble parsing line of telcal file. Skipping.')
continue
self.mjd = n.array(mjd); self.utc = n.array(utc); self.lstd = n.array(lstd); self.lsts = n.array(lsts)
self.ifid = n.array(ifid); self.skyfreq = n.array(skyfreq); self.antname = n.array(antname); self.amp = n.array(amp)
self.phase = n.array(phase); self.residual = n.array(residual); self.delay = n.array(delay)
self.flagged = n.array(flagged); self.zeroed = n.array(zeroed); self.ha = n.array(ha); self.az = n.array(az)
self.el = n.array(el); self.source = n.array(source);
#self.flagreason = n.array(flagreason)
# purify list to keep only complete solution sets
# uu = n.unique(self.mjd)
# uu2 = n.concatenate( (uu, [uu[-1] + (uu[-1]-uu[-2])]) ) # add rightmost bin
# count,bin = n.histogram(self.mjd, bins=uu2)
# goodmjd = bin[n.where(count == count.max())]
# complete = n.array([], dtype='int')
# for mjd in goodmjd:
# complete = n.concatenate( (complete, n.where(mjd == self.mjd)[0]) )
# self.complete = n.array(complete)
self.complete = n.arange(len(self.mjd))
# make another version of ants array
antnum = []
for aa in self.antname:
antnum.append(int(aa[2:])) # cuts the 'ea' from start of antenna string to get integer
self.antnum = n.array(antnum)
def calcgain(self, ant1, ant2, skyfreq, pol):
""" Calculates the complex gain product (g1*g2) for a pair of antennas.
"""
select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]]
self.logger.debug('select %s' % (str(select)))
if len(select): # for when telcal solutions don't exist
ind1 = n.where(ant1 == self.antnum[select])
ind2 = n.where(ant2 == self.antnum[select])
g1 = self.amp[select][ind1]*n.exp(1j*n.radians(self.phase[select][ind1])) * (not self.flagged.astype(int)[select][ind1][0])
g2 = self.amp[select][ind2]*n.exp(-1j*n.radians(self.phase[select][ind2])) * (not self.flagged.astype(int)[select][ind2][0])
else:
g1 = [0]; g2 = [0]
try:
assert (g1[0] != 0j) and (g2[0] != 0j)
invg1g2 = 1./(g1[0]*g2[0])
except (AssertionError, IndexError):
invg1g2 = 0
return invg1g2
def calcdelay(self, ant1, ant2, skyfreq, pol):
""" Calculates the relative delay (d1-d2) for a pair of antennas in ns.
"""
select = self.select[n.where( (self.skyfreq[self.select] == skyfreq) & (self.polarization[self.select] == pol) )[0]]
ind1 = n.where(ant1 == self.antnum[select])
ind2 = n.where(ant2 == self.antnum[select])
d1 = self.delay[select][ind1]
d2 = self.delay[select][ind2]
if len(d1-d2) > 0:
return d1-d2
else:
return n.array([0])
def apply(self, data):
""" Applies calibration solution to data array. Assumes structure of (nint, nbl, nch, npol).
"""
# find best skyfreq for each channel
skyfreqs = n.unique(self.skyfreq[self.select]) # one per spw
nch_tot = len(self.freqs)
chan_bandnum = [range(nch_tot*i//len(skyfreqs), nch_tot*(i+1)//len(skyfreqs)) for i in range(len(skyfreqs))] # divide chans by number of spw in solution
self.logger.info('Solutions for %d spw: (%s)' % (len(skyfreqs), skyfreqs))
for j in range(len(skyfreqs)):
skyfreq = skyfreqs[j]
chans = chan_bandnum[j]
self.logger.info('Applying gain solution for chans from %d-%d' % (chans[0], chans[-1]))
# define freq structure to apply delay solution
nch = len(chans)
chanref = nch//2 # reference channel at center
relfreq = self.chansize*(n.arange(nch) - chanref) # relative frequency
for i in range(len(self.blarr)):
ant1, ant2 = self.blarr[i] # ant numbers (1-based)
for pol in self.polind:
# apply gain correction
invg1g2 = self.calcgain(ant1, ant2, skyfreq, pol)
data[:,i,chans,pol-self.polind[0]] = data[:,i,chans,pol-self.polind[0]] * invg1g2 # hack: lousy data pol indexing
# apply delay correction
d1d2 = self.calcdelay(ant1, ant2, skyfreq, pol)
delayrot = 2*n.pi*(d1d2[0] * 1e-9) * relfreq # phase to rotate across band
data[:,i,chans,pol-self.polind[0]] = data[:,i,chans,pol-self.polind[0]] * n.exp(-1j*delayrot[None, None, :]) # do rotation
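# Hedged end-to-end sketch of telcal_sol (added for clarity; the file name, frequency
# grid, baseline list, calibrator name, and MJD below are illustrative assumptions).
def _telcal_sol_usage_sketch():
    import numpy as np
    sols = telcal_sol('example.GN')                       # hypothetical telcal file; raises IOError if missing
    freqs = 1.4e9 + 1.0e6 * np.arange(256)                # per-channel sky frequencies (Hz)
    blarr = np.array([(a1, a2) for a1 in range(1, 6) for a2 in range(a1 + 1, 6)])
    sols.set_selection(57000.0, freqs, blarr, calname='3C286', pols=['XX', 'YY'])
    data = np.ones((10, len(blarr), len(freqs), 2), dtype='complex64')
    sols.apply(data)                                      # gain and delay corrections applied in place
    return data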
import nose
import time
import pickle
import networkx
import logging
l = logging.getLogger("angr.tests.test_cfgemulated")
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
import angr
from angr import options as o
def compare_cfg(standard, g, function_list):
"""
Standard graph comes with addresses only, and it is based on instructions, not on basic blocks
"""
def get_function_name(addr):
start = 0
end = len(function_list) - 1
while start <= end:
mid = (start + end) // 2
f = function_list[mid]
if addr < f['start']:
end = mid - 1
elif addr > f['end']:
start = mid + 1
else:
return f['name']
return None
# Sort function list
function_list = sorted(function_list, key=lambda x: x['start'])
# Convert the IDA-style CFG into VEX-style CFG
s_graph = networkx.DiGraph()
all_nodes = sorted(standard.nodes())
addr_to_basicblock = {}
last_basicblock = None
for n in all_nodes:
if last_basicblock is None:
last_basicblock = (n, n)
block = last_basicblock
successors = list(standard.successors(n))
if len(successors) == 1 and successors[0] >= block[0]:
last_basicblock = (block[0], successors[0])
else:
# Save the existing block
addr_to_basicblock[block[0]] = block
# Create edges
for s in successors:
s_graph.add_edge(block[0], s)
# Clear last_basicblock so that we create a new basicblock next time
last_basicblock = None
graph = networkx.DiGraph()
for src, dst in g.edges():
graph.add_edge(src.addr, dst.addr)
# Graph comparison
for src, dst in s_graph.edges():
if graph.has_edge(src, dst):
continue
else:
# Edge doesn't exist in our CFG
l.error("Edge (%s-0x%x, %s-0x%x) only exists in IDA CFG.", get_function_name(src), src, get_function_name(dst), dst)
for src, dst in graph.edges():
if s_graph.has_edge(src, dst):
continue
else:
# Edge doesn't exist in our CFG
l.error("Edge (%s-0x%x, %s-0x%x) only exists in angr's CFG.", get_function_name(src), src, get_function_name(dst), dst)
def perform_single(binary_path, cfg_path=None):
proj = angr.Project(binary_path,
use_sim_procedures=True,
default_analysis_mode='symbolic',
load_options={'auto_load_libs': False})
start = time.time()
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=1, fail_fast=True)
end = time.time()
duration = end - start
bbl_dict = cfg.nodes()
l.info("CFG generated in %f seconds.", duration)
l.info("Contains %d members in BBL dict.", len(bbl_dict))
if cfg_path is not None and os.path.isfile(cfg_path):
# Compare the graph with a predefined CFG
info = pickle.load(open(cfg_path, "rb"))
standard = info['cfg']
functions = info['functions']
graph = cfg.graph
compare_cfg(standard, graph, functions)
else:
l.warning("No standard CFG specified.")
def disabled_cfg_0():
binary_path = os.path.join(test_location, 'x86_64', 'cfg_0')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_cfg_1():
binary_path = os.path.join(test_location, 'x86_64', 'cfg_1')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_cfg_2():
binary_path = os.path.join(test_location, 'armel', 'test_division')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_cfg_3():
binary_path = os.path.join(test_location, 'mips', 'test_arrays')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_cfg_4():
binary_path = os.path.join(test_location, 'mipsel', 'darpa_ping')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_additional_edges():
# Test the `additional_edges` parameter for CFG generation
binary_path = os.path.join(test_location, 'x86_64', 'switch')
proj = angr.Project(binary_path,
use_sim_procedures=True,
default_analysis_mode='symbolic',
load_options={'auto_load_libs': False})
additional_edges = {
0x400573 : [ 0x400580, 0x40058f, 0x40059e ]
}
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=0, additional_edges=additional_edges, fail_fast=True,
resolve_indirect_jumps=False, # For this test case, we need to disable the
# jump table resolving, otherwise CFGEmulated
# can automatically find the node 0x4005ad.
)
nose.tools.assert_not_equal(cfg.get_any_node(0x400580), None)
nose.tools.assert_not_equal(cfg.get_any_node(0x40058f), None)
nose.tools.assert_not_equal(cfg.get_any_node(0x40059e), None)
nose.tools.assert_equal(cfg.get_any_node(0x4005ad), None)
def test_not_returning():
# Make sure we are properly labeling functions that do not return in function manager
binary_path = os.path.join(test_location, 'x86_64', 'not_returning')
proj = angr.Project(binary_path,
use_sim_procedures=True,
load_options={'auto_load_libs': False}
)
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=0, fail_fast=True) # pylint:disable=unused-variable
# function_a returns
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_a'), None)
nose.tools.assert_true(proj.kb.functions.function(name='function_a').returning)
# function_b does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_b'), None)
nose.tools.assert_false(proj.kb.functions.function(name='function_b').returning)
# function_c does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_c'), None)
nose.tools.assert_false(proj.kb.functions.function(name='function_c').returning)
# main does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='main'), None)
nose.tools.assert_false(proj.kb.functions.function(name='main').returning)
# function_d should not be reachable
nose.tools.assert_equal(proj.kb.functions.function(name='function_d'), None)
def disabled_cfg_5():
binary_path = os.path.join(test_location, 'mipsel', 'busybox')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_cfg_6():
function_addresses = [0xfa630, 0xfa683, 0xfa6d4, 0xfa707, 0xfa754, 0xfa779, 0xfa7a9, 0xfa7d6, 0xfa844, 0xfa857,
0xfa8d9, 0xfa92f, 0xfa959, 0xfa9fb, 0xfabd6, 0xfac61, 0xfacc2, 0xfad29, 0xfaf94, 0xfbd07,
0xfc100, 0xfc101, 0xfc14f, 0xfc18e, 0xfc25e, 0xfc261, 0xfc3c6, 0xfc42f, 0xfc4a3, 0xfc4cf,
0xfc4db, 0xfc5ba, 0xfc5ef, 0xfc5fe, 0xfc611, 0xfc682, 0xfc6b7, 0xfc7fc, 0xfc8a8, 0xfc8e7,
0xfcb42, 0xfcb50, 0xfcb72, 0xfcc3b, 0xfcc7a, 0xfcc8b, 0xfccdc, 0xfd1a3, 0xff06e]
# We need to add DO_CCALLS to resolve long jmp and support real mode
o.modes['fastpath'] |= {o.DO_CCALLS}
binary_path = test_location + "/i386/bios.bin.elf"
proj = angr.Project(binary_path,
use_sim_procedures=True,
page_size=1)
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=1, fail_fast=True) # pylint:disable=unused-variable
nose.tools.assert_greater_equal(set(f for f in proj.kb.functions), set(function_addresses))
o.modes['fastpath'] ^= {o.DO_CCALLS}
def test_fauxware():
binary_path = os.path.join(test_location, 'x86_64', 'fauxware')
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_loop_unrolling():
binary_path = os.path.join(test_location, 'x86_64', 'cfg_loop_unrolling')
p = angr.Project(binary_path)
cfg = p.analyses.CFGEmulated(fail_fast=True)
cfg.normalize()
cfg.unroll_loops(5)
nose.tools.assert_equal(len(cfg.get_all_nodes(0x400636)), 7)
def test_thumb_mode():
# In THUMB mode, all instruction addresses and function-manager addresses should be odd numbers, which
# faithfully reflects VEX's trick of encoding the THUMB state in the lowest bit of the address.
binary_path = os.path.join(test_location, 'armhf', 'test_arrays')
p = angr.Project(binary_path)
cfg = p.analyses.CFGEmulated(fail_fast=True)
def check_addr(a):
if a % 2 == 1:
nose.tools.assert_true(cfg.is_thumb_addr(a))
else:
nose.tools.assert_false(cfg.is_thumb_addr(a))
# CFGNodes
cfg_node_addrs = [ n.addr for n in cfg.graph.nodes() if not n.is_simprocedure ]
for a in cfg_node_addrs:
check_addr(a)
# Functions in function manager
for f_addr, f in p.kb.functions.items():
if f.is_simprocedure:
continue
check_addr(f_addr)
if f.startpoint is not None:
check_addr(f.startpoint.addr)
def test_fakeret_edges_0():
# Test the bug where a fakeret edge can be missing in certain cases
# Reported by Attila Axt (GitHub: @axt)
# Ref: https://github.com/angr/angr/issues/72
binary_path = os.path.join(test_location, "x86_64", "cfg_3")
p = angr.Project(binary_path)
cfg = p.analyses.CFGEmulated(context_sensitivity_level=3, fail_fast=True)
putchar_plt = cfg.functions.function(name="putchar", plt=True)
nose.tools.assert_true(putchar_plt.returning)
putchar = cfg.functions.function(name="putchar", plt=False)
nose.tools.assert_true(putchar.returning)
# Since context sensitivity is 3, there should be two different putchar nodes
putchar_cfgnodes = cfg.get_all_nodes(putchar.addr)
nose.tools.assert_equal(len(putchar_cfgnodes), 2)
# Each putchar node has a different predecessor as their PLT entry
plt_entry_0 = cfg.get_predecessors(putchar_cfgnodes[0])
nose.tools.assert_equal(len(plt_entry_0), 1)
plt_entry_0 = plt_entry_0[0]
plt_entry_1 = cfg.get_predecessors(putchar_cfgnodes[1])
nose.tools.assert_equal(len(plt_entry_1), 1)
plt_entry_1 = plt_entry_1[0]
nose.tools.assert_true(plt_entry_0 is not plt_entry_1)
# Each PLT entry should have a FakeRet edge
preds_0 = cfg.get_predecessors(plt_entry_0)
nose.tools.assert_equal(len(preds_0), 1)
preds_1 = cfg.get_predecessors(plt_entry_1)
nose.tools.assert_equal(len(preds_1), 1)
# Each predecessor must have a call edge and a FakeRet edge
edges_0 = cfg.get_successors_and_jumpkind(preds_0[0], excluding_fakeret=False)
nose.tools.assert_equal(len(edges_0), 2)
jumpkinds = { jumpkind for _, jumpkind in edges_0 }
nose.tools.assert_set_equal(jumpkinds, { 'Ijk_Call', 'Ijk_FakeRet' })
edges_1 = cfg.get_successors_and_jumpkind(preds_1[0], excluding_fakeret=False)
nose.tools.assert_equal(len(edges_1), 2)
jumpkinds = { jumpkind for _, jumpkind in edges_1 }
nose.tools.assert_set_equal(jumpkinds, { 'Ijk_Call', 'Ijk_FakeRet' })
def test_string_references():
# Test AttributeError on 'addr' which occurs when searching for string
# references
binary_path = os.path.join(test_location, "i386", "ctf_nuclear")
b = angr.Project(binary_path, load_options={'auto_load_libs': False})
cfg = b.analyses.CFGEmulated(keep_state=True, fail_fast=True)
string_references = []
for f in cfg.functions.values():
string_references.append(f.string_references())
# test passes if hasn't thrown an exception
def test_arrays():
binary_path = os.path.join(test_location, "armhf", "test_arrays")
b = angr.Project(binary_path, load_options={'auto_load_libs': False})
cfg = b.analyses.CFGEmulated(fail_fast=True)
node = cfg.model.get_any_node(0x10415)
nose.tools.assert_is_not_none(node)
successors = cfg.model.get_successors(node)
nose.tools.assert_equal(len(successors), 2)
def test_max_steps():
binary_path = os.path.join(test_location, "x86_64", "fauxware")
b = angr.Project(binary_path, load_options={'auto_load_libs': False})
cfg = b.analyses.CFGEmulated(max_steps=5, fail_fast=True)
dfs_edges = networkx.dfs_edges(cfg.graph)
depth_map = {}
for src, dst in dfs_edges:
if src not in depth_map:
depth_map[src] = 0
if dst not in depth_map:
depth_map[dst] = depth_map[src] + 1
depth_map[dst] = max(depth_map[src] + 1, depth_map[dst])
nose.tools.assert_less_equal(max(depth_map.values()), 5)
def test_armel_final_missing_block():
# Due to a stupid bug in CFGEmulated, the last block of a function might go missing in the function graph if the
# only entry edge to that block is an Ijk_Ret edge. See #475 on GitHub.
# Thank @gergo for reporting and providing this test binary.
binary_path = os.path.join(test_location, 'armel', 'last_block')
b = angr.Project(binary_path, auto_load_libs=False)
cfg = b.analyses.CFGEmulated(fail_fast=True)
blocks = list(cfg.kb.functions[0x8000].blocks)
nose.tools.assert_equal(len(blocks), 3)
nose.tools.assert_set_equal({ block.addr for block in blocks }, { 0x8000, 0x8014, 0x8020 })
def test_armel_final_missing_block_b():
# When _pending_jobs is not sorted, it is possible that we first process a pending job created earlier and then
# process another pending job created later. Ideally, we hope that jobs are always processed in a topological order,
# and the unsorted pending jobs break this assumption. In this test binary, at one point there can be two pending
# jobs, 0x10b05/0x10ac5(Ijk_FakeRet) and 0x10bbe(Ijk_FakeRet). If 0x10bbe is processed before 0x10b05, we do not
# know whether the function 0x10a29(aes) returns or not. As a result, the final block of the main function is not
# confirmed, and is not added to the function graph of function main.
#
# In fact, this also hints at a different bug: we should always "confirm" that a function returns if its FakeRet
# jobs are processed for whatever reason.
#
# Fixing either bug will resolve the issue that the final block does not show up in the function graph of main. To
# stay on the safe side, both of them are fixed. Thanks @tyb0807 for reporting this issue and providing a test
# binary.
# EDG says: This binary is compiled incorrectly.
# The binary's app code was compiled as CortexM, but linked against ARM libraries.
# This is illegal, and does not actually execute on a real CortexM.
# Somebody should recompile it....
binary_path = os.path.join(test_location, 'armel', 'aes')
b = angr.Project(binary_path, arch="ARMEL", auto_load_libs=False)
function = b.loader.main_object.get_symbol('main').rebased_addr
cfg = b.analyses.CFGEmulated(starts=[function],
context_sensitivity_level=0,
normalize=True,
fail_fast=True,
)
blocks = list(cfg.kb.functions['main'].blocks)
nose.tools.assert_equal(len(blocks), 2)
nose.tools.assert_set_equal(set(block.addr for block in blocks), { 0x10b79, 0x10bbf })
def test_armel_incorrect_function_detection_caused_by_branch():
# GitHub issue #685
binary_path = os.path.join(test_location, "armel", "RTOSDemo.axf.issue_685")
b = angr.Project(binary_path, auto_load_libs=False)
cfg = b.analyses.CFGEmulated()
# The Main function should be identified as a single function
nose.tools.assert_in(0x80a1, cfg.functions)
main_func = cfg.functions[0x80a1]
# All blocks should be there
block_addrs = sorted([ b.addr for b in main_func.blocks ])
nose.tools.assert_equal(block_addrs, [0x80a1, 0x80b1, 0x80bb, 0x80cd, 0x80df, 0x80e3, 0x80ed])
# The ResetISR function should be identified as a single function, too
nose.tools.assert_in(0x8009, cfg.functions)
resetisr_func = cfg.functions[0x8009]
# All blocks should be there
block_addrs = sorted([ b.addr for b in resetisr_func.blocks ])
nose.tools.assert_equal(block_addrs, [0x8009, 0x8011, 0x801f, 0x8027])
def test_cfg_switches():
#logging.getLogger('angr.analyses.cfg.cfg_fast').setLevel(logging.INFO)
#logging.getLogger('angr.analyses.cfg.indirect_jump_resolvers.jumptable').setLevel(logging.DEBUG)
filename = "cfg_switches"
edges = {
'x86_64': {
# jump table 0 in func_0
(0x40053a, 0x400547),
(0x40053a, 0x400552),
(0x40053a, 0x40055d),
(0x40053a, 0x400568),
(0x40053a, 0x400573),
(0x40053a, 0x400580),
(0x40053a, 0x40058d),
# jump table 0 in func_1
(0x4005bc, 0x4005c9),
(0x4005bc, 0x4005d8),
(0x4005bc, 0x4005e7),
(0x4005bc, 0x4005f6),
(0x4005bc, 0x400605),
(0x4005bc, 0x400614),
(0x4005bc, 0x400623),
(0x4005bc, 0x400632),
(0x4005bc, 0x40063e),
(0x4005bc, 0x40064a),
(0x4005bc, 0x4006b0),
# jump table 1 in func_1
(0x40065a, 0x400667),
(0x40065a, 0x400673),
(0x40065a, 0x40067f),
(0x40065a, 0x40068b),
(0x40065a, 0x400697),
(0x40065a, 0x4006a3),
# jump table 0 in main
(0x4006e1, 0x4006ee),
(0x4006e1, 0x4006fa),
(0x4006e1, 0x40070b),
(0x4006e1, 0x40071c),
(0x4006e1, 0x40072d),
(0x4006e1, 0x40073e),
(0x4006e1, 0x40074f),
(0x4006e1, 0x40075b),
},
}
arches = edges.keys()
for arch in arches:
path = os.path.join(test_location, arch, filename)
proj = angr.Project(path, load_options={'auto_load_libs': False})
cfg = proj.analyses.CFGEmulated()
for src, dst in edges[arch]:
src_node = cfg.get_any_node(src)
dst_node = cfg.get_any_node(dst)
nose.tools.assert_in(dst_node, src_node.successors,
msg="CFG edge %s-%s is not found." % (src_node, dst_node)
)
class CFGEmulatedAborted(angr.analyses.cfg.cfg_emulated.CFGEmulated): # pylint:disable=abstract-method
"""
Only used in the test_abort_and_resume test case.
"""
should_abort = False
def _intra_analysis(self):
if CFGEmulatedAborted.should_abort:
self.abort()
else:
super()._intra_analysis()
def test_abort_and_resume():
angr.analyses.AnalysesHub.register_default('CFGEmulatedAborted', CFGEmulatedAborted)
CFGEmulatedAborted.should_abort = False
binary_path = os.path.join(test_location, "x86_64", "fauxware")
b = angr.Project(binary_path, auto_load_libs=False)
CFGEmulatedAborted.should_abort = True
cfg = b.analyses.CFGEmulatedAborted()
nose.tools.assert_greater(len(list(cfg.jobs)), 0) # there should be left-over jobs
CFGEmulatedAborted.should_abort = False
cfg.resume()
nose.tools.assert_equal(len(list(cfg.jobs)), 0) # no left-over job
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
print(f)
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.state_plugins.abstract_memory").setLevel(logging.DEBUG)
# logging.getLogger("angr.state_plugins.symbolic_memory").setLevel(logging.DEBUG)
# logging.getLogger("angr.analyses.cfg.cfg_emulated").setLevel(logging.DEBUG)
# logging.getLogger("s_irsb").setLevel(logging.DEBUG)
# Temporarily disable the warnings of claripy backend
#logging.getLogger("claripy.backends.backend").setLevel(logging.ERROR)
#logging.getLogger("claripy.claripy").setLevel(logging.ERROR)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()