| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a responsive display ad to an ad group.
Image assets are uploaded using AssetService. To get ad groups, run
get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
import requests
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId']
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201809')
# Create the ad.
multi_asset_responsive_display_ad = {
'xsi_type': 'MultiAssetResponsiveDisplayAd',
'headlines': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Mars'
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Jupiter',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Travel to Pluto'
}
}],
'descriptions': [{
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
}, {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'See the planet in style.',
}
}],
'businessName': 'Galactic Luxury Cruises',
'longHeadline': {
'asset': {
'xsi_type': 'TextAsset',
'assetText': 'Visit the planet in a luxury spaceship.',
}
},
# This ad format does not allow the creation of an image asset by setting
# the asset.imageData field. An image asset must first be created using
# the AssetService, and asset.assetId must be populated when creating
# the ad.
'marketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/3b9Wfh')
}
}],
'squareMarketingImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}],
# Optional values
'finalUrls': ['http://www.example.com'],
'callToActionText': 'Shop Now',
# Set color settings using hexadecimal values. Set allowFlexibleColor to
# false if you want your ads to render by always using your colors
# strictly.
'mainColor': '#0000ff',
'accentColor': '#ffff00',
'allowFlexibleColor': False,
'formatSetting': 'NON_NATIVE',
# Set dynamic display ad settings, composed of landscape logo image,
# promotion text, and price prefix.
'dynamicSettingsPricePrefix': 'as low as',
'dynamicSettingsPromoText': 'Free shipping!',
'logoImages': [{
'asset': {
'xsi_type': 'ImageAsset',
'assetId': UploadImageAsset(client, 'https://goo.gl/mtt54n')
}
}]
}
# Create ad group ad.
ad_group_ad = {
'adGroupId': ad_group_id,
'ad': multi_asset_responsive_display_ad,
# Optional.
'status': 'PAUSED'
}
# Add ad.
ads = ad_group_ad_service.mutate([
{'operator': 'ADD', 'operand': ad_group_ad}
])
# Display results.
if 'value' in ads:
for ad in ads['value']:
print ('Added new responsive display ad with ID "%d" '
'and long headline "%s".'
% (ad['ad']['id'], ad['ad']['longHeadline']['asset']['assetText']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| Aloomaio/googleads-python-lib | examples/adwords/v201809/advanced_operations/add_multi_asset_responsive_display_ad.py | Python | apache-2.0 | 5,311 |
# Generated by Django 2.2.13 on 2020-06-12 06:49

import diventi.accounts.models
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0330_auto_20200612_0843'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
| flavoi/diventi | diventi/accounts/migrations/0331_auto_20200612_0849.py | Python | apache-2.0 | 453 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
class osloginCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'delete_posix_account': ('name', ),
'delete_ssh_public_key': ('name', ),
'get_login_profile': ('name', 'project_id', 'system_id', ),
'get_ssh_public_key': ('name', ),
'import_ssh_public_key': ('parent', 'ssh_public_key', 'project_id', ),
'update_ssh_public_key': ('name', 'ssh_public_key', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=osloginCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the oslogin client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| googleapis/python-oslogin | scripts/fixup_oslogin_v1_keywords.py | Python | apache-2.0 | 6,257 |
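A minimal usage sketch for the fixup script above (illustrative only, not part of the repository; the directory names are hypothetical and the module is assumed to be importable from the working directory). This has the same effect as running the CLI with -d/--input-directory and -o/--output-directory:

import pathlib

from fixup_oslogin_v1_keywords import fix_files

# Preconditions from the docstring: in_dir exists, out_dir exists and is empty.
in_dir = pathlib.Path("old_client_code")
out_dir = pathlib.Path("fixed_client_code")
fix_files(in_dir, out_dir)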
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-args,missing-docstring,g-doc-return-or-yield,g-space-before-docstring-summary,unused-argument,g-short-docstring-punctuation, g-no-space-after-docstring-summary
"""Utilities to create, read, write tf.Examples."""
import functools
import random
import numpy as np
import tensorflow.compat.v1 as tf
from REDACTED.minigo import bigtable_input
from REDACTED.minigo import coords
from REDACTED.minigo import dual_net
from REDACTED.minigo import features as features_lib
from REDACTED.minigo import go
from REDACTED.minigo import sgf_wrapper
from REDACTED.minigo import symmetries
TF_RECORD_CONFIG = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.ZLIB)
def _one_hot(index):
onehot = np.zeros([go.N * go.N + 1], dtype=np.float32)
onehot[index] = 1
return onehot
def make_tf_example(features, pi, value):
"""
Args:
features: [N, N, FEATURE_DIM] nparray of uint8
pi: [N * N + 1] nparray of float32
value: float
"""
return tf.train.Example(
features=tf.train.Features(
feature={
'x':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[features.tostring()])),
'pi':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[pi.tostring()])),
'outcome':
tf.train.Feature(
float_list=tf.train.FloatList(value=[value]))
}))
def write_tf_examples(filename, tf_examples, serialize=True):
"""
Args:
filename: Where to write tf.records
tf_examples: An iterable of tf.Example
serialize: whether to serialize the examples.
"""
with tf.python_io.TFRecordWriter(
filename, options=TF_RECORD_CONFIG) as writer:
for ex in tf_examples:
if serialize:
writer.write(ex.SerializeToString())
else:
writer.write(ex)
def batch_parse_tf_example(batch_size, layout, example_batch):
"""
Args:
batch_size: batch size
layout: 'nchw' or 'nhwc'
example_batch: a batch of tf.Example
Returns:
A tuple (feature_tensor, dict of output tensors)
"""
planes = dual_net.get_features_planes()
features = {
'x': tf.FixedLenFeature([], tf.string),
'pi': tf.FixedLenFeature([], tf.string),
'outcome': tf.FixedLenFeature([], tf.float32),
}
parsed = tf.parse_example(example_batch, features)
x = tf.decode_raw(parsed['x'], tf.uint8)
x = tf.cast(x, tf.float32)
if layout == 'nhwc':
shape = [batch_size, go.N, go.N, planes]
else:
shape = [batch_size, planes, go.N, go.N]
x = tf.reshape(x, shape)
pi = tf.decode_raw(parsed['pi'], tf.float32)
pi = tf.reshape(pi, [batch_size, go.N * go.N + 1])
outcome = parsed['outcome']
outcome.set_shape([batch_size])
return x, {'pi_tensor': pi, 'value_tensor': outcome}
def read_tf_records(batch_size, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None, interleave=True,
filter_amount=1.0):
"""
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: 1)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: whether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
"""
if shuffle_examples and not shuffle_buffer_size:
raise ValueError('Must set shuffle buffer size if shuffling examples')
tf_records = list(tf_records)
if shuffle_records:
random.shuffle(tf_records)
record_list = tf.data.Dataset.from_tensor_slices(tf_records)
# compression_type here must agree with write_tf_examples
map_func = functools.partial(
tf.data.TFRecordDataset,
buffer_size=8 * 1024 * 1024,
compression_type='ZLIB')
if interleave:
# cycle_length = how many tfrecord files are read in parallel
# The idea is to shuffle both the order of the files being read,
# and the examples being read from the files.
dataset = record_list.apply(
tf.data.experimental.parallel_interleave(
map_func, cycle_length=64, sloppy=True))
else:
dataset = record_list.flat_map(map_func)
if filter_amount < 1.0:
dataset = dataset.filter(lambda _: tf.random_uniform([]) < filter_amount)
dataset = dataset.repeat(num_repeats)
if shuffle_examples:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.batch(batch_size)
return dataset
def _random_rotation(feature_layout, x_tensor, outcome_tensor):
pi_tensor = outcome_tensor['pi_tensor']
if feature_layout == 'nhwc':
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nhwc(
x_tensor, pi_tensor)
else:
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nchw(
x_tensor, pi_tensor)
outcome_tensor['pi_tensor'] = pi_rot_tensor
return x_rot_tensor, outcome_tensor
def get_input_tensors(batch_size,
feature_layout,
tf_records,
num_repeats=1,
shuffle_records=True,
shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05,
random_rotation=True):
"""Read tf.Records and prepare them for ingestion by dual_net.
See `read_tf_records` for parameter documentation.
Returns a dict of tensors (see return value of batch_parse_tf_example)
"""
print('Reading tf_records from {} inputs'.format(len(tf_records)))
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
interleave=False)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout), batch_size))
return dataset.make_one_shot_iterator().get_next()
def get_tpu_input_tensors(batch_size,
feature_layout,
tf_records,
num_repeats=1,
shuffle_records=True,
shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05,
random_rotation=True):
# TPU training reads sequential golden chunks to simplify preprocessing and
# improve reproducibility.
assert len(tf_records) < 101, 'Use example_buffer to build a golden_chunk'
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
interleave=False)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
# TODO(sethtroisi@): Unify
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout),
batch_size,
drop_remainder=True))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def get_tpu_bt_input_tensors(games,
games_nr,
batch_size,
feature_layout,
num_repeats=1,
number_of_games=500e3,
fresh_fraction=0.05,
random_rotation=True):
dataset = bigtable_input.get_unparsed_moves_from_last_n_games(
games, games_nr, number_of_games)
dataset = dataset.repeat(num_repeats)
dataset = dataset.batch(batch_size)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout),
batch_size,
drop_remainder=True))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def make_dataset_from_selfplay(data_extracts):
"""
Returns an iterable of tf.Examples.
Args:
data_extracts: An iterable of (position, pi, result) tuples
"""
f = dual_net.get_features()
tf_examples = (make_tf_example(
features_lib.extract_features(pos, f), pi, result)
for pos, pi, result in data_extracts)
return tf_examples
def make_dataset_from_sgf(sgf_filename, tf_record):
pwcs = sgf_wrapper.replay_sgf_file(sgf_filename)
tf_examples = map(_make_tf_example_from_pwc, pwcs)
write_tf_examples(tf_record, tf_examples)
def _make_tf_example_from_pwc(position_w_context):
f = dual_net.get_features()
features = features_lib.extract_features(position_w_context.position, f)
pi = _one_hot(coords.to_flat(position_w_context.next_move))
value = position_w_context.result
return make_tf_example(features, pi, value)
| mlperf/training_results_v0.7 | Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/preprocessing.py | Python | apache-2.0 | 10,808 |
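An illustrative sketch (not part of the repository above) of building and writing one example with the helpers defined in preprocessing.py. The board size and feature-plane count below are assumptions; the real values come from go.N and dual_net.get_features_planes():

import numpy as np

n, planes = 19, 17  # assumed values for illustration
features = np.zeros([n, n, planes], dtype=np.uint8)
pi = np.zeros([n * n + 1], dtype=np.float32)
pi[0] = 1.0  # a one-hot policy target
example = make_tf_example(features, pi, 1.0)
write_tf_examples('dummy.tfrecord.zz', [example])  # hypothetical output path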
from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^$', views.AP_list, name='AP_list'),
]
| OpenWinCon/OpenWinNet | web-gui/AP/urls.py | Python | apache-2.0 | 119 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
from tempest.test_discover import plugins
def load_tests(loader, tests, pattern):
ext_plugins = plugins.TempestTestPluginManager()
suite = unittest.TestSuite()
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
base_path = os.path.split(base_path)[0]
# Load local tempest tests
for test_dir in ['api', 'scenario']:
full_test_dir = os.path.join(base_path, 'tempest', test_dir)
if not pattern:
suite.addTests(loader.discover(full_test_dir,
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
return suite
# Load any installed plugin tests
for plugin in plugin_load_tests:
test_dir, top_path = plugin_load_tests[plugin]
if not pattern:
suite.addTests(loader.discover(test_dir, top_level_dir=top_path))
else:
suite.addTests(loader.discover(test_dir, pattern=pattern,
top_level_dir=top_path))
return suite
| openstack/tempest | tempest/test_discover/test_discover.py | Python | apache-2.0 | 1,891 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from horizon import tables


class LogManagementTable(tables.DataTable):
    class Meta(object):
        table_actions = ()
        row_actions = ()
| NECCSiPortal/NECCSPortal-dashboard | nec_portal/dashboards/admin/log_management/tables.py | Python | apache-2.0 | 731 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from docker.errors import DockerException, NotFound
from oslo_log import log as logging
from oslo_config import cfg
from docker import Client as DC
from validator.common.exception import CookbookSyntaxException, \
CookbookDeploymentException, \
CookbookInstallException, \
DockerContainerException
from validator.common.i18n import _LW, _LE, _, _LI
LOG = logging.getLogger(__name__)
opts = [
cfg.StrOpt('url'),
cfg.StrOpt('image'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group="clients_docker")
class PuppetClient(object):
"""
Wrapper for Docker client
"""
def __init__(self, url=CONF.clients_docker.url):
self._url = url
self.container = None
try:
self.dc = DC(base_url=self._url)
except DockerException as e:
LOG.error(_LE("Docker client error: %s") % e)
raise e
def cookbook_deployment_test(self, cookbook, image=CONF.clients_docker.image):
"""
Try to process a cookbook and return results
:param cookbook: cookbook to deploy
:param image: image to deploy to
:return: dictionary with results
"""
LOG.debug("Sending cookbook to docker server in %s" % self._url)
b_success = True
msg = {}
self.run_container(image)
# inject custom solo.json/solo.rb file
json_cont = CONF.clients_puppet.cmd_config % cookbook
cmd_inject = CONF.clients_puppet.cmd_inject.format(json_cont)
self.execute_command(cmd_inject)
msg['install'] = self.run_install(cookbook)
b_success &= msg['install']['success']
msg['test'] = self.run_test(cookbook)
b_success &= msg['test']['success']
msg['deploy'] = self.run_deploy(cookbook)
b_success &= msg['deploy']['success']
# check execution output
if b_success:
msg['result'] = {
'success': True,
'result': "Cookbook %s successfully deployed\n" % cookbook
}
else:
msg['result'] = {
'success': False,
'result': "Error deploying cookbook {}\n".format(cookbook)
}
LOG.error(_LW(msg))
self.remove_container()
return msg
def run_deploy(self, cookbook):
""" Run cookbook deployment
:param cookbook: cookbook to deploy
:return msg: dictionary with results and state
"""
try:
# launch execution
cmd_launch = CONF.clients_puppet.cmd_launch
resp_launch = self.execute_command(cmd_launch)
msg = {
'success': True,
'response': resp_launch
}
LOG.debug(_("Launch result: %s") % resp_launch)
if resp_launch is None or "FATAL" in resp_launch:
msg['success'] = False
except Exception as e:
self.remove_container(self.container)
LOG.error(_LW("Cookbook deployment exception %s" % e))
raise CookbookDeploymentException(cookbook=cookbook)
return msg
def run_test(self, cookbook):
""" Test cookbook syntax
:param cookbook: cookbook to test
:return msg: dictionary with results and state
"""
try:
cmd_test = CONF.clients_puppet.cmd_test.format(cookbook)
resp_test = self.execute_command(cmd_test)
msg = {
'success': True,
'response': resp_test
}
for line in resp_test.splitlines():
if "ERROR" in line:
msg['success'] = False
LOG.debug(_("Test result: %s") % resp_test)
except Exception as e:
self.remove_container(self.container)
LOG.error(_LW("Cookbook syntax exception %s" % e))
raise CookbookSyntaxException(cookbook=cookbook)
return msg
def run_install(self, cookbook):
"""Run download and install command
:param cookbook: cookbook to process
:return msg: operation result
"""
try:
cmd_install = CONF.clients_puppet.cmd_install.format(cookbook)
resp_install = self.execute_command(cmd_install)
msg = {
'success': True,
'response': resp_install
}
for line in resp_install.splitlines():
if "ERROR" in line:
msg['success'] = False
LOG.debug(_("Install result: %s") % resp_install)
except Exception as e:
self.remove_container(self.container)
LOG.error(_LW("Chef install exception: %s" % e))
raise CookbookInstallException(cookbook=cookbook)
return msg
def run_container(self, image):
"""Run and start a container based on the given image
:param image: image to run
:return:
"""
contname = "{}-validate".format(image).replace("/", "_")
try:
try:
self.dc.remove_container(contname, force=True)
LOG.info(_LI('Removing old %s container' % contname))
except NotFound:
pass
self.container = self.dc.create_container(
image,
tty=True,
name=contname
).get('Id')
self.dc.start(container=self.container)
except AttributeError as e:
LOG.error(_LW("Error creating container: %s" % e))
raise DockerContainerException(image=image)
def remove_container(self, kill=True):
"""destroy container on exit
:param kill: inhibits removal for testing purposes
"""
self.dc.stop(self.container)
if kill:
self.dc.remove_container(self.container)
def execute_command(self, command):
""" Execute a command in the given container
:param command: bash command to run
:return: execution result
"""
bash_txt = "/bin/bash -c \"{}\"".format(command.replace('"', '\\"'))
exec_txt = self.dc.exec_create(
container=self.container,
cmd=bash_txt
)
return self.dc.exec_start(exec_txt)
| pmverdugo/fiware-validator | validator/clients/puppet_client.py | Python | apache-2.0 | 6,861 |
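An illustrative sketch (not part of the repository above) of driving the client defined in puppet_client.py; the Docker URL, image name and cookbook name are hypothetical, and by default both the URL and the image come from the clients_docker config group:

client = PuppetClient(url='tcp://127.0.0.1:2375')
results = client.cookbook_deployment_test('apache2', image='validator-image')
print(results['result'])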
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
FS Pairtree storage - Reverse lookup
====================================
Conventions used:
From http://www.cdlib.org/inside/diglib/pairtree/pairtreespec.html version 0.1
This is an implementation of a reverse lookup index, using the pairtree path spec to
record the link between a local id and the ids that it corresponds to.
e.g. to denote issn:1234-1234 as being linked to a global id of "uuid:1e4f..."
--> create a file at ROOT_DIR/pairtree_rl/is/sn/+1/23/4-/12/34/uuid+1e4f...
Note that the id it links to is recorded as a filename encoded as per the pairtree spec.
Usage
=====
>>> from pairtree import PairtreeReverseLookup
>>> rl = PairtreeReverseLookup(storage_dir="ROOT")
>>> rl["issn:1234-1234"].append("uuid:1e4f...")
>>> rl["issn:1234-1234"]
["uuid:1e4f"]
>>> rl["issn:1234-1234"] = ["id:1", "uuid:32fad..."]
>>>
Notes
=====
This was created to avoid certain race conditions I had with a pickled dictionary for this index.
A sqllite or similar lookup would also be effective, but this one relies solely on pairtree.
"""
import os
from pairtree.pairtree_path import id_encode, id_decode, id_to_dirpath
PAIRTREE_RL = "pairtree_rl"
class PairtreeReverseLookup_list(object):
def __init__(self, rl_dir, id):
self._rl_dir = rl_dir
self._id = id
self._dirpath = id_to_dirpath(self._id, self._rl_dir)
def _get_ids(self):
if os.path.isdir(self._dirpath):
ids = []
for f in os.listdir(self._dirpath):
ids.append(id_decode(f))
return ids
else:
return []
def _add_id(self, new_id):
if not os.path.exists(self._dirpath):
os.makedirs(self._dirpath)
enc_id = id_encode(new_id)
if not os.path.isfile(enc_id):
with open(os.path.join(self._dirpath, enc_id), "w") as f:
f.write(new_id)
def _exists(self, id):
if os.path.exists(self._dirpath):
return id_encode(id) in os.listdir(self._dirpath)
else:
return False
def append(self, *args):
[self._add_id(x) for x in args if not self._exists(x)]
def __len__(self):
return len(os.listdir(self._dirpath))
def __repr__(self):
return "ID:'%s' -> ['%s']" % (self._id, "','".join(self._get_ids()))
def __str__(self):
return self.__repr__()
def __iter__(self):
for f in self._get_ids():
yield id_decode(f)
class PairtreeReverseLookup(object):
def __init__(self, storage_dir="data"):
self._storage_dir = storage_dir
self._rl_dir = os.path.join(storage_dir, PAIRTREE_RL)
self._init_store()
def _init_store(self):
if not os.path.isdir(self._storage_dir):
os.makedirs(self._storage_dir)
def __getitem__(self, id):
return PairtreeReverseLookup_list(self._rl_dir, id)
def __setitem__(self, id, value):
id_c = PairtreeReverseLookup_list(self._rl_dir, id)
if isinstance(value, list):
id_c.append(*value)
else:
id_c.append(value)
def __delitem__(self, id):
dirpath = id_to_dirpath(id, self._rl_dir)
if os.path.isdir(dirpath):
for f in os.listdir(dirpath):
os.remove(os.path.join(dirpath, f))
os.removedirs(dirpath) # will throw OSError if the dir cannot be removed.
self._init_store() # just in case
| benosteen/pairtree | pairtree/pairtree_revlookup.py | Python | apache-2.0 | 3,270 |
"""The base command."""
from datetime import datetime
from json import dumps
from watches.util import ESClientProducer
class Base(object):
"""A base command."""
TEXT_PLAIN = 'plain/text'
JSON_APPLICATION = 'application/json'
TRANSFORM_PARAM = '--transform'
TIMESTAMP_PARAM = '--timestamp'
TRANSFORM_VALUE_NESTED = 'nested'
TIMESTAMP_KEY = 'timestamp'
_ALL_KEYWORD = '_all'
_ALL_INDICES_PLACEHOLDER = 'indices_summary'
def __init__(self, options, *args, **kwargs):
self.options = options
self.args = args
self.kwargs = kwargs
if self.options["--verbose"]:
print('Supplied options:', dumps(self.options, indent=2, sort_keys=True))
self.es = ESClientProducer.create_client(self.options)
def run(self):
# Not sure if this is the best way to convert localtime to UTC in ISO 8601 format
ts = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
data = self.getData()
# Treat JSON_APPLICATION response differently than TEXT_PLAIN
# JSON data can be injected timestamp and formatted
if self.JSON_APPLICATION == self.getResponseContentType():
if self.options[self.TIMESTAMP_PARAM]:
data[self.TIMESTAMP_KEY] = ts
if self.options[self.TRANSFORM_PARAM]:
data = self.transformData(data)
self.printData(data)
def printData(self, data):
"""Print the data to the output. Depending on content type the data can be formatted differently.
Commands can also override this method if special treatment is needed, for example "just_*" commands.
"""
if self.JSON_APPLICATION == self.getResponseContentType():
if self.options["-l"]:
print(dumps(data, default=lambda x: str(x)))
else:
print(dumps(data, indent=2, sort_keys=False, default=lambda x: str(x)))
else:
print(data)
def getData(self):
raise NotImplementedError('Method getData() not implemented')
def getResponseContentType(self):
"""Response MIME type. By default we assume JSON, make sure to override if needed."""
return self.JSON_APPLICATION
def transformData(self, data):
"""
Data can be transformed before sending to client.
Currently, the only transformation type implemented is 'nested'.
:param data:
:return:
"""
transform = self.options[self.TRANSFORM_PARAM]
if transform:
if transform == self.TRANSFORM_VALUE_NESTED:
return self.transformNestedData(data)
else:
raise RuntimeError('Unsupported transform type')
else:
return data
def transformNestedData(self, data):
"""
If subclass supports 'nested' transformation then it needs to implement
this method and it can use and override provided helper methods.
By default the data is returned unchanged.
:param data:
:return:
"""
return data
def nestedNodes(self, nodes):
"""
Helper method to transform nodes object.
Subclass can override this if the default behaviour does not apply.
:param nodes:
:return:
"""
if isinstance(nodes, dict):
nodesArray = []
for key in nodes:
n = nodes[key]
n['node'] = key
nodesArray.append(n)
return nodesArray
return nodes
def nestedNodesShardsArray(self, nodes):
"""
Helper method to transform nodes shards array.
Subclass can override this if the default behaviour does not apply.
:param nodes:
:return:
"""
if isinstance(nodes, dict):
shardsArray = []
for node in nodes:
if isinstance(nodes[node], list):
for shard in nodes[node]:
# shard['node'] = node
# node value ^^ is already there in the dict
shardsArray.append(shard)
else:
raise RuntimeError('shards not in expected format')
else:
raise RuntimeError('shards not in expected format')
return shardsArray
def nestedIndices(self, indices):
"""
Helper method to transform indices object.
Subclass can override this if the default behaviour does not apply.
:param indices:
:return:
"""
if isinstance(indices, dict):
indicesArray = []
for key in indices:
i = indices[key]
i['index'] = key
indicesArray.append(i)
return indicesArray
else:
return indices
def nestedShards(self, shards):
"""
Helper method to transform shards object.
Subclass can override this if the default behaviour does not apply.
:param shards:
:return:
"""
if isinstance(shards, dict):
shardsArray = []
for key in shards:
s = shards[key]
# convert shard id to number (this is how other admin REST APIs represent it)
s['shard'] = int(key)
shardsArray.append(s)
return shardsArray
else:
return shards
def nestedShardsArray(self, shards):
"""
Helper method to transform shards array.
This is useful in case REST API returns shards data in an array.
:param shards:
:return:
"""
shardsArray = []
if isinstance(shards, dict):
for key in shards:
if isinstance(shards[key], list):
for shard in shards[key]:
shard['shard'] = int(key)
shardsArray.append(shard)
else:
raise RuntimeError('shards not in expected format')
else:
raise RuntimeError('shards not in expected format')
return shardsArray
def nestedIndicesAndShards(self, indices):
"""
Helper method to transform indices and shards.
This method is designed for cases where index contains 'shards' key as the top level field.
:param indices:
:return:
"""
indices = self.nestedIndices(indices)
for index in indices:
if isinstance(index, dict):
if 'shards' in index:
index['shards'] = self.nestedShards(index['shards'])
return indices
def check_filter_path(self, args):
if self.options['--filter_path'] and self.options["--filter_path"] is not None and len(self.options["--filter_path"]) > 0:
args.update({
'filter_path': self.options['--filter_path']
})
| ViaQ/watches-cli | watches/commands/base.py | Python | apache-2.0 | 7,002 |
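An illustrative sketch (not part of the repository above) of a concrete command built on the Base class from base.py. The command name and the cluster-health call are assumptions; getData() only has to return data matching getResponseContentType(), which is JSON by default:

class ClusterHealth(Base):
    """Hypothetical command returning cluster health as JSON."""

    def getData(self):
        # self.es is the Elasticsearch client created in Base.__init__
        return self.es.cluster.health()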
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# swift documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 3 17:01:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
from swift import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'oslosphinx',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Swift Release Notes'
copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'swift v2.10.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SwiftReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# # The paper size ('letterpaper' or 'a4paper').
# #
# # 'papersize': 'letterpaper',
# # The font size ('10pt', '11pt' or '12pt').
# #
# # 'pointsize': '10pt',
# # Additional stuff for the LaTeX preamble.
# #
# # 'preamble': '',
# # Latex figure (float) alignment
# #
# # 'figure_align': 'htbp',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# (master_doc, 'swift.tex', u'swift Documentation',
# u'swift', 'manual'),
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# (master_doc, 'swift', u'swift Documentation',
# [author], 1)
# ]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# (master_doc, 'swift', u'swift Documentation',
# author, 'swift', 'One line description of project.',
# 'Miscellaneous'),
# ]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
locale_dirs = ['locale/']
| redbo/swift | releasenotes/source/conf.py | Python | apache-2.0 | 10,523 |
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Symplicity.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| sbelskie/symplicity | manage.py | Python | apache-2.0 | 253 |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants_test.backend.native.tasks.native_task_test_base import (
NativeCompileTestMixin,
NativeTaskTestBase,
)
class LinkSharedLibrariesTest(NativeTaskTestBase, NativeCompileTestMixin):
@classmethod
def task_type(cls):
return LinkSharedLibraries
def test_caching(self):
cpp = self.create_simple_cpp_library(ctypes_native_library=NativeArtifact(lib_name="test"))
cpp_compile_task_type = self.synthesize_task_subtype(CppCompile, "cpp_compile_scope")
context = self.prepare_context_for_compile(
target_roots=[cpp],
for_task_types=[cpp_compile_task_type],
options={"libc": {"enable_libc_search": True}},
)
cpp_compile = cpp_compile_task_type(
context, os.path.join(self.pants_workdir, "cpp_compile")
)
cpp_compile.execute()
link_shared_libraries = self.create_task(context)
link_shared_libraries.execute()
link_shared_libraries.execute()
| tdyas/pants | tests/python/pants_test/backend/native/tasks/test_link_shared_libraries.py | Python | apache-2.0 | 1,358 |
# -*- coding: utf-8 -*-
import sys
import pymongo
from flask import Flask
# Fix messy code
reload(sys)
sys.setdefaultencoding('utf-8')
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config')
app.config.from_pyfile('config.py')
client = pymongo.MongoClient(app.config['DB_URI'])
db = client[app.config['DB_NAME']]
collection = db[app.config['DB_COLLECTION']]
from app import views
| ruter/WeChat-img-Search | app/__init__.py | Python | apache-2.0 | 418 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for suggestions."""
from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import suggestion_services
from core.platform import models
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class SuggestionHandler(base.BaseHandler):
""""Handles operations relating to suggestions."""
@acl_decorators.can_suggest_changes
def post(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
suggestion_services.create_suggestion(
self.payload.get('suggestion_type'),
self.payload.get('target_type'), self.payload.get('target_id'),
self.payload.get('target_version_at_submission'),
self.user_id, self.payload.get('change_cmd'),
self.payload.get('description'),
self.payload.get('final_reviewer_id'))
self.render_json(self.values)
class SuggestionToExplorationActionHandler(base.BaseHandler):
"""Handles actions performed on suggestions to explorations."""
ACTION_TYPE_ACCEPT = 'accept'
ACTION_TYPE_REJECT = 'reject'
# TODO (nithesh): Add permissions for users with enough scores to review
# Will be added as part of milestone 2 of the generalized review system
# project.
@acl_decorators.can_edit_exploration
def put(self, exploration_id, suggestion_id):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
if len(suggestion_id.split('.')) != 3:
raise self.InvalidInputException('Invalid format for suggestion_id.'
' It must contain 3 parts'
' separated by \'.\'')
if suggestion_id.split('.')[0] != 'exploration':
raise self.InvalidInputException('This handler allows actions only'
' on suggestions to explorations.')
if suggestion_id.split('.')[1] != exploration_id:
raise self.InvalidInputException('The exploration id provided does '
'not match the exploration id '
'present as part of the '
'suggestion_id')
action = self.payload.get('action')
suggestion = suggestion_services.get_suggestion_by_id(suggestion_id)
if action == self.ACTION_TYPE_ACCEPT:
suggestion_services.accept_suggestion(
suggestion, self.user_id, self.payload.get('commit_message'),
self.payload.get('review_message'))
elif action == self.ACTION_TYPE_REJECT:
suggestion_services.reject_suggestion(
suggestion, self.user_id, self.payload.get('review_message'))
else:
raise self.InvalidInputException('Invalid action.')
self.render_json(self.values)
class SuggestionListHandler(base.BaseHandler):
"""Handles list operations on suggestions."""
LIST_TYPE_AUTHOR = 'author'
LIST_TYPE_ID = 'id'
LIST_TYPE_REVIEWER = 'reviewer'
LIST_TYPE_STATUS = 'status'
LIST_TYPE_SUGGESTION_TYPE = 'type'
LIST_TYPE_TARGET_ID = 'target'
LIST_TYPES_TO_SERVICES_MAPPING = {
LIST_TYPE_AUTHOR: suggestion_services.get_suggestions_by_author,
LIST_TYPE_ID: suggestion_services.get_suggestion_by_id,
LIST_TYPE_REVIEWER: suggestion_services.get_suggestions_reviewed_by,
LIST_TYPE_STATUS: suggestion_services.get_suggestions_by_status,
LIST_TYPE_SUGGESTION_TYPE: suggestion_services.get_suggestion_by_type,
LIST_TYPE_TARGET_ID: suggestion_services.get_suggestions_by_target_id
}
PARAMS_FOR_LIST_TYPES = {
LIST_TYPE_AUTHOR: ['author_id'],
LIST_TYPE_ID: ['suggestion_id'],
LIST_TYPE_REVIEWER: ['reviewer_id'],
LIST_TYPE_STATUS: ['status'],
LIST_TYPE_SUGGESTION_TYPE: ['suggestion_type'],
LIST_TYPE_TARGET_ID: ['target_type', 'target_id']
}
def get_params_from_request(self, request, list_type):
return [request.get(param_name)
for param_name in self.PARAMS_FOR_LIST_TYPES[list_type]]
@acl_decorators.open_access
def get(self):
if not constants.USE_NEW_SUGGESTION_FRAMEWORK:
raise self.PageNotFoundException
list_type = self.request.get('list_type')
if list_type not in self.LIST_TYPES_TO_SERVICES_MAPPING:
raise self.InvalidInputException('Invalid list type.')
params = self.get_params_from_request(self.request, list_type)
suggestions = self.LIST_TYPES_TO_SERVICES_MAPPING[list_type](*params)
# When querying by ID, only a single suggestion is retrieved, so we make
# it a list.
if list_type == self.LIST_TYPE_ID:
suggestions = [suggestions]
self.values.update({'suggestions': [s.to_dict() for s in suggestions]})
self.render_json(self.values)
| AllanYangZhou/oppia | core/controllers/suggestion.py | Python | apache-2.0 | 5,721 |
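An illustrative sketch (not part of the repository above) of the payload fields SuggestionHandler.post() reads when creating a suggestion; the key names come from the handler above, while the values are hypothetical:

example_payload = {
    'suggestion_type': 'edit_exploration_state_content',  # hypothetical type
    'target_type': 'exploration',
    'target_id': 'exp1',
    'target_version_at_submission': 1,
    'change_cmd': {},
    'description': 'Fix a typo in the first card.',
    'final_reviewer_id': None,
}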
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.user_api_token import UserApiToken # noqa: E501
from wavefront_api_client.rest import ApiException
class TestUserApiToken(unittest.TestCase):
"""UserApiToken unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserApiToken(self):
"""Test UserApiToken"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.user_api_token.UserApiToken() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| wavefrontHQ/python-client | test/test_user_api_token.py | Python | apache-2.0 | 1,268 |
# -*- coding: utf-8 -*-
"""
Thai Grapheme-to-Phoneme (Thai G2P)
GitHub : https://github.com/wannaphong/thai-g2p
"""
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pythainlp.corpus import get_corpus_path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_MODEL_NAME = "thai-g2p"
class ThaiG2P:
"""
Latin transliteration of Thai words, using International Phonetic Alphabet
"""
def __init__(self):
# get the model, will download if it's not available locally
self.__model_filename = get_corpus_path(_MODEL_NAME)
loader = torch.load(self.__model_filename, map_location=device)
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Restore the model and construct the encoder and decoder.
self._encoder = Encoder(INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
self._decoder = AttentionDecoder(
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
)
self._network = Seq2Seq(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
).to(device)
self._network.load_state_dict(loader["model_state_dict"])
self._network.eval()
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for PyTorch.
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
tensor = torch.tensor(idxs, dtype=torch.long)
return tensor.to(device)
def g2p(self, text: str) -> str:
"""
:param str text: Thai text to be romanized
:return: English (more or less) text that spells out how the Thai text
should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).view(1, -1)
input_length = [len(text) + 1]
target_tensor_logits = self._network(
input_tensor, input_length, None, 0
)
        # If the seq2seq model predicts <end> as the very first token,
        # target_tensor_logits is empty, i.e. torch.Size([0]).
if target_tensor_logits.size(0) == 0:
target = ["<PAD>"]
else:
target_tensor = (
torch.argmax(target_tensor_logits.squeeze(1), 1)
.cpu()
.detach()
.numpy()
)
target = [self._ix_to_target_char[t] for t in target_tensor]
return "".join(target)
class Encoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True,
)
self.dropout = nn.Dropout(dropout)
def forward(self, sequences, sequences_lengths):
# sequences: (batch_size, sequence_length=MAX_LENGTH)
# sequences_lengths: (batch_size)
batch_size = sequences.size(0)
self.hidden = self.init_hidden(batch_size)
sequences_lengths = np.sort(sequences_lengths)[::-1]
index_sorted = np.argsort(
-sequences_lengths
        )  # use negation to sort in descending order
        index_unsort = np.argsort(index_sorted)  # indices to restore original order
index_sorted = torch.from_numpy(index_sorted)
sequences = sequences.index_select(0, index_sorted.to(device))
sequences = self.character_embedding(sequences)
sequences = self.dropout(sequences)
sequences_packed = nn.utils.rnn.pack_padded_sequence(
sequences, sequences_lengths.copy(), batch_first=True
)
sequences_output, self.hidden = self.rnn(sequences_packed, self.hidden)
sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
sequences_output, batch_first=True
)
index_unsort = torch.from_numpy(index_unsort).to(device)
sequences_output = sequences_output.index_select(
0, index_unsort.clone().detach()
)
return sequences_output, self.hidden
def init_hidden(self, batch_size):
h_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
c_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
return (h_0, c_0)
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == "general":
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == "concat":
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, encoder_outputs, mask):
# Calculate energies for each encoder output
if self.method == "dot":
attn_energies = torch.bmm(
encoder_outputs, hidden.transpose(1, 2)
).squeeze(2)
elif self.method == "general":
attn_energies = self.attn(
encoder_outputs.view(-1, encoder_outputs.size(-1))
) # (batch_size * sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies.view(*encoder_outputs.size()),
hidden.transpose(1, 2),
).squeeze(
2
) # (batch_size, sequence_len)
elif self.method == "concat":
attn_energies = self.attn(
torch.cat(
(hidden.expand(*encoder_outputs.size()), encoder_outputs),
2,
)
) # (batch_size, sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies,
self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),
).squeeze(2)
attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies, 1)
class AttentionDecoder(nn.Module):
def __init__(
self, vocabulary_size, embedding_size, hidden_size, dropout=0.5
):
"""Constructor"""
super(AttentionDecoder, self).__init__()
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(
vocabulary_size, embedding_size
)
self.rnn = nn.LSTM(
input_size=embedding_size + self.hidden_size,
hidden_size=hidden_size,
bidirectional=False,
batch_first=True,
)
self.attn = Attn(method="general", hidden_size=self.hidden_size)
self.linear = nn.Linear(hidden_size, vocabulary_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input_character, last_hidden, encoder_outputs, mask):
""""Defines the forward computation of the decoder"""
# input_character: (batch_size, 1)
# last_hidden: (batch_size, hidden_dim)
# encoder_outputs: (batch_size, sequence_len, hidden_dim)
# mask: (batch_size, sequence_len)
hidden = last_hidden.permute(1, 0, 2)
attn_weights = self.attn(hidden, encoder_outputs, mask)
context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
context_vector = torch.sum(context_vector, dim=1)
context_vector = context_vector.unsqueeze(1)
embedded = self.character_embedding(input_character)
embedded = self.dropout(embedded)
rnn_input = torch.cat((context_vector, embedded), -1)
output, hidden = self.rnn(rnn_input)
output = output.view(-1, output.size(2))
x = self.linear(output)
return x, hidden[0], attn_weights
class Seq2Seq(nn.Module):
def __init__(
self,
encoder,
decoder,
target_start_token,
target_end_token,
max_length,
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
assert encoder.hidden_size == decoder.hidden_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def forward(
self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5
):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.size(0)
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
target_vocab_size = self.decoder.vocabulary_size
outputs = torch.zeros(max_len, batch_size, target_vocab_size).to(
device
)
if target_seq is None:
assert teacher_forcing_ratio == 0, "Must be zero during inference"
inference = True
else:
inference = False
encoder_outputs, encoder_hidden = self.encoder(
source_seq, source_seq_len
)
decoder_input = (
torch.tensor([[start_token] * batch_size])
.view(batch_size, 1)
.to(device)
)
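        # Concatenate the bidirectional encoder's final forward and backward
        # hidden states to form the initial hidden state of the
        # (unidirectional) decoder.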
encoder_hidden_h_t = torch.cat(
[encoder_hidden[0][0], encoder_hidden[0][1]], dim=1
).unsqueeze(dim=0)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.size(1)
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden, _ = self.decoder(
decoder_input, decoder_hidden, encoder_outputs, mask
)
topv, topi = decoder_output.topk(1)
outputs[di] = decoder_output.to(device)
teacher_force = random.random() < teacher_forcing_ratio
decoder_input = (
target_seq[:, di].reshape(batch_size, 1)
if teacher_force
else topi.detach()
)
if inference and decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_G2P = ThaiG2P()
def transliterate(text: str) -> str:
global _THAI_G2P
return _THAI_G2P.g2p(text)
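# Minimal usage sketch (not part of the original module): the public entry
# point is transliterate(); it assumes the "thai-g2p" model weights can be
# fetched via get_corpus_path(). The exact IPA string depends on the trained
# model, so the output shown below is illustrative only.
#
#     >>> from pythainlp.transliterate.thaig2p import transliterate
#     >>> transliterate("ทดสอบ")  # -> an IPA string such as "tʰ o t̚ . s ɔː p̚"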
| PyThaiNLP/pythainlp | pythainlp/transliterate/thaig2p.py | Python | apache-2.0 | 11,448 |
from changes.config import db
from changes.models.project import ProjectOption
from changes.models.snapshot import SnapshotStatus
from changes.testutils import APITestCase
class SnapshotListTest(APITestCase):
def test_simple(self):
project_1 = self.create_project()
build_1 = self.create_build(project_1)
snapshot_1 = self.create_snapshot(
project=project_1, status=SnapshotStatus.active, build=build_1)
plan_1 = self.create_plan(project_1)
image_1 = self.create_snapshot_image(snapshot_1, plan_1)
project_2 = self.create_project()
build_2 = self.create_build(project_2)
snapshot_2 = self.create_snapshot(
project=project_2, status=SnapshotStatus.invalidated, build=build_2)
plan_2 = self.create_plan(project_2)
image_2 = self.create_snapshot_image(snapshot_2, plan_1)
image_3 = self.create_snapshot_image(snapshot_2, plan_2)
db.session.add(ProjectOption(
project=project_2,
name='snapshot.current',
value=snapshot_2.id.hex,
))
db.session.commit()
path = '/api/0/snapshots/?state='
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 2
assert data[0]['id'] == snapshot_2.id.hex
assert data[0]['isActive']
assert len(data[0]['images']) == 2
assert data[0]['images'][0]['id'] == image_2.id.hex
assert data[0]['images'][1]['id'] == image_3.id.hex
assert data[1]['id'] == snapshot_1.id.hex
assert not data[1]['isActive']
assert len(data[1]['images']) == 1
assert data[1]['images'][0]['id'] == image_1.id.hex
path = '/api/0/snapshots/?state=valid'
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['id'] == snapshot_1.id.hex
path = '/api/0/snapshots/?state=invalid'
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['id'] == snapshot_2.id.hex
| dropbox/changes | tests/changes/api/test_snapshot_index.py | Python | apache-2.0 | 2,245 |
"""
Github Authentication
"""
import httplib2
from django.conf import settings
from django.core.mail import send_mail
from oauth2client.client import OAuth2WebServerFlow
from helios_auth import utils
# some parameters to indicate that status updating is not possible
STATUS_UPDATES = False
# display tweaks
LOGIN_MESSAGE = "Log in with GitHub"
def get_flow(redirect_url=None):
return OAuth2WebServerFlow(
client_id=settings.GH_CLIENT_ID,
client_secret=settings.GH_CLIENT_SECRET,
scope='read:user user:email',
auth_uri="https://github.com/login/oauth/authorize",
token_uri="https://github.com/login/oauth/access_token",
redirect_uri=redirect_url,
)
def get_auth_url(request, redirect_url):
flow = get_flow(redirect_url)
request.session['gh_redirect_uri'] = redirect_url
return flow.step1_get_authorize_url()
def get_user_info_after_auth(request):
redirect_uri = request.session['gh_redirect_uri']
del request.session['gh_redirect_uri']
flow = get_flow(redirect_uri)
if 'code' not in request.GET:
return None
code = request.GET['code']
credentials = flow.step2_exchange(code)
http = httplib2.Http(".cache")
http = credentials.authorize(http)
(_, content) = http.request("https://api.github.com/user", "GET")
response = utils.from_json(content.decode('utf-8'))
user_id = response['login']
user_name = response['name']
(_, content) = http.request("https://api.github.com/user/emails", "GET")
response = utils.from_json(content.decode('utf-8'))
user_email = None
for email in response:
if email['verified'] and email['primary']:
user_email = email['email']
break
if not user_email:
raise Exception("email address with GitHub not verified")
return {
'type': 'github',
'user_id': user_id,
'name': '%s (%s)' % (user_id, user_name),
'info': {'email': user_email},
'token': {},
}
def do_logout(user):
return None
def update_status(token, message):
pass
def send_message(user_id, name, user_info, subject, body):
send_mail(
subject,
body,
settings.SERVER_EMAIL,
["%s <%s>" % (user_id, user_info['email'])],
fail_silently=False,
)
def check_constraint(eligibility, user_info):
pass
#
# Election Creation
#
def can_create_election(user_id, user_info):
return True
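# Rough usage sketch (illustrative, not from this module): a login view first
# redirects the user to GitHub via get_auth_url(), and the OAuth callback view
# then calls get_user_info_after_auth() with the request carrying ?code=...
# The view wiring below is hypothetical.
#
#     login view:     return HttpResponseRedirect(get_auth_url(request, redirect_url))
#     callback view:  user = get_user_info_after_auth(request)
#                     # -> {'type': 'github', 'user_id': ..., 'name': ...,
#                     #     'info': {'email': ...}, 'token': {}}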
| benadida/helios-server | helios_auth/auth_systems/github.py | Python | apache-2.0 | 2,315 |
"""Tests for the canary component."""
from unittest.mock import MagicMock, PropertyMock
from canary.api import SensorType
def mock_device(device_id, name, is_online=True, device_type_name=None):
"""Mock Canary Device class."""
device = MagicMock()
type(device).device_id = PropertyMock(return_value=device_id)
type(device).name = PropertyMock(return_value=name)
type(device).is_online = PropertyMock(return_value=is_online)
type(device).device_type = PropertyMock(
return_value={"id": 1, "name": device_type_name}
)
return device
def mock_location(
location_id, name, is_celsius=True, devices=None, mode=None, is_private=False
):
"""Mock Canary Location class."""
location = MagicMock()
type(location).location_id = PropertyMock(return_value=location_id)
type(location).name = PropertyMock(return_value=name)
type(location).is_celsius = PropertyMock(return_value=is_celsius)
type(location).is_private = PropertyMock(return_value=is_private)
type(location).devices = PropertyMock(return_value=devices or [])
type(location).mode = PropertyMock(return_value=mode)
return location
def mock_mode(mode_id, name):
"""Mock Canary Mode class."""
mode = MagicMock()
type(mode).mode_id = PropertyMock(return_value=mode_id)
type(mode).name = PropertyMock(return_value=name)
type(mode).resource_url = PropertyMock(return_value=f"/v1/modes/{mode_id}")
return mode
def mock_reading(sensor_type, sensor_value):
"""Mock Canary Reading class."""
reading = MagicMock()
type(reading).sensor_type = SensorType(sensor_type)
type(reading).value = PropertyMock(return_value=sensor_value)
return reading
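# Illustrative example (not part of the original helpers): a test can compose
# these factories to build a fake Canary setup. The IDs, names and values are
# made up, and valid sensor type names come from canary.api.SensorType.
#
#     device = mock_device(20, "Dining Room", is_online=True,
#                          device_type_name="Canary Pro")
#     location = mock_location(100, "Home", is_celsius=True, devices=[device])
#     reading = mock_reading("temperature", "21.1")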
| tchellomello/home-assistant | tests/components/canary/__init__.py | Python | apache-2.0 | 1,712 |
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from mock import call
from jimmy.tests import base
modules_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
jimmy_dir = os.path.join(os.path.dirname(modules_dir))
jenkins_schema_path = os.path.join(modules_dir, 'jenkins_configuration', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestJenkinsConfiguration(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(jenkins_schema_path, 'r') as f:
mock_jenkins_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
jenkins_schema_path: mock_jenkins_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' configuration:',
' admin_email: CI <admin@example.com>',
' agent_tcp_port: 50000',
' location_url: http://example.com/jenkins/',
' markup_format: raw-html',
' num_of_executors: 2',
' scm_checkout_retry_count: 1'
])
})
sys.path.insert(0, modules_dir)
import jenkins_configuration
import read_source
sys.path.pop(0)
mock_modules.return_value = [jenkins_configuration, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setAdminEmail',
"'CI <admin@example.com>'"],
shell=False),
call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setAgentTcpPort',
'50000'],
shell=False),
call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setLocationUrl',
"'http://example.com/jenkins/'"],
shell=False),
call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setMarkupFormatter',
'raw-html'],
shell=False),
call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setNumExecutors',
'2'],
shell=False),
call(['java', '-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'jenkins_configuration/resources/jenkins.groovy',
'setScmCheckoutRetryCount',
'1'],
shell=False)]
mock_subp.assert_has_calls(calls, any_order=True)
assert 6 == mock_subp.call_count, "subprocess call should be equal to 6"
class TestJenkinsSchema(object):
def setup_method(self, method):
with open(jenkins_schema_path, 'r') as f:
mock_jenkins_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({jenkins_schema_path: mock_jenkins_schema})
self.schema = yaml_reader.read(jenkins_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'admin_email: CI <admin@example.com>',
'agent_tcp_port: 50000',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: 2',
'scm_checkout_retry_count: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_validation_fail_if_markup_is_not_enum(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: 3',
'agent_tcp_port: 50000',
'admin_email: CI <test@example.com>',
'location_url: http://example.com/jenkins/',
'markup_format: smth',
'num_of_executors: 3'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'smth' is not one of ['plain-text', 'raw-html', 'unsafe']"
def test_validation_fail_if_scm_is_not_int(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: test',
'agent_tcp_port: 50000',
'admin_email: CI <test@example.com>',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: 3'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'integer'"
def test_validation_fail_if_email_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: 3',
'agent_tcp_port: 50000',
'admin_email: 123',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: 3'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_location_url_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: 3',
'agent_tcp_port: 50000',
'admin_email: CI <test@example.com>',
'location_url: 123',
'markup_format: raw-html',
'num_of_executors: 3'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_agent_tcp_port_is_not_int(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: 3',
'agent_tcp_port: test',
'admin_email: CI <test@example.com>',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: 3'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'integer'"
def test_validation_fail_if_num_executors_is_not_int(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'scm_checkout_retry_count: 3',
'agent_tcp_port: 50000',
'admin_email: CI <test@example.com>',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'integer'"
def test_validation_fail_for_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'admin_email: CI <test@example.com>',
'agent_tcp_port: 50000',
'location_url: http://example.com/jenkins/',
'markup_format: raw-html',
'num_of_executors: 3',
'scm_checkout_retry_count: 1',
'test: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
| ibelikov/jimmy | jimmy/modules/jenkins_configuration/tests/test_jenkins_configuration.py | Python | apache-2.0 | 11,065 |
from model.info_contact import Infos
import random
def test_delete_some_contact(app, db, check_ui):
if app.contact.count() == 0:
app.contact.create(Infos(firstname="AAAAA"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(map(app.contact.clean, new_contacts), key=Infos.id_or_max) == sorted(app.contact.get_contact_list(), key=Infos.id_or_max)
| Alex-Chizhov/python_training | home_works/test/test_del_contact.py | Python | apache-2.0 | 595 |
#!/usr/bin/env python
"""Entry point to run app.
Used to launch the REST and Cron servers.
"""
import logging
import io
import os
import flask
from flask_api import status
from google.cloud import storage
from typing import Text
logging.getLogger().setLevel(logging.INFO)
from metrics import base
import env
import metric_plot
import scrapers
app = flask.Flask(__name__)
HISTORY_DAYS = 180
BADGE_COLORS = [
'#EEEEEE',
'indianred',
'orange',
'yellow',
'green',
'forestgreen',
]
def _get_cloud_blob(filename: Text) -> storage.Blob:
client = storage.Client()
bucket = client.get_bucket(env.get('CLOUD_STORAGE_BUCKET'))
return storage.Blob(filename, bucket)
def _save_to_cloud(data: bytes, filename: Text, content_type: Text):
"""Saves data to a Google Cloud Storage blob.
Args:
data: byte-string to store
filename: key under which to store the file in the Cloud Storage bucket
content_type: content type of the file
"""
_get_cloud_blob(filename).upload_from_string(data, content_type=content_type)
def _get_from_cloud(filename: Text) -> bytes:
"""Download data from a Google Cloud Storage blob.
Args:
filename: key under which the file in the Cloud Storage bucket is stored
Returns:
The blob data as a byte-string.
"""
return _get_cloud_blob(filename).download_as_string()
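# Illustrative round trip for the two helpers above (the bucket name comes from
# the CLOUD_STORAGE_BUCKET env var; the filename here is made up):
#
#     _save_to_cloud(png_bytes, 'MyMetric-history-180d.png', 'image/png')
#     data = _get_from_cloud('MyMetric-history-180d.png')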
@app.route('/_cron/scrape/<scrape_target>')
def scrape_latest(scrape_target: Text):
# This header is added to cron requests by GAE, and stripped from any external
# requests. See
# https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
if not flask.request.headers.get('X-Appengine-Cron'):
return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
scrapers.scrape(scrape_target)
return 'Successfully scraped latest %s.' % scrape_target, status.HTTP_200_OK
@app.route('/_cron/recompute/<metric_cls_name>')
def recompute(metric_cls_name: Text):
# This header is added to cron requests by GAE, and stripped from any external
# requests. See
# https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
if not flask.request.headers.get('X-Appengine-Cron'):
return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
try:
metric_cls = base.Metric.get_metric(metric_cls_name)
except KeyError:
logging.error('No active metric found for %s.', metric_cls_name)
return ('No active metric found for %s.' % metric_cls_name,
status.HTTP_404_NOT_FOUND)
logging.info('Recomputing %s.', metric_cls_name)
metric_cls().recompute()
return 'Successfully recomputed %s.' % metric_cls_name, status.HTTP_200_OK
@app.route(
'/_cron/plot_metric_history', defaults={'history_days': HISTORY_DAYS})
@app.route('/_cron/plot_metric_history/<history_days>')
def render_metric_history_plot(history_days: Text):
# This header is added to cron requests by GAE, and stripped from any external
# requests. See
# https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml#validating_cron_requests
if not flask.request.headers.get('X-Appengine-Cron'):
return 'Attempted to access internal endpoint.', status.HTTP_403_FORBIDDEN
history_days = int(history_days)
logging.info('Rendering metric history plots for last %d days', history_days)
for metric_cls in base.Metric.get_active_metrics():
metric = metric_cls()
plotter = metric_plot.MetricHistoryPlotter(
metric, history_days=history_days)
plot_buffer = plotter.plot_metric_history()
_save_to_cloud(plot_buffer.read(),
'%s-history-%dd.png' % (metric.name, history_days),
'image/png')
return 'History plots updated.', status.HTTP_200_OK
@app.route('/api/metrics')
def list_metrics():
try:
results = base.Metric.get_latest().values()
except Exception as error:
    return flask.jsonify({'error': str(error)}), status.HTTP_500_INTERNAL_SERVER_ERROR
return flask.jsonify({'metrics': [metric.serializable for metric in results]
}), status.HTTP_200_OK
@app.route(
'/api/plot/<metric_cls_name>.png', defaults={'history_days': HISTORY_DAYS})
@app.route('/api/plot/<history_days>/<metric_cls_name>.png')
def metric_history_plot(history_days: Text, metric_cls_name: Text):
try:
base.Metric.get_metric(metric_cls_name)
except KeyError:
logging.error('No active metric found for %s.', metric_cls_name)
return ('No active metric found for %s.' %
metric_cls_name), status.HTTP_404_NOT_FOUND
history_days = int(history_days)
plot_bytes = _get_from_cloud('%s-history-%dd.png' %
(metric_cls_name, history_days))
return flask.send_file(io.BytesIO(plot_bytes), mimetype='image/png')
@app.route('/api/badge/<metric_cls_name>')
def metric_badge(metric_cls_name: Text):
"""Provides a response for sheilds.io to render a badge for GitHub.
See https://shields.io/endpoint.
"""
response = {
'schemaVersion': 1,
'color': 'lightgray',
'label': metric_cls_name,
'message': '?',
}
try:
metric = base.Metric.get_latest()[metric_cls_name]
response['color'] = BADGE_COLORS[metric.score.value]
response['label'] = metric.label
response['message'] = metric.formatted_result
except KeyError:
logging.error('No active metric found for %s.', metric_cls_name)
finally:
return flask.jsonify(response), status.HTTP_200_OK
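# For reference, shields.io consumes the JSON built above through its endpoint
# badge type (https://shields.io/endpoint); a successful response looks roughly
# like this (label/message values are illustrative):
#
#     {"schemaVersion": 1, "label": "presubmit latency",
#      "message": "12 min", "color": "green"}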
@app.route('/')
def show_metrics():
metrics = base.Metric.get_latest().values()
return flask.render_template(
'show_metrics.html', github_repo=env.get('GITHUB_REPO'), metrics=metrics)
@app.route('/history', defaults={'history_days': HISTORY_DAYS})
@app.route('/history/<history_days>')
def show_metric_history(history_days: Text):
history_days = int(history_days)
metric_names = [cls.__name__ for cls in base.Metric.get_active_metrics()]
return flask.render_template(
'show_metric_history.html',
github_repo=env.get('GITHUB_REPO'),
metric_names=metric_names,
history_days=history_days)
if __name__ == '__main__':
app.run(port=os.environ.get('PORT', 8080), debug=True)
| ampproject/amp-github-apps | project-metrics/metrics_service/server.py | Python | apache-2.0 | 6,263 |
from django.http import HttpResponse, HttpRequest
from typing import Optional
import ujson
from django.utils.translation import ugettext as _
from zerver.lib.actions import do_mute_topic, do_unmute_topic
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.topic_mutes import topic_is_muted
from zerver.lib.streams import (
access_stream_by_id,
access_stream_by_name,
access_stream_for_unmute_topic_by_id,
access_stream_for_unmute_topic_by_name,
check_for_exactly_one_stream_arg,
)
from zerver.lib.validator import check_int
from zerver.models import get_stream, Stream, UserProfile
def mute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
if stream_name is not None:
(stream, recipient, sub) = access_stream_by_name(user_profile, stream_name)
else:
assert stream_id is not None
(stream, recipient, sub) = access_stream_by_id(user_profile, stream_id)
if topic_is_muted(user_profile, stream.id, topic_name):
return json_error(_("Topic already muted"))
do_mute_topic(user_profile, stream, recipient, topic_name)
return json_success()
def unmute_topic(user_profile: UserProfile,
stream_id: Optional[int],
stream_name: Optional[str],
topic_name: str) -> HttpResponse:
error = _("Topic is not muted")
if stream_name is not None:
stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
else:
assert stream_id is not None
stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
if not topic_is_muted(user_profile, stream.id, topic_name):
return json_error(error)
do_unmute_topic(user_profile, stream, topic_name)
return json_success()
@has_request_variables
def update_muted_topic(request: HttpRequest,
user_profile: UserProfile,
stream_id: Optional[int]=REQ(validator=check_int, default=None),
stream: Optional[str]=REQ(default=None),
topic: str=REQ(),
op: str=REQ()) -> HttpResponse:
check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
if op == 'add':
return mute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
elif op == 'remove':
return unmute_topic(
user_profile=user_profile,
stream_id=stream_id,
stream_name=stream,
topic_name=topic,
)
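# Example request shape (illustrative only; the exact URL routing lives
# elsewhere in the codebase): the handler above expects exactly one of
# `stream` or `stream_id`, plus `topic` and `op` ("add" or "remove"), e.g.
#
#     {"stream_id": 1, "topic": "lunch", "op": "add"}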
| dhcrzf/zulip | zerver/views/muting.py | Python | apache-2.0 | 2,792 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compile utitilies."""
import tensorflow.compat.v2 as tf
from keras import backend
from keras.testing_infra import test_combinations
from keras import losses as losses_mod
from keras import metrics as metrics_mod
from keras.engine import compile_utils
class LossesContainerTest(test_combinations.TestCase):
def test_single_loss(self):
loss_container = compile_utils.LossesContainer('mse')
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
total_loss = loss_container(y_t, y_p)
self.assertTrue(loss_container._built)
self.assertLen(loss_container._losses, 1)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 1.)
self.assertLen(loss_container.metrics, 1)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 1.)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0.)
def test_loss_list(self):
loss_container = compile_utils.LossesContainer(['mse', 'mae'], [1, 0.5])
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(loss_container._output_names, ['output_1', 'output_2'])
self.assertLen(loss_container._losses, 2)
self.assertEqual(total_loss.numpy(), 0.25)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.25)
output_1_metric = loss_container.metrics[1]
self.assertEqual(output_1_metric.name, 'output_1_loss')
self.assertEqual(output_1_metric.result().numpy(), 0)
output_2_metric = loss_container.metrics[2]
self.assertEqual(output_2_metric.name, 'output_2_loss')
self.assertEqual(output_2_metric.result().numpy(), 0.5)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0)
self.assertEqual(output_1_metric.result().numpy(), 0)
self.assertEqual(output_2_metric.result().numpy(), 0)
def test_loss_dict(self):
loss_container = compile_utils.LossesContainer(
{
'out1': 'mse',
'out2': 'mae'
}, {
'out1': 1,
'out2': 0.5
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertLen(loss_container._losses, 2)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.25)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.25)
out1_metric = loss_container.metrics[1]
self.assertEqual(out1_metric.name, 'out1_loss')
self.assertEqual(out1_metric.result().numpy(), 0)
out2_metric = loss_container.metrics[2]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
loss_container.reset_state()
self.assertEqual(loss_metric.result().numpy(), 0)
self.assertEqual(out1_metric.result().numpy(), 0)
self.assertEqual(out2_metric.result().numpy(), 0)
def test_loss_partial_dict_with_output_names(self):
loss_container = compile_utils.LossesContainer(
{'out2': 'mae'}, {'out2': 1.}, output_names=['out1', 'out2'])
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 2)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
out2_metric = loss_container.metrics[1]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
def test_loss_dict_with_nones(self):
loss_container = compile_utils.LossesContainer({
'out1': None,
'out2': 'mae'
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 2)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
out2_metric = loss_container.metrics[1]
self.assertEqual(out2_metric.name, 'out2_loss')
self.assertEqual(out2_metric.result().numpy(), 0.5)
def test_nested_structure(self):
loss_container = compile_utils.LossesContainer(
{
'b': ['mse', None],
'a': 'mae'
}, loss_weights={
'b': [0.5, 0],
'a': 1
})
y_t = {
'b': [tf.ones((10, 1)),
tf.zeros((10, 1))],
'a': tf.zeros((10, 1))
}
y_p = {
'b': [tf.zeros((10, 1)),
tf.zeros((10, 1))],
'a': tf.ones((10, 1))
}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.numpy(), 0.75)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.75)
a_metric = loss_container.metrics[1]
self.assertEqual(a_metric.name, 'a_loss')
self.assertEqual(a_metric.result().numpy(), 0.5)
b_1_metric = loss_container.metrics[2]
self.assertEqual(b_1_metric.name, 'b_1_loss')
self.assertEqual(b_1_metric.result().numpy(), 0.5)
def test_broadcast_single_loss(self):
loss_container = compile_utils.LossesContainer('mse')
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.5)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 0.5)
output_1_metric = loss_container.metrics[1]
self.assertEqual(output_1_metric.name, 'output_1_loss')
self.assertEqual(output_1_metric.result().numpy(), 0.)
output_2_metric = loss_container.metrics[2]
self.assertEqual(output_2_metric.name, 'output_2_loss')
self.assertEqual(output_2_metric.result().numpy(), 0.5)
def test_missing_label_with_no_loss(self):
# It's ok to exclude a label if that label has no
# losses or metrics associated with it.
loss_container = compile_utils.LossesContainer({
'output1': 'mse',
'output3': 'mae'
})
y_p = {
'output1': tf.convert_to_tensor([[0], [1], [2]]),
'output2': tf.convert_to_tensor([[3], [4], [5]]),
'output3': tf.convert_to_tensor([[6], [7], [8]])
}
y_t = {
'output1': tf.convert_to_tensor([[1], [2], [3]]),
'output3': tf.convert_to_tensor([[4], [5], [6]])
}
total_loss = loss_container(y_t, y_p)
self.assertEqual(total_loss.numpy(), 3.)
self.assertLen(loss_container.metrics, 3)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertEqual(loss_metric.result().numpy(), 3.)
output_1_metric = loss_container.metrics[1]
self.assertEqual(output_1_metric.name, 'output1_loss')
self.assertEqual(output_1_metric.result().numpy(), 1.)
output_3_metric = loss_container.metrics[2]
self.assertEqual(output_3_metric.name, 'output3_loss')
self.assertEqual(output_3_metric.result().numpy(), 2.)
def test_mismatched_dtypes(self):
y_t = tf.constant([1, 9, 2, -5], shape=(2, 2))
y_p = tf.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=tf.float32)
def my_mae(labels, preds):
self.assertEqual(labels.dtype, tf.int32)
self.assertEqual(preds.dtype, tf.float32)
labels = tf.cast(labels, preds.dtype)
return backend.mean(tf.abs(preds - labels), axis=-1)
loss_container = compile_utils.LossesContainer(my_mae)
total_loss = loss_container(y_t, y_p)
self.assertEqual(total_loss.dtype, tf.float32)
def test_integer_dtypes(self):
y_t = tf.constant([1, 9, 2, -5], shape=(2, 2))
y_p = tf.constant([4, 8, 12, 8], shape=(2, 2), dtype=tf.int64)
def my_mae(labels, preds):
self.assertEqual(labels.dtype, tf.int64)
self.assertEqual(preds.dtype, tf.int64)
return backend.mean(tf.abs(preds - labels), axis=-1)
loss_container = compile_utils.LossesContainer(my_mae)
total_loss = loss_container(y_t, y_p)
self.assertEqual(total_loss.dtype, tf.int64)
def test_float_dtypes(self):
y_t = tf.constant([1, 9, 2, -5],
shape=(2, 2),
dtype=tf.float32)
y_p = tf.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=tf.float64)
def my_mae(labels, preds):
self.assertEqual(labels.dtype, tf.float64)
self.assertEqual(preds.dtype, tf.float64)
return backend.mean(tf.abs(preds - labels), axis=-1)
loss_container = compile_utils.LossesContainer(my_mae)
total_loss = loss_container(y_t, y_p)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(total_loss.dtype, tf.float64)
def test_loss_masking(self):
loss_container = compile_utils.LossesContainer('mae')
y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
y_p._keras_mask = tf.constant([[1, 0], [1, 0]],
dtype=tf.float32)
total_loss = loss_container(y_t, y_p)
self.assertAlmostEqual(total_loss.numpy(), .25) # sum over batch size
self.assertLen(loss_container.metrics, 1)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertAlmostEqual(loss_metric.result().numpy(), .25)
def test_loss_sample_weight(self):
loss_container = compile_utils.LossesContainer('mae')
y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32)
total_loss = loss_container(y_t, y_p, sample_weight=sw)
# (0 * .2 + 0 * .3 + 1 * .5 + 1 * 0) / 4
self.assertAlmostEqual(total_loss.numpy(), .125)
self.assertLen(loss_container.metrics, 1)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertAlmostEqual(loss_metric.result().numpy(), .125)
def test_loss_masking_sample_weight(self):
loss_container = compile_utils.LossesContainer('mae')
y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32)
y_p._keras_mask = tf.constant([[1, 0], [1, 0]],
dtype=tf.float32)
total_loss = loss_container(y_t, y_p, sample_weight=sw)
# (0 * .2 + 1 * .5) / 4
self.assertAlmostEqual(total_loss.numpy(), .125) # sum over batch size
self.assertLen(loss_container.metrics, 1)
loss_metric = loss_container.metrics[0]
self.assertEqual(loss_metric.name, 'loss')
self.assertAlmostEqual(loss_metric.result().numpy(), .125)
def test_custom_loss_callables(self):
def custom_loss_fn(y_true, y_pred):
return tf.reduce_sum(y_true - y_pred)
class CustomLossClass:
def __call__(self, y_true, y_pred):
return tf.reduce_sum(y_true - y_pred)
loss_container = compile_utils.LossesContainer(
[custom_loss_fn, CustomLossClass()])
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
loss_container(y_t, y_p)
self.assertEqual(loss_container._losses[0].name, 'custom_loss_fn')
self.assertEqual(loss_container._losses[1].name, 'custom_loss_class')
def test_ragged_tensor_output(self):
"""Ensure that ragged tensors can be passed as targets and predictions."""
def custom_loss_fn(y_true, y_pred):
"""MSE supports RaggedTensors directly."""
return losses_mod.mse(y_true, y_pred)
class CustomLossClass(losses_mod.Loss):
"""User defined loss function must implement RaggedTensor support."""
def call(self, y_true, y_pred):
losses = tf.ragged.map_flat_values(
tf.math.squared_difference, y_true, y_pred)
return tf.reduce_mean(losses)
loss_container = compile_utils.LossesContainer(
[custom_loss_fn, CustomLossClass()])
v_t = tf.constant([[3., 4.], [1., 2.], [3., 5.]])
v_p = tf.constant([[3.1, 4.], [1., 2.], [3., 5.]])
y_t = tf.expand_dims(
tf.RaggedTensor.from_row_splits(v_t, [0, 2, 3]), 0)
y_p = tf.expand_dims(
tf.RaggedTensor.from_row_splits(v_p, [0, 2, 3]), 0)
total_loss = loss_container(y_t, y_p)
self.assertIsInstance(total_loss, tf.Tensor)
self.assertEqual(loss_container._losses[0].name, 'custom_loss_fn')
class MetricsContainerTest(test_combinations.TestCase):
def test_single_metric(self):
metric_container = compile_utils.MetricsContainer('mse')
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
metric_container.update_state(y_t, y_p)
self.assertLen(metric_container.metrics, 1)
metric = metric_container.metrics[0]
self.assertEqual(metric.name, 'mse')
self.assertEqual(metric.result().numpy(), 1.)
metric_container.reset_state()
self.assertEqual(metric.result().numpy(), 0.)
def test_list_of_metrics_one_output(self):
metric_container = compile_utils.MetricsContainer(['mse', 'mae'])
y_t, y_p = 2 * tf.ones((10, 5)), tf.zeros((10, 5))
metric_container.update_state(y_t, y_p)
self.assertLen(metric_container.metrics, 2)
mse_metric = metric_container.metrics[0]
self.assertEqual(mse_metric.name, 'mse')
self.assertEqual(mse_metric.result().numpy(), 4.)
mae_metric = metric_container.metrics[1]
self.assertEqual(mae_metric.name, 'mae')
self.assertEqual(mae_metric.result().numpy(), 2.)
metric_container.reset_state()
self.assertEqual(mse_metric.result().numpy(), 0.)
self.assertEqual(mae_metric.result().numpy(), 0.)
def test_list_of_metrics_list_of_outputs(self):
metric_container = compile_utils.MetricsContainer(
metrics=['mse', 'mae'], # Should broadcast to both outputs.
weighted_metrics=['accuracy']) # Should broadcast to both outputs.
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), 2 * tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 6)
mse_metric = metric_container.metrics[0]
self.assertEqual(mse_metric.name, 'output_1_mse')
self.assertEqual(mse_metric.result().numpy(), 0.)
mse_metric = metric_container.metrics[1]
self.assertEqual(mse_metric.name, 'output_1_mae')
self.assertEqual(mse_metric.result().numpy(), 0.)
acc_metric_1 = metric_container.metrics[2]
self.assertEqual(acc_metric_1.name, 'output_1_accuracy')
self.assertEqual(acc_metric_1.result().numpy(), 1.)
self.assertEqual(acc_metric_1._fn, metrics_mod.binary_accuracy)
mae_metric = metric_container.metrics[3]
self.assertEqual(mae_metric.name, 'output_2_mse')
self.assertEqual(mae_metric.result().numpy(), 4.)
mae_metric = metric_container.metrics[4]
self.assertEqual(mae_metric.name, 'output_2_mae')
self.assertEqual(mae_metric.result().numpy(), 2.)
acc_metric_2 = metric_container.metrics[5]
self.assertEqual(acc_metric_2.name, 'output_2_accuracy')
self.assertEqual(acc_metric_2.result().numpy(), 0.)
self.assertEqual(acc_metric_2._fn, metrics_mod.binary_accuracy)
weighted_metrics = metric_container.weighted_metrics
self.assertLen(weighted_metrics, 2)
self.assertEqual(weighted_metrics[0].name, 'output_1_accuracy')
self.assertEqual(weighted_metrics[1].name, 'output_2_accuracy')
unweighted_metrics = metric_container.unweighted_metrics
self.assertLen(unweighted_metrics, 4)
self.assertEqual(unweighted_metrics[0].name, 'output_1_mse')
self.assertEqual(unweighted_metrics[1].name, 'output_1_mae')
self.assertEqual(unweighted_metrics[2].name, 'output_2_mse')
self.assertEqual(unweighted_metrics[3].name, 'output_2_mae')
def test_metric_dict(self):
metric_container = compile_utils.MetricsContainer(
metrics={
'out1': 'mse',
'out2': 'mae'
},
weighted_metrics={
'out1': 'mse',
'out2': 'mae'
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': 2 * tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
mse_metric = metric_container.metrics[0]
self.assertEqual(mse_metric.name, 'out1_mse')
self.assertEqual(mse_metric.result().numpy(), 0.)
weighted_mse_metric = metric_container.metrics[1]
self.assertEqual(weighted_mse_metric.name, 'out1_weighted_mse')
self.assertEqual(weighted_mse_metric.result().numpy(), 0.)
mae_metric = metric_container.metrics[2]
self.assertEqual(mae_metric.name, 'out2_mae')
self.assertEqual(mae_metric.result().numpy(), 2.)
weighted_mae_metric = metric_container.metrics[3]
self.assertEqual(weighted_mae_metric.name, 'out2_weighted_mae')
self.assertEqual(weighted_mae_metric.result().numpy(), 2.)
metric_container.reset_state()
self.assertEqual(mse_metric.result().numpy(), 0.)
self.assertEqual(weighted_mse_metric.result().numpy(), 0.)
self.assertEqual(mae_metric.result().numpy(), 0.)
self.assertEqual(weighted_mae_metric.result().numpy(), 0.)
def test_metric_partial_dict_with_output_names(self):
metric_container = compile_utils.MetricsContainer(
{'out2': 'mae'}, output_names=['out1', 'out2'])
y_t = [tf.ones((10, 1)), tf.zeros((10, 1))]
y_p = [tf.ones((10, 1)), tf.ones((10, 1))]
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 1)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'out2_mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
def test_metric_partial_dict_with_nones(self):
metric_container = compile_utils.MetricsContainer({
'out1': None,
'out2': 'mae'
})
y_t = {'out1': tf.ones((10, 1)), 'out2': tf.zeros((10, 1))}
y_p = {'out1': tf.ones((10, 1)), 'out2': tf.ones((10, 1))}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 1)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'out2_mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
def test_nested_structure(self):
metric_container = compile_utils.MetricsContainer(
metrics={
'b': ['mse', None],
'a': 'mae'
},
weighted_metrics={
'b': [None, None],
'a': 'mse'
})
y_t = {
'b': [2 * tf.ones((10, 1)),
tf.zeros((10, 1))],
'a': tf.zeros((10, 1))
}
y_p = {
'b': [tf.zeros((10, 1)),
tf.zeros((10, 1))],
'a': tf.ones((10, 1))
}
sw = tf.convert_to_tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 3)
a_mae_metric = metric_container.metrics[0]
self.assertEqual(a_mae_metric.name, 'a_mae')
self.assertEqual(a_mae_metric.result().numpy(), 1.)
weighted_a_mae_metric = metric_container.metrics[1]
self.assertEqual(weighted_a_mae_metric.name, 'a_mse')
self.assertEqual(weighted_a_mae_metric.result().numpy(), 1.)
b_1_mse_metric = metric_container.metrics[2]
self.assertEqual(b_1_mse_metric.name, 'b_1_mse')
self.assertEqual(b_1_mse_metric.result().numpy(), 4.)
def test_crossentropy(self):
metric_container = compile_utils.MetricsContainer('crossentropy')
y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.binary_crossentropy)
metric_container = compile_utils.MetricsContainer('crossentropy')
y_t, y_p = tf.ones((10, 1)), tf.ones((10, 20))
self.assertEqual(y_p.shape.as_list()[-1], 20)
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.sparse_categorical_crossentropy)
metric_container = compile_utils.MetricsContainer('crossentropy')
y_t, y_p = tf.ones((10, 20)), tf.ones((10, 20))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.categorical_crossentropy)
def test_accuracy(self):
metric_container = compile_utils.MetricsContainer('accuracy')
y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.binary_accuracy)
metric_container = compile_utils.MetricsContainer('Accuracy')
y_t, y_p = tf.ones((10, 1)), tf.ones((10, 1))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.binary_accuracy)
metric_container = compile_utils.MetricsContainer('accuracy')
y_t, y_p = tf.ones((10, 1)), tf.ones((10, 20))
self.assertEqual(y_p.shape.as_list()[-1], 20)
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.sparse_categorical_accuracy)
metric_container = compile_utils.MetricsContainer('accuracy')
y_t, y_p = tf.ones((10, 20)), tf.ones((10, 20))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0]._fn,
metrics_mod.categorical_accuracy)
def test_metric_weighting(self):
metric_container = compile_utils.MetricsContainer(
metrics=['mae'], weighted_metrics=['mae'])
y_t = tf.convert_to_tensor([[0], [3], [0]])
y_p = tf.convert_to_tensor([[0], [0], [0]])
sw = tf.convert_to_tensor([[1], [0], [1]])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 2)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
weighted_mae_metric = metric_container.metrics[1]
self.assertEqual(weighted_mae_metric.name, 'weighted_mae')
self.assertEqual(weighted_mae_metric.result().numpy(), 0.)
def test_broadcast_metrics_to_dict(self):
metric_container = compile_utils.MetricsContainer(metrics=['mae'])
y_p = {'output': tf.convert_to_tensor([[0], [1], [2]])}
y_t = {'output': tf.convert_to_tensor([[1], [2], [3]])}
metric_container.update_state(y_t, y_p)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
def test_broadcast_metrics_to_dict_with_output_names(self):
metric_container = compile_utils.MetricsContainer(
metrics=['mae'], output_names=['output'])
y_p = tf.convert_to_tensor([[0], [1], [2]])
y_t = {'output': tf.convert_to_tensor([[1], [2], [3]])}
metric_container.update_state(y_t, y_p)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
def test_missing_label_with_no_metrics(self):
# It's ok to exclude a label if that label has no
# losses or metrics associated with it.
metric_container = compile_utils.MetricsContainer(metrics={
'output1': 'mae',
'output3': 'mse'
})
y_p = {
'output1': tf.convert_to_tensor([[0], [1], [2]]),
'output2': tf.convert_to_tensor([[3], [4], [5]]),
'output3': tf.convert_to_tensor([[6], [7], [8]])
}
y_t = {
'output1': tf.convert_to_tensor([[1], [2], [3]]),
'output3': tf.convert_to_tensor([[4], [5], [6]])
}
metric_container.update_state(y_t, y_p)
self.assertLen(metric_container.metrics, 2)
mae_metric = metric_container.metrics[0]
self.assertEqual(mae_metric.name, 'output1_mae')
self.assertEqual(mae_metric.result().numpy(), 1.)
mse_metric = metric_container.metrics[1]
self.assertEqual(mse_metric.name, 'output3_mse')
self.assertEqual(mse_metric.result().numpy(), 4.)
def test_metrics_masking(self):
metrics_container = compile_utils.MetricsContainer(
metrics=['mae'], weighted_metrics=['mse'])
y_p = tf.constant([[[1], [1]], [[0], [0]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
y_p._keras_mask = tf.constant([[1, 1], [0, 0]],
dtype=tf.float32)
metrics_container.update_state(y_t, y_p)
self.assertLen(metrics_container.metrics, 2)
mae_metric = metrics_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertAlmostEqual(mae_metric.result().numpy(), 0)
weighted_mae_metric = metrics_container.metrics[1]
self.assertEqual(weighted_mae_metric.name, 'mse')
self.assertAlmostEqual(weighted_mae_metric.result().numpy(), 0)
def test_metrics_sample_weight(self):
metrics_container = compile_utils.MetricsContainer(
metrics=['mae'], weighted_metrics=['mse'])
y_p = tf.constant([[[1], [1]], [[0], [1]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
sw = tf.constant([[.2, .3], [.5, 0]], dtype=tf.float32)
metrics_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metrics_container.metrics, 2)
mae_metric = metrics_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertAlmostEqual(mae_metric.result().numpy(), .25) # 1 / 4
weighted_mae_metric = metrics_container.metrics[1]
self.assertEqual(weighted_mae_metric.name, 'mse')
self.assertAlmostEqual(weighted_mae_metric.result().numpy(), .5) # .5 / 1
def test_metrics_masking_sample_weight(self):
metrics_container = compile_utils.MetricsContainer(
metrics=['mae'], weighted_metrics=['mse'])
y_p = tf.constant([[[1], [1]], [[0], [1]]], dtype=tf.float32)
y_t = tf.constant([[[1], [1]], [[1], [1]]], dtype=tf.float32)
sw = tf.constant([[.3, .2], [.2, .3]], dtype=tf.float32)
y_p._keras_mask = tf.constant([[1, 0], [1, 0]],
dtype=tf.float32)
metrics_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metrics_container.metrics, 2)
mae_metric = metrics_container.metrics[0]
self.assertEqual(mae_metric.name, 'mae')
self.assertAlmostEqual(mae_metric.result().numpy(), .5) # 1 / .5
weighted_mae_metric = metrics_container.metrics[1]
self.assertEqual(weighted_mae_metric.name, 'mse')
self.assertAlmostEqual(weighted_mae_metric.result().numpy(), .2 / .5)
def test_loss_class_as_metric_with_distribution(self):
distribution = tf.distribute.OneDeviceStrategy('/device:CPU:0')
with distribution.scope():
metric_container = compile_utils.MetricsContainer(
losses_mod.MeanSquaredError())
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
metric_container.update_state(y_t, y_p)
self.assertLen(metric_container.metrics, 1)
metric = metric_container.metrics[0]
self.assertEqual(metric.name, 'mean_squared_error')
self.assertEqual(metric.result().numpy(), 1.)
def test_custom_metric_callables(self):
def custom_metric_fn(y_true, y_pred):
return tf.reduce_sum(y_true - y_pred)
class CustomMetricClass:
def __call__(self, y_true, y_pred):
return tf.reduce_sum(y_true - y_pred)
metric_container = compile_utils.MetricsContainer(
[custom_metric_fn, CustomMetricClass()])
y_t, y_p = tf.ones((10, 5)), tf.zeros((10, 5))
metric_container.update_state(y_t, y_p)
self.assertEqual(metric_container.metrics[0].name, 'custom_metric_fn')
self.assertEqual(metric_container.metrics[1].name, 'custom_metric_class')
def test_reset_state_existing_metric_before_built(self):
metric = metrics_mod.Mean()
metric.update_state([2.0, 4.0])
self.assertEqual(metric.result().numpy(), 3.0)
metric_container = compile_utils.MetricsContainer(metric)
metric_container.reset_state()
self.assertEqual(metric.result().numpy(), 0.0)
def test_duplicated_metric_instance(self):
mean_obj = metrics_mod.Mean()
metric = mean_obj
with self.assertRaisesRegex(ValueError, 'Found duplicated metrics'):
compile_utils.MetricsContainer(metrics=metric, weighted_metrics=metric)
# duplicated string should be fine
metric = 'acc'
compile_utils.MetricsContainer(metrics=metric, weighted_metrics=metric)
# complicated structure
metric = [mean_obj, 'acc']
weighted_metric = {'output1': mean_obj, 'output2': 'acc'}
with self.assertRaisesRegex(ValueError, 'Found duplicated metrics'):
compile_utils.MetricsContainer(
metrics=metric, weighted_metrics=weighted_metric)
if __name__ == '__main__':
tf.compat.v1.enable_eager_execution()
tf.test.main()
| keras-team/keras | keras/engine/compile_utils_test.py | Python | apache-2.0 | 31,261 |
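
A standalone sketch (not part of the test file; imports assumed from the same repo) showing how the `MetricsContainer` exercised above can be driven outside a test:

```python
import tensorflow as tf
from keras.engine import compile_utils

# One unweighted and one weighted metric, mirroring the tests above.
container = compile_utils.MetricsContainer(metrics=['mae'], weighted_metrics=['mse'])

y_true = tf.ones((4, 1))
y_pred = tf.constant([[1.], [1.], [0.], [1.]])
sample_weight = tf.constant([0.2, 0.3, 0.5, 0.0])

# The underlying Metric objects are built lazily on the first update_state call.
container.update_state(y_true, y_pred, sample_weight=sample_weight)

for metric in container.metrics:
    print(metric.name, float(metric.result().numpy()))
```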
#encoding:utf-8
__authors__ = ['wei keke']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1       2014/10/17    Initial version
#---------------------------------------------------------------------------------
'''
import TestData.Network.ITC06_Setup as ModuleData
from TestAPIs.DataCenterAPIs import DataCenterAPIs
'''
@note: PreData
'''
dc_name = ModuleData.dc_name
dc_id = DataCenterAPIs().getDataCenterIdByName(ModuleData.dc_name)
nw_name = 'network001'
nw_info = '''
<network>
<name>%s</name>
<data_center id= "%s"/>
</network>
''' %(nw_name,dc_id)
'''
@note:TestData
'''
new_nw_name = 'network002'
update_info = '''
<network>
<name>%s</name>
<description>lalala</description>
<mtu>2000</mtu>
</network>
'''%new_nw_name
'''
@note: ExpectedData
'''
expected_status_code = 200 | faylau/oVirt3.3WebAPITest | src/TestData/Network/ITC06010401_UpdateNetwork.py | Python | apache-2.0 | 1,115 |
"""Support for OVO Energy."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
import aiohttp
import async_timeout
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up OVO Energy from a config entry."""
client = OVOEnergy()
try:
authenticated = await client.authenticate(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
)
except aiohttp.ClientError as exception:
_LOGGER.warning(exception)
raise ConfigEntryNotReady from exception
if not authenticated:
raise ConfigEntryAuthFailed
async def async_update_data() -> OVODailyUsage:
"""Fetch data from OVO Energy."""
async with async_timeout.timeout(10):
try:
authenticated = await client.authenticate(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
)
except aiohttp.ClientError as exception:
raise UpdateFailed(exception) from exception
if not authenticated:
raise ConfigEntryAuthFailed("Not authenticated with OVO Energy")
return await client.get_daily_usage(datetime.utcnow().strftime("%Y-%m"))
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="sensor",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=3600),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_CLIENT: client,
DATA_COORDINATOR: coordinator,
}
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
# Setup components
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigType) -> bool:
"""Unload OVO Energy config entry."""
# Unload sensors
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
del hass.data[DOMAIN][entry.entry_id]
return unload_ok
class OVOEnergyEntity(CoordinatorEntity):
"""Defines a base OVO Energy entity."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
client: OVOEnergy,
) -> None:
"""Initialize the OVO Energy entity."""
super().__init__(coordinator)
self._client = client
class OVOEnergyDeviceEntity(OVOEnergyEntity):
"""Defines a OVO Energy device entity."""
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this OVO Energy instance."""
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, self._client.account_id)},
manufacturer="OVO Energy",
name=self._client.username,
)
| jawilson/home-assistant | homeassistant/components/ovo_energy/__init__.py | Python | apache-2.0 | 3,757 |
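
A hypothetical sketch of how a sensor platform could build on `OVOEnergyDeviceEntity` and the coordinator stored in `hass.data` above; the `OVODailyUsage.electricity[...].consumption` shape is an assumption, not taken from this file:

```python
from homeassistant.components.sensor import SensorEntity
from ovoenergy import OVODailyUsage

class OVOEnergyLastElectricityReading(OVOEnergyDeviceEntity, SensorEntity):
    """Illustrative sensor exposing the most recent daily electricity reading."""

    _attr_name = "OVO last electricity reading"

    @property
    def native_value(self):
        usage: OVODailyUsage = self.coordinator.data
        # Assumed data shape: a list of daily readings with a `consumption` field.
        if usage is None or not usage.electricity:
            return None
        return usage.electricity[-1].consumption
```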
import pytest
pytestmark = [
pytest.mark.skip_on_windows(reason="Not supported on Windows"),
pytest.mark.skip_if_binaries_missing(
"ansible",
"ansible-doc",
"ansible-playbook",
check_all=True,
reason="ansible is not installed",
),
]
@pytest.fixture
def ansible_ping_func(modules):
if "ansible.system.ping" in modules:
# we need to go by getattr() because salt's loader will try to find "system" in the dictionary and fail
# The ansible hack injects, in this case, "system.ping" as an attribute to the loaded module
return getattr(modules.ansible, "system.ping")
if "ansible.ping" in modules:
# Ansible >= 2.10
return modules.ansible.ping
pytest.fail("Where is the ping function these days in Ansible?!")
def test_ansible_functions_loaded(ansible_ping_func):
"""
Test that the ansible functions are actually loaded
"""
ret = ansible_ping_func()
assert ret == {"ping": "pong"}
def test_passing_data_to_ansible_modules(ansible_ping_func):
"""
Test that the ansible functions are actually loaded
"""
expected = "foobar"
ret = ansible_ping_func(data=expected)
assert ret == {"ping": expected}
| saltstack/salt | tests/pytests/functional/modules/test_ansiblegate.py | Python | apache-2.0 | 1,245 |
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
# -*- coding: utf-8 -*-
"""Module for init environment."""
import sys
import os
import importlib
import time
import threading
import ctypes
from google.protobuf.descriptor_pb2 import FileDescriptorProto
PY_CALLBACK_TYPE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
PY_CALLBACK_TYPE_T = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p)
# init vars
CYBER_PATH = os.environ['CYBER_PATH']
CYBER_DIR = os.path.split(CYBER_PATH)[0]
sys.path.append(CYBER_PATH + "/third_party/")
sys.path.append(CYBER_PATH + "/lib/")
sys.path.append(CYBER_PATH + "/python/cyber")
sys.path.append(CYBER_PATH + "/python/cyber_py")
sys.path.append(CYBER_PATH + "/lib/python/")
sys.path.append(CYBER_DIR + "/python/")
sys.path.append(CYBER_DIR + "/cyber/")
_CYBER_INIT = importlib.import_module('_cyber_init')
_CYBER_NODE = importlib.import_module('_cyber_node')
def init(module_name="cyber_py"):
"""
init cyber.
"""
return _CYBER_INIT.py_init(module_name)
def ok():
"""
    Check whether the cyber environment is OK.
"""
return _CYBER_INIT.py_ok()
def shutdown():
"""
    Shut down the cyber environment.
"""
return _CYBER_INIT.py_shutdown()
def is_shutdown():
"""
    Check whether cyber has been shut down.
"""
return _CYBER_INIT.py_is_shutdown()
def waitforshutdown():
"""
    Wait for cyber to shut down.
"""
return _CYBER_INIT.py_waitforshutdown()
# //////////////////////////////class//////////////////////////////
class Writer(object):
"""
Class for cyber writer wrapper.
"""
def __init__(self, name, writer, data_type):
self.name = name
self.writer = writer
self.data_type = data_type
def write(self, data):
"""
        Write a serialized proto message to the channel.
"""
return _CYBER_NODE.PyWriter_write(self.writer, data.SerializeToString())
class Reader(object):
"""
Class for cyber reader wrapper.
"""
def __init__(self, name, reader, data_type):
self.name = name
self.reader = reader
self.data_type = data_type
class Client(object):
"""
Class for cyber service client wrapper.
"""
def __init__(self, client, data_type):
self.client = client
self.data_type = data_type
def send_request(self, data):
"""
send request to service
@param self
@param data: proto message to send
@return : None or response
"""
response_str = _CYBER_NODE.PyClient_send_request(
self.client, data.SerializeToString())
if len(response_str) == 0:
return None
response = self.data_type()
response.ParseFromString(response_str)
return response
class Node(object):
"""
Class for cyber Node wrapper.
"""
def __init__(self, name):
self.node = _CYBER_NODE.new_PyNode(name)
self.list_writer = []
self.list_reader = []
self.subs = {}
self.pubs = {}
self.list_client = []
self.list_service = []
self.mutex = threading.Lock()
self.callbacks = {}
self.services = {}
def __del__(self):
# print("+++ node __del___")
for writer in self.list_writer:
_CYBER_NODE.delete_PyWriter(writer)
for reader in self.list_reader:
_CYBER_NODE.delete_PyReader(reader)
for c in self.list_client:
_CYBER_NODE.delete_PyClient(c)
for s in self.list_service:
_CYBER_NODE.delete_PyService(s)
_CYBER_NODE.delete_PyNode(self.node)
def register_message(self, file_desc):
"""
        Register a proto message descriptor file.
"""
for dep in file_desc.dependencies:
self.register_message(dep)
proto = FileDescriptorProto()
file_desc.CopyToProto(proto)
proto.name = file_desc.name
desc_str = proto.SerializeToString()
_CYBER_NODE.PyNode_register_message(self.node, desc_str)
def create_writer(self, name, data_type, qos_depth=1):
"""
        Create a topic writer for sending messages to a topic.
@param self
@param name str: topic name
@param data_type proto: message class for serialization
"""
self.register_message(data_type.DESCRIPTOR.file)
datatype = data_type.DESCRIPTOR.full_name
writer = _CYBER_NODE.PyNode_create_writer(self.node, name,
datatype, qos_depth)
self.list_writer.append(writer)
return Writer(name, writer, datatype)
def reader_callback(self, name):
"""
reader callback
"""
sub = self.subs[name]
msg_str = _CYBER_NODE.PyReader_read(sub[0], False)
if len(msg_str) > 0:
if sub[3] != "RawData":
proto = sub[3]()
proto.ParseFromString(msg_str)
else:
# print "read rawdata-> ",sub[3]
proto = msg_str
if sub[2] is None:
sub[1](proto)
else:
sub[1](proto, sub[2])
return 0
def create_reader(self, name, data_type, callback, args=None):
"""
        Create a topic reader for receiving messages from a topic.
@param self
@param name str: topic name
@param data_type proto: message class for serialization
@callback fn: function to call (fn(data)) when data is
received. If args is set, the function must
accept the args as a second argument,
i.e. fn(data, args)
@args any: additional arguments to pass to the callback
"""
self.mutex.acquire()
if name in self.subs.keys():
self.mutex.release()
return None
self.mutex.release()
# datatype = data_type.DESCRIPTOR.full_name
reader = _CYBER_NODE.PyNode_create_reader(
self.node, name, str(data_type))
if reader is None:
return None
self.list_reader.append(reader)
sub = (reader, callback, args, data_type, False)
self.mutex.acquire()
self.subs[name] = sub
self.mutex.release()
fun_reader_cb = PY_CALLBACK_TYPE(self.reader_callback)
self.callbacks[name] = fun_reader_cb
f_ptr = ctypes.cast(self.callbacks[name], ctypes.c_void_p).value
_CYBER_NODE.PyReader_register_func(reader, f_ptr)
return Reader(name, reader, data_type)
def create_rawdata_reader(self, name, callback, args=None):
"""
        Create a RawData reader that listens for RawMessage.
"""
return self.create_reader(name, "RawData", callback, args)
def create_client(self, name, request_data_type, response_data_type):
datatype = request_data_type.DESCRIPTOR.full_name
c = _CYBER_NODE.PyNode_create_client(self.node, name,
str(datatype))
self.list_client.append(c)
return Client(c, response_data_type)
def service_callback(self, name):
v = self.services[name]
msg_str = _CYBER_NODE.PyService_read(v[0])
if (len(msg_str) > 0):
proto = v[3]()
proto.ParseFromString(msg_str)
response = None
if v[2] is None:
response = v[1](proto)
else:
response = v[1](proto, v[2])
_CYBER_NODE.PyService_write(v[0], response.SerializeToString())
return 0
def create_service(self, name, req_data_type, res_data_type, callback, args=None):
self.mutex.acquire()
if name in self.services.keys():
self.mutex.release()
return None
self.mutex.release()
datatype = req_data_type.DESCRIPTOR.full_name
s = _CYBER_NODE.PyNode_create_service(self.node, name, str(datatype))
self.list_service.append(s)
v = (s, callback, args, req_data_type, False)
self.mutex.acquire()
self.services[name] = v
self.mutex.release()
f = PY_CALLBACK_TYPE(self.service_callback)
self.callbacks[name] = f
f_ptr = ctypes.cast(f, ctypes.c_void_p).value
_CYBER_NODE.PyService_register_func(s, f_ptr)
return s
def spin(self):
"""
        Spin: wait for and process messages.
@param self
"""
while not _CYBER_INIT.py_is_shutdown():
time.sleep(0.002)
class ChannelUtils(object):
@staticmethod
def get_debugstring_rawmsgdata(msg_type, rawmsgdata):
"""
Parse rawmsg from rawmsg data
Input: message type; rawmsg data
        Output: a human-readable form of this message, for debugging and other purposes.
"""
return _CYBER_NODE.PyChannelUtils_get_debugstring_by_msgtype_rawmsgdata(msg_type, rawmsgdata)
@staticmethod
def get_msgtype(channel_name, sleep_s=2):
"""
        Get the message type of a channel
Input: channel name, wait for topo discovery
Output: the corresponding message type of this channel in topo.
"""
return _CYBER_NODE.PyChannelUtils_get_msg_type(channel_name, sleep_s)
@staticmethod
def get_channels(sleep_s=2):
"""
        Get the names of active channels
Input: wait for topo discovery
Output: all active channels
"""
return _CYBER_NODE.PyChannelUtils_get_active_channels(sleep_s)
@staticmethod
def get_channels_info(sleep_s=2):
"""
Get active channel info
Input: wait for topo discovery
Output: {'channel1':[], 'channel2':[]} .channels info
"""
return _CYBER_NODE.PyChannelUtils_get_channels_info(sleep_s)
| ycool/apollo | cyber/python/cyber_py/cyber.py | Python | apache-2.0 | 10,449 |
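
A minimal usage sketch for the wrappers above (not part of the module); `chatter_pb2.Chatter` stands in for any generated protobuf message and its import path is assumed:

```python
import time

from cyber_py import cyber
from my_msgs import chatter_pb2  # hypothetical generated proto module

def on_message(msg):
    # Reader callbacks receive the already-deserialized proto message.
    print("received:", msg)

cyber.init("demo_node")
node = cyber.Node("demo_node")

writer = node.create_writer("channel/chatter", chatter_pb2.Chatter, qos_depth=1)
node.create_reader("channel/chatter", chatter_pb2.Chatter, on_message)

msg = chatter_pb2.Chatter()
while cyber.ok():
    writer.write(msg)
    time.sleep(1.0)

cyber.shutdown()
```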
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import gc
import os
import pickle
import time
from helpers import unittest
import luigi
import mock
import psutil
from luigi.worker import Worker
def running_children():
children = set()
process = psutil.Process(os.getpid())
for child in process.children():
if child.is_running():
children.add(child.pid)
return children
@contextlib.contextmanager
def pause_gc():
if not gc.isenabled():
yield
try:
gc.disable()
yield
finally:
gc.enable()
class SlowCompleteWrapper(luigi.WrapperTask):
def requires(self):
return [SlowCompleteTask(i) for i in range(4)]
class SlowCompleteTask(luigi.Task):
n = luigi.IntParameter()
def complete(self):
time.sleep(0.1)
return True
class OverlappingSelfDependenciesTask(luigi.Task):
n = luigi.IntParameter()
k = luigi.IntParameter()
def complete(self):
return self.n < self.k or self.k == 0
def requires(self):
return [OverlappingSelfDependenciesTask(self.n - 1, k) for k in range(self.k + 1)]
class ExceptionCompleteTask(luigi.Task):
def complete(self):
assert False
class ExceptionRequiresTask(luigi.Task):
def requires(self):
assert False
class UnpicklableExceptionTask(luigi.Task):
def complete(self):
class UnpicklableException(Exception):
pass
raise UnpicklableException()
class ParallelSchedulingTest(unittest.TestCase):
def setUp(self):
self.sch = mock.Mock()
self.w = Worker(scheduler=self.sch, worker_id='x')
def added_tasks(self, status):
return [kw['task_id'] for args, kw in self.sch.add_task.call_args_list if kw['status'] == status]
def test_children_terminated(self):
before_children = running_children()
with pause_gc():
self.w.add(
OverlappingSelfDependenciesTask(5, 2),
multiprocess=True,
)
self.assertLessEqual(running_children(), before_children)
def test_multiprocess_scheduling_with_overlapping_dependencies(self):
self.w.add(OverlappingSelfDependenciesTask(5, 2), True)
self.assertEqual(15, self.sch.add_task.call_count)
self.assertEqual(set((
OverlappingSelfDependenciesTask(n=1, k=1).task_id,
OverlappingSelfDependenciesTask(n=2, k=1).task_id,
OverlappingSelfDependenciesTask(n=2, k=2).task_id,
OverlappingSelfDependenciesTask(n=3, k=1).task_id,
OverlappingSelfDependenciesTask(n=3, k=2).task_id,
OverlappingSelfDependenciesTask(n=4, k=1).task_id,
OverlappingSelfDependenciesTask(n=4, k=2).task_id,
OverlappingSelfDependenciesTask(n=5, k=2).task_id,
)), set(self.added_tasks('PENDING')))
self.assertEqual(set((
OverlappingSelfDependenciesTask(n=0, k=0).task_id,
OverlappingSelfDependenciesTask(n=0, k=1).task_id,
OverlappingSelfDependenciesTask(n=1, k=0).task_id,
OverlappingSelfDependenciesTask(n=1, k=2).task_id,
OverlappingSelfDependenciesTask(n=2, k=0).task_id,
OverlappingSelfDependenciesTask(n=3, k=0).task_id,
OverlappingSelfDependenciesTask(n=4, k=0).task_id,
)), set(self.added_tasks('DONE')))
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_complete(self, send):
self.w.add(ExceptionCompleteTask(), multiprocess=True)
send.check_called_once()
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('assert False' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_unpicklable_exception_in_complete(self, send):
# verify exception can't be pickled
self.assertRaises(Exception, UnpicklableExceptionTask().complete)
try:
UnpicklableExceptionTask().complete()
except Exception as e:
ex = e
self.assertRaises(pickle.PicklingError, pickle.dumps, ex)
# verify this can run async
self.w.add(UnpicklableExceptionTask(), multiprocess=True)
send.check_called_once()
self.assertEqual(0, self.sch.add_task.call_count)
self.assertTrue('raise UnpicklableException()' in send.call_args[0][1])
@mock.patch('luigi.notifications.send_error_email')
def test_raise_exception_in_requires(self, send):
self.w.add(ExceptionRequiresTask(), multiprocess=True)
send.check_called_once()
self.assertEqual(0, self.sch.add_task.call_count)
if __name__ == '__main__':
unittest.main()
| bmaggard/luigi | test/worker_parallel_scheduling_test.py | Python | apache-2.0 | 5,302 |
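
For context, a small self-contained sketch (assumed, not from the test suite) of the kind of task graph these tests schedule, driven through luigi's public API:

```python
import luigi

class FastCompleteTask(luigi.Task):
    n = luigi.IntParameter()

    def complete(self):
        # Cheap completion check; the tests above run such checks in parallel
        # via Worker.add(..., multiprocess=True).
        return self.n % 2 == 0

    def run(self):
        pass  # no-op; only the completion checks matter here

class WrapperOfTasks(luigi.WrapperTask):
    def requires(self):
        return [FastCompleteTask(i) for i in range(4)]

if __name__ == '__main__':
    luigi.build([WrapperOfTasks()], local_scheduler=True)
```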
#!/usr/bin/python
'''
Input: file
Output: stdout
Tidies the output of a Spark stderr file into CSV format
Jeremy Schaub
$ ./sparkread.py [spark_stderr_file]
'''
import sys
class Measurement:
'''
Data structure for /usr/bin/time measurement
'''
def __init__(self):
self.stage_times = ['0']
self.spill_count = -1
self._expected_length = 2
def fields(self):
'''
Returns a list of fields in the data structure
'''
fields = ['spill_count']
num_stages = len(self.stage_times) - 1
stage_header = ['stage %d [sec]' % i for i in range(num_stages)]
stage_header.append('total time [sec]')
fields.extend(stage_header)
return fields
def headercsv(self):
'''
Returns a csv string with all header fields
'''
return ','.join(self.fields())
def rowcsv(self):
'''
Returns a csv string with all data fields
'''
values = [self.spill_count]
values.extend(self.stage_times)
return ','.join(values)
def headerhtml(self, fields=None):
'''
        Returns an HTML string with all header fields
'''
if not fields:
fields=self.fields()
row = '<tr>\n<th>%s</th>\n</tr>\n' % ('</th>\n<th>'.join(fields))
return row
def addfield(self, name=None, value=None):
if name not in self.fields():
self._expected_length += 1
setattr(self, name, value)
def htmlclass(self):
return "warning" if int(self.spill_count) != 0 else ""
def rowhtml(self, fields=None, rowclass=None):
''' Returns an html formatted string with all td cells in row '''
if not fields:
fields = self.fields()
if not rowclass:
rowclass = self.htmlclass()
values = [self.spill_count]
values.extend(self.stage_times)
html_row = '<tr class="%s">\n<td>' % (rowclass)
html_row += '</td>\n<td>'.join(values)
html_row += '</td>\n</tr>\n'
return html_row
def is_valid(self):
return len(self.fields()) == self._expected_length
def parse(self, spark_fn):
'''
This parses the output of the spark stderr file
'''
try:
with open(spark_fn, 'r') as f:
blob = f.read()
num_stages = len(blob.split('finished in ')[1:])
stage_times = ['' for i in range(num_stages)]
i = 0
total_time = 0
for a in blob.split('finished in ')[1:]:
stage_times[i] = a.split(' s\n')[0]
total_time += float(stage_times[i])
i += 1
stage_times.append(str(total_time))
self.stage_times = stage_times
self.spill_count = str(blob.lower().count('spill'))
if not self.is_valid():
sys.stderr.write('Not a valid spark file %s\n' % spark_fn)
assert False
except Exception as err:
            sys.stderr.write('Problem parsing spark file %s\n' % spark_fn)
sys.stderr.write(str(err) + '\n')
def main(spark_fn):
# Wrapper to print to stdout
m = Measurement()
m.parse(spark_fn)
sys.stdout.write('%s\n%s\n' % (m.headercsv(), m.rowcsv()))
if __name__ == '__main__':
main(sys.argv[1])
| jschaub30/tidy | sparkread.py | Python | apache-2.0 | 3,370 |
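
Usage sketch for the script above; the file name and the CSV output shown are illustrative only:

```python
# Command line (illustrative file name and output):
#   $ ./sparkread.py app-20170101-0001.stderr
#   spill_count,stage 0 [sec],stage 1 [sec],total time [sec]
#   0,12.3,4.5,16.8

# Programmatic use of the same parser:
from sparkread import Measurement

m = Measurement()
m.parse('app-20170101-0001.stderr')
if m.is_valid():
    print(m.headercsv())
    print(m.rowcsv())
```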
from __future__ import division
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from fisher import pvalue
import re
import collections
from nltk.stem.porter import PorterStemmer
import math
from percept.tasks.base import Task
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
from percept.conf.base import settings
import os
from percept.tasks.train import Train
from sklearn.ensemble import RandomForestClassifier
import pickle
import random
import logging
log = logging.getLogger(__name__)
MAX_FEATURES = 500
DISTANCE_MIN=1
CHARACTER_DISTANCE_MIN = .2
RESET_SCENE_EVERY = 5
def make_df(datalist, labels, name_prefix=""):
df = pd.DataFrame(datalist).T
if name_prefix!="":
labels = [name_prefix + "_" + l for l in labels]
labels = [l.replace(" ", "_").lower() for l in labels]
df.columns = labels
df.index = range(df.shape[0])
return df
def return_one():
return 1
class SpellCorrector(object):
"""
    Taken and slightly adapted from Peter Norvig's post at http://norvig.com/spell-correct.html
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
punctuation = [".", "!", "?", ","]
def __init__(self):
self.NWORDS = self.train(self.words(file(os.path.join(settings.PROJECT_PATH,'data/big.txt')).read()))
self.cache = {}
def words(self, text):
return re.findall('[a-z]+', text.lower())
def train(self, features):
model = collections.defaultdict(return_one)
for f in features:
model[f] += 1
return model
def edits1(self, word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
inserts = [a + c + b for a, b in splits for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(self, word):
return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS)
def known(self, words): return set(w for w in words if w in self.NWORDS)
def correct(self, word):
if word in self.cache:
return self.cache[word]
suffix = ""
for p in self.punctuation:
if word.endswith(p):
suffix = p
word = word[:-1]
candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word]
newword = max(candidates, key=self.NWORDS.get) + suffix
self.cache.update({word : newword})
return newword
class Vectorizer(object):
def __init__(self):
self.fit_done = False
def fit(self, input_text, input_scores, max_features=100, min_features=3):
self.spell_corrector = SpellCorrector()
self.stemmer = PorterStemmer()
new_text = self.batch_generate_new_text(input_text)
input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
self.vectorizer1.fit(input_text)
self.vocab = self.get_vocab(input_text, input_scores, max_features)
self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
self.fit_done = True
self.input_text = input_text
def spell_correct_text(self, text):
text = text.lower()
split = text.split(" ")
corrected = [self.spell_corrector.correct(w) for w in split]
return corrected
def batch_apply(self, all_tokens, applied_func):
for key in all_tokens:
cor = applied_func(all_tokens[key])
all_tokens[key] = cor
return all_tokens
def batch_generate_new_text(self, text):
text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
text = [re.sub("\s+", " ", t) for t in text]
t_tokens = [t.split(" ") for t in text]
all_token_list = list(set(chain.from_iterable(t_tokens)))
all_token_dict = {}
for t in all_token_list:
all_token_dict.update({t : t})
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
for i in xrange(0,len(t_tokens)):
for j in xrange(0,len(t_tokens[i])):
t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
new_text = [" ".join(t) for t in t_tokens]
return new_text
def generate_new_text(self, text):
no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
no_punctuation = re.sub("\s+", " ", no_punctuation)
corrected = self.spell_correct_text(no_punctuation)
corrected = [self.stemmer.stem(w) for w in corrected]
new = " ".join(corrected)
return new
def get_vocab(self, input_text, input_scores, max_features):
train_mat = self.vectorizer1.transform(input_text)
input_score_med = np.median(input_scores)
new_scores = [0 if i<=input_score_med else 1 for i in input_scores]
ind_max_features = math.floor(max_features/max(input_scores))
all_vocab = []
all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
for s in xrange(0,max(input_scores)):
sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
pvalues = []
for i in xrange(0,len(all_cols)):
lcol = all_cols[i]
good_lcol = lcol[sel_inds]
bad_lcol = lcol[out_inds]
good_lcol_present = len(good_lcol[good_lcol > 0])
good_lcol_missing = len(good_lcol[good_lcol == 0])
bad_lcol_present = len(bad_lcol[bad_lcol > 0])
bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
pvalues.append(pval.two_tail)
col_inds = list(xrange(0,train_mat.shape[1]))
p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
p_frame = p_frame.sort(['pvalues'], ascending=True)
getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2])
all_vocab.append(vocab)
return list(set(list(chain.from_iterable(all_vocab))))
def batch_get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
new_text = self.batch_generate_new_text(text)
text = [text[i] + new_text[i] for i in xrange(0,len(text))]
return (self.vectorizer.transform(text).todense())
def get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
itext=text
if isinstance(text, list):
itext = text[0]
new_text = self.generate_new_text(itext)
if isinstance(text, list):
text = [text[0] + new_text]
else:
text = [text + new_text]
return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
data = Complex()
row_data = List()
speaker_code_dict = Dict()
speaker_codes = List()
vectorizer = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
help_text = "Cleanup simpsons scripts."
args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
scriptfile = kwargs.get('scriptfile')
script_data = pickle.load(open(scriptfile))
script = script_data.tasks[2].voice_lines.value
speakers = []
lines = []
for s in script:
for (i,l) in enumerate(s):
if i>0:
previous_line = s[i-1]['line']
previous_speaker = s[i-1]['speaker']
else:
previous_line = ""
previous_speaker = ""
if i>1:
two_back_speaker = s[i-2]['speaker']
else:
two_back_speaker = ""
if len(s)>i+1:
next_line = s[i+1]['line']
else:
next_line = ""
current_line = s[i]['line']
current_speaker = s[i]['speaker']
lines.append(current_line)
speakers.append(current_speaker)
row_data = {
'previous_line' : previous_line,
'previous_speaker' : previous_speaker,
'next_line' : next_line,
'current_line' : current_line,
'current_speaker' : current_speaker,
'two_back_speaker' : two_back_speaker
}
self.row_data.append(row_data)
self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
self.max_features = math.floor(MAX_FEATURES)/3
self.vectorizer = Vectorizer()
self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
self.speaker_code_dict.update({'' : -1})
meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
#meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features),meta_features],axis=1)
train_frame.index = range(train_frame.shape[0])
data = {
'vectorizer' : self.vectorizer,
'speaker_code_dict' : self.speaker_code_dict,
'train_frame' : train_frame,
'speakers' : make_df([speakers,self.speaker_codes, lines], ["speaker", "speaker_code", "line"]),
'data' : data,
'current_features' : cur_features,
}
return data
class RandomForestTrain(Train):
"""
A class to train a random forest
"""
colnames = List()
clf = Complex()
category = RegistryCategories.algorithms
namespace = get_namespace(__module__)
algorithm = RandomForestClassifier
args = {'n_estimators' : 300, 'min_samples_leaf' : 4, 'compute_importances' : True}
help_text = "Train and predict with Random Forest."
class KNNRF(Task):
data = Complex()
predictions = Complex()
importances = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
args = {'algo' : RandomForestTrain}
help_text = "Cleanup simpsons scripts."
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
from preprocess import CHARACTERS
vec_length = math.floor(MAX_FEATURES/3)
algo = kwargs.get('algo')
alg = algo()
train_data = data['train_frame'].iloc[:,:-1]
target = data['train_frame']['current_speaker']
clf = alg.train(train_data,target, **algo.args)
self.importances=clf.feature_importances_
test_data = data['data']
match_data = data['current_features']
reverse_speaker_code_dict = {data['speaker_code_dict'][k] : k for k in data['speaker_code_dict']}
speaker_list = []
speaker_codes = reverse_speaker_code_dict.keys()
for i in xrange(0,len(speaker_codes)):
s_text = "\n".join(list(data['speakers'][data['speakers']['speaker']==reverse_speaker_code_dict[speaker_codes[i]]]['line']))
speaker_list.append(s_text)
speaker_features = data['vectorizer'].batch_get_features(speaker_list)
self.predictions = []
counter = 0
for script in test_data['voice_script']:
counter+=1
log.info("On script {0} out of {1}".format(counter,len(test_data['voice_script'])))
lines = script.split("\n")
speaker_code = [-1 for i in xrange(0,len(lines))]
for (i,line) in enumerate(lines):
if i>0 and i%RESET_SCENE_EVERY!=0:
previous_line = lines[i-1]
previous_speaker = speaker_code[i-1]
else:
previous_line = ""
previous_speaker= -1
if i>1 and i%RESET_SCENE_EVERY!=0:
two_back_speaker = speaker_code[i-2]
else:
two_back_speaker = -1
if i<(len(lines)-1):
next_line = lines[i+1]
else:
next_line = ""
prev_features = data['vectorizer'].get_features(previous_line)
cur_features = data['vectorizer'].get_features(line)
next_features = data['vectorizer'].get_features(next_line)
meta_features = make_df([[two_back_speaker], [previous_speaker]],["two_back_speaker", "previous_speaker"])
#meta_features = make_df([[two_back_speaker]],["two_back_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features), meta_features],axis=1)
speaker_code[i] = alg.predict(train_frame)[0]
nearest_match, distance = self.find_nearest_match(cur_features, speaker_features)
if distance<CHARACTER_DISTANCE_MIN:
sc = speaker_codes[nearest_match]
speaker_code[i] = sc
continue
for k in CHARACTERS:
for c in CHARACTERS[k]:
if c in previous_line:
speaker_code[i] = data['speaker_code_dict'][k]
nearest_match, distance = self.find_nearest_match(cur_features,match_data)
if distance<DISTANCE_MIN:
sc = data['speakers']['speaker_code'][nearest_match]
speaker_code[i] = sc
continue
df = make_df([lines,speaker_code,[reverse_speaker_code_dict[round(s)] for s in speaker_code]],["line","speaker_code","speaker"])
self.predictions.append(df)
return data
def find_nearest_match(self, features, matrix):
features = np.asarray(features)
distances = [self.euclidean(u, features) for u in matrix]
nearest_match = distances.index(min(distances))
return nearest_match, min(distances)
def euclidean(self, v1, v2):
return np.sqrt(np.sum(np.square(np.subtract(v1,v2))))
"""
p = tasks[3].predictions.value
speakers = []
lines = []
for pr in p:
speakers.append(list(pr['speaker']))
lines.append(list(pr['line']))
from itertools import chain
speakers = list(chain.from_iterable(speakers))
lines = list(chain.from_iterable(lines))
rows = []
for (s,l) in zip(speakers, lines):
rows.append({
'speaker' : s,
'line': l,
})
import json
json.dump(rows,open("/home/vik/vikparuchuri/simpsons-scripts/data/final_voice.json","w+"))
""" | VikParuchuri/simpsons-scripts | tasks/train.py | Python | apache-2.0 | 16,847 |
import subprocess
import os.path
import re
import argparse
import sys
from pybedtools import BedTool
DEBUG = False
parser = argparse.ArgumentParser(description="find overlap gene.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
if not DEBUG:
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input locus file (bed format)', required=True)
parser.add_argument('-g', '--gene_sorted_bed', action='store', nargs='?', help='Gene locus file (sorted bed format)', required=True)
parser.add_argument('-o', '--output', action='store', nargs='?', help='Output overlap file', required=True)
args = parser.parse_args()
input_file=args.input
gene_file = args.gene_sorted_bed
output_file=args.output
else:
input_file= "/scratch/cqs/shengq1/vickers/20170720_AGO_human_CLIP/macs2/result/GSM1020022/GSM1020022_peaks.narrowPeak.bed"
gene_file = "/scratch/cqs/shengq1/references/smallrna/v3/hg19_miRBase21_GtRNAdb2_gencode19_ncbi.sorted.bed"
output_file="/scratch/cqs/shengq1/vickers/20170720_AGO_human_CLIP/macs2/result/GSM1020022/GSM1020022_peaks.narrowPeak.overlap.tsv"
closet = [nearest for nearest in BedTool(input_file).closest(gene_file, d=True)]
with open(output_file, 'w') as w:
for nearest in closet:
overlap = nearest.fields[12]
if overlap == u'0':
w.write(str(nearest))
| shengqh/ngsperl | lib/Annotation/findOverlapGene.py | Python | apache-2.0 | 1,368 |
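
Invocation sketch (paths are placeholders); the script keeps only the closest-gene records whose reported distance is zero, i.e. peaks that overlap a gene:

```python
# Hypothetical command line:
#   python findOverlapGene.py -i peaks.narrowPeak.bed -g genes.sorted.bed -o peaks.overlap.tsv

# The same lookup done directly with pybedtools, reusing the script's
# assumption that the distance lands in field index 12:
from pybedtools import BedTool

closest = BedTool("peaks.narrowPeak.bed").closest("genes.sorted.bed", d=True)
overlapping = [rec for rec in closest if rec.fields[12] == "0"]
print(len(overlapping))
```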
# coding: utf-8
"""
DeliveryHub
DeliveryHub API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.order import Order
class TestOrder(unittest.TestCase):
""" Order unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testOrder(self):
"""
Test Order
"""
model = swagger_client.models.order.Order()
if __name__ == '__main__':
unittest.main()
| garywong89/PetStoreAPI | python/test/test_order.py | Python | apache-2.0 | 1,237 |
import boto3
import sure # noqa
from moto import mock_kinesis, mock_cloudformation
@mock_cloudformation
def test_kinesis_cloudformation_create_stream():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = '{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}'
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
"StackResourceSummaries"
][0]
provisioned_resource["LogicalResourceId"].should.equal("MyStream")
len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_get_attr():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Outputs:
StreamName:
Value: !Ref TheStream
StreamArn:
Value: !GetAtt TheStream.Arn
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
output_stream_name = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamName"
][0]
output_stream_arn = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamArn"
][0]
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[
"StreamDescription"
]
output_stream_arn.should.equal(stream_description["StreamARN"])
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_update():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
ShardCount: 4
RetentionPeriodHours: 48
Tags:
- Key: TagKey1
Value: TagValue1
- Key: TagKey2
Value: TagValue2
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(48)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1")
tag2_value.should.equal("TagValue2")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(4)
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
ShardCount: 6
RetentionPeriodHours: 24
Tags:
- Key: TagKey1
Value: TagValue1a
- Key: TagKey2
Value: TagValue2a
""".strip()
cf_conn.update_stack(StackName=stack_name, TemplateBody=template)
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(24)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1a")
tag2_value.should.equal("TagValue2a")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(6)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_delete():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["StreamName"].should.equal("MyStream")
cf_conn.delete_stack(StackName=stack_name)
streams = kinesis_conn.list_streams()["StreamNames"]
len(streams).should.equal(0)
| william-richard/moto | tests/test_kinesis/test_kinesis_cloudformation.py | Python | apache-2.0 | 5,255 |
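
For reference, a standalone sketch (not part of the suite) of the same mocking pattern, exercising Kinesis directly without CloudFormation:

```python
import boto3
from moto import mock_kinesis

@mock_kinesis
def test_create_stream_directly():
    client = boto3.client("kinesis", region_name="us-east-1")
    client.create_stream(StreamName="DirectStream", ShardCount=2)

    description = client.describe_stream(StreamName="DirectStream")["StreamDescription"]
    assert description["StreamName"] == "DirectStream"
    assert len(description["Shards"]) == 2
```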
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import json
import click
from tabulate import tabulate
from platformio.clients.account import AccountClient
from platformio.commands.account import validate_email, validate_username
@click.group("org", short_help="Manage Organizations")
def cli():
pass
def validate_orgname(value):
return validate_username(value, "Organization name")
@cli.command("create", short_help="Create a new organization")
@click.argument(
"orgname", callback=lambda _, __, value: validate_orgname(value),
)
@click.option(
"--email", callback=lambda _, __, value: validate_email(value) if value else value
)
@click.option("--displayname",)
def org_create(orgname, email, displayname):
client = AccountClient()
client.create_org(orgname, email, displayname)
return click.secho(
"The organization %s has been successfully created." % orgname, fg="green",
)
@cli.command("list", short_help="List organizations")
@click.option("--json-output", is_flag=True)
def org_list(json_output):
client = AccountClient()
orgs = client.list_orgs()
if json_output:
return click.echo(json.dumps(orgs))
if not orgs:
return click.echo("You do not have any organizations")
for org in orgs:
click.echo()
click.secho(org.get("orgname"), fg="cyan")
click.echo("-" * len(org.get("orgname")))
data = []
if org.get("displayname"):
data.append(("Display Name:", org.get("displayname")))
if org.get("email"):
data.append(("Email:", org.get("email")))
data.append(
(
"Owners:",
", ".join((owner.get("username") for owner in org.get("owners"))),
)
)
click.echo(tabulate(data, tablefmt="plain"))
return click.echo()
@cli.command("update", short_help="Update organization")
@click.argument("orgname")
@click.option(
"--new-orgname", callback=lambda _, __, value: validate_orgname(value),
)
@click.option("--email")
@click.option("--displayname",)
def org_update(orgname, **kwargs):
client = AccountClient()
org = client.get_org(orgname)
del org["owners"]
new_org = org.copy()
if not any(kwargs.values()):
for field in org:
new_org[field] = click.prompt(
field.replace("_", " ").capitalize(), default=org[field]
)
if field == "email":
validate_email(new_org[field])
if field == "orgname":
validate_orgname(new_org[field])
else:
new_org.update(
{key.replace("new_", ""): value for key, value in kwargs.items() if value}
)
client.update_org(orgname, new_org)
return click.secho(
"The organization %s has been successfully updated." % orgname, fg="green",
)
@cli.command("destroy", short_help="Destroy organization")
@click.argument("orgname")
def account_destroy(orgname):
client = AccountClient()
click.confirm(
"Are you sure you want to delete the %s organization account?\n"
"Warning! All linked data will be permanently removed and can not be restored."
% orgname,
abort=True,
)
client.destroy_org(orgname)
return click.secho("Organization %s has been destroyed." % orgname, fg="green",)
@cli.command("add", short_help="Add a new owner to organization")
@click.argument("orgname",)
@click.argument("username",)
def org_add_owner(orgname, username):
client = AccountClient()
client.add_org_owner(orgname, username)
return click.secho(
"The new owner %s has been successfully added to the %s organization."
% (username, orgname),
fg="green",
)
@cli.command("remove", short_help="Remove an owner from organization")
@click.argument("orgname",)
@click.argument("username",)
def org_remove_owner(orgname, username):
client = AccountClient()
client.remove_org_owner(orgname, username)
return click.secho(
"The %s owner has been successfully removed from the %s organization."
% (username, orgname),
fg="green",
)
| platformio/platformio | platformio/commands/org.py | Python | apache-2.0 | 4,758 |
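
A hedged sketch of exercising these commands in-process with click's test runner; the organization name and email are placeholders, and a logged-in PlatformIO account is assumed:

```python
from click.testing import CliRunner

from platformio.commands.org import cli as org_cli

runner = CliRunner()

# Create an organization, then list organizations as JSON.
result = runner.invoke(
    org_cli,
    ["create", "acme-labs", "--email", "owner@example.com", "--displayname", "ACME Labs"],
)
print(result.output)

result = runner.invoke(org_cli, ["list", "--json-output"])
print(result.output)
```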
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import webob.exc
from oslo.config import cfg
from quantum.api import api_common
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.v2 import attributes
from quantum.api.v2 import resource as wsgi_resource
from quantum.common import exceptions
from quantum.openstack.common import log as logging
from quantum.openstack.common.notifier import api as notifier_api
from quantum import policy
from quantum import quota
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._publisher_id = notifier_api.publisher_id('network')
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
            # Native pagination needs native sorting support
            if not self._native_sorting:
                raise Exception(_("Native pagination depends on native "
                                  "sorting"))
if not self._allow_sorting:
LOG.info(_("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.iteritems():
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
def _is_visible(self, context, attr_name, data):
action = "%s:%s" % (self._plugin_handlers[self.SHOW], attr_name)
# Optimistically init authz_check to True
authz_check = True
try:
attr = (attributes.RESOURCE_ATTRIBUTE_MAP
[self._collection].get(attr_name))
if attr and attr.get('enforce_policy'):
authz_check = policy.check_if_exists(
context, action, data)
except KeyError:
# The extension was not configured for adding its resources
# to the global resource attribute map. Policy check should
# not be performed
LOG.debug(_("The resource %(resource)s was not found in the "
"RESOURCE_ATTRIBUTE_MAP; unable to perform authZ "
"check for attribute %(attr)s"),
{'resource': self._collection,
'attr': attr})
except exceptions.PolicyRuleNotFound:
LOG.debug(_("Policy rule:%(action)s not found. Assuming no "
"authZ check is defined for %(attr)s"),
{'action': action,
'attr': attr_name})
attr_val = self._attr_info.get(attr_name)
return attr_val and attr_val['is_visible'] and authz_check
def _view(self, context, data, fields_to_strip=None):
# make sure fields_to_strip is iterable
if not fields_to_strip:
fields_to_strip = []
return dict(item for item in data.iteritems()
if (self._is_visible(context, item[0], data) and
item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Fetch the resource and verify if the user can access it
try:
resource = self._item(request, id, True)
except exceptions.PolicyNotAuthorized:
raise webob.exc.HTTPNotFound()
body = kwargs.pop('body', None)
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# TODO(salvatore-orlando): bp/make-authz-ortogonal
# The body of the action request should be included
# in the info passed to the policy engine
# Enforce policy, if any, for this action
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context, name, resource,
plugin=self._plugin)
return getattr(self._plugin, name)(*arg_list, **kwargs)
return _handle_action
else:
raise AttributeError
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
"""Retrieves and formats a list of elements of the requested entity."""
# NOTE(salvatore-orlando): The following ensures that fields which
# are needed for authZ policy validation are not stripped away by the
# plugin before returning.
original_fields, fields_to_add = self._do_field_list(
api_common.list_args(request, 'fields'))
filters = api_common.get_filters(request, self._attr_info,
['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'])
kwargs = {'filters': filters,
'fields': original_fields}
sorting_helper = self._get_sorting_helper(request)
pagination_helper = self._get_pagination_helper(request)
sorting_helper.update_args(kwargs)
sorting_helper.update_fields(original_fields, fields_to_add)
pagination_helper.update_args(kwargs)
pagination_helper.update_fields(original_fields, fields_to_add)
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
obj_list = obj_getter(request.context, **kwargs)
obj_list = sorting_helper.sort(obj_list)
obj_list = pagination_helper.paginate(obj_list)
# Check authz
if do_authz:
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
# Omit items from list that should not be visible
obj_list = [obj for obj in obj_list
if policy.check(request.context,
self._plugin_handlers[self.SHOW],
obj,
plugin=self._plugin)]
collection = {self._collection:
[self._view(request.context, obj,
fields_to_strip=fields_to_add)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context, action, obj, plugin=self._plugin)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
self._dhcp_agent_notifier.notify(context, data, methodname)
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
return self._items(request, True, parent_id)
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
raise webob.exc.HTTPNotFound()
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
objs.append(self._view(request.context,
obj_creator(request.context,
**kwargs)))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception as ex:
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id} if parent_id
else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the exception
LOG.exception(_("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
# We need a way for ensuring that if it has been created,
# it is then deleted
raise ex
def create(self, request, body=None, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.create.start',
notifier_api.CONF.default_notification_level,
body)
body = Controller.prepare_request_body(request.context, body, True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
deltas = {}
bulk = True
else:
items = [body]
bulk = False
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource],
plugin=self._plugin)
try:
tenant_id = item[self._resource]['tenant_id']
count = quota.QUOTAS.count(request.context, self._resource,
self._plugin, self._collection,
tenant_id)
if bulk:
delta = deltas.get(tenant_id, 0) + 1
deltas[tenant_id] = delta
else:
delta = 1
kwargs = {self._resource: count + delta}
except exceptions.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
else:
quota.QUOTAS.limit_check(request.context,
item[self._resource]['tenant_id'],
**kwargs)
def notify(create_result):
notifier_method = self._resource + '.create.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
create_result)
self._send_dhcp_notification(request.context,
create_result,
notifier_method)
return create_result
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
obj_creator = getattr(self._plugin, "%s_bulk" % action)
objs = obj_creator(request.context, body, **kwargs)
return notify({self._collection: [self._view(request.context, obj)
for obj in objs]})
else:
obj_creator = getattr(self._plugin, action)
if self._collection in body:
# Emulate atomic bulk behavior
objs = self._emulate_bulk_create(obj_creator, request,
body, parent_id)
return notify({self._collection: objs})
else:
kwargs.update({self._resource: body})
obj = obj_creator(request.context, **kwargs)
return notify({self._resource: self._view(request.context,
obj)})
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.delete.start',
notifier_api.CONF.default_notification_level,
{self._resource + '_id': id})
action = self._plugin_handlers[self.DELETE]
# Check authz
parent_id = kwargs.get(self._parent_id_name)
obj = self._item(request, id, parent_id=parent_id)
try:
policy.enforce(request.context,
action,
obj,
plugin=self._plugin)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
raise webob.exc.HTTPNotFound()
obj_deleter = getattr(self._plugin, action)
obj_deleter(request.context, id, **kwargs)
notifier_method = self._resource + '.delete.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
{self._resource + '_id': id})
result = {self._resource: self._view(request.context, obj)}
self._send_dhcp_notification(request.context,
result,
notifier_method)
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
parent_id = kwargs.get(self._parent_id_name)
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
notifier_api.notify(request.context,
self._publisher_id,
self._resource + '.update.start',
notifier_api.CONF.default_notification_level,
payload)
body = Controller.prepare_request_body(request.context, body, False,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.UPDATE]
# Load object to check authz
# but pass only attributes in the original body and required
# by the policy engine to the policy 'brain'
field_list = [name for (name, value) in self._attr_info.iteritems()
if ('required_by_policy' in value and
value['required_by_policy'] or
'default' not in value)]
orig_obj = self._item(request, id, field_list=field_list,
parent_id=parent_id)
orig_obj.update(body[self._resource])
try:
policy.enforce(request.context,
action,
orig_obj,
plugin=self._plugin)
except exceptions.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
raise webob.exc.HTTPNotFound()
obj_updater = getattr(self._plugin, action)
kwargs = {self._resource: body}
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj = obj_updater(request.context, id, **kwargs)
result = {self._resource: self._view(request.context, obj)}
notifier_method = self._resource + '.update.end'
notifier_api.notify(request.context,
self._publisher_id,
notifier_method,
notifier_api.CONF.default_notification_level,
result)
self._send_dhcp_notification(request.context,
result,
notifier_method)
return result
@staticmethod
def _populate_tenant_id(context, res_dict, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
else:
msg = _("Running without keystyone AuthN requires "
" that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
"""Verifies required attributes are in request body.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
body argument must be the deserialized body.
"""
collection = resource + "s"
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
prep_req_body = lambda x: Controller.prepare_request_body(
context,
x if resource in x else {resource: x},
is_create,
resource,
attr_info,
allow_bulk)
if collection in body:
if not allow_bulk:
raise webob.exc.HTTPBadRequest(_("Bulk operation "
"not supported"))
bulk_body = [prep_req_body(item) for item in body[collection]]
if not bulk_body:
raise webob.exc.HTTPBadRequest(_("Resources required"))
return {collection: bulk_body}
res_dict = body.get(resource)
if res_dict is None:
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
Controller._populate_tenant_id(context, res_dict, is_create)
Controller._verify_attributes(res_dict, attr_info)
if is_create: # POST
for attr, attr_vals in attr_info.iteritems():
if attr_vals['allow_post']:
if ('default' not in attr_vals and
attr not in res_dict):
msg = _("Failed to parse request. Required "
"attribute '%s' not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr,
attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in attr_info.iteritems():
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in attr_info.iteritems():
if (attr not in res_dict or
res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
res = attributes.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = _("Invalid input for %(attr)s. "
"Reason: %(reason)s.") % msg_dict
raise webob.exc.HTTPBadRequest(msg)
return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if self._resource not in ('port', 'subnet'):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
msg = _("Tenant %(tenant_id)s not allowed to "
"create %(resource)s on this network")
raise webob.exc.HTTPForbidden(msg % {
"tenant_id": resource_item['tenant_id'],
"resource": self._resource,
})
def create_resource(collection, resource, plugin, params, allow_bulk=False,
member_actions=None, parent=None, allow_pagination=False,
allow_sorting=False):
controller = Controller(plugin, collection, resource, params, allow_bulk,
member_actions=member_actions, parent=parent,
allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
return wsgi_resource.Resource(controller, FAULT_MAP)
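# Illustrative sketch of how the factory above is typically driven by an API
# extension: the plugin instance, attribute map and resource names below are
# placeholders rather than anything defined in this module.
def _example_create_network_resource(plugin, network_attr_map):
    return create_resource('networks', 'network', plugin, network_attr_map,
                           allow_bulk=True, allow_pagination=True,
                           allow_sorting=True)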
| ykaneko/quantum | quantum/api/v2/base.py | Python | apache-2.0 | 29,494 |
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import operator
import functools
import datetime
import pytz
import re
import gevent
import gevent.lock
import gevent.event
EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.UTC)
def utc_millisec():
return int(time.time()*1000)
def dt_to_millisec(dt):
    if dt.tzinfo is None:
dt = dt.replace(tzinfo=pytz.UTC)
delta = dt - EPOCH
return int(delta.total_seconds()*1000)
def interval_in_sec(val):
if isinstance(val, int):
return val
multipliers = {
'': 1,
'm': 60,
'h': 3600,
'd': 86400
}
mo = re.match("([0-9]+)([dmh]?)", val)
if mo is None:
return None
return int(mo.group(1))*multipliers[mo.group(2)]
def age_out_in_millisec(val):
multipliers = {
'': 1000,
'm': 60000,
'h': 3600000,
'd': 86400000
}
mo = re.match("([0-9]+)([dmh]?)", val)
if mo is None:
return None
return int(mo.group(1))*multipliers[mo.group(2)]
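# Illustrative sketch of the interval grammar accepted above: a bare number or a
# number suffixed with 'm', 'h' or 'd'; interval_in_sec returns seconds while
# age_out_in_millisec returns milliseconds. The helper name is for demonstration only.
def _example_interval_parsing():
    assert interval_in_sec('90') == 90
    assert interval_in_sec('2h') == 7200
    assert age_out_in_millisec('30d') == 30 * 86400000
    return interval_in_sec('1d'), age_out_in_millisec('1d')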
def _merge_atomic_values(op, v1, v2):
if op(v1, v2):
return v2
return v1
def _merge_array(v1, v2):
for e in v2:
if e not in v1:
v1.append(e)
return v1
RESERVED_ATTRIBUTES = {
'sources': _merge_array,
'first_seen': functools.partial(_merge_atomic_values, operator.gt),
'last_seen': functools.partial(_merge_atomic_values, operator.lt),
'type': functools.partial(_merge_atomic_values, operator.eq),
'direction': functools.partial(_merge_atomic_values, operator.eq),
'confidence': functools.partial(_merge_atomic_values, operator.lt),
'country': functools.partial(_merge_atomic_values, operator.eq),
'AS': functools.partial(_merge_atomic_values, operator.eq)
}
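# Illustrative sketch of how the reserved-attribute merge functions above might be
# applied when two descriptions of the same indicator are combined; the helper and
# its arguments are hypothetical and not part of the original module API.
def _example_merge_attributes(existing, incoming):
    merged = dict(existing)
    for key, merge in RESERVED_ATTRIBUTES.items():
        if key in existing and key in incoming:
            merged[key] = merge(existing[key], incoming[key])
        elif key in incoming:
            merged[key] = incoming[key]
    return merged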
class RWLock(object):
def __init__(self):
self.num_readers = 0
self.num_writers = 0
self.m1 = gevent.lock.Semaphore(1)
self.m2 = gevent.lock.Semaphore(1)
self.m3 = gevent.lock.Semaphore(1)
self.w = gevent.lock.Semaphore(1)
self.r = gevent.lock.Semaphore(1)
def lock(self):
self.m2.acquire()
self.num_writers += 1
if self.num_writers == 1:
self.r.acquire()
self.m2.release()
self.w.acquire()
def unlock(self):
self.w.release()
self.m2.acquire()
self.num_writers -= 1
if self.num_writers == 0:
self.r.release()
self.m2.release()
def rlock(self):
self.m3.acquire()
self.r.acquire()
self.m1.acquire()
self.num_readers += 1
if self.num_readers == 1:
self.w.acquire()
self.m1.release()
self.r.release()
self.m3.release()
def runlock(self):
self.m1.acquire()
self.num_readers -= 1
if self.num_readers == 0:
self.w.release()
self.m1.release()
def __enter__(self):
self.rlock()
def __exit__(self, type, value, traceback):
self.runlock()
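# Illustrative sketch of driving the lock above: writers call lock()/unlock()
# explicitly, while readers can rely on the context-manager protocol, which maps
# __enter__/__exit__ onto rlock()/runlock(). The helper is for demonstration only.
def _example_rwlock_usage():
    lock = RWLock()
    shared = []
    lock.lock()
    try:
        shared.append(utc_millisec())
    finally:
        lock.unlock()
    with lock:
        snapshot = list(shared)
    return snapshot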
_AGE_OUT_BASES = ['last_seen', 'first_seen']
def parse_age_out(s, age_out_bases=None, default_base=None):
if s is None:
return None
if age_out_bases is None:
age_out_bases = _AGE_OUT_BASES
if default_base is None:
default_base = 'first_seen'
if default_base not in age_out_bases:
raise ValueError('%s not in %s' % (default_base, age_out_bases))
result = {}
toks = s.split('+', 1)
if len(toks) == 1:
t = toks[0].strip()
if t in age_out_bases:
result['base'] = t
result['offset'] = 0
else:
result['base'] = default_base
result['offset'] = age_out_in_millisec(t)
if result['offset'] is None:
raise ValueError('Invalid age out offset %s' % t)
else:
base = toks[0].strip()
if base not in age_out_bases:
raise ValueError('Invalid age out base %s' % base)
result['base'] = base
result['offset'] = age_out_in_millisec(toks[1].strip())
if result['offset'] is None:
            raise ValueError('Invalid age out offset %s' % toks[1].strip())
return result
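# Illustrative sketch of the age-out grammar parsed above: a bare base name, a bare
# offset, or "<base>+<offset>"; offsets are converted to milliseconds and the base
# defaults to 'first_seen'. The helper name is for demonstration only.
def _example_parse_age_out():
    assert parse_age_out('last_seen') == {'base': 'last_seen', 'offset': 0}
    assert parse_age_out('30d') == {'base': 'first_seen', 'offset': 30 * 86400000}
    return parse_age_out('last_seen+1h')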
class GThrottled(object):
def __init__(self, f, wait):
self._timeout = None
self._previous = 0
self._cancelled = False
self._args = []
self._kwargs = {}
self.f = f
self.wait = wait
def later(self):
self._previous = utc_millisec()
self._timeout = None
self.f(*self._args, **self._kwargs)
def __call__(self, *args, **kwargs):
now = utc_millisec()
remaining = self.wait - (now - self._previous)
if self._cancelled:
return
if remaining <= 0 or remaining > self.wait:
if self._timeout is not None:
self._timeout.join(timeout=5)
self._timeout = None
self._previous = now
self.f(*args, **kwargs)
elif self._timeout is None:
self._args = args
self._kwargs = kwargs
self._timeout = gevent.spawn_later(remaining/1000.0, self.later)
else:
self._args = args
self._kwargs = kwargs
def cancel(self):
self._cancelled = True
if self._timeout:
self._timeout.join(timeout=5)
if self._timeout is not None:
self._timeout.kill()
self._previous = 0
self._timeout = None
self._args = []
self._kwargs = {}
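# Illustrative sketch of the throttling behaviour above: a burst of calls collapses
# into an immediate leading call plus one deferred trailing call per `wait`
# milliseconds. The callable and timings below are placeholders.
def _example_throttled_calls():
    calls = []
    throttled = GThrottled(calls.append, 500)
    for i in range(10):
        throttled(i)
    gevent.sleep(1)  # allow the deferred trailing call to fire
    throttled.cancel()
    return calls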
| PaloAltoNetworks/minemeld-core | minemeld/ft/utils.py | Python | apache-2.0 | 6,146 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
import numpy as np
import soccer_PK.utils
rospy.init_node("reward")
pub = rospy.Publisher("reward", Float32, queue_size=10)
rate = rospy.Rate(3)
rospy.wait_for_service('/gazebo/get_model_state')
soccer_PK.utils.reset_world()
# initial position
ball_prev = 3.25
episode = 1
while not rospy.is_shutdown():
tic = rospy.get_time()
toc = tic
prev_reward = None
while toc - tic < 10:
done = False
# pub.publish(reward)
        ball_locationx, ball_locationy = soccer_PK.utils.get_ball_location()
# Goal
if ball_locationx > 4.5:
rospy.loginfo("GOAL!!!")
# save log file ($HOME/.ros/)
f = open('episode_result.log', 'a')
f.write('episode'+str(episode)+': 4.5\n')
f.close()
# reset
episode += 1
reward = 10
done = True
rospy.set_param("reward_value",[reward, done])
tic = rospy.get_time()
soccer_PK.utils.reset_world()
rospy.sleep(1)
        # if the ball does not reach the goal
reward = (ball_prev - ball_locationx) / ball_prev
if prev_reward != reward:
rospy.set_param("reward_value",[reward, done])
prev_reward = reward
toc = rospy.get_time()
reward = -10
done = True
prev_reward = reward
# pub.publish(reward)
rospy.set_param("reward_value",[reward, done])
    ball_locationx, ball_locationy = soccer_PK.utils.get_ball_location()
f = open('episode_result.log', 'a')
f.write('episode'+str(episode)+': '+str(ball_locationx)+'\n')
f.close()
episode += 1
soccer_PK.utils.reset_world()
rospy.sleep(1)
rate.sleep()
| syoamakase/Re-ROS | re_environments/src/soccer_PK/soccer_PK_reward.py | Python | apache-2.0 | 1,763 |
# Copyright (c) 2014-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhccons_stdout
class NetMHCcons(BaseCommandlinePredictor):
def __init__(
self,
alleles,
program_name="netMHCcons",
process_limit=0,
default_peptide_lengths=[9]):
BaseCommandlinePredictor.__init__(
self,
program_name=program_name,
alleles=alleles,
parse_output_fn=parse_netmhccons_stdout,
# netMHCcons does not have a supported allele flag
supported_alleles_flag=None,
length_flag="-length",
input_file_flag="-f",
allele_flag="-a",
peptide_mode_flags=["-inptype", "1"],
tempdir_flag="-tdir",
process_limit=process_limit,
default_peptide_lengths=default_peptide_lengths,
group_peptides_by_length=True)
| hammerlab/mhctools | mhctools/netmhc_cons.py | Python | apache-2.0 | 1,609 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def do_nothing(apps, schema_editor):
pass
def create_new_behaviors_and_strategies(apps, schema_editor):
CountingBehavior = apps.get_model("core", "CountingBehavior")
RefreshBehavior = apps.get_model("core", "RefreshBehavior")
RulesBehavior = apps.get_model("core", "RulesBehavior")
Provider = apps.get_model("core", "Provider")
AllocationStrategy = apps.get_model("core", "AllocationStrategy")
# Strategy #1 - Count from first to end of month, refresh on the first
counting_strategy_1, _ = CountingBehavior.objects.get_or_create(
name="1 Month - Calendar Window")
refresh_strategy_1, _ = RefreshBehavior.objects.get_or_create(
name="First of the Month")
# Strategy #2 - Count UP for one month, starting at (& refreshing at) the
# anniversary
counting_strategy_2, _ = CountingBehavior.objects.get_or_create(
name="1 Month - Calendar Window - Anniversary")
refresh_strategy_2, _ = RefreshBehavior.objects.get_or_create(
name="Anniversary Date")
# Rules that will be applied by default
rules = []
rule, _ = RulesBehavior.objects.get_or_create(
name="Ignore non-active status")
rules.append(rule)
rule, _ = RulesBehavior.objects.get_or_create(name="Multiply by Size CPU")
rules.append(rule)
for provider in Provider.objects.all():
new_strategy, _ = AllocationStrategy.objects.get_or_create(
provider=provider, counting_behavior=counting_strategy_1)
new_strategy.refresh_behaviors.add(refresh_strategy_1)
for rule in rules:
new_strategy.rules_behaviors.add(rule)
return
class Migration(migrations.Migration):
dependencies = [
('core', '0006_change_fields_as_not_null'),
]
    operations = [
        migrations.CreateModel(
            name='AllocationStrategy',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
            ],
            options={'db_table': 'allocation_strategy'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CountingBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'counting_behavior'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RefreshBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'refresh_behavior'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RulesBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'rules_behavior'},
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='counting_behavior',
            field=models.ForeignKey(to='core.CountingBehavior'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='provider',
            field=models.OneToOneField(to='core.Provider'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='refresh_behaviors',
            field=models.ManyToManyField(to='core.RefreshBehavior', null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='rules_behaviors',
            field=models.ManyToManyField(to='core.RulesBehavior', null=True, blank=True),
            preserve_default=True,
        ),
        migrations.RunPython(create_new_behaviors_and_strategies, do_nothing),
    ]
| CCI-MOC/GUI-Backend | core/migrations/0007_create_allocation_strategy_and_behaviors.py | Python | apache-2.0 | 5,962 |
#
# Copyright 2014, 2015 Red Hat. All Rights Reserved.
#
# Author: Chris Dent <chdent@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SimpleWsgi provides a WSGI callable that can be used in tests to
reflect posted data and otherwise confirm headers and queries.
"""
import json
from six.moves.urllib import parse as urlparse
METHODS = ['GET', 'PUT', 'POST', 'DELETE', 'PATCH']
class SimpleWsgi(object):
"""A simple wsgi application to use in tests."""
def __call__(self, environ, start_response):
request_method = environ['REQUEST_METHOD'].upper()
query_data = urlparse.parse_qs(environ.get('QUERY_STRING', ''))
request_url = environ.get('REQUEST_URI',
environ.get('RAW_URI', 'unknown'))
accept_header = environ.get('HTTP_ACCEPT')
content_type_header = environ.get('CONTENT_TYPE', '')
request_url = self._fully_qualify(environ, request_url)
if accept_header:
response_content_type = accept_header
else:
response_content_type = 'application/json'
headers = [
('X-Gabbi-method', request_method),
('Content-Type', response_content_type),
('X-Gabbi-url', request_url),
]
if request_method not in METHODS:
headers.append(
('Allow', ', '.join(METHODS)))
start_response('405 Method Not Allowed', headers)
return []
if request_method.startswith('P'):
body = environ['wsgi.input'].read()
if body:
if not content_type_header:
start_response('400 Bad request', headers)
return []
if content_type_header == 'application/json':
body_data = json.loads(body.decode('utf-8'))
if query_data:
query_data.update(body_data)
else:
query_data = body_data
headers.append(('Location', request_url))
start_response('200 OK', headers)
query_output = json.dumps(query_data)
return [query_output.encode('utf-8')]
@staticmethod
def _fully_qualify(environ, url):
"""Turn a URL path into a fully qualified URL."""
path, query, fragment = urlparse.urlsplit(url)[2:]
server_name = environ.get('SERVER_NAME')
server_port = environ.get('SERVER_PORT')
server_scheme = environ.get('wsgi.url_scheme')
if server_port not in ['80', '443']:
netloc = '%s:%s' % (server_name, server_port)
else:
netloc = server_name
return urlparse.urlunsplit((server_scheme, netloc, path,
query, fragment))
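# Illustrative sketch: the callable above can be mounted under any WSGI server.
# wsgiref is used here purely for demonstration; gabbi's own test harness drives
# the application differently.
def _example_serve_once(port=8001):
    from wsgiref.simple_server import make_server
    server = make_server('127.0.0.1', port, SimpleWsgi())
    server.handle_request()  # serve a single request, then return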
| FND/gabbi | gabbi/simple_wsgi.py | Python | apache-2.0 | 3,301 |
# snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
"""Container for async results"""
value = None
def set_result(value, result):
result.value = value
class TestSessionV1(TestCase):
version = "1"
bulk = False
basics = [
(OID(".1.3.6.1.4.2.1.1"), 1),
(OID(".1.3.6.1.4.2.1.2"), -1),
(OID(".1.3.6.1.4.2.1.3"), 1),
(OID(".1.3.6.1.4.2.1.4"), "test value"),
]
def setUpSession(self, address):
self.session = Session(
version=self.version,
community="public",
peername=address,
_use_bulk=self.bulk)
self.session.open()
def tearDownSession(self):
self.session.close()
def test_sget(self):
result = self.session.sget([x for x,v in self.basics])
self.assertEquals(result, self.basics)
return self.finishGet()
def test_get_small(self):
result = Result()
self.session.get([x for x,v in self.basics], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishGet()
def test_get_big(self):
oids = []
for i in xrange(1, 100):
oids.append(OID((1,3,6,1,4,2,4,i)))
result = Result()
self.session.get(oids, set_result, result)
self.session.wait()
result = dict(result.value)
for oid in oids:
assert oid in result
assert result[oid] == "data data data data"
return self.finishGet()
def test_walk_tree(self):
result = Result()
self.session.walk([".1.3.6.1.4.2.1"], set_result, result)
self.session.wait()
self.assertEquals(result.value, self.basics)
return self.finishWalk()
def test_walk_leaf(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result)
self.session.wait()
self.assertEquals(result.value, [(oid, 1)])
return self.finishGet()
def test_walk_strict(self):
oid = OID(".1.3.6.1.4.2.1.1")
result = Result()
self.session.walk([oid], set_result, result, strict=True)
self.session.wait()
self.assertEquals(result.value, [])
return self.finishStrictWalk()
def test_sysDescr(self):
result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")])
self.assert_(result)
self.assertIsInstance(result[0][1], str)
self.assert_(len(result[0][1]) > 0)
return self.finishGet()
class TestSessionV2c(TestSessionV1):
version = "2c"
def test_hrSystemDate(self):
# This is a special string that gets formatted using the
# MIB's DISPLAY-HINT value. Also, strip off everything
# other than the date and hour to avoid a race condition.
# And one more quirk, these dates are not zero padded
# so we must format the date manually, whee...
now = time.localtime()
now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3])
result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")])
self.assert_(result)
value = result[0][1].split(':', 1)[0]
self.assertEquals(value, now)
return self.finishGet()
class TestSessionV2cBulk(TestSessionV2c):
bulk = True
class TestTimeoutsV1(unittest.TestCase):
version = "1"
def setUp(self):
self.session = Session(
version=self.version,
community="public",
peername="udp:127.0.0.1:9",
retries=0, timeout=0.1)
self.session.open()
def test_sget(self):
self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"])
def test_get(self):
result = Result()
self.session.get([".1.3.6.1.4.2.1.1"], set_result, result)
self.session.wait()
assert isinstance(result.value, SnmpTimeout)
def tearDown(self):
self.session.close()
class TestTimeoutsV2c(TestTimeoutsV1):
version = "2c"
class TestOID(unittest.TestCase):
def test_oid_name(self):
oid = OID("1.3.6.1.2.1.1.1.0")
self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0"))
self.assertEquals(oid, OID("sysDescr.0"))
| marineam/nagcat | python/snapy/netsnmp/unittests/test_netsnmp.py | Python | apache-2.0 | 4,936 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from io import BytesIO
import mock
import pytest
from twitter.common.contextutil import temporary_dir
from apache.aurora.client import config
from apache.aurora.client.config import get_config as get_aurora_config
from apache.aurora.client.config import PRODUCTION_DEPRECATED_WARNING
from apache.aurora.config import AuroraConfig
from apache.aurora.config.loader import AuroraConfigLoader
from apache.aurora.config.schema.base import (
MB,
Announcer,
HealthCheckConfig,
Job,
Resources,
Task,
UpdateConfig
)
from apache.thermos.config.schema_base import Process
MESOS_CONFIG_BASE = """
HELLO_WORLD = Job(
name = 'hello_world',
role = 'john_doe',
cluster = 'test-cluster',
environment = 'test',
%(announce)s
task = Task(
name = 'main',
processes = [Process(name = 'hello_world', cmdline = '%(cmdline)s')],
resources = Resources(cpu = 0.1, ram = 64 * MB, disk = 64 * MB),
)
)
jobs = [HELLO_WORLD]
"""
MESOS_CONFIG_WITH_INCLUDE = """
%s
include(%s)
"""
MESOS_CONFIG_WITH_ANNOUNCE_1 = MESOS_CONFIG_BASE % {
'cmdline': 'echo {{thermos.ports[http]}}',
'announce': 'announce = Announcer(primary_port="http"),'}
MESOS_CONFIG_WITH_ANNOUNCE_2 = MESOS_CONFIG_BASE % {
'cmdline': 'echo {{thermos.ports[http]}}',
'announce': '''announce = Announcer(
primary_port = "http",
portmap = {"aurora": "http"}),
'''}
MESOS_CONFIG_WITH_INVALID_STATS = MESOS_CONFIG_BASE % {
'cmdline': 'echo {{thermos.ports[http]}}',
'announce': 'announce = Announcer(primary_port="http", stats_port="blah"),'}
MESOS_CONFIG_WITHOUT_ANNOUNCE = MESOS_CONFIG_BASE % {
'cmdline': 'echo {{thermos.ports[http]}}',
'announce': ''
}
def test_get_config_announces():
for good_config in (
MESOS_CONFIG_WITH_ANNOUNCE_1,
MESOS_CONFIG_WITH_ANNOUNCE_2,
MESOS_CONFIG_WITHOUT_ANNOUNCE):
bio = BytesIO(good_config)
get_aurora_config('hello_world', bio).job()
def test_get_config_with_broken_subscopes():
bad_config = MESOS_CONFIG_BASE % {
'cmdline': 'echo {{hello[{{thermos.ports[http]}}]}}',
'announce': '',
}
bio = BytesIO(bad_config)
with pytest.raises(AuroraConfig.InvalidConfig) as cm:
get_aurora_config('hello_world', bio).job()
assert 'Unexpected unbound refs' in str(cm.value.message)
def test_get_config_select():
bio = BytesIO(MESOS_CONFIG_WITHOUT_ANNOUNCE)
get_aurora_config(
'hello_world',
bio,
select_env='test',
select_role='john_doe',
select_cluster='test-cluster').job()
bio.seek(0)
with pytest.raises(ValueError) as cm:
get_aurora_config(
'hello_world',
bio,
select_env='staging42',
select_role='moua',
select_cluster='test-cluster').job()
assert 'test-cluster/john_doe/test/hello_world' in str(cm.value.message)
def test_include():
with temporary_dir() as dir:
hello_mesos_fname = "hello_world.mesos"
hello_mesos_path = os.path.join(dir, hello_mesos_fname)
with open(os.path.join(dir, hello_mesos_path), "wb") as hello_world_mesos:
hello_world_mesos.write(MESOS_CONFIG_WITHOUT_ANNOUNCE)
hello_world_mesos.flush()
hello_include_fname_path = os.path.join(dir, "hello_include_fname.mesos")
with open(hello_include_fname_path, "wb+") as hello_include_fname_fp:
hello_include_fname_fp.write(MESOS_CONFIG_WITH_INCLUDE %
("", """'%s'""" % hello_mesos_fname))
hello_include_fname_fp.flush()
get_aurora_config('hello_world', hello_include_fname_path)
hello_include_fname_fp.seek(0)
with pytest.raises(AuroraConfigLoader.InvalidConfigError):
get_aurora_config('hello_world', hello_include_fname_fp)
def test_dedicated_portmap():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
config._validate_announce_configuration(AuroraConfig(base_job))
config._validate_announce_configuration(
AuroraConfig(base_job(constraints={'dedicated': 'mesos-team'})))
config._validate_announce_configuration(
AuroraConfig(base_job(constraints={'dedicated': 'mesos-team'},
announce=Announcer(portmap={'http': 80}))))
with pytest.raises(ValueError):
config._validate_announce_configuration(
AuroraConfig(base_job(announce=Announcer(portmap={'http': 80}))))
with pytest.raises(ValueError):
config._validate_announce_configuration(
AuroraConfig(base_job(announce=Announcer(portmap={'http': 80}),
constraints={'foo': 'bar'})))
def test_update_config_passes_with_default_values():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_passes_with_max_consecutive_failures_zero():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
health_check_config=HealthCheckConfig(max_consecutive_failures=0),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_fails_with_max_consecutive_failures_negative():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
update_config=UpdateConfig(watch_secs=26),
health_check_config=HealthCheckConfig(max_consecutive_failures=-1),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
with pytest.raises(SystemExit):
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_passes_with_min_consecutive_successes_zero():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
health_check_config=HealthCheckConfig(min_consecutive_successes=0),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_fails_with_min_consecutive_successes_negative():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
health_check_config=HealthCheckConfig(min_consecutive_successes=-1),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
with pytest.raises(SystemExit):
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_passes_with_watch_secs_zero():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
update_config=UpdateConfig(watch_secs=0),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
config._validate_update_config(AuroraConfig(base_job))
def test_update_config_fails_watch_secs_negative():
base_job = Job(
name='hello_world', role='john_doe', cluster='test-cluster',
update_config=UpdateConfig(watch_secs=-1),
task=Task(name='main', processes=[],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)))
with pytest.raises(SystemExit):
config._validate_update_config(AuroraConfig(base_job))
def test_validate_deprecated_config_adds_warning_for_production():
job = Job(name='hello_world', role='john_doe', cluster='test-cluster', environment='test',
task=Task(name='main', processes=[Process(cmdline='echo {{_unbound_}}', name='eco')],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)), production='true')
with mock.patch('apache.aurora.client.config.deprecation_warning') as mock_warning:
config._validate_deprecated_config(AuroraConfig(job))
mock_warning.assert_called_once_with(PRODUCTION_DEPRECATED_WARNING)
def test_validate_deprecated_config_adds_no_warning_when_tier_is_set():
job = Job(name='hello_world', role='john_doe', cluster='test-cluster', environment='test',
task=Task(name='main', processes=[Process(cmdline='echo {{_unbound_}}', name='eco')],
resources=Resources(cpu=0.1, ram=64 * MB, disk=64 * MB)),
production='true', tier='preferred')
with mock.patch('apache.aurora.client.config.deprecation_warning') as mock_warning:
config._validate_deprecated_config(AuroraConfig(job))
assert mock_warning.call_count == 0
| crashlytics/aurora | src/test/python/apache/aurora/client/test_config.py | Python | apache-2.0 | 9,093 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
from .logger import logger, ctx
def find_exec(executable):
exec_exists = os.path.exists(executable)
return executable if exec_exists else shutil.which(executable)
# Decorator running a command and returning stdout
class capture_stdout:
def __init__(self, strip=False):
self.strip = strip
def __call__(self, f):
def strip_it(x):
return x.strip() if self.strip else x
def wrapper(*argv, **kwargs):
# Ensure stdout is captured
kwargs["stdout"] = subprocess.PIPE
return strip_it(f(*argv, **kwargs).stdout)
return wrapper
class Command:
""" A runnable command.
Class inheriting from the Command class must provide the bin
property/attribute.
"""
def run(self, *argv, **kwargs):
assert(hasattr(self, "bin"))
invocation = [find_exec(self.bin)]
invocation.extend(argv)
for key in ["stdout", "stderr"]:
# Preserve caller intention, otherwise silence
if key not in kwargs and ctx.quiet:
kwargs[key] = subprocess.PIPE
# Prefer safe by default
if "check" not in kwargs:
kwargs["check"] = True
logger.debug(f"Executing `{invocation}`")
return subprocess.run(invocation, **kwargs)
def __call__(self, *argv, **kwargs):
self.run(*argv, **kwargs)
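# Illustrative sketch: a hypothetical wrapper around the `git` binary showing how
# Command and capture_stdout compose; the class and method names are examples
# only and do not ship with this module.
class _GitExample(Command):
    def __init__(self):
        self.bin = "git"
    @capture_stdout(strip=True)
    def version(self, **kwargs):
        return self.run("--version", **kwargs)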
| majetideepak/arrow | dev/archery/archery/utils/command.py | Python | apache-2.0 | 2,217 |
'''
Various tools to interface with pyGSTi for running GST experiments.
Created on May 16, 2018
Original Author: Guilhem Ribeill
Copyright 2018 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from .PulsePrimitives import *
from .Cliffords import *
from .BasicSequences.helpers import create_cal_seqs
from .Compiler import compile_to_hardware
from functools import reduce
from itertools import chain
from random import choices
PYGSTI_PRESENT = False
try:
from pygsti.objects.circuit import Circuit
PYGSTI_PRESENT = True
except:
pass
#Default mapping from pyGSTi naming convention to QGL gates.
gst_gate_map = {"Gx": X90,
"Gy": Y90,
"Gi": Id}
def gst_map_1Q(gst_list, qubit, qgl_map=gst_gate_map, append_meas=True):
"""
Helper function that takes an arbitrarily nested list of pygsti gatestrings
and converts them into QGL sequences, keeping the same nesting of lists.
Inputs:
gst_list: GateString to convert, or possibly nested list of pyGSTi GateStrings.
qubit: QGL qubit to apply the sequence to
qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse
append_meas: Append a measurement to each sequence.
Returns:
QGL sequences, preserving the input list nesting (as a generator)
"""
if isinstance(gst_list, Circuit):
gst_list = [gst_list]
for item in gst_list:
if isinstance(item, Circuit):
mapped = map(lambda x: qgl_map[str(x)](qubit), item.tup)
if append_meas:
yield list(chain(mapped, [MEAS(qubit)]))
else:
yield list(mapped)
elif isinstance(item, list):
yield list(gst_map_1Q(item, qubit, qgl_map=qgl_map, append_meas=append_meas))
def gst_map_2Q(gst_list, qubits, qgl_map=None, append_meas=False):
"""
Helper function that takes an arbitrarily nested list of pygsti gatestrings
and converts them into QGL sequences, keeping the same nesting of lists.
Inputs:
gst_list: GateString to convert, or possibly nested list of pyGSTi GateStrings.
qubit: QGL qubit to apply the sequence to
qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse
append_meas: Append a measurement to each sequence.
Returns:
QGL sequences, preserving the input list nesting (as a generator)
"""
    if isinstance(gst_list, Circuit):
gst_list = [gst_list]
for item in gst_list:
        if isinstance(item, Circuit):
mapped = map(lambda x: qgl_map[x], item.tup)
if append_meas:
yield list(chain(mapped, [reduce(lambda x,y: x*y, map(MEAS, qubits))]))
else:
yield list(mapped)
elif isinstance(item, list):
            yield list(gst_map_2Q(item, qubits, qgl_map=qgl_map, append_meas=append_meas))
def create_gst_sequence_from_pygsti(gst_list, qubit, gate_map=gst_gate_map):
""" Returns list of QGL sequences from a pyGSTi GateString list. See gst_map_1Q.
The return value is a list of sequences that can be complied by QGL.
"""
return list(gst_map_1Q(gst_list, qubit, qgl_map=gate_map, append_meas=True))
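# Illustrative sketch (placeholder names): map a list of pyGSTi circuits onto QGL
# pulses for one qubit and compile the resulting sequences to hardware metadata.
def _example_compile_gst(gst_circuits, qubit):
    seqs = create_gst_sequence_from_pygsti(gst_circuits, qubit)
    return compile_to_hardware(seqs, 'GST/GST')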
def pygsti_to_cliffords(gst_seq):
#Map from GST convention to cliffords
cliff_map = {"{}": 0,
"Gi": 1,
"Gx": 2,
"Gy": 5}
#convert to dictionary of lambdas for compatibility with gst_map_1Q
lambda_map = {k: lambda x, v=v: v for k, v in cliff_map.items()}
return list(gst_map_1Q(gst_seq, None, qgl_map=lambda_map,
append_meas=False))
def pauli_rand_clifford_circuit(gst_seq):
def seqreduce(s):
if not s:
return 0
else:
return reduce(lambda x,y: clifford_multiply(x,y), s)
def inv_cliff(c):
return inverse_clifford(clifford_mat(c, 1))
c_ps = [0, 2, 5, 8]
c_seqs = pygsti_to_cliffords(gst_seq)
r_seqs = []
for seq in c_seqs:
if not seq:
r_seqs.append([])
else:
rand_pauli = choices(c_ps, k=len(seq))
inter = 0
bare = 0
rseq = []
for j in range(len(seq)):
inter = clifford_multiply(clifford_multiply(inter, rand_pauli[j]), seq[j])
bare = clifford_multiply(bare, seq[j])
rseq.append(clifford_multiply(rand_pauli[j], seq[j]))
recovery = clifford_multiply(inv_cliff(inter), bare)
rseq[-1] = clifford_multiply(rseq[-1], recovery)
r_seqs.append(rseq)
all_ok = all((r == i for r, i in zip(map(seqreduce, r_seqs), map(seqreduce, c_seqs))))
assert all_ok, "Something went wrong when Pauli-frame randomizing!"
return r_seqs
def SingleQubitCliffordGST(qubit, pygsti_seq, pulse_library="Standard", randomized=False, num_cals=100, diac_compiled=True):
pulse_library = pulse_library.upper()
# QGL pulse libraries handle the Id pulse differently. In the standard
# case, the Id is of finite length equal to all the other one-pulse
# elements of the library. In the Atomic and DiAtomic cases, the ID is
# of length 0 by default. In GST, we need access to both types of the ID
# gate with the first experiment in any GST experiment equal to {} =
# Id(length = 0). All other Id gates in the sequence should be of finite
# length. So we'll modify the Clifford indexing here to make Id(length=0)
# the first element in the library and Id(length=length) the second.
if pulse_library == "STANDARD":
#clifford_pulse = lambda x: clifford_seq(x, qubit)
clifford_pulse = [clifford_seq(i, qubit) for i in range(24)]
clifford_pulse.insert(0, Id(qubit, length=0.0))
elif pulse_library == "DIAC":
#clifford_pulse = lambda x: DiAC(qubit, x, diac_compiled)
        clifford_pulse = [DiAC(qubit, i, diac_compiled) for i in range(24)]
clifford_pulse.insert(1, Id(qubit))
elif pulse_library == "AC":
#clifford_pulse = lambda x: AC(qubit, x)
clifford_pulse = [AC(qubit, i) for i in range(24)]
clifford_pulse.insert(1, Id(qubit))
raise ValueError("Pulse library must be one of 'standard', 'diac', or 'ac'. Got {} instead".format(pulse_library))
if randomized:
seqs = pauli_rand_clifford_circuit(pygsti_seq)
else:
seqs = pygsti_to_cliffords(pygsti_seq)
qgl_seqs = []
for seq in seqs:
qgl_seqs.append([clifford_pulse[c] for c in seq])
qgl_seqs[-1].append(MEAS(qubit))
if num_cals != 0:
qgl_seqs += create_cal_seqs((qubit, ), abs(num_cals))
metafile = compile_to_hardware(qgl_seqs, 'GST/GST')
return metafile
| BBN-Q/QGL | QGL/GSTTools.py | Python | apache-2.0 | 7,229 |
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5)."""
import logging
from miio import AirQualityMonitor, Device, DeviceException
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import NoEntitySpecifiedError, PlatformNotReady
import homeassistant.helpers.config_validation as cv
from .const import (
MODEL_AIRQUALITYMONITOR_B1,
MODEL_AIRQUALITYMONITOR_S1,
MODEL_AIRQUALITYMONITOR_V1,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Air Quality Monitor"
ATTR_CO2E = "carbon_dioxide_equivalent"
ATTR_TVOC = "total_volatile_organic_compounds"
ATTR_TEMP = "temperature"
ATTR_HUM = "humidity"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
PROP_TO_ATTR = {
"carbon_dioxide_equivalent": ATTR_CO2E,
"total_volatile_organic_compounds": ATTR_TVOC,
"temperature": ATTR_TEMP,
"humidity": ATTR_HUM,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the sensor from config."""
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
miio_device = Device(host, token)
try:
device_info = await hass.async_add_executor_job(miio_device.info)
except DeviceException:
raise PlatformNotReady
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.debug(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
device = AirQualityMonitor(host, token, model=model)
if model == MODEL_AIRQUALITYMONITOR_S1:
entity = AirMonitorS1(name, device, unique_id)
elif model == MODEL_AIRQUALITYMONITOR_B1:
entity = AirMonitorB1(name, device, unique_id)
elif model == MODEL_AIRQUALITYMONITOR_V1:
entity = AirMonitorV1(name, device, unique_id)
else:
        raise NoEntitySpecifiedError(f"Unsupported entity {unique_id}")
async_add_entities([entity], update_before_add=True)
class AirMonitorB1(AirQualityEntity):
"""Air Quality class for Xiaomi cgllc.airmonitor.b1 device."""
def __init__(self, name, device, unique_id):
"""Initialize the entity."""
self._name = name
self._device = device
self._unique_id = unique_id
self._icon = "mdi:cloud"
self._unit_of_measurement = "μg/m3"
self._available = None
self._air_quality_index = None
self._carbon_dioxide = None
self._carbon_dioxide_equivalent = None
self._particulate_matter_2_5 = None
self._total_volatile_organic_compounds = None
self._temperature = None
self._humidity = None
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide_equivalent = state.co2e
self._particulate_matter_2_5 = round(state.pm25, 1)
self._total_volatile_organic_compounds = round(state.tvoc, 3)
self._temperature = round(state.temperature, 2)
self._humidity = round(state.humidity, 2)
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def unique_id(self):
"""Return the unique ID."""
return self._unique_id
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return self._air_quality_index
@property
def carbon_dioxide(self):
"""Return the CO2 (carbon dioxide) level."""
return self._carbon_dioxide
@property
def carbon_dioxide_equivalent(self):
"""Return the CO2e (carbon dioxide equivalent) level."""
return self._carbon_dioxide_equivalent
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._particulate_matter_2_5
@property
def total_volatile_organic_compounds(self):
"""Return the total volatile organic compounds."""
return self._total_volatile_organic_compounds
@property
def temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def humidity(self):
"""Return the current humidity."""
return self._humidity
@property
def device_state_attributes(self):
"""Return the state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
class AirMonitorS1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide = state.co2
self._particulate_matter_2_5 = state.pm25
self._total_volatile_organic_compounds = state.tvoc
self._temperature = state.temperature
self._humidity = state.humidity
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class AirMonitorV1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._air_quality_index = state.aqi
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return None
| postlund/home-assistant | homeassistant/components/xiaomi_miio/air_quality.py | Python | apache-2.0 | 7,163 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections from the metastore database.
"""
from typing import List
from airflow.models.connection import Connection
from airflow.secrets import BaseSecretsBackend
from airflow.utils.session import provide_session
class MetastoreBackend(BaseSecretsBackend):
"""
Retrieves Connection object from airflow metastore database.
"""
# pylint: disable=missing-docstring
@provide_session
def get_connections(self, conn_id, session=None) -> List[Connection]:
conn_list = session.query(Connection).filter(Connection.conn_id == conn_id).all()
session.expunge_all()
return conn_list
@provide_session
def get_variable(self, key: str, session=None):
"""
Get Airflow Variable from Metadata DB
:param key: Variable Key
:return: Variable Value
"""
from airflow.models.variable import Variable
var_value = session.query(Variable).filter(Variable.key == key).first()
session.expunge_all()
if var_value:
return var_value.val
return None
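# Illustrative sketch: the backend is normally resolved through Airflow's secrets
# search path, but it can also be queried directly; the connection id below is a
# placeholder.
def _example_fetch_connection(conn_id="my_conn"):
    backend = MetastoreBackend()
    connections = backend.get_connections(conn_id=conn_id)
    return connections[0] if connections else None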
| wooga/airflow | airflow/secrets/metastore.py | Python | apache-2.0 | 1,888 |
from io import StringIO
from pathlib import Path
from cwltool.main import main
from .util import get_data
def test_empty_input(tmp_path: Path) -> None:
"""Affirm that an empty input works."""
empty_json = "{}"
empty_input = StringIO(empty_json)
params = [
"--outdir",
str(tmp_path),
get_data("tests/wf/no-parameters-echo.cwl"),
"-",
]
try:
assert main(params, stdin=empty_input) == 0
except SystemExit as err:
assert err.code == 0
| common-workflow-language/cwltool | tests/test_empty_input.py | Python | apache-2.0 | 513 |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from enum import IntEnum
from xdrlib import Packer, Unpacker
from ..__version__ import __issues__
from ..type_checked import type_checked
__all__ = ["LedgerUpgradeType"]
@type_checked
class LedgerUpgradeType(IntEnum):
"""
XDR Source Code::
enum LedgerUpgradeType
{
LEDGER_UPGRADE_VERSION = 1,
LEDGER_UPGRADE_BASE_FEE = 2,
LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3,
LEDGER_UPGRADE_BASE_RESERVE = 4
};
"""
LEDGER_UPGRADE_VERSION = 1
LEDGER_UPGRADE_BASE_FEE = 2
LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3
LEDGER_UPGRADE_BASE_RESERVE = 4
def pack(self, packer: Packer) -> None:
packer.pack_int(self.value)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "LedgerUpgradeType":
value = unpacker.unpack_int()
return cls(value)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "LedgerUpgradeType":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "LedgerUpgradeType":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
@classmethod
def _missing_(cls, value):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please upgrade the SDK or submit an issue here: {__issues__}."
)
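# --- Editor's usage sketch (not part of the generated file) ---
# Round-trips an enum member through its XDR base64 encoding using only the
# helpers defined above; guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    upgrade = LedgerUpgradeType.LEDGER_UPGRADE_BASE_FEE
    encoded = upgrade.to_xdr()  # 4-byte big-endian int 2 -> "AAAAAg=="
    assert LedgerUpgradeType.from_xdr(encoded) is upgrade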
| StellarCN/py-stellar-base | stellar_sdk/xdr/ledger_upgrade_type.py | Python | apache-2.0 | 1,727 |
# -*- coding: utf-8 -*-
#
# google-cloud-irm documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-irm"
copyright = u"2019, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-irm",
"github_user": "googleapis",
"github_repo": "python-irm",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-irm-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-irm.tex",
u"google-cloud-irm Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "google-cloud-irm", u"google-cloud-irm Documentation", [author], 1,)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-irm",
u"google-cloud-irm Documentation",
author,
"google-cloud-irm",
"google-cloud-irm Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.io/grpc/python/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| googleapis/python-irm | docs/conf.py | Python | apache-2.0 | 11,253 |
import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(driver):
driver.get("http://localhost/litecart/")
driver.implicitly_wait(10)
sticker_number = len(driver.find_elements_by_xpath("//div[contains(@class,'sticker')]"))
product_number = len(driver.find_elements_by_xpath("//*[contains(@href,'products')]"))
assert sticker_number == product_number
| olga121/Selenium_Webdriver | test_sticker.py | Python | apache-2.0 | 496 |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""A library of construction functions for building block structures."""
import functools
import random
import string
from typing import AbstractSet, Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.compiler import tensorflow_computation_factory
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.types import type_transformations
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
Index = Union[str, int]
Path = Union[Index, Tuple[Index, ...]]
def select_output_from_lambda(
comp: building_blocks.Lambda,
paths: Union[Path, List[Path]]) -> building_blocks.Lambda:
"""Constructs a new function with result of selecting `paths` from `comp`.
Args:
comp: Lambda computation with result type `tff.StructType` from which we
wish to select the sub-results at `paths`.
paths: Either a `Path` or list of `Path`s specifying the indices we wish to
select from the result of `comp`. Each path must be a `tuple` of `str` or
`int` indices from which to select an output. If `paths` is a list, the
returned computation will have a `tff.StructType` result holding each of
the specified selections.
Returns:
A version of `comp` with result value the selection from the result of
`comp` specified by `paths`.
"""
comp.check_lambda()
comp.type_signature.result.check_struct()
def _select_path(result, path: Path):
if not isinstance(path, tuple):
path = (path,)
for index in path:
if result.is_struct():
result = result[index]
elif isinstance(index, str):
result = building_blocks.Selection(result, name=index)
elif isinstance(index, int):
result = building_blocks.Selection(result, index=index)
else:
raise TypeError('Invalid selection type: expected `str` or `int`, '
f'found value `{index}` of type `{type(index)}`.')
return result
if isinstance(paths, list):
# Avoid duplicating `comp.result` by binding it to a local.
result_name = next(unique_name_generator(comp))
result_ref = building_blocks.Reference(result_name,
comp.result.type_signature)
elements = [_select_path(result_ref, path) for path in paths]
result = building_blocks.Block([(result_name, comp.result)],
building_blocks.Struct(elements))
else:
result = _select_path(comp.result, paths)
return building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
result)
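# --- Editor's sketch (not part of the original module) ---
# Illustrates `select_output_from_lambda` on a tiny identity lambda whose
# result is a two-element struct; only names already imported above are used.
#
# param_type = computation_types.StructType([tf.int32, tf.float32])
# fn = building_blocks.Lambda(
#     'arg', param_type, building_blocks.Reference('arg', param_type))
# first_only = select_output_from_lambda(fn, 0)
# # first_only.type_signature: (<int32,float32> -> int32)
# swapped = select_output_from_lambda(fn, [1, 0])
# # swapped.type_signature: (<int32,float32> -> <float32,int32>)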
def unique_name_generator(comp: building_blocks.ComputationBuildingBlock,
prefix: str = '_var') -> Iterator[str]:
"""Yields a new unique name that does not exist in `comp`.
Args:
    comp: The computation building block to use as a reference.
prefix: The prefix to use when generating unique names. If `prefix` is
`None` or if `comp` contains any name with this prefix, then a unique
prefix will be generated from random lowercase ascii characters.
"""
if comp is not None:
names = transformation_utils.get_unique_names(comp)
else:
names = set()
while prefix is None or any(n.startswith(prefix) for n in names):
characters = string.ascii_lowercase
prefix = '_{}'.format(''.join(random.choice(characters) for _ in range(3)))
index = 1
while True:
yield '{}{}'.format(prefix, index)
index += 1
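# --- Editor's sketch (not part of the original module) ---
# With no reference computation the default prefix is kept and the generated
# names simply count upward; a computation whose names collide with `_var`
# would instead trigger a random three-letter prefix.
#
# gen = unique_name_generator(None)
# next(gen), next(gen)  # ('_var1', '_var2')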
@functools.lru_cache()
def create_compiled_no_arg_empty_tuple_computation(
) -> building_blocks.CompiledComputation:
"""Returns graph representing a function that returns an empty tuple.
Returns:
An instance of `building_blocks.CompiledComputation`, a noarg function
which returns an empty tuple.
"""
proto, type_signature = tensorflow_computation_factory.create_empty_tuple()
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
@functools.lru_cache()
def create_compiled_empty_tuple() -> building_blocks.Call:
"""Returns called graph representing the empty tuple.
Returns:
An instance of `building_blocks.Call`, calling a noarg function
which returns an empty tuple. This function is an instance of
`building_blocks.CompiledComputation`.
"""
compiled = create_compiled_no_arg_empty_tuple_computation()
return building_blocks.Call(compiled, None)
@functools.lru_cache()
def create_identity(
type_signature: computation_types.Type) -> building_blocks.Lambda:
return building_blocks.Lambda(
'id_arg', type_signature,
building_blocks.Reference('id_arg', type_signature))
@functools.lru_cache()
def create_compiled_identity(
type_signature: computation_types.Type,
name: Optional[str] = None) -> building_blocks.CompiledComputation:
"""Creates CompiledComputation representing identity function.
Args:
type_signature: A `computation_types.Type`.
name: An optional string name to use as the name of the computation.
Returns:
An instance of `building_blocks.CompiledComputation`
representing the identity function taking an argument of type
`type_signature` and returning the same value.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings.
"""
proto, function_type = tensorflow_computation_factory.create_identity(
type_signature)
return building_blocks.CompiledComputation(
proto, name, type_signature=function_type)
class SelectionSpec(object):
"""Data class representing map from input tuple to selection of result.
Attributes:
    tuple_index: The index into the input tuple that is the source of this
      selection. If this `SelectionSpec` appears at index i of a list of
      `SelectionSpec`s, then element `tuple_index` of the input tuple is the
      source for element i of the generated function's result.
    selection_sequence: A list or tuple of indices describing the selections to
      make from `tuple_index`. For example, `[0]` means the output is the 0th
      element of `tuple_index`, while `[0, 0]` means the output is the 0th
      element of the 0th element of `tuple_index`.
"""
def __init__(self, tuple_index: int, selection_sequence: Sequence[int]):
self._tuple_index = tuple_index
self._selection_sequence = selection_sequence
@property
def tuple_index(self):
return self._tuple_index
@property
def selection_sequence(self):
return self._selection_sequence
def __str__(self):
return 'SelectionSequence(tuple_index={},selection_sequence={}'.format(
self._tuple_index, self._selection_sequence)
def __repr__(self):
return str(self)
def _extract_selections(parameter_value, output_spec):
results = []
for selection_spec in output_spec:
result_element = parameter_value[selection_spec.tuple_index]
for selection in selection_spec.selection_sequence:
py_typecheck.check_type(selection, int)
result_element = result_element[selection]
results.append(result_element)
return results
@functools.lru_cache()
def construct_tensorflow_selecting_and_packing_outputs(
parameter_type: computation_types.StructType,
output_structure: structure.Struct) -> building_blocks.CompiledComputation:
"""Constructs TensorFlow selecting and packing elements from its input.
The result of this function can be called on a deduplicated
`building_blocks.Struct` containing called graphs, thus preventing us from
embedding the same TensorFlow computation in the generated graphs, and
reducing the amount of work duplicated in the process of generating
TensorFlow.
  The TensorFlow which results here will be a function which takes an argument
  of type `parameter_type`, returning a result specified by `output_structure`.
  Each `SelectionSpec` nested inside of `output_structure` represents a
  selection from one of the arguments of the tuple `parameter_type`, with the
  empty selection being a possibility. The nested structure of
  `output_structure` determines how these selections are packed back into a
  result, i.e., the result of the function will be a nested tuple with the same
  structure as `output_structure`, where the leaves of this structure (the
  `SelectionSpec`s of `output_structure`) will be selections from the argument.
Args:
parameter_type: A `computation_types.StructType` of the argument on which
the constructed function will be called.
    output_structure: `structure.Struct` with `SelectionSpec` or nested
      `structure.Struct` elements, mapping from elements of the nested
      argument tuple to the desired result of the generated computation.
      `output_structure` must contain all the names desired on the output of the
      computation.
Returns:
A `building_blocks.CompiledComputation` representing the specification
above.
Raises:
    TypeError: If `parameter_type` is not a `computation_types.StructType`, or
      represents a type which cannot act as an input or output to a TensorFlow
      computation in TFF, i.e., does not contain exclusively
`computation_types.SequenceType`, `computation_types.StructType` or
`computation_types.TensorType`.
"""
py_typecheck.check_type(parameter_type, computation_types.StructType)
py_typecheck.check_type(output_structure, structure.Struct)
def _check_output_structure(elem):
if isinstance(elem, structure.Struct):
for x in elem:
_check_output_structure(x)
elif not isinstance(elem, SelectionSpec):
raise TypeError('output_structure can only contain nested anonymous '
'tuples and `SelectionSpecs`; encountered the value {} '
'of type {}.'.format(elem, type(elem)))
_check_output_structure(output_structure)
output_spec = structure.flatten(output_structure)
type_analysis.check_tensorflow_compatible_type(parameter_type)
with tf.Graph().as_default() as graph:
parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
'x', parameter_type, graph)
results = _extract_selections(parameter_value, output_spec)
repacked_result = structure.pack_sequence_as(output_structure, results)
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
repacked_result, graph)
function_type = computation_types.FunctionType(parameter_type, result_type)
serialized_function_type = type_serialization.serialize_type(function_type)
proto = pb.Computation(
type=serialized_function_type,
tensorflow=pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
parameter=parameter_binding,
result=result_binding))
return building_blocks.CompiledComputation(
proto, type_signature=function_type)
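# --- Editor's sketch (not part of the original module) ---
# Shows how `SelectionSpec` and the factory above fit together: from an
# argument of type <int32,<float32,float32>>, keep the whole first element and
# the 0th leaf of the second element, packed as an unnamed pair.
#
# arg_type = computation_types.StructType(
#     [tf.int32, computation_types.StructType([tf.float32, tf.float32])])
# output_structure = structure.Struct([
#     (None, SelectionSpec(tuple_index=0, selection_sequence=[])),
#     (None, SelectionSpec(tuple_index=1, selection_sequence=[0])),
# ])
# packer = construct_tensorflow_selecting_and_packing_outputs(
#     arg_type, output_structure)
# # packer.type_signature: (<int32,<float32,float32>> -> <int32,float32>)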
@functools.lru_cache()
def create_tensorflow_constant(type_spec: computation_types.Type,
scalar_value: Union[int, float, str],
name=None) -> building_blocks.Call:
"""Creates called graph returning constant `scalar_value` of type `type_spec`.
`scalar_value` must be a scalar, and cannot be a float if any of the tensor
leaves of `type_spec` contain an integer data type. `type_spec` must contain
only named tuples and tensor types, but these can be arbitrarily nested.
Args:
type_spec: A `computation_types.Type` whose resulting type tree can only
contain named tuples and tensors.
scalar_value: Scalar value to place in all the tensor leaves of `type_spec`.
name: An optional string name to use as the name of the computation.
Returns:
An instance of `building_blocks.Call`, whose argument is `None`
and whose function is a noarg
`building_blocks.CompiledComputation` which returns the
    specified `scalar_value` packed into a TFF structure of type `type_spec`.
Raises:
TypeError: If the type assumptions above are violated.
"""
proto, function_type = tensorflow_computation_factory.create_constant(
scalar_value, type_spec)
compiled = building_blocks.CompiledComputation(
proto, name, type_signature=function_type)
return building_blocks.Call(compiled, None)
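# --- Editor's sketch (not part of the original module) ---
# A called graph producing the constant 1 in every tensor leaf of a small
# struct type; only names already imported above are used.
#
# ones_type = computation_types.StructType([tf.int32, tf.int32])
# called_ones = create_tensorflow_constant(ones_type, 1)
# # called_ones is a building_blocks.Call with type_signature <int32,int32>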
@functools.lru_cache()
def create_compiled_input_replication(
type_signature: computation_types.Type,
n_replicas: int) -> building_blocks.CompiledComputation:
"""Creates a compiled computation which replicates its argument.
Args:
type_signature: A `computation_types.Type`, the type of the parameter of the
constructed computation.
n_replicas: Integer, the number of times the argument is intended to be
replicated.
Returns:
An instance of `building_blocks.CompiledComputation` encoding
    a function taking a single argument of type `type_signature` and returning
`n_replicas` identical copies of this argument.
Raises:
TypeError: If `type_signature` contains any types which cannot appear in
TensorFlow bindings, or if `n_replicas` is not an integer.
"""
proto, comp_type = tensorflow_computation_factory.create_replicate_input(
type_signature, n_replicas)
return building_blocks.CompiledComputation(proto, type_signature=comp_type)
def create_tensorflow_unary_operator(
operator: Callable[[Any], Any], operand_type: computation_types.Type
) -> building_blocks.CompiledComputation:
"""Creates a TensorFlow computation for the unary `operator`.
For `T` the `operand_type`, the type signature of the constructed operator
will be `(T -> U)`, where `U` is the result of applying `operator` to
a value of type `T`.
Notice that we have quite serious restrictions on `operand_type` here; not
only must it be compatible with stamping into a TensorFlow graph, but
additionally cannot contain a `computation_types.SequenceType`, as checked by
`type_analysis.is_generic_op_compatible_type`.
Args:
operator: Callable taking one argument specifying the operation to encode.
For example, `tf.math.abs`, `tf.math.reduce_sum`, ...
operand_type: The type of argument to the constructed unary operator. Must
be convertible to `computation_types.Type`.
Returns:
Instance of `building_blocks.CompiledComputation` encoding this unary
operator.
Raises:
TypeError: If the type tree of `operand_type` contains any type which is
incompatible with the TFF generic operators, as checked by
`type_analysis.is_generic_op_compatible_type`, or `operator` is not
callable.
"""
proto, type_signature = tensorflow_computation_factory.create_unary_operator(
operator, operand_type)
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
def create_tensorflow_binary_operator(
operator: Callable[[Any, Any], Any],
operand_type: computation_types.Type,
second_operand_type: Optional[computation_types.Type] = None
) -> building_blocks.CompiledComputation:
"""Creates a TensorFlow computation for the binary `operator`.
For `T` the `operand_type`, the type signature of the constructed operator
will be `(<T,T> -> U)`, where `U` is the result of applying `operator` to
a tuple of type `<T,T>`.
Notice that we have quite serious restrictions on `operand_type` here; not
only must it be compatible with stamping into a TensorFlow graph, but
additionally cannot contain a `computation_types.SequenceType`, as checked by
`type_analysis.is_generic_op_compatible_type`.
Notice also that if `operand_type` is a `computation_types.StructType` and
`second_operand_type` is not `None`, `operator` will be applied pointwise.
This places the burden on callers of this function to construct the correct
values to pass into the returned function. For example, to divide `[2, 2]` by
`2`, first the `int 2` must be packed into the data structure `[x, x]`, before
the division operator of the appropriate type is called.
Args:
operator: Callable taking two arguments specifying the operation to encode.
For example, `tf.add`, `tf.multiply`, `tf.divide`, ...
operand_type: The type of argument to the constructed binary operator. Must
be convertible to `computation_types.Type`.
second_operand_type: An optional type for the second argument to the
constructed binary operator. Must be convertible to
`computation_types.Type`. If `None`, uses `operand_type` for the second
argument's type.
Returns:
Instance of `building_blocks.CompiledComputation` encoding
this binary operator.
Raises:
TypeError: If the type tree of `operand_type` contains any type which is
incompatible with the TFF generic operators, as checked by
`type_analysis.is_generic_op_compatible_type`, or `operator` is not
callable.
"""
proto, type_signature = tensorflow_computation_factory.create_binary_operator(
operator, operand_type, second_operand_type)
return building_blocks.CompiledComputation(
proto, type_signature=type_signature)
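# --- Editor's sketch (not part of the original module) ---
# The pointwise behaviour described above: to divide the pair <2,2> by 2, the
# scalar must first be packed into a matching pair, because the constructed
# operator expects two operands of the same struct type.
#
# pair_type = computation_types.StructType([tf.int32, tf.int32])
# divide = create_tensorflow_binary_operator(tf.divide, pair_type)
# # divide.type_signature.parameter: <<int32,int32>,<int32,int32>>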
def create_federated_getitem_call(
arg: building_blocks.ComputationBuildingBlock,
idx: Union[int, slice]) -> building_blocks.Call:
"""Creates computation building block passing getitem to federated value.
Args:
arg: Instance of `building_blocks.ComputationBuildingBlock` of
`computation_types.FederatedType` with member of type
`computation_types.StructType` from which we wish to pick out item `idx`.
idx: Index, instance of `int` or `slice` used to address the
`computation_types.StructType` underlying `arg`.
Returns:
Returns a `building_blocks.Call` with type signature
`computation_types.FederatedType` of same placement as `arg`, the result
of applying or mapping the appropriate `__getitem__` function, as defined
by `idx`.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(idx, (int, slice))
py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
py_typecheck.check_type(arg.type_signature.member,
computation_types.StructType)
getitem_comp = create_federated_getitem_comp(arg, idx)
return create_federated_map_or_apply(getitem_comp, arg)
def create_federated_getattr_call(arg: building_blocks.ComputationBuildingBlock,
name: str) -> building_blocks.Call:
"""Creates computation building block passing getattr to federated value.
Args:
arg: Instance of `building_blocks.ComputationBuildingBlock` of
`computation_types.FederatedType` with member of type
`computation_types.StructType` from which we wish to pick out item `name`.
name: String name to address the `computation_types.StructType` underlying
`arg`.
Returns:
Returns a `building_blocks.Call` with type signature
`computation_types.FederatedType` of same placement as `arg`,
the result of applying or mapping the appropriate `__getattr__` function,
as defined by `name`.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(name, str)
py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
py_typecheck.check_type(arg.type_signature.member,
computation_types.StructType)
getattr_comp = create_federated_getattr_comp(arg, name)
return create_federated_map_or_apply(getattr_comp, arg)
def create_federated_getattr_comp(
comp: building_blocks.ComputationBuildingBlock,
name: str) -> building_blocks.Lambda:
"""Function to construct computation for `federated_apply` of `__getattr__`.
Creates a `building_blocks.ComputationBuildingBlock`
which selects `name` from its argument, of type `comp.type_signature.member`,
an instance of `computation_types.StructType`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` with type
signature `computation_types.FederatedType` whose `member` attribute is of
type `computation_types.StructType`.
name: String name of attribute to grab.
Returns:
Instance of `building_blocks.Lambda` which grabs attribute
according to `name` of its argument.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
py_typecheck.check_type(comp.type_signature.member,
computation_types.StructType)
py_typecheck.check_type(name, str)
element_names = [
x for x, _ in structure.iter_elements(comp.type_signature.member)
]
if name not in element_names:
raise ValueError(
'The federated value has no element of name `{}`. Value: {}'.format(
name, comp.formatted_representation()))
apply_input = building_blocks.Reference('x', comp.type_signature.member)
selected = building_blocks.Selection(apply_input, name=name)
apply_lambda = building_blocks.Lambda('x', apply_input.type_signature,
selected)
return apply_lambda
def create_federated_getitem_comp(
comp: building_blocks.ComputationBuildingBlock,
key: Union[int, slice]) -> building_blocks.Lambda:
"""Function to construct computation for `federated_apply` of `__getitem__`.
Creates a `building_blocks.ComputationBuildingBlock`
which selects `key` from its argument, of type `comp.type_signature.member`,
of type `computation_types.StructType`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` with type
signature `computation_types.FederatedType` whose `member` attribute is of
type `computation_types.StructType`.
    key: Instance of `int` or `slice` used to grab elements from the member of
      `comp`; this provides the implementation of slicing for values whose
      member `type_signature` is a `computation_types.StructType`.
Returns:
Instance of `building_blocks.Lambda` which grabs slice
according to `key` of its argument.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
py_typecheck.check_type(comp.type_signature.member,
computation_types.StructType)
py_typecheck.check_type(key, (int, slice))
apply_input = building_blocks.Reference('x', comp.type_signature.member)
if isinstance(key, int):
selected = building_blocks.Selection(apply_input, index=key)
else:
elems = structure.to_elements(comp.type_signature.member)
index_range = range(*key.indices(len(elems)))
elem_list = []
for k in index_range:
elem_list.append(
(elems[k][0], building_blocks.Selection(apply_input, index=k)))
selected = building_blocks.Struct(elem_list)
apply_lambda = building_blocks.Lambda('x', apply_input.type_signature,
selected)
return apply_lambda
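# --- Editor's sketch (not part of the original module) ---
# For a federated value of type <a=int32,b=float32,c=bool>@SERVER, a slice key
# keeps only the named elements it covers.
#
# fed_type = computation_types.at_server(
#     computation_types.StructType(
#         [('a', tf.int32), ('b', tf.float32), ('c', tf.bool)]))
# ref = building_blocks.Reference('x', fed_type)
# head = create_federated_getitem_comp(ref, slice(0, 2))
# # head.type_signature: (<a=int32,b=float32,c=bool> -> <a=int32,b=float32>)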
def create_computation_appending(
comp1: building_blocks.ComputationBuildingBlock,
comp2: building_blocks.ComputationBuildingBlock):
r"""Returns a block appending `comp2` to `comp1`.
Block
/ \
[comps=Tuple] Tuple
| |
[Comp, Comp] [Sel(0), ..., Sel(0), Sel(1)]
\ \ \
Sel(0) Sel(n) Ref(comps)
\ \
Ref(comps) Ref(comps)
Args:
comp1: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_type.StructType`.
comp2: A `building_blocks.ComputationBuildingBlock` or a named computation
(a tuple pair of name, computation) representing a single element of an
`structure.Struct`.
Returns:
A `building_blocks.Block`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(comp1, building_blocks.ComputationBuildingBlock)
if isinstance(comp2, building_blocks.ComputationBuildingBlock):
name2 = None
elif py_typecheck.is_name_value_pair(
comp2,
name_required=False,
value_type=building_blocks.ComputationBuildingBlock):
name2, comp2 = comp2
else:
raise TypeError('Unexpected tuple element: {}.'.format(comp2))
comps = building_blocks.Struct((comp1, comp2))
ref = building_blocks.Reference('comps', comps.type_signature)
sel_0 = building_blocks.Selection(ref, index=0)
elements = []
named_type_signatures = structure.to_elements(comp1.type_signature)
for index, (name, _) in enumerate(named_type_signatures):
sel = building_blocks.Selection(sel_0, index=index)
elements.append((name, sel))
sel_1 = building_blocks.Selection(ref, index=1)
elements.append((name2, sel_1))
result = building_blocks.Struct(elements)
symbols = ((ref.name, comps),)
return building_blocks.Block(symbols, result)
def _unname_fn_parameter(fn, unnamed_parameter_type):
"""Coerces `fn` to a comp whose parameter type is `unnamed_parameter_type`."""
if structure.name_list(fn.type_signature.parameter):
return building_blocks.Lambda(
'a', unnamed_parameter_type,
building_blocks.Call(
fn,
building_blocks.Reference('a', unnamed_parameter_type),
))
else:
return fn
def create_null_federated_aggregate() -> building_blocks.Call:
unit = building_blocks.Struct([])
unit_type = unit.type_signature
value = create_federated_value(unit, placements.CLIENTS)
zero = unit
accumulate = create_tensorflow_binary_operator(lambda a, b: a, unit_type)
merge = accumulate
report = create_compiled_identity(computation_types.StructType([]))
return create_federated_aggregate(value, zero, accumulate, merge, report)
def create_federated_aggregate(
value: building_blocks.ComputationBuildingBlock,
zero: building_blocks.ComputationBuildingBlock,
accumulate: building_blocks.ComputationBuildingBlock,
merge: building_blocks.ComputationBuildingBlock,
report: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated aggregate.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp, Comp, Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
value.
accumulate: A `building_blocks.ComputationBuildingBlock` to use as the
accumulate function.
merge: A `building_blocks.ComputationBuildingBlock` to use as the merge
function.
report: A `building_blocks.ComputationBuildingBlock` to use as the report
function.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(accumulate, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(merge, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(report, building_blocks.ComputationBuildingBlock)
  # It's okay if the first argument of accumulate is assignable from the zero,
# without being the exact type. This occurs when accumulate has a type like
# (<int32[?], int32> -> int32[?]) but zero is int32[0].
zero_arg_type = accumulate.type_signature.parameter[0]
zero_arg_type.check_assignable_from(zero.type_signature)
result_type = computation_types.FederatedType(report.type_signature.result,
placements.SERVER)
accumulate_parameter_type = computation_types.StructType(
[zero_arg_type, value.type_signature.member])
accumulate = _unname_fn_parameter(accumulate, accumulate_parameter_type)
merge_parameter_type = computation_types.StructType(
[zero_arg_type, zero_arg_type])
merge = _unname_fn_parameter(merge, merge_parameter_type)
intrinsic_type = computation_types.FunctionType((
type_conversions.type_to_non_all_equal(value.type_signature),
zero_arg_type,
accumulate.type_signature,
merge.type_signature,
report.type_signature,
), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_AGGREGATE.uri,
intrinsic_type)
values = building_blocks.Struct((value, zero, accumulate, merge, report))
return building_blocks.Call(intrinsic, values)
def create_federated_apply(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated apply.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(fn.type_signature.result,
placements.SERVER)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, arg.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_APPLY.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_null_federated_broadcast():
return create_federated_broadcast(
create_federated_value(building_blocks.Struct([]), placements.SERVER))
def create_federated_broadcast(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated broadcast.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(
value.type_signature.member, placements.CLIENTS, all_equal=True)
intrinsic_type = computation_types.FunctionType(value.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_BROADCAST.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def create_federated_eval(
fn: building_blocks.ComputationBuildingBlock,
placement: placements.PlacementLiteral,
) -> building_blocks.Call:
r"""Creates a called federated eval.
Call
/ \
Intrinsic Comp
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
placement: A `placements.PlacementLiteral` to use as the placement.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(fn.type_signature, computation_types.FunctionType)
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_EVAL_AT_CLIENTS.uri
all_equal = False
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_EVAL_AT_SERVER.uri
all_equal = True
else:
raise TypeError('Unsupported placement {}.'.format(placement))
result_type = computation_types.FederatedType(
fn.type_signature.result, placement, all_equal=all_equal)
intrinsic_type = computation_types.FunctionType(fn.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, fn)
def create_null_federated_map() -> building_blocks.Call:
return create_federated_map(
create_compiled_identity(computation_types.StructType([])),
create_federated_value(building_blocks.Struct([]), placements.CLIENTS))
def create_federated_map(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
parameter_type = computation_types.FederatedType(arg.type_signature.member,
placements.CLIENTS)
result_type = computation_types.FederatedType(fn.type_signature.result,
placements.CLIENTS)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, parameter_type), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_MAP.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_federated_map_all_equal(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map of equal values.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Note: The `fn` is required to be deterministic and therefore should contain no
`building_blocks.CompiledComputations`.
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
parameter_type = computation_types.FederatedType(
arg.type_signature.member, placements.CLIENTS, all_equal=True)
result_type = computation_types.FederatedType(
fn.type_signature.result, placements.CLIENTS, all_equal=True)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, parameter_type), result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_MAP_ALL_EQUAL.uri, intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_federated_map_or_apply(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated map or apply depending on `arg`s placement.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
if arg.type_signature.placement is placements.CLIENTS:
if arg.type_signature.all_equal:
return create_federated_map_all_equal(fn, arg)
else:
return create_federated_map(fn, arg)
elif arg.type_signature.placement is placements.SERVER:
return create_federated_apply(fn, arg)
else:
raise TypeError('Unsupported placement {}.'.format(
arg.type_signature.placement))
def create_federated_mean(
value: building_blocks.ComputationBuildingBlock,
weight: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated mean.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
weight: A `building_blocks.ComputationBuildingBlock` to use as the weight or
`None`.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
if weight is not None:
py_typecheck.check_type(weight, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
if weight is not None:
intrinsic_type = computation_types.FunctionType(
(type_conversions.type_to_non_all_equal(value.type_signature),
type_conversions.type_to_non_all_equal(weight.type_signature)),
result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_WEIGHTED_MEAN.uri, intrinsic_type)
values = building_blocks.Struct((value, weight))
return building_blocks.Call(intrinsic, values)
else:
intrinsic_type = computation_types.FunctionType(
type_conversions.type_to_non_all_equal(value.type_signature),
result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_MEAN.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def create_null_federated_secure_modular_sum():
return create_federated_secure_modular_sum(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_modular_sum(
value: building_blocks.ComputationBuildingBlock,
modulus: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called secure modular sum.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
modulus: A `building_blocks.ComputationBuildingBlock` to use as the
`modulus` value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(modulus, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
modulus.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_SECURE_MODULAR_SUM.uri, intrinsic_type)
values = building_blocks.Struct([value, modulus])
return building_blocks.Call(intrinsic, values)
def create_null_federated_secure_sum():
return create_federated_secure_sum(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_sum(
value: building_blocks.ComputationBuildingBlock,
max_input: building_blocks.ComputationBuildingBlock
) -> building_blocks.Call:
r"""Creates a called secure sum.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
max_input: A `building_blocks.ComputationBuildingBlock` to use as the
`max_input` value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(max_input, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
max_input.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_SECURE_SUM.uri,
intrinsic_type)
values = building_blocks.Struct([value, max_input])
return building_blocks.Call(intrinsic, values)
def create_null_federated_secure_sum_bitwidth():
return create_federated_secure_sum_bitwidth(
create_federated_value(building_blocks.Struct([]), placements.CLIENTS),
building_blocks.Struct([]))
def create_federated_secure_sum_bitwidth(
value: building_blocks.ComputationBuildingBlock,
bitwidth: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called secure sum using bitwidth.
Call
/ \
Intrinsic [Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
bitwidth: A `building_blocks.ComputationBuildingBlock` to use as the
bitwidth value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(bitwidth, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(value.type_signature),
bitwidth.type_signature,
], result_type)
intrinsic = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_SECURE_SUM_BITWIDTH.uri, intrinsic_type)
values = building_blocks.Struct([value, bitwidth])
return building_blocks.Call(intrinsic, values)
def create_federated_select(
client_keys,
max_key,
server_val,
select_fn,
secure: bool,
) -> building_blocks.Call:
"""Creates a called `federated_select` or `federated_secure_select`."""
py_typecheck.check_type(client_keys, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(max_key, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(server_val, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(select_fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(secure, bool)
single_key_type = max_key.type_signature.member
select_fn_unnamed_param_type = computation_types.StructType([
(None, server_val.type_signature.member),
(None, single_key_type),
])
select_fn = _unname_fn_parameter(select_fn, select_fn_unnamed_param_type)
result_type = computation_types.at_clients(
computation_types.SequenceType(select_fn.type_signature.result))
intrinsic_type = computation_types.FunctionType([
type_conversions.type_to_non_all_equal(
client_keys.type_signature), max_key.type_signature,
server_val.type_signature, select_fn.type_signature
], result_type)
intrinsic_def = intrinsic_defs.FEDERATED_SECURE_SELECT if secure else intrinsic_defs.FEDERATED_SELECT
intrinsic = building_blocks.Intrinsic(intrinsic_def.uri, intrinsic_type)
values = building_blocks.Struct([client_keys, max_key, server_val, select_fn])
return building_blocks.Call(intrinsic, values)
def create_federated_sum(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated sum.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
result_type = computation_types.FederatedType(value.type_signature.member,
placements.SERVER)
intrinsic_type = computation_types.FunctionType(
type_conversions.type_to_non_all_equal(value.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_SUM.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def create_federated_unzip(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Block:
r"""Creates a tuple of called federated maps or applies.
Block
/ \
[value=Comp] Tuple
|
[Call, Call, ...]
/ \ / \
Intrinsic Tuple Intrinsic Tuple
| |
[Lambda(arg), Ref(value)] [Lambda(arg), Ref(value)]
\ \
Sel(0) Sel(1)
\ \
Ref(arg) Ref(arg)
This function returns a tuple of federated values given a `value` with a
federated tuple type signature.
Args:
value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType` containing at least one element.
Returns:
A `building_blocks.Block`.
Raises:
TypeError: If any of the types do not match.
ValueError: If `value` does not contain any elements.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
named_type_signatures = structure.to_elements(value.type_signature.member)
length = len(named_type_signatures)
if length == 0:
raise ValueError('federated_zip is only supported on non-empty tuples.')
value_ref = building_blocks.Reference('value', value.type_signature)
elements = []
fn_ref = building_blocks.Reference('arg', named_type_signatures)
for index, (name, _) in enumerate(named_type_signatures):
sel = building_blocks.Selection(fn_ref, index=index)
fn = building_blocks.Lambda(fn_ref.name, fn_ref.type_signature, sel)
intrinsic = create_federated_map_or_apply(fn, value_ref)
elements.append((name, intrinsic))
result = building_blocks.Struct(elements,
value.type_signature.member.python_container)
symbols = ((value_ref.name, value),)
return building_blocks.Block(symbols, result)
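# --- Editor's sketch (not part of the original module) ---
# Type-level view of `create_federated_unzip`: a server-placed pair becomes a
# pair of server-placed values. `to_type` is used so the member carries a
# Python container, which the function reads back when packing the result.
#
# zipped_type = computation_types.at_server(
#     computation_types.to_type((tf.int32, tf.float32)))
# zipped_ref = building_blocks.Reference('z', zipped_type)
# unzipped = create_federated_unzip(zipped_ref)
# # unzipped.type_signature: <int32@SERVER,float32@SERVER>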
def create_federated_value(
value: building_blocks.ComputationBuildingBlock,
placement: placements.PlacementLiteral) -> building_blocks.Call:
r"""Creates a called federated value.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
placement: A `placements.PlacementLiteral` to use as the placement.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri
else:
raise TypeError('Unsupported placement {}.'.format(placement))
result_type = computation_types.FederatedType(
value.type_signature, placement, all_equal=True)
intrinsic_type = computation_types.FunctionType(value.type_signature,
result_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, value)
def _check_placements(
placement_values: AbstractSet[placements.PlacementLiteral]):
"""Checks if the placements of the values being zipped are compatible."""
if not placement_values:
raise TypeError('federated_zip is only supported on nested structures '
'containing at least one FederatedType, but none were '
'found.')
elif len(placement_values) > 1:
placement_list = ', '.join(placement.name for placement in placement_values)
raise TypeError('federated_zip requires all nested FederatedTypes to '
'have the same placement, but values placed at '
f'{placement_list} were found.')
def create_federated_zip(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called federated zip.
This function accepts a value whose type signature is a (potentially) nested
tuple structure of federated values all with the same placement, and uses
one of the federated_zip intrinsics (at client or at server) to promote the
placement to the highest level. E.g., A value of type '<A@S, <<B@S>, C@S>>'
would be mapped to a value of type '<A, <<B>, C>>@S'.
Args:
value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType` that may contain other nested
`computation_types.StructTypes` bottoming out in at least one element of
type `computation_types.FederatedType`. These federated types must be at
the same placement.
Returns:
A `building_blocks.Call` whose type signature is now a federated
`computation_types.StructType`, placed at the same placement as the
leaves of `value`.
Raises:
TypeError: If any of the types do not match.
ValueError: If `value` does not contain any elements.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(value.type_signature, computation_types.StructType)
all_placements = set()
def _record_placements(type_signature: computation_types.Type):
"""Records the placements in `type_signature` to `all_placements`."""
if type_signature.is_federated():
all_placements.add(type_signature.placement)
elif type_signature.is_struct():
for i in range(len(type_signature)):
_record_placements(type_signature[i])
else:
raise TypeError(
'Expected type signatures consisting of structures of StructType '
'bottoming out in FederatedType, found: \n{}'.format(type_signature))
_record_placements(value.type_signature)
_check_placements(all_placements)
placement = all_placements.pop()
if placement is placements.CLIENTS:
uri = intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS.uri
elif placement is placements.SERVER:
uri = intrinsic_defs.FEDERATED_ZIP_AT_SERVER.uri
else:
raise TypeError('Unsupported placement {}.'.format(placement))
def normalize_all_equals(element_type):
if (element_type.is_federated() and element_type.placement.is_clients() and
element_type.all_equal):
return computation_types.at_clients(element_type.member), True
return element_type, False
normalized_input_type, _ = type_transformations.transform_type_postorder(
value.type_signature, normalize_all_equals)
unplaced_output_type = type_transformations.strip_placement(
value.type_signature)
output_type = computation_types.FederatedType(unplaced_output_type, placement)
intrinsic_type = computation_types.FunctionType(normalized_input_type,
output_type)
intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
return building_blocks.Call(intrinsic, value)
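# Illustrative usage (not part of the original module): assuming `val` is a
# building block of type '<A@SERVER, <B@SERVER>>', the call below returns a
# Call whose type_signature is '<A, <B>>@SERVER', i.e. the placement is
# promoted to the top of the structure as described in the docstring.
#
#   zipped = create_federated_zip(val)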
@functools.lru_cache()
def create_generic_constant(
type_spec: Optional[computation_types.Type],
scalar_value: Union[int,
float]) -> building_blocks.ComputationBuildingBlock:
"""Creates constant for a combination of federated, tuple and tensor types.
Args:
type_spec: A `computation_types.Type` containing only federated, tuple or
tensor types, or `None` to use to construct a generic constant.
scalar_value: The scalar value we wish this constant to have.
Returns:
Instance of `building_blocks.ComputationBuildingBlock`
representing `scalar_value` packed into `type_spec`.
Raises:
TypeError: If types don't match their specification in the args section.
Note that validating the consistency of `type_spec` with `scalar_value` is
not the responsibility of this function.
"""
if type_spec is None:
return create_tensorflow_constant(type_spec, scalar_value)
py_typecheck.check_type(type_spec, computation_types.Type)
inferred_scalar_value_type = type_conversions.infer_type(scalar_value)
if (not inferred_scalar_value_type.is_tensor() or
inferred_scalar_value_type.shape != tf.TensorShape(())):
raise TypeError(
'Must pass a scalar value to `create_generic_constant`; encountered a '
'value {}'.format(scalar_value))
if not type_analysis.contains_only(
type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
raise TypeError
if type_analysis.contains_only(type_spec,
lambda t: t.is_struct() or t.is_tensor()):
return create_tensorflow_constant(type_spec, scalar_value)
elif type_spec.is_federated():
unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value)
if type_spec.placement == placements.CLIENTS:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri, placement_fn_type)
elif type_spec.placement == placements.SERVER:
placement_federated_type = computation_types.FederatedType(
type_spec.member, type_spec.placement, all_equal=True)
placement_fn_type = computation_types.FunctionType(
type_spec.member, placement_federated_type)
placement_function = building_blocks.Intrinsic(
intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri, placement_fn_type)
return building_blocks.Call(placement_function, unplaced_zero)
elif type_spec.is_struct():
elements = []
for k in range(len(type_spec)):
elements.append(create_generic_constant(type_spec[k], scalar_value))
names = [name for name, _ in structure.iter_elements(type_spec)]
packed_elements = building_blocks.Struct(elements)
named_tuple = create_named_tuple(packed_elements, names,
type_spec.python_container)
return named_tuple
else:
raise ValueError(
'The type_spec {} has slipped through all our '
'generic constant cases, and failed to raise.'.format(type_spec))
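# Illustrative usage (not part of the original module; the exact
# `computation_types` constructor conventions are assumed): packing the scalar
# 0 into a two-tensor struct type produces a building block whose leaves are
# all equal to 0.
#
#   struct_type = computation_types.StructType(
#       [computation_types.TensorType(tf.float32),
#        computation_types.TensorType(tf.int32)])
#   zero = create_generic_constant(struct_type, 0)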
def create_sequence_map(
fn: building_blocks.ComputationBuildingBlock,
arg: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence map.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp]
Args:
fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
result_type = computation_types.SequenceType(fn.type_signature.result)
intrinsic_type = computation_types.FunctionType(
(fn.type_signature, arg.type_signature), result_type)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_MAP.uri,
intrinsic_type)
values = building_blocks.Struct((fn, arg))
return building_blocks.Call(intrinsic, values)
def create_sequence_reduce(
value: building_blocks.ComputationBuildingBlock,
zero: building_blocks.ComputationBuildingBlock,
op: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence reduce.
Call
/ \
Intrinsic Tuple
|
[Comp, Comp, Comp]
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
value.
op: A `building_blocks.ComputationBuildingBlock` to use as the op function.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(op, building_blocks.ComputationBuildingBlock)
op_parameter_type = computation_types.StructType(
[zero.type_signature, value.type_signature.element])
op = _unname_fn_parameter(op, op_parameter_type)
intrinsic_type = computation_types.FunctionType((
value.type_signature,
zero.type_signature,
op.type_signature,
), op.type_signature.result)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_REDUCE.uri,
intrinsic_type)
values = building_blocks.Struct((value, zero, op))
return building_blocks.Call(intrinsic, values)
def create_sequence_sum(
value: building_blocks.ComputationBuildingBlock) -> building_blocks.Call:
r"""Creates a called sequence sum.
Call
/ \
Intrinsic Comp
Args:
value: A `building_blocks.ComputationBuildingBlock` to use as the value.
Returns:
A `building_blocks.Call`.
Raises:
TypeError: If any of the types do not match.
"""
py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
intrinsic_type = computation_types.FunctionType(value.type_signature,
value.type_signature.element)
intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_SUM.uri,
intrinsic_type)
return building_blocks.Call(intrinsic, value)
def _create_naming_function(tuple_type_to_name, names_to_add, container_type):
"""Private function to construct lambda naming a given tuple type.
Args:
tuple_type_to_name: Instance of `computation_types.StructType`, the type of
the argument which we wish to name.
names_to_add: Python `list` or `tuple`, the names we wish to give to
`tuple_type_to_name`.
container_type: Optional Python container type to associate with the
resulting tuple.
Returns:
An instance of `building_blocks.Lambda` representing a function
which will take an argument of type `tuple_type_to_name` and return a tuple
with the same elements, but with names in `names_to_add` attached.
Raises:
ValueError: If `tuple_type_to_name` and `names_to_add` have different
lengths.
"""
py_typecheck.check_type(tuple_type_to_name, computation_types.StructType)
if len(names_to_add) != len(tuple_type_to_name):
raise ValueError(
'Number of elements in `names_to_add` must match the number of elements in '
'the named tuple type `tuple_type_to_name`; here, `names_to_add` has '
'{} elements and `tuple_type_to_name` has {}.'.format(
len(names_to_add), len(tuple_type_to_name)))
naming_lambda_arg = building_blocks.Reference('x', tuple_type_to_name)
def _create_struct_element(i):
return (names_to_add[i],
building_blocks.Selection(naming_lambda_arg, index=i))
named_result = building_blocks.Struct(
[_create_struct_element(k) for k in range(len(names_to_add))],
container_type)
return building_blocks.Lambda('x', naming_lambda_arg.type_signature,
named_result)
def create_named_tuple(
comp: building_blocks.ComputationBuildingBlock,
names: Sequence[str],
container_type=None,
) -> building_blocks.ComputationBuildingBlock:
"""Creates a computation that applies `names` to `comp`.
Args:
comp: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
of type `computation_types.StructType`.
names: Python `tuple` or `list` containing instances of type `str` or
`None`, the names to apply to `comp`.
container_type: Optional Python container type to associated with the
resulting tuple.
Returns:
A `building_blocks.ComputationBuildingBlock` representing a
tuple with the elements from `comp` and the names from `names` attached to
the `type_signature` of those elements.
Raises:
TypeError: If the types do not match.
"""
py_typecheck.check_type(names, (list, tuple))
if not all(isinstance(x, (str, type(None))) for x in names):
raise TypeError('Expected `names` containing only instances of `str` or '
'`None`, found {}'.format(names))
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.StructType)
fn = _create_naming_function(comp.type_signature, names, container_type)
return building_blocks.Call(fn, comp)
def create_zip(
comp: building_blocks.ComputationBuildingBlock) -> building_blocks.Block:
r"""Returns a computation which zips `comp`.
Returns the following computation where `x` is `comp` unless `comp` is a
Reference, in which case the Reference is inlined and the Tuple is returned.
Block
/ \
[comp=x] Tuple
|
[Tuple, Tuple]
| |
[Sel(0), Sel(0)] [Sel(1), Sel(1)]
| | | |
Sel(0) Sel(1) Sel(0) Sel(1)
| | | |
Ref(comp) Ref(comp) Ref(comp) Ref(comp)
The returned computation intentionally drops names from the tuples, otherwise
it would be possible for the resulting type signature to contain a Tuple where
two elements have the same name and this is not allowed. It is left up to the
caller to decide if and where to add the names back.
Args:
comp: The computation building block in which to perform the merges.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.StructType)
named_type_signatures = structure.to_elements(comp.type_signature)
_, first_type_signature = named_type_signatures[0]
py_typecheck.check_type(first_type_signature, computation_types.StructType)
length = len(first_type_signature)
for _, type_signature in named_type_signatures:
py_typecheck.check_type(type_signature, computation_types.StructType)
if len(type_signature) != length:
raise TypeError(
'Expected a StructType containing StructTypes with the same '
'length, found: {}'.format(comp.type_signature))
if not comp.is_reference():
name_generator = unique_name_generator(comp)
name = next(name_generator)
ref = building_blocks.Reference(name, comp.type_signature)
else:
ref = comp
rows = []
for column in range(len(first_type_signature)):
columns = []
for row in range(len(named_type_signatures)):
sel_row = building_blocks.Selection(ref, index=row)
sel_column = building_blocks.Selection(sel_row, index=column)
columns.append(sel_column)
tup = building_blocks.Struct(columns)
rows.append(tup)
tup = building_blocks.Struct(rows)
if not comp.is_reference():
return building_blocks.Block(((ref.name, comp),), tup)
else:
return tup
def _check_generic_operator_type(type_spec):
"""Checks that `type_spec` can be the signature of args to a generic op."""
if not type_analysis.contains_only(
type_spec, lambda t: t.is_federated() or t.is_struct() or t.is_tensor()):
raise TypeError(
'Generic operators are only implemented for arguments both containing '
'only federated, tuple and tensor types; you have passed an argument '
'of type {} '.format(type_spec))
if not (type_spec.is_struct() and len(type_spec) == 2):
raise TypeError(
'We are trying to construct a generic operator declaring an argument that '
'is not a two-tuple, the type {}.'.format(type_spec))
if not type_analysis.is_binary_op_with_upcast_compatible_pair(
type_spec[0], type_spec[1]):
raise TypeError(
'The two-tuple you have passed in is incompatible with upcasted '
'binary operators. You have passed the tuple type {}, which fails the '
'check that the two members of the tuple are either the same type, or '
'the second is a scalar with the same dtype as the leaves of the '
'first. See `type_analysis.is_binary_op_with_upcast_compatible_pair` for '
'more details.'.format(type_spec))
@functools.lru_cache()
def create_tensorflow_binary_operator_with_upcast(
operator: Callable[[Any, Any], Any], type_signature: computation_types.Type
) -> building_blocks.CompiledComputation:
"""Creates TF computation upcasting its argument and applying `operator`.
The concept of upcasting is explained further in the docstring for
`apply_binary_operator_with_upcast`.
Args:
operator: Callable defining the operator.
type_signature: Value convertible to `computation_types.StructType`, with
two elements, both of the same type or the second able to be upcast to the
first, as explained in `apply_binary_operator_with_upcast`, and both
containing only tuples and tensors in their type tree.
Returns:
A `building_blocks.CompiledComputation` encapsulating a function which
upcasts the second element of its argument and applies the binary
operator.
"""
py_typecheck.check_callable(operator)
_check_generic_operator_type(type_signature)
type_analysis.check_tensorflow_compatible_type(type_signature)
tf_proto, type_signature = tensorflow_computation_factory.create_binary_operator_with_upcast(
type_signature, operator)
compiled = building_blocks.CompiledComputation(
tf_proto, type_signature=type_signature)
return compiled
def apply_binary_operator_with_upcast(
arg: building_blocks.ComputationBuildingBlock,
operator: Callable[[Any, Any], Any]) -> building_blocks.Call:
"""Constructs result of applying `operator` to `arg` upcasting if appropriate.
Notice `arg` here must be of federated type, with a named tuple member of
length 2, or a named tuple type of length 2. If the named tuple type of `arg`
satisfies certain conditions (that is, there is only a single tensor dtype in
the first element of `arg`, and the second element represents a scalar of
this dtype), the second element will be upcast to match the first. Here this
means it will be pushed into a nested structure matching the structure of the
first element of `arg`. For example, it makes perfect sense to divide a model
of type `<a=float32[784],b=float32[10]>` by a scalar of type `float32`, but
the binary operator constructors we have implemented only take arguments of
type `<T, T>`. Therefore in this case we would broadcast the `float` argument
to the `tuple` type, before constructing a binary operator which divides
pointwise.
Args:
arg: `building_blocks.ComputationBuildingBlock` of federated type whose
`member` attribute is a named tuple type of length 2, or named tuple type
of length 2.
operator: Callable representing binary operator to apply to the 2-tuple
represented by the federated `arg`.
Returns:
Instance of `building_blocks.Call`
encapsulating the result of formally applying `operator` to
`arg[0], `arg[1]`, upcasting `arg[1]` in the condition described above.
Raises:
TypeError: If the types don't match.
"""
py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
py_typecheck.check_callable(operator)
if arg.type_signature.is_federated():
tuple_type = arg.type_signature.member
assert tuple_type.is_struct()
elif arg.type_signature.is_struct():
tuple_type = arg.type_signature
else:
raise TypeError(
'Generic binary operators are only implemented for federated tuple and '
'unplaced tuples; you have passed {}.'.format(arg.type_signature))
tf_representing_op = create_tensorflow_binary_operator_with_upcast(
operator, tuple_type)
if arg.type_signature.is_federated():
called = create_federated_map_or_apply(tf_representing_op, arg)
else:
called = building_blocks.Call(tf_representing_op, arg)
return called
def zip_to_match_type(
*, comp_to_zip: building_blocks.ComputationBuildingBlock,
target_type: computation_types.Type
) -> Optional[building_blocks.ComputationBuildingBlock]:
"""Zips computation argument to match target type.
This function will apply the appropriate federated zips to match `comp_to_zip`
to the requested type `target_type`, subject to a few caveats. We will
traverse `computation_types.StructTypes` to match types, so for example we
would zip `<<T@P, R@P>>` to match `<<T, R>@P>`, but we will not traverse
`computation_types.FunctionTypes`. Therefore we would not apply a zip to the
parameter of `(<<T@P, R@P>> -> Q)` to match (<<T, R>@P> -> Q).
If zipping in this manner cannot match the type of `comp_to_zip` to
`target_type`, `None` will be returned.
Args:
comp_to_zip: Instance of `building_blocks.ComputationBuildingBlock` to
traverse and attempt to zip to match `target_type`.
target_type: The type to target when traversing and zipping `comp_to_zip`.
Returns:
Either a potentially transformed version of `comp_to_zip` or `None`,
depending on whether inserting a zip according to the semantics above
can transform `comp_to_zip` to the requested type.
"""
py_typecheck.check_type(comp_to_zip, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(target_type, computation_types.Type)
def _can_be_zipped_into(source_type: computation_types.Type,
target_type: computation_types.Type) -> bool:
"""Indicates possibility of the transformation `zip_to_match_type`."""
def _struct_can_be_zipped_to_federated(
struct_type: computation_types.StructType,
federated_type: computation_types.FederatedType) -> bool:
placements_encountered = set()
def _remove_placement(
subtype: computation_types.Type
) -> Tuple[computation_types.Type, bool]:
if subtype.is_federated():
placements_encountered.add(subtype.placement)
return subtype.member, True
return subtype, False
unplaced_struct, _ = type_transformations.transform_type_postorder(
struct_type, _remove_placement)
if not (all(
x is federated_type.placement for x in placements_encountered)):
return False
if (federated_type.placement is placements.CLIENTS and
federated_type.all_equal):
# There is no all-equal clients zip; return false.
return False
return federated_type.member.is_assignable_from(unplaced_struct)
def _struct_elem_zippable(source_name, source_element, target_name,
target_element):
return _can_be_zipped_into(
source_element, target_element) and source_name in (target_name, None)
if source_type.is_struct():
if target_type.is_federated():
return _struct_can_be_zipped_to_federated(source_type, target_type)
elif target_type.is_struct():
elements_zippable = []
for (s_name, s_el), (t_name, t_el) in zip(
structure.iter_elements(source_type),
structure.iter_elements(target_type)):
elements_zippable.append(
_struct_elem_zippable(s_name, s_el, t_name, t_el))
return all(elements_zippable)
else:
return target_type.is_assignable_from(source_type)
def _zip_to_match(
*, source: building_blocks.ComputationBuildingBlock,
target_type: computation_types.Type
) -> building_blocks.ComputationBuildingBlock:
if target_type.is_federated() and source.type_signature.is_struct():
return create_federated_zip(source)
elif target_type.is_struct() and source.type_signature.is_struct():
zipped_elements = []
# Bind a reference to the source to prevent duplication in the AST.
ref_name = next(unique_name_generator(source))
ref_to_source = building_blocks.Reference(ref_name, source.type_signature)
for idx, ((_, t_el), (s_name, _)) in enumerate(
zip(
structure.iter_elements(target_type),
structure.iter_elements(source.type_signature))):
s_selection = building_blocks.Selection(ref_to_source, index=idx)
zipped_elements.append(
(s_name, _zip_to_match(source=s_selection, target_type=t_el)))
# Insert binding above the constructed structure.
return building_blocks.Block([(ref_name, source)],
building_blocks.Struct(zipped_elements))
else:
# No zipping to be done here.
return source
if target_type.is_assignable_from(comp_to_zip.type_signature):
# No zipping needs to be done; return directly.
return comp_to_zip
elif _can_be_zipped_into(comp_to_zip.type_signature, target_type):
return _zip_to_match(source=comp_to_zip, target_type=target_type)
else:
# Zipping cannot be performed here.
return None
| tensorflow/federated | tensorflow_federated/python/core/impl/compiler/building_block_factory.py | Python | apache-2.0 | 75,967 |
# -*- coding: utf-8 -*-
# '''
# Author: Eachen Kuang
# Date: 2017.10.20
# Goal: convert a file into a dictionary
# Other:
# '''
import string
from math import log
import numpy as np
def KLD(p,q):
p,q=zip(*filter(lambda (x,y): x!=0 or y!=0, zip(p,q))) # drop pairs where both probabilities are 0
p=p+np.spacing(1)
q=q+np.spacing(1)
print p, q
return sum([_p * log(_p/_q,2) for (_p,_q) in zip(p,q)])
# p=np.ones(5)/5.0
# q=[0,0,0.5,0.2,0.3]
# print KLD(p,q)
def JSD_core(p, q):
p, q = zip(*filter(lambda (x, y): x != 0 or y != 0, zip(p, q))) # drop pairs where both probabilities are 0
M = [0.5 * (_p + _q) for _p, _q in zip(p, q)]
p = p + np.spacing(1)
q = q + np.spacing(1)
M = M + np.spacing(1)
# print p,q,M
return 0.5 * KLD(p, M) + 0.5 * KLD(q, M)
reg = lambda x: [x.count(i) for i in string.lowercase] # frequency distribution
rate = lambda y: [round(i * 1.0 / sum(reg(y)), 4) for i in reg(y)] # probability distribution
s1 = 'KuangYichen1raf'
s2 = 'YichenKuang2'
print JSD_core(rate(s1), rate(s2))
| EachenKuang/PythonRepository | PublicOpinion/JSDistance.py | Python | apache-2.0 | 1,014 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
import hashlib, bencode
import requests
from bs4 import BeautifulSoup
reload(sys)
#print sys.getdefaultencoding()
#sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def parse_tor(file):
bt_path = {}
bt_file = open(file, 'rb')
bt_info = bencode.bdecode(bt_file.read()).get('info')
bt_info_hash_hex = hashlib.sha1(bencode.bencode(bt_info)).hexdigest()
bt_file_size = bt_info.get('length')
bt_file_name = bt_info.get('name')
bt_path[bt_file_name]=bt_file_size
print bt_path
bt_file.close()
# Extract codes in the all-numeric format, e.g. 082516-001
def format_rule1(s):
pattern="\d{6}-\d{3}|\d{6}-\d{2}|\d{6}_\d{3}|\d{6}_\d{2}"
rs=re.findall(pattern, s);
if len(rs)>=1:
return rs[0]
else:
return ""
def format_rule2(s):
rs=''
# Match a leading run of digits to decide whether this is a wm-style code
wm=re.findall(r'^\d+',s)
if len(wm)==1: # it is a wm code
rs=s[0:10]
return rs
# e.g. mide-267FHD_ok_0001.mp4
# find all non-digit runs, e.g. ['mide-', 'FHD_ok_', '.mp']
# the first element is "mide-"
alpha_list=re.findall(r'\D+', s)
if len(alpha_list)>0:
rs+=alpha_list[0]
# find all digit runs, e.g. ['267', '0001', '4']
# the first element is "267"
num_list=re.findall(r'\d+', s)
if len(num_list)>0:
rs+=num_list[0]
if rs=='':
rs=s
rs=rs.replace("-","")
rs=rs.replace(" ","")
rs=rs.replace("_","")
rs=rs.lower()
return rs
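# Illustrative example (not in the original script): format_rule2 keeps the
# first non-digit run and the first digit run, strips '-', '_' and spaces,
# and lower-cases the result.
#
#   format_rule2("mide-267FHD_ok_0001.mp4") # -> 'mide267'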
#for test
def format_torrent(path):
for x in os.listdir(path):
print format_rule2(x)
def walkpath(path):
#files= [(dirpath,filenames) for dirpath,dirname,filenames in os.walk(path)]
files= []
for dirpath,dirname,filenames in os.walk(path.decode('utf-8')):
for filename in filenames:
files.append((filename,dirpath))
return files
def walkfile(path):
files=[x for x in os.listdir(path) if all([os.path.splitext(x)[1]=='.txt', not os.path.isdir(path+"\\"+x)])]
# txtfile=[f for f in files if os.path.splitext(f)[1]=='.txt']
store=[]
for txtfile in files:
for line in open(path+"/"+txtfile):
p,f=os.path.split(line)
store.append((f.replace("\n",""),txtfile))
return store
# Compare the two lists: for each name in `src`, collect the matching paths from `des`.
def comparelist(src,des):
#src: ["file"]
#des:[("file","path")]
from collections import defaultdict
dic=defaultdict(list)
for x in src:
for a,b in des:
#print x,a,b
if format_rule2(x)==format_rule2(a):
dic[x].append(os.path.join(b,a))
return dic
def download(url):
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
print "download from "+url+"\n"
try:
response = requests.get(url=url,headers=headers,timeout=5) # 最基本的GET请求
return response
except Exception,e:
print e
#print "status_code",response.status_code
| dannywxh/mypy | MyPys/common.py | Python | apache-2.0 | 3,687 |
# -*- coding: utf-8 -*-
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from functools import wraps
import math
import random
import time
from gcs_client import errors as errors
def is_complete(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
attributes = getattr(self, '_required_attributes') or []
for attribute in attributes:
if not getattr(self, attribute, None):
raise Exception('%(func_name)s needs %(attr)s to be set.' %
{'func_name': f.__name__, 'attr': attribute})
return f(self, *args, **kwargs)
return wrapped
# Generate default codes to retry from transient HTTP errors
DEFAULT_RETRY_CODES = tuple(
code for code, (cls_name, cls) in errors.http_errors.items()
if cls is errors.Transient)
class RetryParams(object):
"""Truncated Exponential Backoff configuration class.
This configuration is used to provide truncated exponential backoff retries
for communications.
The algorithm requires 4 arguments: max retries, initial delay, max backoff
wait time and backoff factor.
As long as we have pending retries we will wait
(backoff_factor ^ (n-1)) * initial delay
where n is the retry number. This wait is used as long as it does not exceed
the max backoff wait time; once it does, the max backoff wait time is used
instead.
We'll add a random wait time to this delay to help avoid cases where many
clients get synchronized by some situation and all retry at once, sending
requests in synchronized waves.
For example with default values of max_retries=5, initial_delay=1,
max_backoff=32 and backoff_factor=2
- 1st failure: 1 second + random delay [ (2^(1-1)) * 1 ]
- 2nd failure: 2 seconds + random delay [ (2^(2-1)) * 1 ]
- 3rd failure: 4 seconds + random delay [ (2^(3-1)) * 1 ]
- 4th failure: 8 seconds + random delay [ (2^(4-1)) * 1 ]
- 5th failure: 16 seconds + random delay [ (2^(5-1)) * 1 ]
- 6th failure: Fail operation
"""
def __init__(self, max_retries=5, initial_delay=1, max_backoff=32,
backoff_factor=2, randomize=True):
"""Initialize retry configuration.
:param max_retries: Maximum number of retries before giving up.
:type max_retries: int
:param initial_delay: Seconds to wait for the first retry.
:type initial_delay: int or float
:param max_backoff: Maximum number of seconds to wait between retries.
:type max_backoff: int or float
:param backoff_factor: Base to use for the power used to calculate the
delay for the backoff.
:type backoff_factor: int or float
:param randomize: Whether to use randomization of the delay time to
avoid synchronized waves.
:type randomize: bool
"""
self.max_retries = max_retries
self.initial_delay = initial_delay
self.max_backoff = max_backoff
self.backoff_factor = backoff_factor
self.randomize = randomize
@classmethod
def get_default(cls):
"""Return default configuration (simpleton patern)."""
if not hasattr(cls, 'default'):
cls.default = cls()
return cls.default
@classmethod
def set_default(cls, *args, **kwargs):
"""Set default retry configuration.
This method accepts a RetryParams instance or the same arguments as the
__init__ method.
"""
default = cls.get_default()
# For RetryParams argument copy dictionary to default instance so all
# references to the default configuration will have new values.
if len(args) == 1 and isinstance(args[0], RetryParams):
default.__dict__.update(args[0].__dict__)
# For individual arguments call __init__ method on default instance
else:
default.__init__(*args, **kwargs)
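# Illustrative sketch (not part of the original module): ignoring the random
# jitter, the wait before retry n is
# min(max_backoff, backoff_factor ** (n - 1) * initial_delay).
# With the defaults (5, 1, 32, 2) this yields the 1, 2, 4, 8, 16 second
# schedule from the class docstring:
#
#   params = RetryParams()
#   delays = [min(params.max_backoff,
#                 params.backoff_factor ** (n - 1) * params.initial_delay)
#             for n in range(1, params.max_retries + 1)]
#   # delays == [1, 2, 4, 8, 16]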
def retry(param='_retry_params', error_codes=DEFAULT_RETRY_CODES):
"""Truncated Exponential Backoff decorator.
There are multiple ways to use this decorator:
@retry
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg')
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry(RetryParams(5, 1, 32, 2, False))
def my_func(self):
In this case we will use a specific retry configuration and retry on
DEFAULT_RETRY_CODES status codes.
@retry('_retry_cfg', [408, 504])
def my_func(self):
In this case we will try to use `self._retry_cfg` and if that's
not available we'll use default retry configuration and retry only on
timeout status codes.
@retry(RetryParams(5, 1, 32, 2, False), [408, 504])
def my_func(self):
In this case we will use a specific retry configuration and retry only
on timeout status codes.
@retry(error_codes=[408, 504])
def my_func(self):
In this case we will try to use `self._retry_params` and if that's not
available we'll use default retry configuration and retry only on
timeout status codes.
If we pass None as the retry parameter or the value of the attribute on the
instance is None we will not do any retries.
"""
def _retry(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
# If retry configuration is none or a RetryParams instance, use it
if isinstance(param, (type(None), RetryParams)):
retry_params = param
# If it's an attribute name try to retrieve it
else:
retry_params = getattr(self, param, RetryParams.get_default())
delay = 0
random_delay = 0
n = 0 # Retry number
while True:
try:
result = f(self, *args, **kwargs)
return result
except errors.Http as exc:
if (not retry_params or n >= retry_params.max_retries or
exc.code not in error_codes):
raise exc
n += 1
# If we haven't reached maximum backoff yet calculate new delay
if delay < retry_params.max_backoff:
backoff = (math.pow(retry_params.backoff_factor, n-1) *
retry_params.initial_delay)
delay = min(retry_params.max_backoff, backoff)
if retry_params.randomize:
random_delay = random.random() * retry_params.initial_delay
time.sleep(delay + random_delay)
return wrapped
# If no argument has been used
if callable(param):
f, param = param, '_retry_params'
return _retry(f)
return _retry
| Akrog/gcs-client | gcs_client/common.py | Python | apache-2.0 | 7,705 |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import re
import shutil
from os.path import basename, getsize, isdir, isfile, islink, join, realpath
from tempfile import mkdtemp
import click
import requests
import semantic_version
from platformio import __version__, app, exception, fs, util
from platformio.compat import hashlib_encode_data
from platformio.downloader import FileDownloader
from platformio.lockfile import LockFile
from platformio.package.exception import ManifestException
from platformio.package.manifest.parser import ManifestParserFactory
from platformio.unpacker import FileUnpacker
from platformio.vcsclient import VCSClientFactory
# pylint: disable=too-many-arguments, too-many-return-statements
class PackageRepoIterator(object):
def __init__(self, package, repositories):
assert isinstance(repositories, list)
self.package = package
self.repositories = iter(repositories)
def __iter__(self):
return self
def __next__(self):
return self.next() # pylint: disable=not-callable
@staticmethod
@util.memoized(expire="60s")
def load_manifest(url):
r = None
try:
r = requests.get(url, headers={"User-Agent": app.get_user_agent()})
r.raise_for_status()
return r.json()
except: # pylint: disable=bare-except
pass
finally:
if r:
r.close()
return None
def next(self):
repo = next(self.repositories)
manifest = repo if isinstance(repo, dict) else self.load_manifest(repo)
if manifest and self.package in manifest:
return manifest[self.package]
return next(self)
class PkgRepoMixin(object):
PIO_VERSION = semantic_version.Version(util.pepver_to_semver(__version__))
@staticmethod
def is_system_compatible(valid_systems):
if not valid_systems or "*" in valid_systems:
return True
if not isinstance(valid_systems, list):
valid_systems = list([valid_systems])
return util.get_systype() in valid_systems
def max_satisfying_repo_version(self, versions, requirements=None):
item = None
reqspec = None
try:
reqspec = (
semantic_version.SimpleSpec(requirements) if requirements else None
)
except ValueError:
pass
for v in versions:
if not self.is_system_compatible(v.get("system")):
continue
# if "platformio" in v.get("engines", {}):
# if PkgRepoMixin.PIO_VERSION not in requirements.SimpleSpec(
# v['engines']['platformio']):
# continue
specver = semantic_version.Version(v["version"])
if reqspec and specver not in reqspec:
continue
if not item or semantic_version.Version(item["version"]) < specver:
item = v
return item
def get_latest_repo_version( # pylint: disable=unused-argument
self, name, requirements, silent=False
):
version = None
for versions in PackageRepoIterator(name, self.repositories):
pkgdata = self.max_satisfying_repo_version(versions, requirements)
if not pkgdata:
continue
if (
not version
or semantic_version.compare(pkgdata["version"], version) == 1
):
version = pkgdata["version"]
return version
def get_all_repo_versions(self, name):
result = []
for versions in PackageRepoIterator(name, self.repositories):
result.extend([semantic_version.Version(v["version"]) for v in versions])
return [str(v) for v in sorted(set(result))]
class PkgInstallerMixin(object):
SRC_MANIFEST_NAME = ".piopkgmanager.json"
TMP_FOLDER_PREFIX = "_tmp_installing-"
FILE_CACHE_VALID = None # for example, 1 week = "7d"
FILE_CACHE_MAX_SIZE = 1024 * 1024 * 50 # 50 Mb
MEMORY_CACHE = {} # cache for package manifests and read dirs
def cache_get(self, key, default=None):
return self.MEMORY_CACHE.get(key, default)
def cache_set(self, key, value):
self.MEMORY_CACHE[key] = value
def cache_reset(self):
self.MEMORY_CACHE.clear()
def read_dirs(self, src_dir):
cache_key = "read_dirs-%s" % src_dir
result = self.cache_get(cache_key)
if result:
return result
result = [
join(src_dir, name)
for name in sorted(os.listdir(src_dir))
if isdir(join(src_dir, name))
]
self.cache_set(cache_key, result)
return result
def download(self, url, dest_dir, sha1=None):
cache_key_fname = app.ContentCache.key_from_args(url, "fname")
cache_key_data = app.ContentCache.key_from_args(url, "data")
if self.FILE_CACHE_VALID:
with app.ContentCache() as cc:
fname = str(cc.get(cache_key_fname))
cache_path = cc.get_cache_path(cache_key_data)
if fname and isfile(cache_path):
dst_path = join(dest_dir, fname)
shutil.copy(cache_path, dst_path)
click.echo("Using cache: %s" % cache_path)
return dst_path
with_progress = not app.is_disabled_progressbar()
try:
fd = FileDownloader(url, dest_dir)
fd.start(with_progress=with_progress)
except IOError as e:
raise_error = not with_progress
if with_progress:
try:
fd = FileDownloader(url, dest_dir)
fd.start(with_progress=False)
except IOError:
raise_error = True
if raise_error:
click.secho(
"Error: Please read http://bit.ly/package-manager-ioerror",
fg="red",
err=True,
)
raise e
if sha1:
fd.verify(sha1)
dst_path = fd.get_filepath()
if (
not self.FILE_CACHE_VALID
or getsize(dst_path) > PkgInstallerMixin.FILE_CACHE_MAX_SIZE
):
return dst_path
with app.ContentCache() as cc:
cc.set(cache_key_fname, basename(dst_path), self.FILE_CACHE_VALID)
cc.set(cache_key_data, "DUMMY", self.FILE_CACHE_VALID)
shutil.copy(dst_path, cc.get_cache_path(cache_key_data))
return dst_path
@staticmethod
def unpack(source_path, dest_dir):
with_progress = not app.is_disabled_progressbar()
try:
with FileUnpacker(source_path) as fu:
return fu.unpack(dest_dir, with_progress=with_progress)
except IOError as e:
if not with_progress:
raise e
with FileUnpacker(source_path) as fu:
return fu.unpack(dest_dir, with_progress=False)
@staticmethod
def parse_semver_version(value, raise_exception=False):
try:
try:
return semantic_version.Version(value)
except ValueError:
if "." not in str(value) and not str(value).isdigit():
raise ValueError("Invalid SemVer version %s" % value)
return semantic_version.Version.coerce(value)
except ValueError as e:
if raise_exception:
raise e
return None
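# Illustrative examples (not part of the original module): incomplete but
# numeric versions are coerced; anything else yields None (or raises when
# raise_exception=True).
#
#   PkgInstallerMixin.parse_semver_version("1.2") # -> Version('1.2.0')
#   PkgInstallerMixin.parse_semver_version("5") # -> Version('5.0.0')
#   PkgInstallerMixin.parse_semver_version("not-a-version") # -> None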
@staticmethod
def parse_pkg_uri(text, requirements=None): # pylint: disable=too-many-branches
text = str(text)
name, url = None, None
# Parse requirements
req_conditions = [
"@" in text,
not requirements,
":" not in text or text.rfind("/") < text.rfind("@"),
]
if all(req_conditions):
text, requirements = text.rsplit("@", 1)
# Handle PIO Library Registry ID
if text.isdigit():
text = "id=" + text
# Parse custom name
elif "=" in text and not text.startswith("id="):
name, text = text.split("=", 1)
# Parse URL
# if valid URL with scheme vcs+protocol://
if "+" in text and text.find("+") < text.find("://"):
url = text
elif "/" in text or "\\" in text:
git_conditions = [
# Handle GitHub URL (https://github.com/user/package)
text.startswith("https://github.com/")
and not text.endswith((".zip", ".tar.gz")),
(text.split("#", 1)[0] if "#" in text else text).endswith(".git"),
]
hg_conditions = [
# Handle Developer Mbed URL
# (https://developer.mbed.org/users/user/code/package/)
# (https://os.mbed.com/users/user/code/package/)
text.startswith("https://developer.mbed.org"),
text.startswith("https://os.mbed.com"),
]
if any(git_conditions):
url = "git+" + text
elif any(hg_conditions):
url = "hg+" + text
elif "://" not in text and (isfile(text) or isdir(text)):
url = "file://" + text
elif "://" in text:
url = text
# Handle short version of GitHub URL
elif text.count("/") == 1:
url = "git+https://github.com/" + text
# Parse name from URL
if url and not name:
_url = url.split("#", 1)[0] if "#" in url else url
if _url.endswith(("\\", "/")):
_url = _url[:-1]
name = basename(_url)
if "." in name and not name.startswith("."):
name = name.rsplit(".", 1)[0]
return (name or text, requirements, url)
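# Illustrative examples (not part of the original module; package names are
# hypothetical): the return value is a (name, requirements, url) tuple.
#
#   PkgInstallerMixin.parse_pkg_uri("ArduinoJson@5.13.4")
#   # -> ('ArduinoJson', '5.13.4', None)
#   PkgInstallerMixin.parse_pkg_uri("https://github.com/foo/bar.git")
#   # -> ('bar', None, 'git+https://github.com/foo/bar.git')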
@staticmethod
def get_install_dirname(manifest):
name = re.sub(r"[^\da-z\_\-\. ]", "_", manifest["name"], flags=re.I)
if "id" in manifest:
name += "_ID%d" % manifest["id"]
return str(name)
@classmethod
def get_src_manifest_path(cls, pkg_dir):
if not isdir(pkg_dir):
return None
for item in os.listdir(pkg_dir):
if not isdir(join(pkg_dir, item)):
continue
if isfile(join(pkg_dir, item, cls.SRC_MANIFEST_NAME)):
return join(pkg_dir, item, cls.SRC_MANIFEST_NAME)
return None
def get_manifest_path(self, pkg_dir):
if not isdir(pkg_dir):
return None
for name in self.manifest_names:
manifest_path = join(pkg_dir, name)
if isfile(manifest_path):
return manifest_path
return None
def manifest_exists(self, pkg_dir):
return self.get_manifest_path(pkg_dir) or self.get_src_manifest_path(pkg_dir)
def load_manifest(self, pkg_dir): # pylint: disable=too-many-branches
cache_key = "load_manifest-%s" % pkg_dir
result = self.cache_get(cache_key)
if result:
return result
manifest = {}
src_manifest = None
manifest_path = self.get_manifest_path(pkg_dir)
src_manifest_path = self.get_src_manifest_path(pkg_dir)
if src_manifest_path:
src_manifest = fs.load_json(src_manifest_path)
if not manifest_path and not src_manifest_path:
return None
try:
manifest = ManifestParserFactory.new_from_file(manifest_path).as_dict()
except ManifestException:
pass
if src_manifest:
if "version" in src_manifest:
manifest["version"] = src_manifest["version"]
manifest["__src_url"] = src_manifest["url"]
# handle a custom package name
autogen_name = self.parse_pkg_uri(manifest["__src_url"])[0]
if "name" not in manifest or autogen_name != src_manifest["name"]:
manifest["name"] = src_manifest["name"]
if "name" not in manifest:
manifest["name"] = basename(pkg_dir)
if "version" not in manifest:
manifest["version"] = "0.0.0"
manifest["__pkg_dir"] = realpath(pkg_dir)
self.cache_set(cache_key, manifest)
return manifest
def get_installed(self):
items = []
for pkg_dir in self.read_dirs(self.package_dir):
if self.TMP_FOLDER_PREFIX in pkg_dir:
continue
manifest = self.load_manifest(pkg_dir)
if not manifest:
continue
assert "name" in manifest
items.append(manifest)
return items
def get_package(self, name, requirements=None, url=None):
pkg_id = int(name[3:]) if name.startswith("id=") else 0
best = None
for manifest in self.get_installed():
if url:
if manifest.get("__src_url") != url:
continue
elif pkg_id and manifest.get("id") != pkg_id:
continue
elif not pkg_id and manifest["name"] != name:
continue
elif not PkgRepoMixin.is_system_compatible(manifest.get("system")):
continue
# strict version or VCS HASH
if requirements and requirements == manifest["version"]:
return manifest
try:
if requirements and not semantic_version.SimpleSpec(requirements).match(
self.parse_semver_version(manifest["version"], raise_exception=True)
):
continue
if not best or (
self.parse_semver_version(manifest["version"], raise_exception=True)
> self.parse_semver_version(best["version"], raise_exception=True)
):
best = manifest
except ValueError:
pass
return best
def get_package_dir(self, name, requirements=None, url=None):
manifest = self.get_package(name, requirements, url)
return (
manifest.get("__pkg_dir")
if manifest and isdir(manifest.get("__pkg_dir"))
else None
)
def get_package_by_dir(self, pkg_dir):
for manifest in self.get_installed():
if manifest["__pkg_dir"] == realpath(pkg_dir):
return manifest
return None
def find_pkg_root(self, src_dir):
if self.manifest_exists(src_dir):
return src_dir
for root, _, _ in os.walk(src_dir):
if self.manifest_exists(root):
return root
raise exception.MissingPackageManifest(", ".join(self.manifest_names))
def _install_from_piorepo(self, name, requirements):
pkg_dir = None
pkgdata = None
versions = None
last_exc = None
for versions in PackageRepoIterator(name, self.repositories):
pkgdata = self.max_satisfying_repo_version(versions, requirements)
if not pkgdata:
continue
try:
pkg_dir = self._install_from_url(
name, pkgdata["url"], requirements, pkgdata.get("sha1")
)
break
except Exception as e: # pylint: disable=broad-except
last_exc = e
click.secho("Warning! Package Mirror: %s" % e, fg="yellow")
click.secho("Looking for another mirror...", fg="yellow")
if versions is None:
util.internet_on(raise_exception=True)
raise exception.UnknownPackage(
name + (". Error -> %s" % last_exc if last_exc else "")
)
if not pkgdata:
raise exception.UndefinedPackageVersion(
requirements or "latest", util.get_systype()
)
return pkg_dir
def _install_from_url(self, name, url, requirements=None, sha1=None, track=False):
tmp_dir = mkdtemp("-package", self.TMP_FOLDER_PREFIX, self.package_dir)
src_manifest_dir = None
src_manifest = {"name": name, "url": url, "requirements": requirements}
try:
if url.startswith("file://"):
_url = url[7:]
if isfile(_url):
self.unpack(_url, tmp_dir)
else:
fs.rmtree(tmp_dir)
shutil.copytree(_url, tmp_dir, symlinks=True)
elif url.startswith(("http://", "https://")):
dlpath = self.download(url, tmp_dir, sha1)
assert isfile(dlpath)
self.unpack(dlpath, tmp_dir)
os.remove(dlpath)
else:
vcs = VCSClientFactory.newClient(tmp_dir, url)
assert vcs.export()
src_manifest_dir = vcs.storage_dir
src_manifest["version"] = vcs.get_current_revision()
_tmp_dir = tmp_dir
if not src_manifest_dir:
_tmp_dir = self.find_pkg_root(tmp_dir)
src_manifest_dir = join(_tmp_dir, ".pio")
# write source data to a special manifest
if track:
self._update_src_manifest(src_manifest, src_manifest_dir)
return self._install_from_tmp_dir(_tmp_dir, requirements)
finally:
if isdir(tmp_dir):
fs.rmtree(tmp_dir)
return None
def _update_src_manifest(self, data, src_dir):
if not isdir(src_dir):
os.makedirs(src_dir)
src_manifest_path = join(src_dir, self.SRC_MANIFEST_NAME)
_data = {}
if isfile(src_manifest_path):
_data = fs.load_json(src_manifest_path)
_data.update(data)
with open(src_manifest_path, "w") as fp:
json.dump(_data, fp)
def _install_from_tmp_dir( # pylint: disable=too-many-branches
self, tmp_dir, requirements=None
):
tmp_manifest = self.load_manifest(tmp_dir)
assert set(["name", "version"]) <= set(tmp_manifest)
pkg_dirname = self.get_install_dirname(tmp_manifest)
pkg_dir = join(self.package_dir, pkg_dirname)
cur_manifest = self.load_manifest(pkg_dir)
tmp_semver = self.parse_semver_version(tmp_manifest["version"])
cur_semver = None
if cur_manifest:
cur_semver = self.parse_semver_version(cur_manifest["version"])
# package should satisfy requirements
if requirements:
mismatch_error = "Package version %s doesn't satisfy requirements %s" % (
tmp_manifest["version"],
requirements,
)
try:
assert tmp_semver and tmp_semver in semantic_version.SimpleSpec(
requirements
), mismatch_error
except (AssertionError, ValueError):
assert tmp_manifest["version"] == requirements, mismatch_error
# check if package already exists
if cur_manifest:
# 0-overwrite, 1-rename, 2-fix to a version
action = 0
if "__src_url" in cur_manifest:
if cur_manifest["__src_url"] != tmp_manifest.get("__src_url"):
action = 1
elif "__src_url" in tmp_manifest:
action = 2
else:
if tmp_semver and (not cur_semver or tmp_semver > cur_semver):
action = 1
elif tmp_semver and cur_semver and tmp_semver != cur_semver:
action = 2
# rename
if action == 1:
target_dirname = "%s@%s" % (pkg_dirname, cur_manifest["version"])
if "__src_url" in cur_manifest:
target_dirname = "%s@src-%s" % (
pkg_dirname,
hashlib.md5(
hashlib_encode_data(cur_manifest["__src_url"])
).hexdigest(),
)
shutil.move(pkg_dir, join(self.package_dir, target_dirname))
# fix to a version
elif action == 2:
target_dirname = "%s@%s" % (pkg_dirname, tmp_manifest["version"])
if "__src_url" in tmp_manifest:
target_dirname = "%s@src-%s" % (
pkg_dirname,
hashlib.md5(
hashlib_encode_data(tmp_manifest["__src_url"])
).hexdigest(),
)
pkg_dir = join(self.package_dir, target_dirname)
# remove previous/not-satisfied package
if isdir(pkg_dir):
fs.rmtree(pkg_dir)
shutil.copytree(tmp_dir, pkg_dir, symlinks=True)
try:
shutil.rmtree(tmp_dir)
except: # pylint: disable=bare-except
pass
assert isdir(pkg_dir)
self.cache_reset()
return pkg_dir
class BasePkgManager(PkgRepoMixin, PkgInstallerMixin):
# Handle circle dependencies
INSTALL_HISTORY = None
def __init__(self, package_dir, repositories=None):
self.repositories = repositories
self.package_dir = package_dir
if not isdir(self.package_dir):
os.makedirs(self.package_dir)
assert isdir(self.package_dir)
@property
def manifest_names(self):
raise NotImplementedError()
def print_message(self, message, nl=True):
click.echo("%s: %s" % (self.__class__.__name__, message), nl=nl)
def outdated(self, pkg_dir, requirements=None):
"""
Has 3 different results:
`None` - unknown package, VCS is detached to commit
`False` - package is up-to-date
`String` - a found latest version
"""
if not isdir(pkg_dir):
return None
latest = None
manifest = self.load_manifest(pkg_dir)
# skip detached package to a specific version
if "@" in pkg_dir and "__src_url" not in manifest and not requirements:
return None
if "__src_url" in manifest:
try:
vcs = VCSClientFactory.newClient(
pkg_dir, manifest["__src_url"], silent=True
)
except (AttributeError, exception.PlatformioException):
return None
if not vcs.can_be_updated:
return None
latest = vcs.get_latest_revision()
else:
try:
latest = self.get_latest_repo_version(
"id=%d" % manifest["id"] if "id" in manifest else manifest["name"],
requirements,
silent=True,
)
except (exception.PlatformioException, ValueError):
return None
if not latest:
return None
up_to_date = False
try:
assert "__src_url" not in manifest
up_to_date = self.parse_semver_version(
manifest["version"], raise_exception=True
) >= self.parse_semver_version(latest, raise_exception=True)
except (AssertionError, ValueError):
up_to_date = latest == manifest["version"]
return False if up_to_date else latest
def install(
self, name, requirements=None, silent=False, after_update=False, force=False
): # pylint: disable=unused-argument
pkg_dir = None
# interprocess lock
with LockFile(self.package_dir):
self.cache_reset()
name, requirements, url = self.parse_pkg_uri(name, requirements)
package_dir = self.get_package_dir(name, requirements, url)
# avoid circle dependencies
if not self.INSTALL_HISTORY:
self.INSTALL_HISTORY = []
history_key = "%s-%s-%s" % (name, requirements or "", url or "")
if history_key in self.INSTALL_HISTORY:
return package_dir
self.INSTALL_HISTORY.append(history_key)
if package_dir and force:
self.uninstall(package_dir)
package_dir = None
if not package_dir or not silent:
msg = "Installing " + click.style(name, fg="cyan")
if requirements:
msg += " @ " + requirements
self.print_message(msg)
if package_dir:
if not silent:
click.secho(
"{name} @ {version} is already installed".format(
**self.load_manifest(package_dir)
),
fg="yellow",
)
return package_dir
if url:
pkg_dir = self._install_from_url(name, url, requirements, track=True)
else:
pkg_dir = self._install_from_piorepo(name, requirements)
if not pkg_dir or not self.manifest_exists(pkg_dir):
raise exception.PackageInstallError(
name, requirements or "*", util.get_systype()
)
manifest = self.load_manifest(pkg_dir)
assert manifest
click.secho(
"{name} @ {version} has been successfully installed!".format(
**manifest
),
fg="green",
)
return pkg_dir
def uninstall(
self, package, requirements=None, after_update=False
): # pylint: disable=unused-argument
# interprocess lock
with LockFile(self.package_dir):
self.cache_reset()
if isdir(package) and self.get_package_by_dir(package):
pkg_dir = package
else:
name, requirements, url = self.parse_pkg_uri(package, requirements)
pkg_dir = self.get_package_dir(name, requirements, url)
if not pkg_dir:
raise exception.UnknownPackage(
"%s @ %s" % (package, requirements or "*")
)
manifest = self.load_manifest(pkg_dir)
click.echo(
"Uninstalling %s @ %s: \t"
% (click.style(manifest["name"], fg="cyan"), manifest["version"]),
nl=False,
)
if islink(pkg_dir):
os.unlink(pkg_dir)
else:
fs.rmtree(pkg_dir)
self.cache_reset()
# unfix package with the same name
pkg_dir = self.get_package_dir(manifest["name"])
if pkg_dir and "@" in pkg_dir:
shutil.move(
pkg_dir, join(self.package_dir, self.get_install_dirname(manifest))
)
self.cache_reset()
click.echo("[%s]" % click.style("OK", fg="green"))
return True
def update(self, package, requirements=None, only_check=False):
self.cache_reset()
if isdir(package) and self.get_package_by_dir(package):
pkg_dir = package
else:
pkg_dir = self.get_package_dir(*self.parse_pkg_uri(package))
if not pkg_dir:
raise exception.UnknownPackage("%s @ %s" % (package, requirements or "*"))
manifest = self.load_manifest(pkg_dir)
name = manifest["name"]
click.echo(
"{} {:<40} @ {:<15}".format(
"Checking" if only_check else "Updating",
click.style(manifest["name"], fg="cyan"),
manifest["version"],
),
nl=False,
)
if not util.internet_on():
click.echo("[%s]" % (click.style("Off-line", fg="yellow")))
return None
latest = self.outdated(pkg_dir, requirements)
if latest:
click.echo("[%s]" % (click.style(latest, fg="red")))
elif latest is False:
click.echo("[%s]" % (click.style("Up-to-date", fg="green")))
else:
click.echo("[%s]" % (click.style("Detached", fg="yellow")))
if only_check or not latest:
return True
if "__src_url" in manifest:
vcs = VCSClientFactory.newClient(pkg_dir, manifest["__src_url"])
assert vcs.update()
self._update_src_manifest(
dict(version=vcs.get_current_revision()), vcs.storage_dir
)
else:
self.uninstall(pkg_dir, after_update=True)
self.install(name, latest, after_update=True)
return True
class PackageManager(BasePkgManager):
@property
def manifest_names(self):
return ["package.json"]
| platformio/platformio | platformio/managers/package.py | Python | apache-2.0 | 29,447 |
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import subprocess
import tempfile
import os
def write(vegaSpec, outputFile, format=None):
"""Use the 'vega' package in Nodejs to write to SVG or PNG files.
Unlike interactive plotting, this does not require a round trip through a web browser, but it does require a
Nodejs installation on your computer (to evaluate the Javascript).
To install the prerequisites on an Ubuntu system, do
# Cairo dependencies for generating PNG:
sudo apt-get install libcairo2-dev libjpeg-dev libgif-dev libpango1.0-dev build-essential g++
# Nodejs and its package manager, npm:
sudo apt-get install npm
# Get the 'vega' package with npm; user-install, not global (no sudo)!
npm install vega
Parameters:
vegaSpec (string or dict): JSON string or its dict-of-dicts equivalent
outputFile (string or None): output file name or None to return output as a string
format ('svg', 'png', or None): None (default) guesses format from outputFile extension
"""
if format is None and outputFile is None:
format = "svg"
elif format is None and outputFile.endswith(".svg"):
format = "svg"
elif format is None and outputFile.endswith(".png"):
format = "png"
elif format is None:
    raise IOError("Could not infer format from outputFile")
if format == "png":
cmd = "vg2png"
elif format == "svg":
cmd = "vg2svg"
else:
raise IOError("Only 'png' and 'svg' output is supported.")
npmbin = subprocess.Popen(["npm", "bin"], stdout=subprocess.PIPE)
if npmbin.wait() == 0:
npmbin = npmbin.stdout.read().strip()
else:
raise IOError("Nodejs Package Manager 'npm' must be installed to use nodejs.write function.")
tmp = tempfile.NamedTemporaryFile(delete=False)
if isinstance(vegaSpec, dict):
json.dump(vegaSpec, tmp)
else:
tmp.write(vegaSpec)
tmp.close()
if outputFile is None:
vg2x = subprocess.Popen([cmd, tmp.name], stdout=subprocess.PIPE, env=dict(
os.environ, PATH=npmbin + ":" + os.environ.get("PATH", "")))
if vg2x.wait() == 0:
return vg2x.stdout.read()
else:
os.unlink(tmp.name)
raise IOError("Command '{0}' failed; if it's not installed, install it with 'npm install vega'".format(cmd))
else:
vg2x = subprocess.Popen([cmd, tmp.name, outputFile], stdout=subprocess.PIPE,
env=dict(os.environ, PATH=npmbin + ":" + os.environ.get("PATH", "")))
if vg2x.wait() != 0:
os.unlink(tmp.name)
raise IOError("Command '{0}' failed; if it's not installed, install it with 'npm install vega'".format(cmd))
| histogrammar/histogrammar-python | histogrammar/plot/vega/nodejs.py | Python | apache-2.0 | 3,370 |
import subprocess
import os
import argparse
import gzip
parser = argparse.ArgumentParser(description = "Postprocess the VCF file created by the CrossMap to make it valid again.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--vcf", help = "Path to the VCF file.")
args = parser.parse_args()
vcf_file = gzip.open(args.vcf)
contigs = dict()
for line in vcf_file:
line = line.rstrip()
if(line[0] == "#"):
print(line)
if(line[0:8] == "##contig"):
contig = line.split("##contig=<ID=")[1].split(",assembly=")[0]
contigs[contig] = 1
else:
fields = line.split("\t",1)
if(fields[0] in contigs):
print("\t".join(fields)) #Only keep SNPs that fall into contigs mentioned in the header
| kauralasoo/Blood_ATAC | scripts/postprocessCrossmap.py | Python | apache-2.0 | 728 |
import ConfigParser
import json
def get_player_id():
config = ConfigParser.ConfigParser()
config.read('yasp.cfg')
return config.get('yasp', 'player_id')
def get_hero_id():
config = ConfigParser.ConfigParser()
config.read('yasp.cfg')
return config.get('yasp', 'hero_id')
def get_hero_data():
file = open("heroes.json")
data = json.load(file)
file.close()
return dict([hero['id'], hero] for hero in data['heroes'])
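if __name__ == '__main__':
    # Usage sketch (not part of the original module). It assumes a "yasp.cfg"
    # in the working directory shaped roughly like:
    #
    #   [yasp]
    #   player_id = 88367253
    #   hero_id = 74
    #
    # and a "heroes.json" of the form {"heroes": [{"id": ..., ...}, ...]}.
    # The sample values above are hypothetical.
    print(get_player_id())
    print(get_hero_id())
    print(len(get_hero_data()))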
| jcdavis/yasp-stats | yasp_util.py | Python | apache-2.0 | 437 |
import cPickle
def dump_rules(filename, rules):
with open(filename, 'w') as f:
cPickle.dump(rules, f, cPickle.HIGHEST_PROTOCOL)
def load_rules(filename):
with open(filename, 'r') as f:
rules = cPickle.load(f)
return rules
class Rule():
APPLY_ACTION = 0
CLEAR_ACTION = 1
WRITE_ACTION = 2
GOTO_TABLE = 3
INSTRUCTION = [APPLY_ACTION, CLEAR_ACTION, WRITE_ACTION, GOTO_TABLE]
SET_FIELD = 0
GROUP = 1
OUTPUT = 2
ACTION = [SET_FIELD, GROUP, OUTPUT]
EDGE_PORT = 1000
MAX_PRIORITY = 30000
def __init__(self, id, switch_id, prefix, in_port, out_port, priority=MAX_PRIORITY):
        self.id = id
        self.switch_id = switch_id
self.priority = priority
self.prefix = prefix
self.header_space, self.ip, self.match_length = self.to_header_space(prefix)
self.out_port = out_port
self.in_port = in_port
self.is_path_start = False
self.is_path_end = False
self.timeout = 0
self.path_index = None
self.inst_actions = {}
self.table_id = 0
self.group_id = None
self.modify_field = None
self.all_pair_path_index = None
self.is_incremental = False
self.is_sendback = False
self.is_deleted = False
self.is_modified_input = False
self.is_modified_output = False
def to_header_space(self, prefix):
ip = prefix.split('/')[0]
match_length = 32 if len(prefix.split('/')) < 2 else int(prefix.split('/')[1] )
hs = ''.join([bin(int(x)+256)[3:] for x in ip.split('.')])
hs = hs[:match_length]
hs += 'x'*(32-len(hs))
return hs, ip, match_length
def is_match(self, last_rule):
        return self.header_space[:self.header_space.index('x')] == last_rule.get_header_space()[:self.header_space.index('x')]
def serialize(self):
return cPickle.dumps(self)
def get_id(self):
return self.id
def get_switch_id(self):
return self.switch_id
def set_in_port(self, in_port):
self.in_port = in_port
def get_in_port(self):
return self.in_port
def set_out_port(self, out_port):
self.out_port = out_port
def get_out_port(self):
return self.out_port
def set_inst_actions(self, inst, actions):
self.inst_actions[inst] = actions
def set_table_id(self, table_id):
self.table_id = table_id
def get_table_id(self):
return self.table_id
def set_path_index(self, index):
self.path_index = index
def get_path_index(self):
return self.path_index
def set_all_pair_path_index(self, index):
        self.all_pair_path_index = index
def set_priority(self, priority):
self.priority = priority
def get_priority(self):
return self.priority
def set_prefix(self, prefix):
self.prefix = prefix
self.header_space, self.ip, self.match_length = self.to_header_space(prefix)
def get_prefix(self):
return self.prefix
def get_header_space(self):
return self.header_space
def get_all_pair_path_index(self):
        return self.all_pair_path_index
def __str__(self):
string = 'Rule ID: ' + str(self.id) + ', ' + "Switch ID: " + str(self.switch_id) + ', ' + \
'Priority: ' + str(self.priority) + ', ' + 'Prefix: ' + self.prefix + ', ' + 'HeaderSpace: ' + self.header_space + ', ' + \
'Inport: ' + str(self.in_port) + ', ' + 'Outport: ' + str(self.out_port) + ', ' + 'Inst_actions: ' + str(self.inst_actions)
return string
if __name__ == '__main__':
pass
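    # Usage sketch with hypothetical IDs and ports: a /8 rule and a more
    # specific /16 rule inside it; is_match() compares their fixed header bits.
    coarse = Rule(id=1, switch_id=1, prefix='10.0.0.0/8', in_port=1, out_port=2)
    fine = Rule(id=2, switch_id=1, prefix='10.1.0.0/16', in_port=1, out_port=3)
    print(coarse)
    print('header space: %s' % coarse.get_header_space())
    print('coarse matches fine: %s' % coarse.is_match(fine))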
| ymktw/SDNProbe | pymodule/rule.py | Python | apache-2.0 | 3,687 |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import re
from distutils.util import strtobool
import kombu
import gevent
import gevent.monkey
gevent.monkey.patch_all()
import time
import signal
from gevent.queue import Queue
try:
from gevent.lock import Semaphore
except ImportError:
# older versions of gevent
from gevent.coros import Semaphore
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common import vnc_greenlets
import ssl
__all__ = "VncKombuClient"
class VncKombuClientBase(object):
def _update_sandesh_status(self, status, msg=''):
ConnectionState.update(conn_type=ConnType.DATABASE,
name='RabbitMQ', status=status, message=msg,
server_addrs=self._server_addrs)
# end _update_sandesh_status
def publish(self, message):
self._publish_queue.put(message)
# end publish
def sigterm_handler(self):
self.shutdown()
exit()
def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger,
**kwargs):
self._rabbit_ip = rabbit_ip
self._rabbit_port = rabbit_port
self._rabbit_user = rabbit_user
self._rabbit_password = rabbit_password
self._rabbit_vhost = rabbit_vhost
self._subscribe_cb = subscribe_cb
self._logger = logger
self._publish_queue = Queue()
self._conn_lock = Semaphore()
self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
durable=False)
self._ssl_params = self._fetch_ssl_params(**kwargs)
# Register a handler for SIGTERM so that we can release the lock
# Without it, it can take several minutes before new master is elected
# If any app using this wants to register their own sigterm handler,
# then we will have to modify this function to perhaps take an argument
gevent.signal(signal.SIGTERM, self.sigterm_handler)
def num_pending_messages(self):
return self._publish_queue.qsize()
# end num_pending_messages
def prepare_to_consume(self):
# override this method
return
def _reconnect(self, delete_old_q=False):
if self._conn_lock.locked():
# either connection-monitor or publisher should have taken
# the lock. The one who acquired the lock would re-establish
# the connection and releases the lock, so the other one can
# just wait on the lock, till it gets released
self._conn_lock.wait()
if self._conn_state == ConnectionStatus.UP:
return
with self._conn_lock:
msg = "RabbitMQ connection down"
self._logger(msg, level=SandeshLevel.SYS_NOTICE)
self._update_sandesh_status(ConnectionStatus.DOWN)
self._conn_state = ConnectionStatus.DOWN
self._conn.close()
self._conn.ensure_connection()
self._conn.connect()
self._update_sandesh_status(ConnectionStatus.UP)
self._conn_state = ConnectionStatus.UP
msg = 'RabbitMQ connection ESTABLISHED %s' % repr(self._conn)
self._logger(msg, level=SandeshLevel.SYS_NOTICE)
self._channel = self._conn.channel()
if delete_old_q:
# delete the old queue in first-connect context
# as db-resync would have caught up with history.
try:
bound_q = self._update_queue_obj(self._channel)
bound_q.delete()
except Exception as e:
                msg = 'Unable to delete the old amqp queue: %s' %(str(e))
self._logger(msg, level=SandeshLevel.SYS_ERR)
self._consumer = kombu.Consumer(self._channel,
queues=self._update_queue_obj,
callbacks=[self._subscribe])
self._producer = kombu.Producer(self._channel, exchange=self.obj_upd_exchange)
# end _reconnect
def _delete_queue(self):
# delete the queue
try:
bound_q = self._update_queue_obj(self._channel)
if bound_q:
bound_q.delete()
except Exception as e:
            msg = 'Unable to delete the old amqp queue: %s' %(str(e))
self._logger(msg, level=SandeshLevel.SYS_ERR)
#end _delete_queue
def _connection_watch(self, connected):
if not connected:
self._reconnect()
self.prepare_to_consume()
while True:
try:
self._consumer.consume()
self._conn.drain_events()
except self._conn.connection_errors + self._conn.channel_errors as e:
self._reconnect()
# end _connection_watch
def _connection_watch_forever(self):
connected = True
while True:
try:
self._connection_watch(connected)
except Exception as e:
msg = 'Error in rabbitmq drainer greenlet: %s' %(str(e))
self._logger(msg, level=SandeshLevel.SYS_ERR)
# avoid 'reconnect()' here as that itself might cause exception
connected = False
# end _connection_watch_forever
def _publisher(self):
message = None
connected = True
while True:
try:
if not connected:
self._reconnect()
connected = True
if not message:
# earlier was sent fine, dequeue one more
message = self._publish_queue.get()
while True:
try:
self._producer.publish(message)
message = None
break
except self._conn.connection_errors + self._conn.channel_errors as e:
self._reconnect()
except Exception as e:
log_str = "Error in rabbitmq publisher greenlet: %s" %(str(e))
self._logger(log_str, level=SandeshLevel.SYS_ERR)
# avoid 'reconnect()' here as that itself might cause exception
connected = False
# end _publisher
def _subscribe(self, body, message):
try:
self._subscribe_cb(body)
finally:
message.ack()
def _start(self, client_name):
self._reconnect(delete_old_q=True)
self._publisher_greenlet = vnc_greenlets.VncGreenlet(
'Kombu ' + client_name,
self._publisher)
self._connection_monitor_greenlet = vnc_greenlets.VncGreenlet(
'Kombu ' + client_name + '_ConnMon',
self._connection_watch_forever)
def greenlets(self):
return [self._publisher_greenlet, self._connection_monitor_greenlet]
def shutdown(self):
self._publisher_greenlet.kill()
self._connection_monitor_greenlet.kill()
self._producer.close()
self._consumer.close()
self._delete_queue()
self._conn.close()
def reset(self):
self._publish_queue = Queue()
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23
}
@classmethod
def validate_ssl_version(cls, version):
version = version.lower()
try:
return cls._SSL_PROTOCOLS[version]
except KeyError:
raise RuntimeError('Invalid SSL version: {}'.format(version))
def _fetch_ssl_params(self, **kwargs):
if strtobool(str(kwargs.get('rabbit_use_ssl', False))):
ssl_params = dict()
ssl_version = kwargs.get('kombu_ssl_version', '')
keyfile = kwargs.get('kombu_ssl_keyfile', '')
certfile = kwargs.get('kombu_ssl_certfile', '')
ca_certs = kwargs.get('kombu_ssl_ca_certs', '')
if ssl_version:
ssl_params.update({'ssl_version':
self.validate_ssl_version(ssl_version)})
if keyfile:
ssl_params.update({'keyfile': keyfile})
if certfile:
ssl_params.update({'certfile': certfile})
if ca_certs:
ssl_params.update({'ca_certs': ca_certs})
ssl_params.update({'cert_reqs': ssl.CERT_REQUIRED})
return ssl_params or True
return False
class VncKombuClientV1(VncKombuClientBase):
def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger,
**kwargs):
super(VncKombuClientV1, self).__init__(rabbit_ip, rabbit_port,
rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode,
q_name, subscribe_cb, logger,
**kwargs)
self._server_addrs = ["%s:%s" % (self._rabbit_ip, self._rabbit_port)]
self._conn = kombu.Connection(hostname=self._rabbit_ip,
port=self._rabbit_port,
userid=self._rabbit_user,
password=self._rabbit_password,
virtual_host=self._rabbit_vhost)
self._update_queue_obj = kombu.Queue(q_name, self.obj_upd_exchange, durable=False)
self._start(q_name)
# end __init__
class VncKombuClientV2(VncKombuClientBase):
def _parse_rabbit_hosts(self, rabbit_hosts):
server_list = rabbit_hosts.split(",")
default_dict = {'user': self._rabbit_user,
'password': self._rabbit_password,
'port': self._rabbit_port}
ret = []
for s in server_list:
match = re.match("(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*(?P<host>.*?)(?::(?P<port>\d+))*$", s)
if match:
mdict = match.groupdict().copy()
for key in ['user', 'password', 'port']:
if not mdict[key]:
mdict[key] = default_dict[key]
ret.append(mdict)
return ret
def __init__(self, rabbit_hosts, rabbit_port, rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger,
**kwargs):
super(VncKombuClientV2, self).__init__(rabbit_hosts, rabbit_port,
rabbit_user, rabbit_password,
rabbit_vhost, rabbit_ha_mode,
q_name, subscribe_cb, logger,
**kwargs)
self._server_addrs = rabbit_hosts.split(',')
_hosts = self._parse_rabbit_hosts(rabbit_hosts)
self._urls = []
for h in _hosts:
h['vhost'] = "" if not rabbit_vhost else rabbit_vhost
_url = "pyamqp://%(user)s:%(password)s@%(host)s:%(port)s/%(vhost)s" % h
self._urls.append(_url)
msg = "Initializing RabbitMQ connection, urls %s" % self._urls
self._logger(msg, level=SandeshLevel.SYS_NOTICE)
self._update_sandesh_status(ConnectionStatus.INIT)
self._conn_state = ConnectionStatus.INIT
self._conn = kombu.Connection(self._urls, ssl=self._ssl_params)
queue_args = {"x-ha-policy": "all"} if rabbit_ha_mode else None
self._update_queue_obj = kombu.Queue(q_name, self.obj_upd_exchange,
durable=False,
queue_arguments=queue_args)
self._start(q_name)
# end __init__
from distutils.version import LooseVersion
if LooseVersion(kombu.__version__) >= LooseVersion("2.5.0"):
VncKombuClient = VncKombuClientV2
else:
VncKombuClient = VncKombuClientV1
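if __name__ == '__main__':
    # Illustration only (no RabbitMQ broker needed): the comma-separated host
    # syntax accepted by VncKombuClientV2, parsed with the same regular
    # expression used in _parse_rabbit_hosts(). Every entry may carry its own
    # credentials and port; missing pieces fall back to constructor defaults.
    # The addresses and credentials below are hypothetical.
    for entry in "10.0.0.1,guest:secret@10.0.0.2:5673".split(","):
        match = re.match(
            "(?:(?P<user>.*?)(?::(?P<password>.*?))*@)*(?P<host>.*?)(?::(?P<port>\d+))*$",
            entry)
        print(match.groupdict())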
| codilime/contrail-controller | src/config/common/vnc_kombu.py | Python | apache-2.0 | 12,514 |
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import hashlib
import os
import shutil
import sys
import tempfile
import textwrap
import file_locks
from tracing import Tracing
def get_file_contents_if_exists(path, default=None):
with Tracing("BuckProject.get_file_contents_if_it_exists", args={"path": path}):
if not os.path.exists(path):
return default
with open(path) as f:
contents = f.read().strip()
return default if not contents else contents
def write_contents_to_file(path, contents):
with Tracing("BuckProject.write_contents_to_file", args={"path": path}):
with open(path, "w") as output_file:
output_file.write(str(contents))
def makedirs(path):
try:
os.makedirs(path)
except OSError as e:
# Potentially the case that multiple processes are running in parallel
# (e.g. a series of linters running buck query without buckd), so we
# should just swallow the error.
# This is mostly equivalent to os.makedirs(path, exist_ok=True) in
# Python 3.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
class BuckProject:
def __init__(self, root):
self.root = root
self._buck_out = os.path.join(root, "buck-out")
buck_out_tmp = os.path.join(self._buck_out, "tmp")
makedirs(buck_out_tmp)
self._buck_out_log = os.path.join(self._buck_out, "log")
makedirs(self._buck_out_log)
self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=buck_out_tmp)
# Only created if buckd is used.
self.buckd_tmp_dir = None
self.buckd_dir = os.path.join(root, ".buckd")
self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version")
self.buckd_pid_file = os.path.join(self.buckd_dir, "pid")
self.buckd_stdout = os.path.join(self.buckd_dir, "stdout")
self.buckd_stderr = os.path.join(self.buckd_dir, "stderr")
buck_javaargs_path = os.path.join(self.root, ".buckjavaargs")
self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path)
buck_javaargs_path_local = os.path.join(self.root, ".buckjavaargs.local")
self.buck_javaargs_local = get_file_contents_if_exists(buck_javaargs_path_local)
def get_root_hash(self):
return hashlib.sha256(self.root.encode("utf-8")).hexdigest()
def get_buckd_transport_file_path(self):
if os.name == "nt":
return u"\\\\.\\pipe\\buckd_{0}".format(self.get_root_hash())
else:
return os.path.join(self.buckd_dir, "sock")
def get_buckd_transport_address(self):
if os.name == "nt":
return "local:buckd_{0}".format(self.get_root_hash())
else:
return "local:.buckd/sock"
def get_running_buckd_version(self):
return get_file_contents_if_exists(self.buckd_version_file)
def get_running_buckd_pid(self):
try:
return int(get_file_contents_if_exists(self.buckd_pid_file))
except ValueError:
return None
except TypeError:
return None
def get_buckd_stdout(self):
return self.buckd_stdout
def get_buckd_stderr(self):
return self.buckd_stderr
def get_buck_out_log_dir(self):
return self._buck_out_log
def clean_up_buckd(self):
with Tracing("BuckProject.clean_up_buckd"):
if os.path.exists(self.buckd_dir):
file_locks.rmtree_if_can_lock(self.buckd_dir)
def create_buckd_tmp_dir(self):
if self.buckd_tmp_dir is not None:
return self.buckd_tmp_dir
tmp_dir_parent = os.path.join(self.buckd_dir, "tmp")
makedirs(tmp_dir_parent)
self.buckd_tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=tmp_dir_parent)
return self.buckd_tmp_dir
def save_buckd_version(self, version):
write_contents_to_file(self.buckd_version_file, version)
def save_buckd_pid(self, pid):
write_contents_to_file(self.buckd_pid_file, str(pid))
@staticmethod
def from_current_dir():
with Tracing("BuckProject.from_current_dir"):
current_dir = os.getcwd()
if "--version" in sys.argv or "-V" in sys.argv:
return BuckProject(current_dir)
at_root_dir = False
while not at_root_dir:
if os.path.exists(os.path.join(current_dir, ".buckconfig")):
return BuckProject(current_dir)
parent_dir = os.path.dirname(current_dir)
at_root_dir = current_dir == parent_dir
current_dir = parent_dir
raise NoBuckConfigFoundException()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with Tracing("BuckProject.__exit__"):
if os.path.exists(self.tmp_dir):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class NoBuckConfigFoundException(Exception):
def __init__(self):
no_buckconfig_message_path = ".no_buckconfig_message"
default_message = textwrap.dedent(
"""\
This does not appear to be the root of a Buck project. Please 'cd'
to the root of your project before running buck. If this really is
the root of your project, run
'touch .buckconfig'
and then re-run your buck command."""
)
message = get_file_contents_if_exists(
no_buckconfig_message_path, default_message
)
Exception.__init__(self, message)
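if __name__ == "__main__":
    # Usage sketch, not part of Buck itself: walk up from the current working
    # directory to find the project root (a directory containing .buckconfig)
    # and print a few derived paths. Note that constructing BuckProject
    # creates buck-out/tmp and buck-out/log under that root.
    try:
        with BuckProject.from_current_dir() as project:
            print("project root:", project.root)
            print("buckd transport:", project.get_buckd_transport_file_path())
    except NoBuckConfigFoundException as e:
        print(e)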
| brettwooldridge/buck | programs/buck_project.py | Python | apache-2.0 | 6,332 |
# -*- coding: utf-8 -*-
import os
import logging
import datetime
def get_logger(directory, name):
"""
"""
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
date_handler = DateFileHandler(directory, name)
fmt_str = '%(asctime)s %(process)d %(module)s.%(funcName)s.%(lineno)d %(levelname)s : %(message)s'
fmt = logging.Formatter(fmt_str, datefmt='%Y-%m-%d %H:%M:%S')
date_handler.setFormatter(fmt)
logger.addHandler(date_handler)
return logger
class DateFileHandler(logging.StreamHandler):
"""
log by date file
"""
def __init__(self, directory, log_name='', mode='a'):
self.directory = directory
self.log_name = log_name
self.mode = mode
self.last_date = None
logging.StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
logging.StreamHandler.close(self)
self.stream = None
finally:
self.release()
def gen_file_name(self):
self.last_date = datetime.datetime.now().date()
log_directory = '%s/%04d-%02d' % (self.directory, self.last_date.year, self.last_date.month)
os.system("mkdir -p %s" % log_directory)
log_file = '%s/%s.%s.log' % (log_directory, self.last_date.day, self.log_name)
return log_file
def _open(self):
log_file = self.gen_file_name()
stream = open(log_file, self.mode)
return stream
def should_roll(self):
date = datetime.datetime.now().date()
if date == self.last_date:
return False
else:
return True
def emit(self, record):
"""
Emit a record.
"""
if self.should_roll():
self.close()
if self.stream is None:
self.stream = self._open()
logging.StreamHandler.emit(self, record)
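if __name__ == '__main__':
    # Usage sketch (hypothetical directory): messages land in
    # <directory>/<YYYY-MM>/<day>.<name>.log and the handler rolls over to a
    # new file automatically once the date changes.
    log = get_logger('/tmp/aqi_logs', 'example')
    log.info('hello from the date-rolling logger')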
| alen-alex/aqi_service | aqi_service/util/log_utils.py | Python | apache-2.0 | 2,106 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the `Google Monitoring API (V3)`_.
Example::
>>> from gcloud import monitoring
>>> client = monitoring.Client()
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
At present, the client supports querying of time series, metric descriptors,
and monitored resource descriptors.
.. _Google Monitoring API (V3): https://cloud.google.com/monitoring/api/
"""
from gcloud.client import JSONClient
from gcloud.monitoring.connection import Connection
from gcloud.monitoring.metric import MetricDescriptor
from gcloud.monitoring.query import Query
from gcloud.monitoring.resource import ResourceDescriptor
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: string
:param project: The target project. If not passed, falls back to the
default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def query(self,
metric_type=Query.DEFAULT_METRIC_TYPE,
end_time=None,
days=0, hours=0, minutes=0):
"""Construct a query object for listing time series.
Example::
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
:type metric_type: string
:param metric_type: The metric type name. The default value is
:data:`Query.DEFAULT_METRIC_TYPE
<gcloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`,
but please note that this default value is provided only for
demonstration purposes and is subject to change. See the
`supported metrics`_.
:type end_time: :class:`datetime.datetime` or None
:param end_time: The end time (inclusive) of the time interval
for which results should be returned, as a datetime object.
The default is the start of the current minute.
The start time (exclusive) is determined by combining the
values of ``days``, ``hours``, and ``minutes``, and
subtracting the resulting duration from the end time.
It is also allowed to omit the end time and duration here,
in which case
:meth:`~gcloud.monitoring.query.Query.select_interval`
must be called before the query is executed.
:type days: integer
:param days: The number of days in the time interval.
:type hours: integer
:param hours: The number of hours in the time interval.
:type minutes: integer
:param minutes: The number of minutes in the time interval.
:rtype: :class:`~gcloud.monitoring.query.Query`
:returns: The query object.
:raises: :exc:`ValueError` if ``end_time`` is specified but
``days``, ``hours``, and ``minutes`` are all zero.
If you really want to specify a point in time, use
:meth:`~gcloud.monitoring.query.Query.select_interval`.
.. _supported metrics: https://cloud.google.com/monitoring/api/metrics
"""
return Query(self, metric_type,
end_time=end_time,
days=days, hours=hours, minutes=minutes)
def fetch_metric_descriptor(self, metric_type):
"""Look up a metric descriptor by type.
Example::
>>> METRIC = 'compute.googleapis.com/instance/cpu/utilization'
>>> print(client.fetch_metric_descriptor(METRIC))
:type metric_type: string
:param metric_type: The metric type name.
:rtype: :class:`~gcloud.monitoring.metric.MetricDescriptor`
:returns: The metric descriptor instance.
:raises: :class:`gcloud.exceptions.NotFound` if the metric descriptor
is not found.
"""
return MetricDescriptor._fetch(self, metric_type)
def list_metric_descriptors(self, filter_string=None):
"""List all metric descriptors for the project.
Example::
>>> for descriptor in client.list_metric_descriptors():
... print(descriptor.type)
:type filter_string: string or None
:param filter_string:
An optional filter expression describing the metric descriptors
to be returned. See the `filter documentation`_.
:rtype: list of :class:`~gcloud.monitoring.metric.MetricDescriptor`
:returns: A list of metric descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return MetricDescriptor._list(self, filter_string)
def fetch_resource_descriptor(self, resource_type):
"""Look up a resource descriptor by type.
Example::
>>> print(client.fetch_resource_descriptor('gce_instance'))
:type resource_type: string
:param resource_type: The resource type name.
:rtype: :class:`~gcloud.monitoring.resource.ResourceDescriptor`
:returns: The resource descriptor instance.
:raises: :class:`gcloud.exceptions.NotFound` if the resource descriptor
is not found.
"""
return ResourceDescriptor._fetch(self, resource_type)
def list_resource_descriptors(self, filter_string=None):
"""List all resource descriptors for the project.
Example::
>>> for descriptor in client.list_resource_descriptors():
... print(descriptor.type)
:type filter_string: string or None
:param filter_string:
An optional filter expression describing the resource descriptors
to be returned. See the `filter documentation`_.
:rtype: list of :class:`~gcloud.monitoring.resource.ResourceDescriptor`
:returns: A list of resource descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return ResourceDescriptor._list(self, filter_string)
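if __name__ == '__main__':
    # Usage sketch combining the calls documented above. It assumes Google
    # Cloud credentials and a default project can be inferred from the
    # environment; the metric type is the example metric named in the
    # docstrings, and as_dataframe() additionally requires pandas.
    client = Client()
    metric = 'compute.googleapis.com/instance/cpu/utilization'
    print(client.fetch_metric_descriptor(metric))
    query = client.query(metric_type=metric, minutes=10)
    print(query.as_dataframe())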
| huangkuan/hack | lib/gcloud/monitoring/client.py | Python | apache-2.0 | 7,310 |
from __future__ import unicode_literals
import binascii
import hashlib
import logging
import socket
import ssl
import sys
from ansible.module_utils.mt_api.retryloop import RetryError
from ansible.module_utils.mt_api.retryloop import retryloop
from ansible.module_utils.mt_api.socket_utils import set_keepalive
PY2 = sys.version_info[0] < 3
logger = logging.getLogger(__name__)
class RosAPIError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
if isinstance(self.value, dict) and self.value.get('message'):
return self.value['message']
elif isinstance(self.value, list):
elements = (
'%s: %s' %
(element.__class__, str(element)) for element in self.value
)
return '[%s]' % (', '.join(element for element in elements))
else:
return str(self.value)
class RosAPIConnectionError(RosAPIError):
pass
class RosAPIFatalError(RosAPIError):
pass
class RosApiLengthUtils(object):
def __init__(self, api):
self.api = api
    def write_length(self, length):
self.api.write_bytes(self.length_to_bytes(length))
def length_to_bytes(self, length):
if length < 0x80:
return self.to_bytes(length)
elif length < 0x4000:
length |= 0x8000
return self.to_bytes(length, 2)
elif length < 0x200000:
length |= 0xC00000
return self.to_bytes(length, 3)
elif length < 0x10000000:
length |= 0xE0000000
return self.to_bytes(length, 4)
else:
return self.to_bytes(0xF0) + self.to_bytes(length, 4)
def read_length(self):
b = self.api.read_bytes(1)
i = self.from_bytes(b)
if (i & 0x80) == 0x00:
return i
elif (i & 0xC0) == 0x80:
return self._unpack(1, i & ~0xC0)
elif (i & 0xE0) == 0xC0:
return self._unpack(2, i & ~0xE0)
elif (i & 0xF0) == 0xE0:
return self._unpack(3, i & ~0xF0)
elif (i & 0xF8) == 0xF0:
            return self.from_bytes(self.api.read_bytes(4))  # 0xF0 prefix is followed by a 4-byte length
else:
raise RosAPIFatalError('Unknown value: %x' % i)
    def _unpack(self, times, i):
        # The continuation bytes are raw binary data, not text: combine the
        # first byte (with its marker bits already masked off) with the bytes
        # read from the socket and convert the result to an integer.
        data = self.to_bytes(i) + self.api.read_bytes(times)
        return self.from_bytes(data)
if PY2:
def from_bytes(self, data):
data_values = [ord(char) for char in data]
value = 0
for byte_value in data_values:
value <<= 8
value += byte_value
return value
def to_bytes(self, i, size=1):
data = []
for _ in xrange(size):
data.append(chr(i & 0xff))
i >>= 8
return b''.join(reversed(data))
else:
def from_bytes(self, data):
return int.from_bytes(data, 'big')
def to_bytes(self, i, size=1):
return i.to_bytes(size, 'big')
class RosAPI(object):
"""Routeros api"""
def __init__(self, socket):
self.socket = socket
self.length_utils = RosApiLengthUtils(self)
def login(self, username, pwd):
for _, attrs in self.talk([b'/login']):
token = binascii.unhexlify(attrs[b'ret'])
hasher = hashlib.md5()
hasher.update(b'\x00')
hasher.update(pwd)
hasher.update(token)
self.talk([b'/login', b'=name=' + username,
b'=response=00' + hasher.hexdigest().encode('ascii')])
def talk(self, words):
if self.write_sentence(words) == 0:
return
output = []
while True:
input_sentence = self.read_sentence()
if not len(input_sentence):
continue
attrs = {}
reply = input_sentence.pop(0)
for line in input_sentence:
try:
second_eq_pos = line.index(b'=', 1)
                except ValueError:
attrs[line[1:]] = b''
else:
attrs[line[1:second_eq_pos]] = line[second_eq_pos + 1:]
output.append((reply, attrs))
if reply == b'!done':
if output[0][0] == b'!trap':
raise RosAPIError(output[0][1])
if output[0][0] == b'!fatal':
self.socket.close()
raise RosAPIFatalError(output[0][1])
return output
def write_sentence(self, words):
words_written = 0
for word in words:
self.write_word(word)
words_written += 1
self.write_word(b'')
return words_written
def read_sentence(self):
sentence = []
while True:
word = self.read_word()
if not len(word):
return sentence
sentence.append(word)
def write_word(self, word):
logger.debug('>>> %s' % word)
        self.length_utils.write_length(len(word))
self.write_bytes(word)
def read_word(self):
word = self.read_bytes(self.length_utils.read_length())
logger.debug('<<< %s' % word)
return word
def write_bytes(self, data):
sent_overal = 0
while sent_overal < len(data):
try:
sent = self.socket.send(data[sent_overal:])
except socket.error as e:
raise RosAPIConnectionError(str(e))
if sent == 0:
raise RosAPIConnectionError('Connection closed by remote end.')
sent_overal += sent
def read_bytes(self, length):
received_overal = b''
while len(received_overal) < length:
try:
received = self.socket.recv(
length - len(received_overal))
except socket.error as e:
raise RosAPIConnectionError(str(e))
if len(received) == 0:
raise RosAPIConnectionError('Connection closed by remote end.')
received_overal += received
return received_overal
class BaseRouterboardResource(object):
def __init__(self, api, namespace):
self.api = api
self.namespace = namespace
def call(self, command, set_kwargs, query_kwargs=None):
query_kwargs = query_kwargs or {}
query_arguments = self._prepare_arguments(True, **query_kwargs)
set_arguments = self._prepare_arguments(False, **set_kwargs)
query = ([('%s/%s' % (self.namespace, command)).encode('ascii')] +
query_arguments + set_arguments)
response = self.api.api_client.talk(query)
output = []
for response_type, attributes in response:
if response_type == b'!re':
output.append(self._remove_first_char_from_keys(attributes))
return output
@staticmethod
def _prepare_arguments(is_query, **kwargs):
command_arguments = []
for key, value in kwargs.items():
if key in ['id', 'proplist']:
key = '.%s' % key
key = key.replace('_', '-')
selector_char = '?' if is_query else '='
command_arguments.append(
('%s%s=' % (selector_char, key)).encode('ascii') + value)
return command_arguments
@staticmethod
def _remove_first_char_from_keys(dictionary):
elements = []
for key, value in dictionary.items():
key = key.decode('ascii')
if key in ['.id', '.proplist']:
key = key[1:]
elements.append((key, value))
return dict(elements)
def get(self, **kwargs):
return self.call('print', {}, kwargs)
def detailed_get(self, **kwargs):
return self.call('print', {'detail': b''}, kwargs)
def set(self, **kwargs):
return self.call('set', kwargs)
def add(self, **kwargs):
return self.call('add', kwargs)
def remove(self, **kwargs):
return self.call('remove', kwargs)
class RouterboardResource(BaseRouterboardResource):
def detailed_get(self, **kwargs):
return self.call('print', {'detail': ''}, kwargs)
def call(self, command, set_kwargs, query_kwargs=None):
query_kwargs = query_kwargs or {}
result = super(RouterboardResource, self).call(
command, self._encode_kwargs(set_kwargs),
self._encode_kwargs(query_kwargs))
for item in result:
for k in item:
item[k] = item[k].decode('ascii')
return result
def _encode_kwargs(self, kwargs):
return dict((k, v.encode('ascii')) for k, v in kwargs.items())
class RouterboardAPI(object):
def __init__(self, host, username='api', password='', port=8728, ssl=False):
self.host = host
self.username = username
self.password = password
self.socket = None
self.port = port
self.ssl = ssl
self.reconnect()
def __enter__(self):
return self
def __exit__(self, _, __, ___):
self.close_connection()
def reconnect(self):
if self.socket:
self.close_connection()
try:
for retry in retryloop(10, delay=0.1, timeout=30):
try:
self.connect()
self.login()
except socket.error:
retry()
except (socket.error, RetryError) as e:
raise RosAPIConnectionError(str(e))
def connect(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(15.0)
sock.connect((self.host, self.port))
set_keepalive(sock, after_idle_sec=10)
if self.ssl:
try:
self.socket = ssl.wrap_socket(sock)
except ssl.SSLError as e:
raise RosAPIConnectionError(str(e))
else:
self.socket = sock
self.api_client = RosAPI(self.socket)
def login(self):
self.api_client.login(self.username.encode('ascii'),
self.password.encode('ascii'))
def get_resource(self, namespace):
return RouterboardResource(self, namespace)
def get_base_resource(self, namespace):
return BaseRouterboardResource(self, namespace)
def close_connection(self):
self.socket.close()
class Mikrotik(object):
def __init__(self, hostname, username, password):
self.hostname = hostname
self.username = username
self.password = password
def login(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.hostname, 8728))
mt = RosAPI(s)
mt.login(self.username, self.password)
return mt
def talk(self, talk_command):
r = self.login()
response = r.talk(talk_command)
return(response)
def api_print(self, base_path, params=None):
command = [base_path + '/print']
if params is not None:
for key, value in params.iteritems():
item = b'=' + key + '=' + str(value)
command.append(item)
return self.talk(command)
def api_add(self, base_path, params):
command = [base_path + '/add']
for key, value in params.iteritems():
item = b'=' + key + '=' + str(value)
command.append(item)
return self.talk(command)
def api_edit(self, base_path, params):
command = [base_path + '/set']
for key, value in params.iteritems():
item = b'=' + key + '=' + str(value)
command.append(item)
return self.talk(command)
def api_remove(self, base_path, remove_id):
command = [
base_path + '/remove',
b'=.id=' + remove_id
]
return self.talk(command)
def api_command(self, base_path, params=None):
command = [base_path]
if params is not None:
for key, value in params.iteritems():
item = b'=' + key + '=' + str(value)
command.append(item)
return self.talk(command)
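if __name__ == '__main__':
    # Illustration of the RouterOS API word-length prefix implemented above.
    # length_to_bytes() never touches the socket, so it can be exercised with
    # a dummy api object; the sample lengths are arbitrary.
    encoder = RosApiLengthUtils(None)
    for length in (0x7F, 0x80, 0x4000, 0x200000):
        print('%d -> %r' % (length, binascii.hexlify(encoder.length_to_bytes(length))))
    # A higher-level call against a live router would look roughly like
    # (hypothetical address and credentials):
    #   Mikrotik('192.0.2.1', 'admin', 'password').api_print('/interface')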
| zahodi/ansible-mikrotik | pythonlibs/mt_api/__init__.py | Python | apache-2.0 | 12,353 |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the GlusterFS driver module."""
import contextlib
import errno
import mock
import os
import tempfile
import time
import traceback
import mox as mox_lib
from mox import IgnoreArg
from mox import IsA
from mox import stubout
from oslo.config import cfg
from cinder import brick
from cinder import compute
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import imageutils
from cinder.openstack.common import processutils as putils
from cinder.openstack.common import units
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver as base_driver
from cinder.volume.drivers import glusterfs
CONF = cfg.CONF
class DumbVolume(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class FakeDb(object):
msg = "Tests are broken: mock this out."
def volume_get(self, *a, **kw):
raise Exception(self.msg)
def snapshot_get_all_for_volume(self, *a, **kw):
"""Mock this if you want results from it."""
return []
class GlusterFsDriverTestCase(test.TestCase):
"""Test case for GlusterFS driver."""
TEST_EXPORT1 = 'glusterfs-host1:/export'
TEST_EXPORT2 = 'glusterfs-host2:/export'
TEST_EXPORT2_OPTIONS = '-o backupvolfile-server=glusterfs-backup1'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/glusterfs'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/glusterfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca'
SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede'
def setUp(self):
super(GlusterFsDriverTestCase, self).setUp()
self._mox = mox_lib.Mox()
self._configuration = mox_lib.MockObject(conf.Configuration)
self._configuration.append_config_values(mox_lib.IgnoreArg())
self._configuration.glusterfs_shares_config = \
self.TEST_SHARES_CONFIG_FILE
self._configuration.glusterfs_mount_point_base = \
self.TEST_MNT_POINT_BASE
self._configuration.glusterfs_sparsed_volumes = True
self._configuration.glusterfs_qcow2_volumes = False
self.stubs = stubout.StubOutForTesting()
self._driver =\
glusterfs.GlusterfsDriver(configuration=self._configuration,
db=FakeDb())
self._driver.shares = {}
compute.API = mock.MagicMock()
self.addCleanup(self._mox.UnsetStubs)
def stub_out_not_replaying(self, obj, attr_name):
attr_to_replace = getattr(obj, attr_name)
stub = mox_lib.MockObject(attr_to_replace)
self.stubs.Set(obj, attr_name, stub)
def assertRaisesAndMessageMatches(
self, excClass, msg, callableObj, *args, **kwargs):
"""Ensure that 'excClass' was raised and its message contains 'msg'."""
caught = False
try:
callableObj(*args, **kwargs)
except Exception as exc:
caught = True
self.assertEqual(excClass, type(exc),
'Wrong exception caught: %s Stacktrace: %s' %
(exc, traceback.print_exc()))
self.assertIn(msg, str(exc))
if not caught:
self.fail('Expected raised exception but nothing caught.')
def test_set_execute(self):
mox = self._mox
drv = self._driver
rfsclient = brick.remotefs.remotefs.RemoteFsClient
mox.StubOutWithMock(rfsclient, 'set_execute')
def my_execute(*a, **k):
pass
rfsclient.set_execute(my_execute)
mox.ReplayAll()
drv.set_execute(my_execute)
mox.VerifyAll()
def test_local_path(self):
"""local_path common use case."""
CONF.set_override("glusterfs_mount_point_base",
self.TEST_MNT_POINT_BASE)
drv = self._driver
volume = DumbVolume()
volume['provider_location'] = self.TEST_EXPORT1
volume['name'] = 'volume-123'
self.assertEqual(
'/mnt/test/ab03ab34eaca46a5fb81878f7e9b91fc/volume-123',
drv.local_path(volume))
def test_mount_glusterfs_should_mount_correctly(self):
"""_mount_glusterfs common case usage."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1,
self.TEST_MNT_POINT, run_as_root=True)
mox.ReplayAll()
drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT)
mox.VerifyAll()
def test_mount_glusterfs_should_suppress_already_mounted_error(self):
"""_mount_glusterfs should suppress already mounted error if
ensure=True
"""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
drv._execute('mount', '-t', 'glusterfs', self.TEST_EXPORT1,
self.TEST_MNT_POINT, run_as_root=True).\
AndRaise(putils.ProcessExecutionError(
stderr='is busy or already mounted'))
mox.ReplayAll()
drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT,
ensure=True)
mox.VerifyAll()
def test_mount_glusterfs_should_reraise_already_mounted_error(self):
"""_mount_glusterfs should not suppress already mounted error
if ensure=False
"""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
drv._execute(
'mount',
'-t',
'glusterfs',
self.TEST_EXPORT1,
self.TEST_MNT_POINT,
run_as_root=True). \
AndRaise(putils.ProcessExecutionError(stderr='is busy or '
'already mounted'))
mox.ReplayAll()
self.assertRaises(putils.ProcessExecutionError, drv._mount_glusterfs,
self.TEST_EXPORT1, self.TEST_MNT_POINT,
ensure=False)
mox.VerifyAll()
def test_mount_glusterfs_should_create_mountpoint_if_not_yet(self):
"""_mount_glusterfs should create mountpoint if it doesn't exist."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_execute')
drv._execute('mkdir', '-p', self.TEST_MNT_POINT)
drv._execute(*([IgnoreArg()] * 5), run_as_root=IgnoreArg())
mox.ReplayAll()
drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT)
mox.VerifyAll()
def test_get_hash_str(self):
"""_get_hash_str should calculation correct value."""
drv = self._driver
self.assertEqual('ab03ab34eaca46a5fb81878f7e9b91fc',
drv._get_hash_str(self.TEST_EXPORT1))
def test_get_mount_point_for_share(self):
"""_get_mount_point_for_share should call RemoteFsClient."""
mox = self._mox
drv = self._driver
hashed_path = '/mnt/test/abcdefabcdef'
mox.StubOutWithMock(brick.remotefs.remotefs.RemoteFsClient,
'get_mount_point')
CONF.set_override("glusterfs_mount_point_base",
self.TEST_MNT_POINT_BASE)
brick.remotefs.remotefs.RemoteFsClient.\
get_mount_point(self.TEST_EXPORT1).AndReturn(hashed_path)
mox.ReplayAll()
drv._get_mount_point_for_share(self.TEST_EXPORT1)
mox.VerifyAll()
def test_get_available_capacity_with_df(self):
"""_get_available_capacity should calculate correct value."""
mox = self._mox
drv = self._driver
df_total_size = 2620544
df_avail = 1490560
df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \
(df_total_size, df_avail)
df_output = df_head + df_data
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
drv._get_mount_point_for_share(self.TEST_EXPORT1).\
AndReturn(self.TEST_MNT_POINT)
mox.StubOutWithMock(drv, '_execute')
drv._execute('df', '--portability', '--block-size', '1',
self.TEST_MNT_POINT,
run_as_root=True).AndReturn((df_output, None))
mox.ReplayAll()
self.assertEqual((df_avail, df_total_size),
drv._get_available_capacity(self.TEST_EXPORT1))
mox.VerifyAll()
def test_load_shares_config(self):
mox = self._mox
drv = self._driver
drv.configuration.glusterfs_shares_config = (
self.TEST_SHARES_CONFIG_FILE)
mox.StubOutWithMock(drv, '_read_config_file')
config_data = []
config_data.append(self.TEST_EXPORT1)
config_data.append('#' + self.TEST_EXPORT2)
config_data.append(self.TEST_EXPORT2 + ' ' + self.TEST_EXPORT2_OPTIONS)
config_data.append('broken:share_format')
config_data.append('')
drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
mox.ReplayAll()
drv._load_shares_config(drv.configuration.glusterfs_shares_config)
self.assertIn(self.TEST_EXPORT1, drv.shares)
self.assertIn(self.TEST_EXPORT2, drv.shares)
self.assertEqual(len(drv.shares), 2)
self.assertEqual(drv.shares[self.TEST_EXPORT2],
self.TEST_EXPORT2_OPTIONS)
mox.VerifyAll()
def test_ensure_share_mounted(self):
"""_ensure_share_mounted simple use case."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(utils, 'get_file_mode')
mox.StubOutWithMock(utils, 'get_file_gid')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_ensure_share_writable')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
drv._get_mount_point_for_share(self.TEST_EXPORT1).\
AndReturn(self.TEST_MNT_POINT)
mox.StubOutWithMock(drv, '_mount_glusterfs')
drv._mount_glusterfs(self.TEST_EXPORT1, self.TEST_MNT_POINT,
ensure=True)
utils.get_file_gid(self.TEST_MNT_POINT).AndReturn(333333)
utils.get_file_mode(self.TEST_MNT_POINT).AndReturn(0o777)
drv._ensure_share_writable(self.TEST_MNT_POINT)
drv._execute('chgrp', IgnoreArg(), self.TEST_MNT_POINT,
run_as_root=True)
mox.ReplayAll()
drv._ensure_share_mounted(self.TEST_EXPORT1)
mox.VerifyAll()
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_read_config_file')
config_data = []
config_data.append(self.TEST_EXPORT1)
drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
mox.StubOutWithMock(drv, '_ensure_share_mounted')
drv._ensure_share_mounted(self.TEST_EXPORT1)
mox.ReplayAll()
drv._ensure_shares_mounted()
self.assertEqual(1, len(drv._mounted_shares))
self.assertEqual(self.TEST_EXPORT1, drv._mounted_shares[0])
mox.VerifyAll()
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
"""_ensure_shares_mounted should not save share if failed to mount."""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_read_config_file')
config_data = []
config_data.append(self.TEST_EXPORT1)
drv._read_config_file(self.TEST_SHARES_CONFIG_FILE).\
AndReturn(config_data)
mox.StubOutWithMock(drv, '_ensure_share_mounted')
drv._ensure_share_mounted(self.TEST_EXPORT1).AndRaise(Exception())
mox.ReplayAll()
drv._ensure_shares_mounted()
self.assertEqual(0, len(drv._mounted_shares))
mox.VerifyAll()
def test_setup_should_throw_error_if_shares_config_not_configured(self):
"""do_setup should throw error if shares config is not configured."""
drv = self._driver
drv.configuration.glusterfs_shares_config = None
self.assertRaisesAndMessageMatches(exception.GlusterfsException,
'no Gluster config file configured',
drv.do_setup,
IsA(context.RequestContext))
def test_setup_should_throw_exception_if_client_is_not_installed(self):
"""do_setup should throw exception if client is not installed."""
mox = self._mox
drv = self._driver
CONF.set_override("glusterfs_shares_config",
self.TEST_SHARES_CONFIG_FILE)
mox.StubOutWithMock(os.path, 'exists')
os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
mox.StubOutWithMock(drv, '_execute')
drv._execute('mount.glusterfs', check_exit_code=False).\
AndRaise(OSError(errno.ENOENT, 'No such file or directory'))
mox.ReplayAll()
self.assertRaisesAndMessageMatches(exception.GlusterfsException,
'mount.glusterfs is not installed',
drv.do_setup,
IsA(context.RequestContext))
mox.VerifyAll()
def _fake_load_shares_config(self, conf):
self._driver.shares = {'127.7.7.7:/gluster1': None}
def _fake_NamedTemporaryFile(self, prefix=None, dir=None):
raise OSError('Permission denied!')
def test_setup_set_share_permissions(self):
mox = self._mox
drv = self._driver
CONF.set_override("glusterfs_shares_config",
self.TEST_SHARES_CONFIG_FILE)
self.stubs.Set(drv, '_load_shares_config',
self._fake_load_shares_config)
self.stubs.Set(tempfile, 'NamedTemporaryFile',
self._fake_NamedTemporaryFile)
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(utils, 'get_file_gid')
mox.StubOutWithMock(utils, 'get_file_mode')
mox.StubOutWithMock(os, 'getegid')
drv._execute('mount.glusterfs', check_exit_code=False)
drv._execute('umount', '/mnt/test/8f0473c9ad824b8b6a27264b9cacb005',
run_as_root=True)
drv._execute('mkdir', '-p', mox_lib.IgnoreArg())
os.path.exists(self.TEST_SHARES_CONFIG_FILE).AndReturn(True)
drv._execute('mount', '-t', 'glusterfs', '127.7.7.7:/gluster1',
mox_lib.IgnoreArg(), run_as_root=True)
utils.get_file_gid(mox_lib.IgnoreArg()).AndReturn(33333)
# perms not writable
utils.get_file_mode(mox_lib.IgnoreArg()).AndReturn(0o000)
os.getegid().AndReturn(888)
drv._execute('chgrp', 888, mox_lib.IgnoreArg(), run_as_root=True)
drv._execute('chmod', 'g+w', mox_lib.IgnoreArg(), run_as_root=True)
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
"""_find_share should throw error if there is no mounted shares."""
drv = self._driver
drv._mounted_shares = []
self.assertRaises(exception.GlusterfsNoSharesMounted,
drv._find_share,
self.TEST_SIZE_IN_GB)
def test_find_share(self):
"""_find_share simple use case."""
mox = self._mox
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT1, self.TEST_EXPORT2]
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
AndReturn((2 * units.Gi, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
AndReturn((3 * units.Gi, 10 * units.Gi))
mox.ReplayAll()
self.assertEqual(self.TEST_EXPORT2,
drv._find_share(self.TEST_SIZE_IN_GB))
mox.VerifyAll()
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
"""_find_share should throw error if there is no share to host vol."""
mox = self._mox
drv = self._driver
drv._mounted_shares = [self.TEST_EXPORT1,
self.TEST_EXPORT2]
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
AndReturn((0, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
AndReturn((0, 10 * units.Gi))
mox.ReplayAll()
self.assertRaises(exception.GlusterfsNoSuitableShareFound,
drv._find_share,
self.TEST_SIZE_IN_GB)
mox.VerifyAll()
def _simple_volume(self, id=None):
volume = DumbVolume()
volume['provider_location'] = self.TEST_EXPORT1
if id is None:
volume['id'] = self.VOLUME_UUID
else:
volume['id'] = id
# volume['name'] mirrors format from db/sqlalchemy/models.py
volume['name'] = 'volume-%s' % volume['id']
volume['size'] = 10
volume['status'] = 'available'
return volume
def test_create_sparsed_volume(self):
mox = self._mox
drv = self._driver
volume = self._simple_volume()
CONF.set_override('glusterfs_sparsed_volumes', True)
mox.StubOutWithMock(drv, '_create_sparsed_file')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
drv._do_create_volume(volume)
mox.VerifyAll()
def test_create_nonsparsed_volume(self):
mox = self._mox
drv = self._driver
volume = self._simple_volume()
old_value = self._configuration.glusterfs_sparsed_volumes
self._configuration.glusterfs_sparsed_volumes = False
mox.StubOutWithMock(drv, '_create_regular_file')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._create_regular_file(IgnoreArg(), IgnoreArg())
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
drv._do_create_volume(volume)
mox.VerifyAll()
self._configuration.glusterfs_sparsed_volumes = old_value
def test_create_qcow2_volume(self):
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
old_value = self._configuration.glusterfs_qcow2_volumes
self._configuration.glusterfs_qcow2_volumes = True
mox.StubOutWithMock(drv, '_execute')
hashed = drv._get_hash_str(volume['provider_location'])
path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
hashed,
self.VOLUME_UUID)
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', path,
str(volume['size'] * units.Gi),
run_as_root=True)
drv._execute('chmod', 'ugo+rw', path, run_as_root=True)
mox.ReplayAll()
drv._do_create_volume(volume)
mox.VerifyAll()
self._configuration.glusterfs_qcow2_volumes = old_value
def test_create_volume_should_ensure_glusterfs_mounted(self):
"""create_volume ensures shares provided in config are mounted."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(glusterfs, 'LOG')
self.stub_out_not_replaying(drv, '_find_share')
self.stub_out_not_replaying(drv, '_do_create_volume')
mox.StubOutWithMock(drv, '_ensure_shares_mounted')
drv._ensure_shares_mounted()
mox.ReplayAll()
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
drv.create_volume(volume)
mox.VerifyAll()
def test_create_volume_should_return_provider_location(self):
"""create_volume should return provider_location with found share."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(glusterfs, 'LOG')
self.stub_out_not_replaying(drv, '_ensure_shares_mounted')
self.stub_out_not_replaying(drv, '_do_create_volume')
mox.StubOutWithMock(drv, '_find_share')
drv._find_share(self.TEST_SIZE_IN_GB).AndReturn(self.TEST_EXPORT1)
mox.ReplayAll()
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
result = drv.create_volume(volume)
self.assertEqual(self.TEST_EXPORT1, result['provider_location'])
mox.VerifyAll()
def test_create_cloned_volume(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_create_snapshot')
mox.StubOutWithMock(drv, '_delete_snapshot')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_copy_volume_from_snapshot')
volume = self._simple_volume()
src_vref = self._simple_volume()
src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1'
src_vref['name'] = 'volume-%s' % src_vref['id']
volume_ref = {'id': volume['id'],
'name': volume['name'],
'status': volume['status'],
'provider_location': volume['provider_location'],
'size': volume['size']}
snap_ref = {'volume_name': src_vref['name'],
'name': 'clone-snap-%s' % src_vref['id'],
'size': src_vref['size'],
'volume_size': src_vref['size'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
drv._create_snapshot(snap_ref)
drv._copy_volume_from_snapshot(snap_ref, volume_ref, volume['size'])
drv._delete_snapshot(mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_cloned_volume(volume, src_vref)
mox.VerifyAll()
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
def test_delete_volume(self, mock_delete_if_exists):
volume = self._simple_volume()
volume_filename = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
info_file = volume_path + '.info'
with contextlib.nested(
mock.patch.object(self._driver, '_ensure_share_mounted'),
mock.patch.object(self._driver, '_local_volume_dir'),
mock.patch.object(self._driver, 'get_active_image_from_info'),
mock.patch.object(self._driver, '_execute'),
mock.patch.object(self._driver, '_local_path_volume'),
mock.patch.object(self._driver, '_local_path_volume_info')
) as (mock_ensure_share_mounted, mock_local_volume_dir,
mock_active_image_from_info, mock_execute,
mock_local_path_volume, mock_local_path_volume_info):
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_active_image_from_info.return_value = volume_filename
mock_local_path_volume.return_value = volume_path
mock_local_path_volume_info.return_value = info_file
self._driver.delete_volume(volume)
mock_ensure_share_mounted.assert_called_once_with(
volume['provider_location'])
mock_local_volume_dir.assert_called_once_with(volume)
mock_active_image_from_info.assert_called_once_with(volume)
mock_execute.assert_called_once_with('rm', '-f', volume_path,
run_as_root=True)
mock_local_path_volume_info.assert_called_once_with(volume)
mock_local_path_volume.assert_called_once_with(volume)
mock_delete_if_exists.assert_any_call(volume_path)
mock_delete_if_exists.assert_any_call(info_file)
def test_refresh_mounts(self):
with contextlib.nested(
mock.patch.object(self._driver, '_unmount_shares'),
mock.patch.object(self._driver, '_ensure_shares_mounted')
) as (mock_unmount_shares, mock_ensure_shares_mounted):
self._driver._refresh_mounts()
self.assertTrue(mock_unmount_shares.called)
self.assertTrue(mock_ensure_shares_mounted.called)
def test_refresh_mounts_with_excp(self):
with contextlib.nested(
mock.patch.object(self._driver, '_unmount_shares'),
mock.patch.object(self._driver, '_ensure_shares_mounted'),
mock.patch.object(glusterfs, 'LOG')
) as (mock_unmount_shares, mock_ensure_shares_mounted,
mock_logger):
mock_stderr = _("umount: <mnt_path>: target is busy")
mock_unmount_shares.side_effect = \
putils.ProcessExecutionError(stderr=mock_stderr)
self._driver._refresh_mounts()
self.assertTrue(mock_unmount_shares.called)
self.assertTrue(mock_logger.warn.called)
self.assertTrue(mock_ensure_shares_mounted.called)
mock_unmount_shares.reset_mock()
mock_ensure_shares_mounted.reset_mock()
mock_logger.reset_mock()
mock_logger.warn.reset_mock()
mock_stderr = _("umount: <mnt_path>: some other error")
mock_unmount_shares.side_effect = \
putils.ProcessExecutionError(stderr=mock_stderr)
self.assertRaises(putils.ProcessExecutionError,
self._driver._refresh_mounts)
self.assertTrue(mock_unmount_shares.called)
self.assertFalse(mock_ensure_shares_mounted.called)
def test_unmount_shares_with_excp(self):
self._driver.shares = {'127.7.7.7:/gluster1': None}
with contextlib.nested(
mock.patch.object(self._driver, '_load_shares_config'),
mock.patch.object(self._driver, '_do_umount'),
mock.patch.object(glusterfs, 'LOG')
) as (mock_load_shares_config, mock_do_umount, mock_logger):
mock_do_umount.side_effect = Exception()
self._driver._unmount_shares()
self.assertTrue(mock_do_umount.called)
self.assertTrue(mock_logger.warning.called)
mock_logger.debug.assert_not_called()
def test_unmount_shares_1share(self):
self._driver.shares = {'127.7.7.7:/gluster1': None}
with contextlib.nested(
mock.patch.object(self._driver, '_load_shares_config'),
mock.patch.object(self._driver, '_do_umount')
) as (mock_load_shares_config, mock_do_umount):
self._driver._unmount_shares()
self.assertTrue(mock_do_umount.called)
mock_do_umount.assert_called_once_with(True,
'127.7.7.7:/gluster1')
def test_unmount_shares_2share(self):
self._driver.shares = {'127.7.7.7:/gluster1': None,
'127.7.7.8:/gluster2': None}
with contextlib.nested(
mock.patch.object(self._driver, '_load_shares_config'),
mock.patch.object(self._driver, '_do_umount')
) as (mock_load_shares_config, mock_do_umount):
self._driver._unmount_shares()
mock_do_umount.assert_any_call(True,
'127.7.7.7:/gluster1')
mock_do_umount.assert_any_call(True,
'127.7.7.8:/gluster2')
def test_do_umount(self):
test_share = '127.7.7.7:/gluster1'
test_hashpath = '/hashed/mnt/path'
with contextlib.nested(
mock.patch.object(self._driver, '_get_mount_point_for_share'),
mock.patch.object(putils, 'execute')
) as (mock_get_mntp_share, mock_execute):
mock_get_mntp_share.return_value = test_hashpath
self._driver._do_umount(True, test_share)
self.assertTrue(mock_get_mntp_share.called)
self.assertTrue(mock_execute.called)
mock_get_mntp_share.assert_called_once_with(test_share)
cmd = ['umount', test_hashpath]
self.assertEqual(cmd[0], mock_execute.call_args[0][0])
self.assertEqual(cmd[1], mock_execute.call_args[0][1])
self.assertEqual(True,
mock_execute.call_args[1]['run_as_root'])
mock_get_mntp_share.reset_mock()
mock_get_mntp_share.return_value = test_hashpath
mock_execute.reset_mock()
self._driver._do_umount(False, test_share)
self.assertTrue(mock_get_mntp_share.called)
self.assertTrue(mock_execute.called)
mock_get_mntp_share.assert_called_once_with(test_share)
cmd = ['umount', test_hashpath]
self.assertEqual(cmd[0], mock_execute.call_args[0][0])
self.assertEqual(cmd[1], mock_execute.call_args[0][1])
self.assertEqual(True,
mock_execute.call_args[1]['run_as_root'])
def test_do_umount_with_excp1(self):
test_share = '127.7.7.7:/gluster1'
test_hashpath = '/hashed/mnt/path'
with contextlib.nested(
mock.patch.object(self._driver, '_get_mount_point_for_share'),
mock.patch.object(putils, 'execute'),
mock.patch.object(glusterfs, 'LOG')
) as (mock_get_mntp_share, mock_execute, mock_logger):
mock_get_mntp_share.return_value = test_hashpath
mock_execute.side_effect = putils.ProcessExecutionError
self.assertRaises(putils.ProcessExecutionError,
self._driver._do_umount, False,
test_share)
mock_logger.reset_mock()
mock_logger.info.reset_mock()
mock_logger.error.reset_mock()
mock_execute.side_effect = putils.ProcessExecutionError
try:
self._driver._do_umount(False, test_share)
except putils.ProcessExecutionError:
self.assertFalse(mock_logger.info.called)
self.assertTrue(mock_logger.error.called)
except Exception as e:
self.fail('Unexpected exception thrown: %s' % e)
else:
self.fail('putils.ProcessExecutionError not thrown')
def test_do_umount_with_excp2(self):
test_share = '127.7.7.7:/gluster1'
test_hashpath = '/hashed/mnt/path'
with contextlib.nested(
mock.patch.object(self._driver, '_get_mount_point_for_share'),
mock.patch.object(putils, 'execute'),
mock.patch.object(glusterfs, 'LOG')
) as (mock_get_mntp_share, mock_execute, mock_logger):
mock_get_mntp_share.return_value = test_hashpath
mock_stderr = _("umount: %s: not mounted") % test_hashpath
mock_execute.side_effect = putils.ProcessExecutionError(
stderr=mock_stderr)
self._driver._do_umount(True, test_share)
self.assertTrue(mock_logger.info.called)
self.assertFalse(mock_logger.error.called)
mock_logger.reset_mock()
mock_logger.info.reset_mock()
mock_logger.error.reset_mock()
mock_stderr = _("umount: %s: target is busy") %\
(test_hashpath)
mock_execute.side_effect = putils.ProcessExecutionError(
stderr=mock_stderr)
self.assertRaises(putils.ProcessExecutionError,
self._driver._do_umount, True,
test_share)
mock_logger.reset_mock()
mock_logger.info.reset_mock()
mock_logger.error.reset_mock()
mock_stderr = _('umount: %s: target is busy') %\
(test_hashpath)
mock_execute.side_effect = putils.ProcessExecutionError(
stderr=mock_stderr)
try:
self._driver._do_umount(True, test_share)
except putils.ProcessExecutionError:
mock_logger.info.assert_not_called()
self.assertTrue(mock_logger.error.called)
except Exception as e:
self.fail('Unexpected exception thrown: %s' % e)
else:
self.fail('putils.ProcessExecutionError not thrown')
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(drv, '_execute')
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_EXPORT1
mox.StubOutWithMock(drv, '_ensure_share_mounted')
drv._ensure_share_mounted(self.TEST_EXPORT1)
mox.ReplayAll()
drv.delete_volume(volume)
mox.VerifyAll()
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't delete if provider_location missed."""
mox = self._mox
drv = self._driver
self.stub_out_not_replaying(drv, '_ensure_share_mounted')
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = None
mox.StubOutWithMock(drv, '_execute')
mox.ReplayAll()
drv.delete_volume(volume)
mox.VerifyAll()
def test_create_snapshot(self):
(mox, drv) = self._mox, self._driver
self.stub_out_not_replaying(drv, '_ensure_share_mounted')
mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_write_info_file')
volume = self._simple_volume()
snap_ref = {'name': 'test snap',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID}
mox.StubOutWithMock(drv, '_execute')
vol_filename = 'volume-%s' % self.VOLUME_UUID
hashed = drv._get_hash_str(self.TEST_EXPORT1)
vol_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
vol_filename)
snap_path = '%s.%s' % (vol_path, self.SNAP_UUID)
info_path = '%s%s' % (vol_path, '.info')
info_dict = {'active': vol_filename}
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_dict)
drv._create_qcow2_snap_file(snap_ref, vol_filename, snap_path)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_dict)
# SNAP_UUID_2 has been removed from dict.
info_file_dict = {'active': 'volume-%s.%s' %
(self.VOLUME_UUID, self.SNAP_UUID),
self.SNAP_UUID: 'volume-%s.%s' %
(self.VOLUME_UUID, self.SNAP_UUID)}
drv._write_info_file(info_path, info_file_dict)
mox.ReplayAll()
drv.create_snapshot(snap_ref)
mox.VerifyAll()
def test_delete_snapshot_bottom(self):
"""Multiple snapshots exist.
In this test, path (volume-<uuid>) is backed by
snap_path (volume-<uuid>.<snap_uuid>) which is backed by
snap_path_2 (volume-<uuid>.<snap_uuid_2>).
Delete the snapshot identified by SNAP_UUID_2.
Chain goes from
(SNAP_UUID) (SNAP_UUID_2)
volume-abc -> volume-abc.baca -> volume-abc.bebe
to
(SNAP_UUID)
volume-abc -> volume-abc.baca
"""
(mox, drv) = self._mox, self._driver
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
hashed,
self.VOLUME_UUID)
volume_filename = 'volume-%s' % self.VOLUME_UUID
snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
snap_file = '%s.%s' % (volume_filename, self.SNAP_UUID)
snap_file_2 = '%s.%s' % (volume_filename, self.SNAP_UUID_2)
info_path = '%s%s' % (volume_path, '.info')
qemu_img_info_output = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_read_file')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_get_backing_chain_for_path')
mox.StubOutWithMock(drv, '_get_matching_backing_file')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(drv, '_ensure_share_writable')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
drv._ensure_share_writable(volume_dir)
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
image_utils.qemu_img_info(snap_path_2).AndReturn(img_info)
info_file_dict = {'active': snap_file_2,
self.SNAP_UUID_2: snap_file_2,
self.SNAP_UUID: snap_file}
snap_ref = {'name': 'test snap',
'volume_id': self.VOLUME_UUID,
'volume': self._simple_volume(),
'id': self.SNAP_UUID_2}
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_file_dict)
drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
drv._execute('rm', '-f', snap_path_2, run_as_root=True)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_file_dict)
drv._read_info_file(info_path).AndReturn(info_file_dict)
drv._write_info_file(info_path, info_file_dict)
mox.ReplayAll()
drv.delete_snapshot(snap_ref)
mox.VerifyAll()
def test_delete_snapshot_middle(self):
"""Multiple snapshots exist.
In this test, path (volume-<uuid>) is backed by
snap_path (volume-<uuid>.<snap_uuid>) which is backed by
snap_path_2 (volume-<uuid>.<snap_uuid_2>).
Delete the snapshot identified with SNAP_UUID.
Chain goes from
(SNAP_UUID) (SNAP_UUID_2)
volume-abc -> volume-abc.baca -> volume-abc.bebe
to (SNAP_UUID_2)
volume-abc -> volume-abc.bebe
"""
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID)
snap_path_2 = '%s.%s' % (volume_path, self.SNAP_UUID_2)
snap_file_2 = 'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID_2)
qemu_img_info_output_snap_1 = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 122K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID,
'volume-%s.%s' % (self.VOLUME_UUID, self.SNAP_UUID))
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(drv, '_get_backing_chain_for_path')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(drv, '_ensure_share_writable')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
info_file_dict = {self.SNAP_UUID_2: 'volume-%s.%s' %
(self.VOLUME_UUID, self.SNAP_UUID_2),
self.SNAP_UUID: 'volume-%s.%s' %
(self.VOLUME_UUID, self.SNAP_UUID)}
drv._ensure_share_writable(volume_dir)
info_path = drv._local_path_volume(volume) + '.info'
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(info_file_dict)
img_info = imageutils.QemuImgInfo(qemu_img_info_output_snap_1)
image_utils.qemu_img_info(snap_path).AndReturn(img_info)
snap_ref = {'name': 'test snap',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID}
snap_path_chain = [{'filename': snap_file_2,
'backing-filename': snap_file},
{'filename': snap_file,
'backing-filename': volume_file}]
drv.get_active_image_from_info(volume).AndReturn(snap_file_2)
drv._get_backing_chain_for_path(volume, snap_path_2).\
AndReturn(snap_path_chain)
drv._read_info_file(info_path).AndReturn(info_file_dict)
drv._execute('qemu-img', 'commit', snap_path_2, run_as_root=True)
drv._execute('rm', '-f', snap_path_2, run_as_root=True)
drv._read_info_file(info_path).AndReturn(info_file_dict)
drv._write_info_file(info_path, info_file_dict)
mox.ReplayAll()
drv.delete_snapshot(snap_ref)
mox.VerifyAll()
def test_delete_snapshot_not_in_info(self):
"""Snapshot not in info file / info file doesn't exist.
Snapshot creation failed so nothing is on-disk. Driver
should allow operation to succeed so the manager can
remove the snapshot record.
(Scenario: Snapshot object created in Cinder db but not
on backing storage.)
"""
(mox, drv) = self._mox, self._driver
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_filename = 'volume-%s' % self.VOLUME_UUID
volume_path = os.path.join(volume_dir, volume_filename)
info_path = '%s%s' % (volume_path, '.info')
mox.StubOutWithMock(drv, '_read_file')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_ensure_share_writable')
snap_ref = {'name': 'test snap',
'volume_id': self.VOLUME_UUID,
'volume': self._simple_volume(),
'id': self.SNAP_UUID_2}
drv._ensure_share_writable(volume_dir)
drv._read_info_file(info_path, empty_if_missing=True).AndReturn({})
mox.ReplayAll()
drv.delete_snapshot(snap_ref)
mox.VerifyAll()
def test_read_info_file(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_read_file')
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
hashed,
self.VOLUME_UUID)
info_path = '%s%s' % (volume_path, '.info')
drv._read_file(info_path).AndReturn('{"%(id)s": "volume-%(id)s"}' %
{'id': self.VOLUME_UUID})
mox.ReplayAll()
volume = DumbVolume()
volume['id'] = self.VOLUME_UUID
volume['name'] = 'volume-%s' % self.VOLUME_UUID
info = drv._read_info_file(info_path)
self.assertEqual(info[self.VOLUME_UUID],
'volume-%s' % self.VOLUME_UUID)
mox.VerifyAll()
def test_extend_volume(self):
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
drv._get_hash_str(
self.TEST_EXPORT1),
self.VOLUME_UUID)
qemu_img_info_output = """image: volume-%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 473K
""" % self.VOLUME_UUID
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(image_utils, 'resize_image')
drv.get_active_image_from_info(volume).AndReturn(volume['name'])
image_utils.qemu_img_info(volume_path).AndReturn(img_info)
image_utils.resize_image(volume_path, 3)
mox.ReplayAll()
drv.extend_volume(volume, 3)
mox.VerifyAll()
def test_create_snapshot_online(self):
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume['status'] = 'in-use'
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
info_path = '%s.info' % volume_path
ctxt = context.RequestContext('fake_user', 'fake_project')
snap_ref = {'name': 'test snap (online)',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt,
'status': 'asdf',
'progress': 'asdf'}
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
create_info = {'snapshot_id': snap_ref['id'],
'type': 'qcow2',
'new_file': snap_file}
drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'creating'
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_0p)
snap_ref_progress_50p = snap_ref_progress.copy()
snap_ref_progress_50p['progress'] = '50%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_50p)
snap_ref_progress_90p = snap_ref_progress.copy()
snap_ref_progress_90p['progress'] = '90%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_90p)
snap_info = {'active': snap_file,
self.SNAP_UUID: snap_file}
drv._write_info_file(info_path, snap_info)
mox.ReplayAll()
drv.create_snapshot(snap_ref)
mox.VerifyAll()
def test_create_snapshot_online_novafailure(self):
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume['status'] = 'in-use'
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
ctxt = context.RequestContext('fake_user', 'fake_project')
snap_ref = {'name': 'test snap (online)',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt}
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_create_qcow2_snap_file')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(drv, '_write_info_file')
drv._create_qcow2_snap_file(snap_ref, volume_file, snap_path)
create_info = {'snapshot_id': snap_ref['id'],
'type': 'qcow2',
'new_file': snap_file}
drv._nova.create_volume_snapshot(ctxt, self.VOLUME_UUID, create_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'creating'
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_0p)
snap_ref_progress_50p = snap_ref_progress.copy()
snap_ref_progress_50p['progress'] = '50%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_50p)
snap_ref_progress_99p = snap_ref_progress.copy()
snap_ref_progress_99p['progress'] = '99%'
snap_ref_progress_99p['status'] = 'error'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_99p)
mox.ReplayAll()
self.assertRaisesAndMessageMatches(
exception.GlusterfsException,
'Nova returned "error" status while creating snapshot.',
drv.create_snapshot,
snap_ref)
mox.VerifyAll()
def test_delete_snapshot_online_1(self):
"""Delete the newest snapshot, with only one snap present."""
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume['status'] = 'in-use'
ctxt = context.RequestContext('fake_user', 'fake_project')
snap_ref = {'name': 'test snap to delete (online)',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt}
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
info_path = '%s.info' % volume_path
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_ensure_share_writable')
snap_info = {'active': snap_file,
self.SNAP_UUID: snap_file}
drv._ensure_share_writable(volume_dir)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
qemu_img_info_output = """image: %s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, volume_file)
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
vol_qemu_img_info_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume_file
volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output)
image_utils.qemu_img_info(snap_path).AndReturn(img_info)
image_utils.qemu_img_info(volume_path).AndReturn(volume_img_info)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
delete_info = {
'type': 'qcow2',
'merge_target_file': None,
'file_to_merge': None,
'volume_id': self.VOLUME_UUID
}
drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_0p)
snap_ref_progress_50p = snap_ref_progress.copy()
snap_ref_progress_50p['progress'] = '50%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_50p)
snap_ref_progress_90p = snap_ref_progress.copy()
snap_ref_progress_90p['progress'] = '90%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_90p)
drv._write_info_file(info_path, snap_info)
drv._execute('rm', '-f', volume_path, run_as_root=True)
mox.ReplayAll()
drv.delete_snapshot(snap_ref)
mox.VerifyAll()
def test_delete_snapshot_online_2(self):
"""Delete the middle of 3 snapshots."""
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume['status'] = 'in-use'
ctxt = context.RequestContext('fake_user', 'fake_project')
snap_ref = {'name': 'test snap to delete (online)',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt}
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
info_path = '%s.info' % volume_path
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
snap_file_2 = '%s.%s' % (volume_file, self.SNAP_UUID_2)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(drv, '_write_info_file')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_ensure_share_writable')
snap_info = {'active': snap_file_2,
self.SNAP_UUID: snap_file,
self.SNAP_UUID_2: snap_file_2}
drv._ensure_share_writable(volume_dir)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
qemu_img_info_output = """image: %s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, volume_file)
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
vol_qemu_img_info_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume_file
volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output)
image_utils.qemu_img_info(snap_path).AndReturn(img_info)
image_utils.qemu_img_info(volume_path).AndReturn(volume_img_info)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
delete_info = {'type': 'qcow2',
'merge_target_file': volume_file,
'file_to_merge': snap_file,
'volume_id': self.VOLUME_UUID}
drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_0p)
snap_ref_progress_50p = snap_ref_progress.copy()
snap_ref_progress_50p['progress'] = '50%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_50p)
snap_ref_progress_90p = snap_ref_progress.copy()
snap_ref_progress_90p['progress'] = '90%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_90p)
drv._write_info_file(info_path, snap_info)
drv._execute('rm', '-f', snap_path, run_as_root=True)
mox.ReplayAll()
drv.delete_snapshot(snap_ref)
mox.VerifyAll()
def test_delete_snapshot_online_novafailure(self):
"""Delete the newest snapshot."""
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
volume['status'] = 'in-use'
ctxt = context.RequestContext('fake_user', 'fake_project')
snap_ref = {'name': 'test snap to delete (online)',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt}
hashed = drv._get_hash_str(self.TEST_EXPORT1)
volume_file = 'volume-%s' % self.VOLUME_UUID
volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, hashed)
volume_path = '%s/%s/%s' % (self.TEST_MNT_POINT_BASE,
hashed,
volume_file)
info_path = '%s.info' % volume_path
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
snap_file = '%s.%s' % (volume_file, self.SNAP_UUID)
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_nova')
# Stub out the busy wait.
self.stub_out_not_replaying(time, 'sleep')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(db, 'snapshot_get')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_ensure_share_writable')
snap_info = {'active': snap_file,
self.SNAP_UUID: snap_file}
drv._ensure_share_writable(volume_dir)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
qemu_img_info_output = """image: %s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, volume_file)
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
vol_qemu_img_info_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume_file
volume_img_info = imageutils.QemuImgInfo(vol_qemu_img_info_output)
image_utils.qemu_img_info(snap_path).AndReturn(img_info)
image_utils.qemu_img_info(volume_path).AndReturn(volume_img_info)
drv._read_info_file(info_path, empty_if_missing=True).\
AndReturn(snap_info)
delete_info = {
'type': 'qcow2',
'merge_target_file': None,
'file_to_merge': None,
'volume_id': self.VOLUME_UUID
}
drv._nova.delete_volume_snapshot(ctxt, self.SNAP_UUID, delete_info)
drv._read_info_file(info_path).AndReturn(snap_info)
drv._read_info_file(info_path).AndReturn(snap_info)
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'deleting'
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_0p)
snap_ref_progress_50p = snap_ref_progress.copy()
snap_ref_progress_50p['progress'] = '50%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_50p)
snap_ref_progress_90p = snap_ref_progress.copy()
snap_ref_progress_90p['status'] = 'error_deleting'
snap_ref_progress_90p['progress'] = '90%'
db.snapshot_get(ctxt, self.SNAP_UUID).AndReturn(snap_ref_progress_90p)
mox.ReplayAll()
self.assertRaisesAndMessageMatches(exception.GlusterfsException,
'Unable to delete snapshot',
drv.delete_snapshot,
snap_ref)
mox.VerifyAll()
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_delete_stale_snapshot')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'get_active_image_from_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_qemu_img_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_read_info_file')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_local_path_volume')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_local_volume_dir')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_ensure_share_writable')
def test_delete_snapshot_online_stale_snapshot(self,
mock_ensure_share_writable,
mock_local_volume_dir,
mock_local_path_volume,
mock_read_info_file,
mock_qemu_img_info,
mock_get_active_image,
mock_delete_stale_snap):
volume = self._simple_volume()
ctxt = context.RequestContext('fake_user', 'fake_project')
volume['status'] = 'in-use'
volume_filename = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
info_path = volume_path + '.info'
stale_snapshot = {'name': 'fake-volume',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID_2,
'context': ctxt}
active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
stale_snap_file = volume['name'] + '.' + stale_snapshot['id']
stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)
snap_info = {'active': active_snap_file,
stale_snapshot['id']: stale_snap_file}
qemu_img_info = imageutils.QemuImgInfo()
qemu_img_info.file_format = 'qcow2'
mock_local_path_volume.return_value = volume_path
mock_read_info_file.return_value = snap_info
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_qemu_img_info.return_value = qemu_img_info
mock_get_active_image.return_value = active_snap_file
self._driver.delete_snapshot(stale_snapshot)
mock_ensure_share_writable.assert_called_once_with(
self.TEST_MNT_POINT)
mock_local_path_volume.assert_called_once_with(
stale_snapshot['volume'])
mock_read_info_file.assert_called_once_with(info_path,
empty_if_missing=True)
mock_qemu_img_info.assert_called_once_with(stale_snap_path)
mock_get_active_image.assert_called_once_with(
stale_snapshot['volume'])
mock_delete_stale_snap.assert_called_once_with(stale_snapshot)
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_write_info_file')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'get_active_image_from_info')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_local_volume_dir')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_read_info_file')
@mock.patch('cinder.volume.drivers.glusterfs.GlusterfsDriver.'
'_local_path_volume')
def test_delete_stale_snapshot(self, mock_local_path_volume,
mock_read_info_file,
mock_local_volume_dir,
mock_get_active_image,
mock_delete_if_exists,
mock_write_info_file):
volume = self._simple_volume()
volume['status'] = 'in-use'
volume_filename = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
info_path = volume_path + '.info'
# Test case where snapshot_file = active_file
snapshot = {'name': 'fake-volume',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID_2}
active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
stale_snap_file = volume['name'] + '.' + snapshot['id']
stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)
snap_info = {'active': active_snap_file,
snapshot['id']: stale_snap_file}
mock_local_path_volume.return_value = volume_path
mock_read_info_file.return_value = snap_info
mock_get_active_image.return_value = active_snap_file
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
self._driver._delete_stale_snapshot(snapshot)
mock_local_path_volume.assert_called_with(snapshot['volume'])
mock_read_info_file.assert_called_with(info_path)
mock_delete_if_exists.assert_not_called()
mock_write_info_file.assert_not_called()
# Test case where snapshot_file != active_file
snapshot = {'name': 'fake-volume',
'volume_id': self.VOLUME_UUID,
'volume': volume,
'id': self.SNAP_UUID}
active_snap_file = volume['name'] + '.' + self.SNAP_UUID_2
stale_snap_file = volume['name'] + '.' + snapshot['id']
stale_snap_path = '%s/%s' % (self.TEST_MNT_POINT, stale_snap_file)
snap_info = {'active': active_snap_file,
snapshot['id']: stale_snap_file}
mock_local_path_volume.return_value = volume_path
mock_read_info_file.return_value = snap_info
mock_get_active_image.return_value = active_snap_file
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
self._driver._delete_stale_snapshot(snapshot)
mock_local_path_volume.assert_called_with(snapshot['volume'])
mock_read_info_file.assert_called_with(info_path)
mock_delete_if_exists.assert_called_once_with(stale_snap_path)
snap_info.pop(snapshot['id'], None)
mock_write_info_file.assert_called_once_with(info_path, snap_info)
def test_get_backing_chain_for_path(self):
(mox, drv) = self._mox, self._driver
CONF.set_override('glusterfs_mount_point_base',
self.TEST_MNT_POINT_BASE)
volume = self._simple_volume()
vol_filename = volume['name']
vol_filename_2 = volume['name'] + '.asdfjkl'
vol_filename_3 = volume['name'] + 'qwertyuiop'
hashed = drv._get_hash_str(self.TEST_EXPORT1)
vol_dir = '%s/%s' % (self.TEST_MNT_POINT_BASE, hashed)
vol_path = '%s/%s' % (vol_dir, vol_filename)
vol_path_2 = '%s/%s' % (vol_dir, vol_filename_2)
vol_path_3 = '%s/%s' % (vol_dir, vol_filename_3)
mox.StubOutWithMock(drv, '_local_volume_dir')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
qemu_img_output_base = """image: %(image_name)s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
"""
qemu_img_output = """image: %(image_name)s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %(backing_file)s
"""
qemu_img_output_1 = qemu_img_output_base % {'image_name': vol_filename}
qemu_img_output_2 = qemu_img_output % {'image_name': vol_filename_2,
'backing_file': vol_filename}
qemu_img_output_3 = qemu_img_output % {'image_name': vol_filename_3,
'backing_file': vol_filename_2}
info_1 = imageutils.QemuImgInfo(qemu_img_output_1)
info_2 = imageutils.QemuImgInfo(qemu_img_output_2)
info_3 = imageutils.QemuImgInfo(qemu_img_output_3)
image_utils.qemu_img_info(vol_path_3).\
AndReturn(info_3)
drv._local_volume_dir(volume).AndReturn(vol_dir)
image_utils.qemu_img_info(vol_path_2).\
AndReturn(info_2)
drv._local_volume_dir(volume).AndReturn(vol_dir)
image_utils.qemu_img_info(vol_path).\
AndReturn(info_1)
mox.ReplayAll()
chain = drv._get_backing_chain_for_path(volume, vol_path_3)
mox.VerifyAll()
# Verify chain contains all expected data
item_1 = drv._get_matching_backing_file(chain, vol_filename)
self.assertEqual(item_1['filename'], vol_filename_2)
chain.remove(item_1)
item_2 = drv._get_matching_backing_file(chain, vol_filename_2)
self.assertEqual(item_2['filename'], vol_filename_3)
chain.remove(item_2)
self.assertEqual(len(chain), 1)
self.assertEqual(chain[0]['filename'], vol_filename)
def test_copy_volume_from_snapshot(self):
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_read_info_file')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
dest_volume = self._simple_volume(
'c1073000-0000-0000-0000-0000000c1073')
src_volume = self._simple_volume()
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_EXPORT1))
src_vol_path = os.path.join(vol_dir, src_volume['name'])
dest_vol_path = os.path.join(vol_dir, dest_volume['name'])
info_path = os.path.join(vol_dir, src_volume['name']) + '.info'
snapshot = {'volume_name': src_volume['name'],
'name': 'clone-snap-%s' % src_volume['id'],
'size': src_volume['size'],
'volume_size': src_volume['size'],
'volume_id': src_volume['id'],
'id': 'tmp-snap-%s' % src_volume['id'],
'volume': src_volume}
snap_file = dest_volume['name'] + '.' + snapshot['id']
snap_path = os.path.join(vol_dir, snap_file)
size = dest_volume['size']
drv._read_info_file(info_path).AndReturn(
{'active': snap_file,
snapshot['id']: snap_file}
)
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, src_volume['name'])
img_info = imageutils.QemuImgInfo(qemu_img_output)
image_utils.qemu_img_info(snap_path).AndReturn(img_info)
image_utils.convert_image(src_vol_path, dest_vol_path, 'raw')
drv._set_rw_permissions_for_all(dest_vol_path)
mox.ReplayAll()
drv._copy_volume_from_snapshot(snapshot, dest_volume, size)
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
(mox, drv) = self._mox, self._driver
src_volume = self._simple_volume()
snap_ref = {'volume_name': src_volume['name'],
'name': 'clone-snap-%s' % src_volume['id'],
'size': src_volume['size'],
'volume_size': src_volume['size'],
'volume_id': src_volume['id'],
'id': 'tmp-snap-%s' % src_volume['id'],
'volume': src_volume,
'status': 'available'}
new_volume = DumbVolume()
new_volume['size'] = snap_ref['size']
mox.StubOutWithMock(drv, '_ensure_shares_mounted')
mox.StubOutWithMock(drv, '_find_share')
mox.StubOutWithMock(drv, '_do_create_volume')
mox.StubOutWithMock(drv, '_copy_volume_from_snapshot')
drv._ensure_shares_mounted()
drv._find_share(new_volume['size']).AndReturn(self.TEST_EXPORT1)
drv._do_create_volume(new_volume)
drv._copy_volume_from_snapshot(snap_ref,
new_volume,
new_volume['size'])
mox.ReplayAll()
drv.create_volume_from_snapshot(new_volume, snap_ref)
mox.VerifyAll()
def test_initialize_connection(self):
(mox, drv) = self._mox, self._driver
volume = self._simple_volume()
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_EXPORT1))
vol_path = os.path.join(vol_dir, volume['name'])
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
drv.get_active_image_from_info(volume).AndReturn(volume['name'])
image_utils.qemu_img_info(vol_path).AndReturn(img_info)
mox.ReplayAll()
conn_info = drv.initialize_connection(volume, None)
mox.VerifyAll()
self.assertEqual(conn_info['data']['format'], 'raw')
self.assertEqual(conn_info['driver_volume_type'], 'glusterfs')
self.assertEqual(conn_info['data']['name'], volume['name'])
self.assertEqual(conn_info['mount_point_base'],
self.TEST_MNT_POINT_BASE)
def test_get_mount_point_base(self):
(mox, drv) = self._mox, self._driver
self.assertEqual(drv._get_mount_point_base(),
self.TEST_MNT_POINT_BASE)
def test_backup_volume(self):
"""Backup a volume with no snapshots."""
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv.db, 'volume_get')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(drv, '_qemu_img_info')
mox.StubOutWithMock(base_driver.VolumeDriver, 'backup_volume')
ctxt = context.RequestContext('fake_user', 'fake_project')
volume = self._simple_volume()
backup = {'volume_id': volume['id']}
drv.db.volume_get(ctxt, volume['id']).AndReturn(volume)
drv.get_active_image_from_info(IgnoreArg()).AndReturn('/some/path')
info = imageutils.QemuImgInfo()
info.file_format = 'raw'
drv._qemu_img_info(IgnoreArg()).AndReturn(info)
base_driver.VolumeDriver.backup_volume(IgnoreArg(),
IgnoreArg(),
IgnoreArg())
mox.ReplayAll()
drv.backup_volume(ctxt, backup, IgnoreArg())
mox.VerifyAll()
def test_backup_volume_previous_snap(self):
"""Backup a volume that previously had a snapshot.
The snapshot was deleted, so snap_info differs from the previous test.
"""
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv.db, 'volume_get')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(drv, '_qemu_img_info')
mox.StubOutWithMock(base_driver.VolumeDriver, 'backup_volume')
ctxt = context.RequestContext('fake_user', 'fake_project')
volume = self._simple_volume()
backup = {'volume_id': volume['id']}
drv.db.volume_get(ctxt, volume['id']).AndReturn(volume)
drv.get_active_image_from_info(IgnoreArg()).AndReturn('/some/file2')
info = imageutils.QemuImgInfo()
info.file_format = 'raw'
drv._qemu_img_info(IgnoreArg()).AndReturn(info)
base_driver.VolumeDriver.backup_volume(IgnoreArg(),
IgnoreArg(),
IgnoreArg())
mox.ReplayAll()
drv.backup_volume(ctxt, backup, IgnoreArg())
mox.VerifyAll()
def test_backup_snap_failure_1(self):
"""Backup fails if snapshot exists (database)."""
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv.db, 'snapshot_get_all_for_volume')
ctxt = context.RequestContext('fake_user', 'fake_project')
volume = self._simple_volume()
backup = {'volume_id': volume['id']}
drv.db.snapshot_get_all_for_volume(ctxt, volume['id']).AndReturn(
[{'snap1': 'a'}, {'snap2': 'b'}])
mox.ReplayAll()
self.assertRaises(exception.InvalidVolume,
drv.backup_volume,
ctxt, backup, IgnoreArg())
mox.VerifyAll()
def test_backup_snap_failure_2(self):
"""Backup fails if snapshot exists (on-disk)."""
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv.db, 'volume_get')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
mox.StubOutWithMock(drv, '_qemu_img_info')
ctxt = context.RequestContext('fake_user', 'fake_project')
volume = self._simple_volume()
backup = {'volume_id': volume['id']}
drv.db.volume_get(ctxt, volume['id']).AndReturn(volume)
drv.get_active_image_from_info(IgnoreArg()).\
AndReturn('/some/path/file2')
info = imageutils.QemuImgInfo()
info.file_format = 'raw'
info.backing_file = 'file1'
drv._qemu_img_info(IgnoreArg()).AndReturn(info)
mox.ReplayAll()
self.assertRaises(exception.InvalidVolume,
drv.backup_volume,
ctxt, backup, IgnoreArg())
mox.VerifyAll()
def test_backup_failure_unsupported_format(self):
"""Attempt to backup a volume with a qcow2 base."""
(mox, drv) = self._mox, self._driver
mox.StubOutWithMock(drv, '_qemu_img_info')
mox.StubOutWithMock(drv.db, 'volume_get')
mox.StubOutWithMock(drv, 'get_active_image_from_info')
ctxt = context.RequestContext('fake_user', 'fake_project')
volume = self._simple_volume()
backup = {'volume_id': volume['id']}
drv.get_active_image_from_info(IgnoreArg()).AndReturn('/some/path')
info = imageutils.QemuImgInfo()
info.file_format = 'qcow2'
drv.db.volume_get(ctxt, volume['id']).AndReturn(volume)
drv._qemu_img_info(IgnoreArg()).AndReturn(info)
mox.ReplayAll()
self.assertRaises(exception.InvalidVolume,
drv.backup_volume,
ctxt, backup, IgnoreArg())
mox.VerifyAll()
def test_copy_volume_to_image_raw_image(self):
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
with contextlib.nested(
mock.patch.object(drv, 'get_active_image_from_info'),
mock.patch.object(drv, '_local_volume_dir'),
mock.patch.object(image_utils, 'qemu_img_info'),
mock.patch.object(image_utils, 'upload_volume')
) as (mock_get_active_image_from_info, mock_local_volume_dir,
mock_qemu_img_info, mock_upload_volume):
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = volume_path
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, mock.ANY)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_once_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
def test_copy_volume_to_image_qcow2_image(self):
"""Upload a qcow2 image file which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
with contextlib.nested(
mock.patch.object(drv, 'get_active_image_from_info'),
mock.patch.object(drv, '_local_volume_dir'),
mock.patch.object(image_utils, 'qemu_img_info'),
mock.patch.object(image_utils, 'convert_image'),
mock.patch.object(image_utils, 'upload_volume'),
mock.patch.object(drv, '_execute')
) as (mock_get_active_image_from_info, mock_local_volume_dir,
mock_qemu_img_info, mock_convert_image, mock_upload_volume,
mock_execute):
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
qemu_img_output = """image: %s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = '%s/%s.temp_image.%s' % (self.TEST_MNT_POINT,
volume['id'],
image_meta['id'])
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw')
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
mock_execute.assert_called_once_with('rm', '-f', upload_path)
def test_copy_volume_to_image_snapshot_exists(self):
"""Upload an active snapshot which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID)
volume_filename = 'volume-%s' % self.VOLUME_UUID
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
with contextlib.nested(
mock.patch.object(drv, 'get_active_image_from_info'),
mock.patch.object(drv, '_local_volume_dir'),
mock.patch.object(image_utils, 'qemu_img_info'),
mock.patch.object(image_utils, 'convert_image'),
mock.patch.object(image_utils, 'upload_volume'),
mock.patch.object(drv, '_execute')
) as (mock_get_active_image_from_info, mock_local_volume_dir,
mock_qemu_img_info, mock_convert_image, mock_upload_volume,
mock_execute):
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
qemu_img_output = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename)
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = '%s/%s.temp_image.%s' % (self.TEST_MNT_POINT,
volume['id'],
image_meta['id'])
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw')
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
mock_execute.assert_called_once_with('rm', '-f', upload_path)
| github-borat/cinder | cinder/tests/test_glusterfs.py | Python | apache-2.0 | 87,804 |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server to run Google Data Loss Prevention API DeID.
No authentication is implemented yet, so this server is intended to be run only on localhost.
Requires Apache Beam client, Flask, Google Python API Client:
pip install --upgrade apache_beam
pip install --upgrade flask
pip install --upgrade google-api-python-client
"""
from __future__ import absolute_import
from datetime import datetime
import json
import logging
import posixpath
from apiclient import discovery
import enum
import flask
from common import gcsutil
from common import unicodecsv
from deid_app.backend import config
from deid_app.backend import model
from dlp import run_deid_lib
from eval import run_pipeline_lib as eval_lib
import jsonschema
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.exceptions import exceptions
logging.getLogger().setLevel(logging.INFO)
app = flask.Flask(__name__)
app.config.from_object(config.Config)
bq_client = bigquery.Client(app.config['PROJECT_ID'])
with app.app_context():
model.init_app(app)
PATIENT_ID = 'patient_id'
RECORD_NUM = 'record_number'
NOTE = 'note'
FINDINGS = 'findings'
EXPECTED_CSV_SCHEMA = [RECORD_NUM, PATIENT_ID, NOTE]
EXPECTED_FINDINGS_SCHEMA = [RECORD_NUM, PATIENT_ID, FINDINGS,
run_deid_lib.DLP_FINDINGS_TIMESTAMP]
EXPECTED_OUTPUT_SCHEMA = (EXPECTED_CSV_SCHEMA +
[run_deid_lib.DLP_DEID_TIMESTAMP])
CSV_FIELD_TYPE = {
RECORD_NUM: 'INT64',
PATIENT_ID: 'STRING',
NOTE: 'STRING',
}
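# Illustrative only: a CSV input matching EXPECTED_CSV_SCHEMA would look
# roughly like this (values are made up):
#
#   record_number,patient_id,note
#   1,patient-111,"Seen by Dr. Example on 2018-03-01."
#   2,patient-222,"Follow-up visit, no complaints."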
deid_schema = {
'type':
'object',
'properties': {
'name': {
'type': 'string'
},
'inputMethod': {
'type': 'string'
},
'inputInfo': {
'type': 'string'
},
'outputMethod': {
'type': 'string'
},
'outputInfo': {
'type': 'string'
},
'findingsTable': {
'type': 'string'
},
'maeTable': {
'type': 'string'
},
'maeDir': {
'type': 'string'
},
'batchSize': {
'type': 'number'
},
},
'required': [
'name',
'inputMethod',
'inputInfo',
'outputMethod',
'outputInfo',
],
}
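# Illustrative only (unused by the server): a request body that validates
# against deid_schema. The method and table strings are placeholders, not
# values the DLP pipeline necessarily accepts; see run_deid_lib for the real
# options. It can be checked with
# jsonschema.validate(EXAMPLE_DEID_REQUEST, deid_schema).
EXAMPLE_DEID_REQUEST = {
    'name': 'demo-job',
    'inputMethod': 'bigquery',
    'inputInfo': 'my_dataset.notes',
    'outputMethod': 'bigquery',
    'outputInfo': 'my_dataset.deid_notes',
}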
eval_pipeline_shema = {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'input': {
'type': 'object',
'properties': {
'gcs': {
'type': 'object',
'properties': {
'pattern': {'type': 'string'},
'golden': {'type': 'string'},
},
'required': [
'pattern',
'golden',
],
},
'bigquery': {
'type': 'object',
'properties': {
'query': {'type': 'string'},
'golden': {'type': 'string'},
},
'required': [
'query',
'golden',
],
},
},
'oneOf': [
{'required': ['gcs']},
{'required': ['bigquery']},
],
},
'output': {
'type': 'object',
'properties': {
'gcs': {
'type': 'object',
'properties': {
'dir': {'type': 'string'},
'debug': {'type': 'boolean'},
},
'required': [
'dir',
'debug',
],
},
'bigquery': {
'type': 'object',
'properties': {
'stats': {'type': 'string'},
'debug': {'type': 'string'},
'perNote': {'type': 'string'},
},
'required': [
'stats',
'debug',
],
},
},
'anyOf': [
{'required': ['gcs']},
{'required': ['bigquery']},
],
},
'ignoreTypes': {
'type': 'array',
'items': {'type': 'string'},
},
},
'required': [
'name',
'input',
'output',
],
}
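# Illustrative only: a request body that validates against eval_pipeline_shema,
# using BigQuery for both input and output (all names are made up):
#
#   {
#     "name": "demo-eval",
#     "input": {
#       "bigquery": {"query": "SELECT * FROM my_dataset.findings",
#                    "golden": "my_dataset.goldens"}
#     },
#     "output": {
#       "bigquery": {"stats": "my_dataset.eval_stats",
#                    "debug": "my_dataset.eval_debug"}
#     }
#   }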
dlp_image_demo_schema = {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['image/jpeg',
'image/bmp',
'image/png',
'image/svg',
'text/plain',
]
},
'data': {'type': 'string'},
},
'required': [
'data',
'type'
],
}
bq_table_schema = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'mode': {
'type': 'string',
'enum': ['NULLABLE',
'REQUIRED',
'REPEATED',
]
},
'name': {'type': 'string'},
'type': {'type': 'string'},
},
'required': [
'name',
'type',
],
},
}
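# Illustrative only (unused by the server): a payload for the table-creation
# endpoint below that validates against bq_table_schema and mirrors the
# expected notes columns. Types come from CSV_FIELD_TYPE; the REQUIRED mode
# on record_number is an assumption.
EXAMPLE_BQ_TABLE_PAYLOAD = [
    {'name': RECORD_NUM, 'type': CSV_FIELD_TYPE[RECORD_NUM],
     'mode': 'REQUIRED'},
    {'name': PATIENT_ID, 'type': CSV_FIELD_TYPE[PATIENT_ID]},
    {'name': NOTE, 'type': CSV_FIELD_TYPE[NOTE]},
]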
dlp_image_redaction_configs = [{
'redactionColor': {
'blue': 0.1,
'green': 0.1,
'red': 0.8
},
'redactAllText': 'true'
}]
def get_bq_dataset(dataset_id):
"""Returns a dataset instance from BigQuery."""
dataset_ref = bq_client.dataset(dataset_id)
try:
dataset = bq_client.get_dataset(dataset_ref)
except exceptions.NotFound as e:
raise e
return dataset
def append_project(table_name):
"""formats a table name to 'project:dataset.table'."""
return '{}:{}'.format(app.config['PROJECT_ID'], table_name)
def get_bq_table(dataset_id, table_id):
"""Return a table instance from BigQuery."""
dataset_ref = bq_client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
try:
return bq_client.get_table(table_ref)
except exceptions.NotFound as e:
raise e
def get_bq_rows(query):
"""Returns a BigQuery query as a list of rows."""
query_job = bq_client.query(query)
res = query_job.result() # blocks until query is done.
return [dict(list(row.items())) for row in res]
def verify_bq_table(dataset_id, table_id, expected_schema):
"""Verifies that a table exists and has an expected schema.
Args:
dataset_id: The name of the BigQuery dataset.
table_id: The name of the BigQuery table.
expected_schema: A list of the expected names of columns.
Raises:
exceptions.NotFound: If the table does not exist in BigQuery.
Returns:
A boolean of the verification status.
"""
table = get_bq_table(dataset_id, table_id)
table_headers = [col.name for col in table.schema]
return set(table_headers) == set(expected_schema)
def verify_gcs_path(path):
"""Verifies that a GCS path exists.
Args:
path: A string that represents the target path.
Returns:
A boolean of the verification status.
"""
storage_client = storage.Client()
path_info = gcsutil.GcsFileName.from_path(path)
try:
bucket = storage_client.get_bucket(path_info.bucket)
except exceptions.NotFound:
return False
return storage.Blob(bucket=bucket,
name=path_info.blob).exists(storage_client)
@app.route('/')
@app.route('/index')
@app.route('/api')
def index():
return flask.jsonify(data='Deid backend server', status=200), 200
@app.route('/api/project')
def get_project():
return flask.jsonify(project=app.config['PROJECT_ID']), 200
@app.route('/api/datasets')
def get_datasets():
datasets = list(bq_client.list_datasets())
dataset_ids = [dataset.dataset_id for dataset in datasets]
return flask.jsonify(datasets=dataset_ids), 200
@app.route('/api/datasets/<dataset_id>', methods=['POST', 'DELETE'])
def manage_dataset(dataset_id):
"""Create and delete datasets from BigQuery."""
dataset_ref = bq_client.dataset(dataset_id)
method = flask.request.method
if method == 'POST':
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
payload = flask.request.json
if payload:
dataset.location = payload.get('location') or dataset.location
dataset.description = payload.get('description') or ''
try:
dataset = bq_client.create_dataset(dataset)
except exceptions.Conflict as e:
error_msg = 'A dataset with this name already exists'
return flask.jsonify(error=e.code, text=error_msg), e.code
return flask.jsonify(result='success'), 200
elif method == 'DELETE':
try:
bq_client.delete_dataset(dataset_ref, delete_contents=True)
except exceptions.NotFound as e:
error_msg = 'Dataset does not exist'
return flask.jsonify(error=e.code, text=error_msg), e.code
return flask.jsonify(result='success'), 200
@app.route('/api/datasets/<dataset_id>/tables')
def get_tables(dataset_id):
"""Get table names for a provided dataset."""
try:
get_bq_dataset(dataset_id)
except exceptions.NotFound as e:
return flask.jsonify(error=e.code, text=e.message), e.code
dataset_ref = bq_client.dataset(dataset_id)
tables = list(bq_client.list_tables(dataset_ref))
table_ids = [table.table_id for table in tables]
return flask.jsonify(dataset=dataset_id, tables=table_ids), 200
@app.route('/api/datasets/<dataset_id>/tables/<table_id>',
methods=['POST'])
def manage_tables(dataset_id, table_id):
"""Create tables in datasets in BigQuery."""
try:
get_bq_dataset(dataset_id)
except exceptions.NotFound as e:
return flask.jsonify(error=e.code, text=e.message), e.code
table_ref = bq_client.dataset(dataset_id).table(table_id)
try:
jsonschema.validate(flask.request.json, bq_table_schema)
except jsonschema.ValidationError:
error_msg = 'Unable to validate the provided payload.'
return flask.jsonify(error=400, text=error_msg), 400
schema = [bigquery.SchemaField(field['name'], field['type'],
field.get('mode') or 'NULLABLE')
for field in flask.request.json]
table = bigquery.Table(table_ref, schema=schema)
try:
table = bq_client.create_table(table)
except exceptions.GoogleAPIError as e:
return flask.jsonify(error=e.message), 400
return flask.jsonify(result='success'), 200
@app.route('/api/deidentify/<job_id>/metadata')
def get_job_metadata(job_id):
"""Gets the list of patient_id, record_num for a given job."""
job = model.DeidJobTable.query.get(job_id)
if not job:
error_msg = 'Job does not exist'
return flask.jsonify(text=error_msg, error=404), 404
try:
orig_data = get_bq_rows(job.original_query)
except exceptions.NotFound as e:
return flask.jsonify(text=e.message, error=e.code), e.code
# The metadata is only the patient_id and record_number
metadata = [{
'patientId': row[PATIENT_ID],
'recordNumber': row[RECORD_NUM],
} for row in orig_data]
return flask.jsonify(notesMetadata=metadata), 200
class NoteAnnotation(enum.Enum):
HIGHLIGHTED = 0
UNHIGHLIGHTED = 1
class NoteHighlight(object):
"""Represents a chunk of a note that was deidentified and its metadata.
A note is split into a list of NoteHighlight objects. Each NoteHighlight can
indicate that its chunk is highlighted; in that case, it carries the
replacement text and color information for the chunk that should be
highlighted.
Attributes:
annotation: A string representation of a NoteAnnotation that indicates
whether this range is highlighted or not.
quote: A string that represents the original chunk of the note.
replacement: a string that indicates the value to replace a highlighted
chunk with.
    begin: An integer giving the start offset of this chunk within the note.
length: An integer with the length of the chunk.
color: A string that represents the color to be associated with a
highlighted chunk.
"""
def __init__(self, annotation, quote, replacement, begin, length, color):
"""Initializes a NoteHighlight object with all attributed."""
self.annotation = annotation
self.quote = quote
self.replacement = replacement
self.begin = begin
self.length = length
self.color = color
@app.route('/api/deidentify/<job_id>/note/<record_number>')
def get_note_highlights(job_id, record_number):
"""returns a list of ranges to highlight."""
job = model.DeidJobTable.query.get(job_id)
if not job:
error_msg = 'Job does not exist'
return flask.jsonify(text=error_msg, error=404), 404
orig_query = job.original_query + ' where {}={}'.format(
RECORD_NUM, record_number)
findings_query = 'select findings from {} where {}=\'{}\' and {}={}'.format(
job.findings_table, run_deid_lib.DLP_FINDINGS_TIMESTAMP, job.timestamp,
RECORD_NUM, record_number)
try:
orig_row = get_bq_rows(orig_query)
findings_data = get_bq_rows(findings_query)
except exceptions.NotFound as e:
return flask.jsonify(text=e.message, error=e.code), e.code
if len(findings_data) != 1 or len(orig_row) != 1:
error_msg = 'Selected record number does not exist or is not unique'
return flask.jsonify(text=error_msg, error=400), 400
findings = json.loads(findings_data[0]['findings'])['findings']
note = orig_row[0][NOTE]
res = []
findings.sort(key=lambda x: int(x['location']['codepointRange']['start']))
# Assumption:
# The location attribute always has a codepointRange field that indicates
# the offset of the identified string in unicode format.
# The original text is always replaced with its detected info type.
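  # Worked example (hypothetical values): for note = 'John visited today' and a
  # single finding with codepointRange {start: 0, end: 4} and infoType NAME,
  # the loop below emits an UNHIGHLIGHTED chunk for note[0:0] (the empty text
  # preceding the finding) and a HIGHLIGHTED chunk for note[0:4] ('John') whose
  # replacement is 'NAME'; the trailing ' visited today' is appended after the
  # loop as the final UNHIGHLIGHTED chunk.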
offset = 0
for finding in findings:
location = finding['location']['codepointRange']
start, end = int(location['start']), int(location['end'])
# This check handles overlapping findings. For now, this ensures that the
# code doesn't crash in that case.
if start < offset:
continue
color = 'Bisque'
    # For every detected text, there are two chunks that can be created: the one
    # preceding the detected text (unhighlighted) and the highlighted one (the
    # detected text).
# The unhighlighted chunk
first_quote = note[offset:start]
first_replacement = first_quote
first_annotation = NoteAnnotation.UNHIGHLIGHTED
first_length = start - offset - 1
first_chunk = NoteHighlight(first_annotation.name, first_quote,
first_replacement, offset, first_length, color)
res.append(first_chunk.__dict__) # dict is json serializable.
# The highlighted chunk
second_quote = note[start:end]
second_replacement = finding['infoType']['name']
second_annotation = NoteAnnotation.HIGHLIGHTED
second_length = end - start
second_chunk = NoteHighlight(second_annotation.name, second_quote,
second_replacement, start, second_length,
color)
res.append(second_chunk.__dict__)
offset = end
# If the last info type isn't at the end of the note, then there is some
# leftover unhighlighted chunk.
final_chunk = NoteHighlight(NoteAnnotation.UNHIGHLIGHTED.name, note[offset:],
'', offset, len(note) - offset, '')
res.append(final_chunk.__dict__)
return flask.jsonify(data=res), 200
@app.route('/api/deidentify', methods=['GET', 'POST'])
def deidentify():
"""run dlp pipeline."""
if flask.request.method == 'GET':
jobs, offset = model.get_list(model.DeidJobTable)
result = [{
'id': job['id'],
'name': job['name'],
'originalQuery': job['original_query'],
'deidTable': job['deid_table'],
'status': job['status'],
'logTrace': job['log_trace'],
'timestamp': job['timestamp'],
} for job in jobs]
return flask.jsonify(jobs=result, offset=offset), 200
try:
jsonschema.validate(flask.request.json, deid_schema)
except jsonschema.ValidationError:
error_msg = 'unable to validate provided payload.'
return flask.jsonify(error=400, text=error_msg), 400
job_data = {
'name': flask.request.json['name'],
'timestamp': datetime.utcnow(),
}
(input_query, input_table, deid_table, findings_table, mae_dir, mae_table,
mae_task_name, batch_size, dtd_dir, input_csv, output_csv) = (
None, None, None, None, None,
None, None, None, None, None, None)
request = flask.request
# determine input
input_method, input_info = (request.json['inputMethod'],
request.json['inputInfo'])
if input_method == 'input_table':
input_table = input_info
try:
dataset, table = input_table.split('.')
if not verify_bq_table(dataset, table, EXPECTED_CSV_SCHEMA):
error_msg = ('input table schema does not match the expected one. '
'Expecting: {}'.format(', '.join(EXPECTED_CSV_SCHEMA)))
return flask.jsonify(error=400, text=error_msg), 400
except exceptions.NotFound:
return flask.jsonify(error=400, text='unable to locate input data'), 400
job_data['original_query'] = 'SELECT * FROM {}'.format(input_table)
elif input_method == 'input_query':
input_query = input_info
job_data['original_query'] = input_query
try:
get_bq_rows(input_query)
except exceptions.BadRequest:
error_msg = 'invalid input query'
return flask.jsonify(error=400, text=error_msg), 400
elif input_method == 'input_csv':
input_csv = input_info
else:
error_msg = 'wrong input method provided'
return flask.jsonify(error=400, text=error_msg), 400
# Determine output
output_method, output_info = (request.json['outputMethod'],
request.json['outputInfo'])
job_data['deid_table'] = output_info
if output_method == 'deid_table':
deid_table = output_info
dataset, table = deid_table.split('.')
try:
if not verify_bq_table(dataset, table, EXPECTED_OUTPUT_SCHEMA):
error_msg = ('output table schema does not match the expected one. '
'Expecting: {}'.format(', '.join(EXPECTED_OUTPUT_SCHEMA)))
return flask.jsonify(error=400, text=error_msg), 400
except exceptions.NotFound:
# if table not found, a new one will be created
pass
elif output_method == 'output_csv':
output_csv = output_info
else:
error_msg = 'wrong output method provided'
return flask.jsonify(error=400, text=error_msg), 400
deid_config_json = run_deid_lib.parse_config_file(
app.config['DEID_CONFIG_FILE'])
findings_table = request.json.get('findingsTable')
job_data['findings_table'] = findings_table
try:
dataset, table = findings_table.split('.')
if not verify_bq_table(dataset, table, EXPECTED_FINDINGS_SCHEMA):
error_msg = ('findings table schema does not match the expected one. '
'Expecting: {}'.format(', '.join(EXPECTED_FINDINGS_SCHEMA)))
return flask.jsonify(error=400, text=error_msg), 400
except exceptions.NotFound:
# if table not found, a new one will be created
pass
mae_table = request.json.get('maeTable')
mae_dir = request.json.get('maeDir')
batch_size = request.json.get('batchSize') or 1
pipeline_args = ['--project', app.config['PROJECT_ID']]
deid_job = model.create(model.DeidJobTable, job_data)
errors = run_deid_lib.run_pipeline(
input_query, input_table, deid_table, findings_table, mae_dir, mae_table,
deid_config_json, mae_task_name, app.config['PROJECT_ID'], storage.Client,
bq_client, bigquery.job.QueryJobConfig, app.config['DLP_API_NAME'],
batch_size, dtd_dir, input_csv, output_csv, deid_job.timestamp,
pipeline_args)
if errors:
deid_job.update(status=400, log_trace=errors)
return flask.jsonify(error=400, text=errors), 400
deid_job.update(status=200)
return flask.jsonify(result='success'), 200
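# Example request body for POST /api/deidentify (illustrative only; the exact
# contract is defined by `deid_schema`, which is declared elsewhere in this
# file, so the keys below are inferred from the handler above and the values
# are hypothetical):
#
#   {
#     "name": "demo-deid-job",
#     "inputMethod": "input_table",
#     "inputInfo": "notes_dataset.raw_notes",
#     "outputMethod": "deid_table",
#     "outputInfo": "notes_dataset.deid_notes",
#     "findingsTable": "notes_dataset.findings",
#     "batchSize": 10
#   }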
@app.route('/api/eval', methods=['GET', 'POST'])
def evaluate():
"""Run evaluation pipeline."""
if flask.request.method == 'GET':
jobs, offset = model.get_list(model.EvalJobTable)
return flask.jsonify(jobs=jobs, offset=offset), 200
# Process POST requests.
try:
jsonschema.validate(flask.request.json, eval_pipeline_shema)
except jsonschema.ValidationError:
error_msg = 'unable to validate provided payload.'
return flask.jsonify(error=400, text=error_msg), 400
(mae_input_pattern, mae_golden_dir, results_dir, mae_input_query,
mae_golden_table, write_per_note_stats_to_gcs, results_table,
per_note_results_table, debug_output_table, types_to_ignore) = (
None, None, None, None, None, None, None, None, None, None)
job_data = {
'name': flask.request.json['name'],
'timestamp': datetime.utcnow(),
}
# Get input info
input_json = flask.request.json['input']
gcs_input, bq_input = input_json.get('gcs'), input_json.get('bigquery')
if gcs_input:
mae_input_pattern = job_data['findings'] = gcs_input['pattern'] + '*.xml'
mae_golden_dir = job_data['goldens'] = gcs_input['golden']
if bq_input:
job_data['findings'] = bq_input['query']
mae_input_query = append_project(job_data['findings'])
job_data['goldens'] = bq_input['golden']
mae_golden_table = append_project(job_data['goldens'])
try:
findings_dataset, findings_table = job_data['findings'].split('.')
get_bq_table(findings_dataset, findings_table)
    golden_dataset, golden_table = job_data['goldens'].split('.')
get_bq_table(golden_dataset, golden_table)
except exceptions.NotFound:
error_msg = 'unable to locate input BigQuery tables'
return flask.jsonify(error=400, text=error_msg), 400
# Get output info
output_json = flask.request.json['output']
gcs_output, bq_output = output_json.get('gcs'), output_json.get('bigquery')
if gcs_output:
results_dir = job_data['stats'] = gcs_output['dir']
write_per_note_stats_to_gcs = gcs_output['debug']
if write_per_note_stats_to_gcs:
job_data['debug'] = gcs_output['dir']
if bq_output:
job_data['stats'] = bq_output['stats']
results_table = append_project(job_data['stats'])
job_data['debug'] = bq_output['debug']
debug_output_table = append_project(job_data['debug'])
if bq_output.get('perNote'):
per_note_results_table = append_project(bq_output.get('perNote'))
# Get types to ignore
types_to_ignore = flask.request.json.get('ignoreTypes') or []
# Get pipeline args
pipeline_args = []
eval_job = model.create(model.EvalJobTable, job_data)
errors = eval_lib.run_pipeline(mae_input_pattern, mae_golden_dir, results_dir,
mae_input_query, mae_golden_table,
write_per_note_stats_to_gcs, results_table,
per_note_results_table, debug_output_table,
types_to_ignore, eval_job.timestamp,
pipeline_args)
if errors:
eval_job.update(status=400, log_trace=errors)
return flask.jsonify(error=400, text=errors), 400
eval_job.update(status=200)
return flask.jsonify(result='success'), 200
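# Example request body for POST /api/eval (illustrative only; governed by
# `eval_pipeline_shema`, declared elsewhere in this file, so the keys are
# inferred from the handler above and the table names are hypothetical):
#
#   {
#     "name": "demo-eval-job",
#     "input": {"bigquery": {"query": "notes_dataset.findings_notes",
#                            "golden": "notes_dataset.golden_notes"}},
#     "output": {"bigquery": {"stats": "notes_dataset.eval_stats",
#                             "debug": "notes_dataset.eval_debug"}},
#     "ignoreTypes": ["AGE"]
#   }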
@app.route('/api/eval/stats/<job_id>', methods=['GET'])
def get_eval_stats(job_id):
"""Returns the evaluation statistics of an EvalJob."""
job = model.EvalJobTable.query.get(job_id)
if not job:
error_msg = 'evaluation job does not exist'
return flask.jsonify(text=error_msg, error=404), 404
if job.status != 200:
error_msg = 'selected job did not finish successfully'
return flask.jsonify(text=error_msg, error=400), 400
stats = job.stats
if stats.startswith('gs://'):
st_client = storage.Client()
filename = gcsutil.GcsFileName.from_path(
posixpath.join(stats, 'aggregate_results.txt'))
bucket = st_client.lookup_bucket(filename.bucket)
if not bucket:
error_msg = 'stats bucket was not found'
return flask.jsonify(error=404, text=error_msg), 404
blob = bucket.blob(filename.blob)
contents = blob.download_as_string()
stats_rows = eval_lib.format_aggregate_text_for_bq(contents,
str(job.timestamp))
else:
query = 'SELECT * FROM {} where timestamp = \'{}\''.format(job.stats,
job.timestamp)
try:
stats_rows = get_bq_rows(query)
except exceptions.NotFound as e:
return flask.jsonify(error=e.code, text=e.message), e.code
# Change the key format from snake_case into camelCase and remove any keys
# with None values
result = [
dict( # pylint: disable=g-complex-comprehension
(k, v) for (k, v) in {
'infoType': stat.get('info_type'),
'recall': stat.get('recall'),
'precision': stat.get('precision'),
'fScore': stat.get('f_score'),
'truePositives': stat.get('true_positives'),
'falsePositives': stat.get('false_positives'),
'falseNegatives': stat.get('false_negatives'),
'timestamp': stat.get('timestamp'),
}.items() if v is not None) for stat in stats_rows
]
return flask.jsonify(stats=result), 200
@app.route('/api/deidentify/upload/table', methods=['POST'])
def upload_dlp_csv():
"""Uploads a csv table to BigQuery dataset.
The table is expected to have the config schema:
[RECORD_NUM, PATIENT_ID, NOTE].
Returns:
A flask response indicating the result of the operation.
"""
csv_file = flask.request.files.get('csv')
if not csv_file:
return flask.jsonify(error=400, text='no file provided'), 400
form = flask.request.form
dataset_id, table_id = form.get('dataset'), form.get('table')
if not dataset_id or not table_id:
return flask.jsonify(error=400, text='table or dataset not provided'), 400
csv_iter = unicodecsv.UnicodeReader(csv_file)
try:
headers = csv_iter.next()
except StopIteration:
return flask.jsonify(error=400, text='file is empty'), 400
if set(headers) != set(EXPECTED_CSV_SCHEMA):
return flask.jsonify(
error=400, text='expected table schema is: {}'.format(
', '.join(EXPECTED_CSV_SCHEMA))), 400
  dataset_ref = bq_client.dataset(dataset_id)
  table_ref = dataset_ref.table(table_id)
  try:
    if not verify_bq_table(dataset_id, table_id, EXPECTED_CSV_SCHEMA):
      error_msg = ('selected table schema does not match the expected one. '
                   'Expecting: {}'.format(', '.join(EXPECTED_CSV_SCHEMA)))
      return flask.jsonify(error=400, text=error_msg), 400
    # Table exists with the expected schema; fetch it for the row insert below.
    table = bq_client.get_table(table_ref)
  except exceptions.NotFound:
    # Table not found, create it
    schema = [bigquery.schema.SchemaField(
        name=col, field_type=CSV_FIELD_TYPE[col])
        for col in headers]
    table = bq_client.create_table(bigquery.table.Table(table_ref, schema))
rows = [
{header: entry for header, entry in zip(headers, row)}
for row in csv_iter]
if not rows:
return flask.jsonify(error=400, text='no rows provided'), 400
bq_client.insert_rows_json(table, rows)
return flask.jsonify(res='success'), 200
@app.route('/api/demo/image', methods=['POST'])
def deid_image():
"""redact all text from provided image."""
request = flask.request
try:
jsonschema.validate(request.json, dlp_image_demo_schema)
except jsonschema.ValidationError:
error_msg = 'unable to validate provided parameter'
return flask.jsonify(error=400, text=error_msg), 400
dlp = discovery.build(app.config['DLP_API_NAME'], 'v2',
cache_discovery=False)
def get_image_type(req_type):
"""change image type format to match what's expected from dlp."""
if req_type == 'image/jpeg':
return 'IMAGE_JPEG'
elif req_type == 'image/bmp':
return 'IMAGE_BMP'
elif req_type == 'image/png':
return 'IMAGE_PNG'
elif req_type == 'image/svg':
      return 'IMAGE_SVG'
else:
return None
byte_item = {
'type': get_image_type(request.json['type']),
'data': request.json['data'],
}
body = {
'byteItem': byte_item,
'imageRedactionConfigs': dlp_image_redaction_configs,
}
projects = dlp.projects()
image = projects.image()
parent = 'projects/{0}'.format(app.config['PROJECT_ID'])
response = image.redact(body=body, parent=parent).execute()
return flask.jsonify(redactedByteStream=response['redactedImage'], status=200)
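# Example request body for POST /api/demo/image (illustrative only; validated
# by `dlp_image_demo_schema`, declared elsewhere in this file). The "data"
# value is assumed to be base64-encoded image bytes, as the DLP REST API
# expects for byte items:
#
#   {"type": "image/png", "data": "<base64-encoded image bytes>"}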
if __name__ == '__main__':
app.run(threaded=True)
| GoogleCloudPlatform/healthcare-deid | deid_app/backend/server.py | Python | apache-2.0 | 29,411 |
# File: TscTelnetLib.py ; This file is part of Twister.
# version: 2.002
#
# Copyright (C) 2012 , Luxoft
#
# Authors:
# Adrian Toader <adtoader@luxoft.com>
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains Telnet connection functions.
"""
from telnetlib import Telnet
from time import sleep
#from time import time as epochtime
from thread import start_new_thread
#from os import remove, rename
#from os.path import dirname, exists, abspath, join, getsize
#from json import load, dump
#__dir__ = dirname(abspath(__file__))
__all__ = ['TelnetManager', 'TelnetConnection']
#
class TelnetManager(object):
""" Twister Telnet connections manager """
def __init__(self):
""" init """
# connections are TelnetConnection instances
self.connections = {}
# active connection name; is used for all commands as default
# if no name is specified
self.activeConnection = None
def open_connection(self, name, host, port=23, user=None, password=None,
userExpect=None, passwordExpect=None, keepalive=True):
""" open a new TelnetConnection instance and add it to manager list """
if not self.connections.has_key(name):
connection = TelnetConnection(name, host, port, user, password,
userExpect, passwordExpect, keepalive)
self.connections.update([(name, connection), ])
return True
else:
print('telnet open connection error: connection name already in use')
return False
def login(self, name, user=None, password=None,
userExpect=None, passwordExpect=None):
""" login on telnet connection """
try:
return self.connections[name].login(user, password,
userExpect, passwordExpect)
except Exception, e:
print('telnet manager login error: {er}'.format(er=e))
return False
def write(self, command, name=None):
""" write command to telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].write(command)
elif self.activeConnection:
return self.connections[self.activeConnection].write(command)
return False
def read(self, name=None):
""" read from telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].read()
elif self.activeConnection:
return self.connections[self.activeConnection].read()
return False
def read_until(self, expected, name=None):
""" read from telnet connection until expected """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].read_until(expected)
elif self.activeConnection:
return self.connections[self.activeConnection].read_until(expected)
return False
def set_newline(self, newline, name=None):
""" set the new line char for telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].set_newline(newline)
elif self.activeConnection:
return self.connections[self.activeConnection].set_newline(newline)
return False
def set_timeout(self, timeout, name=None):
""" set timeout for operations on telnet connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name].set_timeout(timeout)
elif self.activeConnection:
return self.connections[self.activeConnection].set_timeout(timeout)
return False
def get_connection(self, name=None):
""" get the TelnetConnection instance """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if name:
return self.connections[name]
elif self.activeConnection:
return self.connections[self.activeConnection]
return False
def set_active_connection(self, name):
""" set the active connection """
if not self.connections.has_key(name):
print 'connection not found'
return False
self.activeConnection = name
return True
def list_connections(self):
""" list all connections """
return [name for name in self.connections.iterkeys()]
def close_connection(self, name=None):
""" close connection """
if ((not name and not self.activeConnection) or
(name and not self.connections.has_key(name))):
print 'connection not found'
return False
if not name and self.activeConnection:
del(self.connections[self.activeConnection])
self.activeConnection = None
return True
try:
del(self.connections[name])
if name == self.activeConnection:
self.activeConnection = None
except Exception, e:
print('telnet manager error while closing connection: {er}'.format(er=e))
return False
return True
def close_all_connections(self):
""" close all connections """
del(self.connections)
self.connections = {}
self.activeConnection = None
print('all connections closed')
return True
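# Minimal usage sketch for TelnetManager (illustrative only, not executed:
# the host, credentials and expect strings are placeholders and depend on the
# remote device's login prompts):
#
#     manager = TelnetManager()
#     manager.open_connection('router1', '10.0.0.1', port=23,
#                             user='admin', password='secret',
#                             userExpect='login:', passwordExpect='Password:')
#     manager.set_active_connection('router1')
#     print manager.write('show version')
#     manager.close_all_connections()
#
# open_connection() already performs the initial login when credentials are
# supplied, so an explicit login() call is only needed otherwise.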
class TelnetConnection:
""" tsc telnet connection """
def __init__(self, name, host, port=23, user=None, password=None,
userExpect=None, passwordExpect=None, keepalive=True):
""" init """
self.connection = None
self.host = host
self.port = port
self.loginAccount = {
'user': user,
'password': password
}
self.name = name
self.newline = '\n'
self.timeout = 4
self.keepAliveRetries = 0
self.keepAliveThread = None
self.keepAlive = keepalive
self.loginDriver = {
'userExpect': userExpect,
'passwordExpect': passwordExpect
}
"""
self.loginDrivers = None
self.loginDriversPath = join(__dir__, 'logindrivers.list')
self.loginDriversLockPath = join(__dir__, 'logindrivers.lock')
self.loadLoginDrivers()
"""
try:
self.connection = Telnet(self.host, self.port, self.timeout)
print('telnet connection created!')
self.login()
if self.keepAlive:
self.keepAliveThread = start_new_thread(self.keep_alive, ())
else:
self.keepAliveThread = None
except Exception, e:
self.connection = None
self.keepAliveThread = None
print('telnet connection failed: {er}'.format(er=e))
def __del__(self):
""" delete """
if self.connection:
self.connection.close()
sleep(2)
del(self)
def keep_alive(self):
""" keep connection alive """
timeout = (0.2, self.timeout)[self.timeout>2]
while not self.connection.eof:
self.connection.write('')
sleep(timeout)
def alive(self):
""" check if connection is alive """
if self.connection and not self.connection.eof:
return True
try:
self.connection = Telnet(self.host, self.port)
print('telnet connection created!')
self.login()
if self.keepAlive:
self.keepAliveThread = start_new_thread(self.keep_alive, ())
else:
self.keepAliveThread = None
except Exception, e:
self.connection = None
self.keepAliveThread = None
self.keepAliveRetries += 1
if self.keepAliveRetries > 4:
print('telnet connection restore retry failed!')
return False
            print('telnet connection restore failed: {er} '
                  'retry: {n}!'.format(er=e, n=self.keepAliveRetries))
self.alive()
return True
def set_newline(self, newline):
""" set the new line char for telnet connection """
if isinstance(newline, str):
self.newline = newline
return True
return False
def set_timeout(self, timeout):
""" set timeout for operations on telnet connection """
if isinstance(timeout, int):
self.timeout = [2, timeout][timeout > 2]
return True
return False
def read(self):
""" read from telnet connection """
if not self.alive():
return False
try:
response = self.connection.read_very_eager()
if response:
return response
except Exception, e:
print('read command error: {er}'.format(er=e))
return False
return False
def read_until(self, expected):
""" read from telnet connection until expected """
if not self.alive():
return False
try:
response = self.connection.read_until(expected, self.timeout)
if response:
print(response)
return True
except Exception, e:
print('read until command error: {er}'.format(er=e))
return False
return False
def write(self, command, result=True, display=True):
""" write command to telnet connection """
if not self.alive():
return False
try:
self.connection.write( str(command) + self.newline )
sleep(2)
if display: print('command: {c}'.format(c=command))
if result:
return self.connection.read_very_eager()
else:
return True
except Exception, e:
print('send command error: {er}'.format(er=e))
return False
def expect(self, expected, command=None, result=True, display=True):
""" write command to telnet connection on expected prompt """
if not self.alive():
return False
try:
response = self.connection.read_until(expected, self.timeout)
print(response)
if response:
if command:
self.connection.write( str(command) + self.newline)
sleep(2)
if display: print('command: {c}'.format(c=command))
if result:
return self.connection.read_very_eager()
else:
return True
return False
except Exception, e:
print('expect send command error: {er}'.format(er=e))
return False
def login(self, user=None, password=None,
userExpect=None, passwordExpect=None):
""" login on telnet connection """
if not self.alive():
return False
self.loginAccount['user'] = (user,
self.loginAccount['user'])[user is None]
self.loginAccount['password'] = (password,
self.loginAccount['password'])[password is None]
self.loginDriver['userExpect'] = (userExpect,
self.loginDriver['userExpect'])[userExpect is None]
self.loginDriver['passwordExpect'] = (passwordExpect,
self.loginDriver['passwordExpect'])[passwordExpect is None]
print('login ..')
if None in [self.loginAccount['user'], self.loginAccount['password']]:
print('no login data!')
return False
if None in [self.loginDriver['userExpect'],
self.loginDriver['passwordExpect']]:
print('no login expected data!')
return False #return self.autologin()
response = self.expect(self.loginDriver['userExpect'],
self.loginAccount['user'], False)
if response:
response = self.expect(self.loginDriver['passwordExpect'],
self.loginAccount['password'],
True, False)
if response:
print(response)
"""
if ((self.loginDriver['userExpect'] not in
self.loginDrivers['userExpect'] or
self.loginDriver['passwordExpect'] not in
self.loginDrivers['passwordExpect'])
and not None in self.loginDriver.itervalues()):
self.saveLoginDrivers(self.loginDriver['userExpect'],
self.loginDriver['passwordExpect'])
"""
return True
print('fail')
return False
"""
def autologin(self):
# autologin on telnet connection
        print('trying autologin ..')
response = self.connection.expect(self.loginDrivers['userExpect'],
self.timeout)
if not None in response:
print(response)
self.write(self.loginAccount['user'], False)
response = self.connection.expect(
self.loginDrivers['passwordExpect'],
self.timeout)
if not None in response:
print(response)
print self.write(self.loginAccount['password'], True, False)
return True
print('fail')
return False
def loadLoginDrivers(self):
# load the known login drivers
retries = 0
while exists(self.loginDriversLockPath) and retries <= self.timeout * 2:
retries += 1
sleep(0.4)
with open(self.loginDriversLockPath, 'wb+') as loginDriversLockFile:
loginDriversLockFile.write('lock\n')
if not exists(self.loginDriversPath):
with open(self.loginDriversPath, 'wb+') as loginDriversFile:
self.loginDrivers = {}
self.loginDrivers['userExpect'] = []
self.loginDrivers['passwordExpect'] = []
dump(self.loginDrivers, loginDriversFile)
if getsize(self.loginDriversPath) > 524288L:
rename(self.loginDriversPath,
self.loginDriversPath + '.bck' + str(epochtime()))
with open(self.loginDriversPath, 'rb') as loginDriversFile:
self.loginDrivers = load(loginDriversFile)
remove(self.loginDriversLockPath)
def saveLoginDrivers(self, userExpect, passwordExpect):
# save new login driver
retries = 0
while exists(self.loginDriversLockPath) and retries <= self.timeout * 2:
retries += 1
sleep(0.4)
with open(self.loginDriversLockPath, 'wb+') as loginDriversLockFile:
loginDriversLockFile.write('lock\n')
with open(self.loginDriversPath, 'rb') as loginDriversFile:
self.loginDrivers = load(loginDriversFile)
self.loginDrivers['userExpect'].append(userExpect)
self.loginDrivers['passwordExpect'].append(passwordExpect)
with open(self.loginDriversPath, 'wb+') as loginDriversFile:
dump(self.loginDrivers, loginDriversFile)
remove(self.loginDriversLockPath)
"""
| twister/twister.github.io | lib/TscTelnetLib.py | Python | apache-2.0 | 17,030 |
import discord
from discord.ext import commands
from sys import argv
class Memes:
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
async def _meme(self, ctx, msg):
author = ctx.message.author
if ctx.message.channel.name[0:5] == "help-" or "assistance" in ctx.message.channel.name or (self.bot.nomemes_role in author.roles):
await self.bot.delete_message(ctx.message)
try:
await self.bot.send_message(author, "Meme commands are disabled in this channel, or your privileges have been revoked.")
except discord.errors.Forbidden:
await self.bot.say(author.mention + " Meme commands are disabled in this channel, or your privileges have been revoked.")
else:
await self.bot.say(self.bot.escape_name(ctx.message.author.display_name) + ": " + msg)
# list memes
@commands.command(name="listmemes", pass_context=True)
async def _listmemes(self, ctx):
"""List meme commands."""
# this feels wrong...
funcs = dir(self)
msg = "```\n"
msg += ", ".join(func for func in funcs if func != "bot" and func[0] != "_")
msg += "```"
await self._meme(ctx, msg)
# 3dshacks memes
@commands.command(pass_context=True, hidden=True)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(pass_context=True, hidden=True)
async def screams(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/j0Dkv2Z.png")
@commands.command(pass_context=True, hidden=True)
async def eeh(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(pass_context=True, hidden=True)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/Sohsi8s.png")
@commands.command(pass_context=True, hidden=True)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/GMRp1dj.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/WLncIsi.gif")
@commands.command(pass_context=True, hidden=True)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/so8thgu.gifv")
@commands.command(pass_context=True, hidden=True)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/267IXh1.gif")
@commands.command(pass_context=True, hidden=True)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lKcsiBP.png")
@commands.command(pass_context=True, hidden=True)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/SIJzpau.gifv")
@commands.command(pass_context=True, hidden=True)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/zf2XrNk.gifv")
@commands.command(pass_context=True, hidden=True)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/dqh3fNi.png")
@commands.command(pass_context=True, hidden=True)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/sjQZKBF.gif")
@commands.command(pass_context=True, hidden=True)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(pass_context=True, hidden=True)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(pass_context=True, hidden=True)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ARsOh3p.jpg")
@commands.command(pass_context=True, hidden=True)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsJ191C.png")
@commands.command(pass_context=True, hidden=True)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/tnWSXf7.png")
@commands.command(pass_context=True, hidden=True)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ZPMveve.jpg")
@commands.command(pass_context=True, hidden=True)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/A59RbRT.png")
@commands.command(pass_context=True, hidden=True)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/UYbIZYs.gifv")
@commands.command(pass_context=True, hidden=True)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/bgvuHAd.png")
@commands.command(pass_context=True, hidden=True)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vpu8bX3.png")
@commands.command(pass_context=True, hidden=True)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
# Cute commands :3
@commands.command(pass_context=True, hidden=True)
async def headpat(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(pass_context=True, hidden=True)
async def headpat2(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/djhHX0n.gifv")
@commands.command(pass_context=True, hidden=True)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/VHlIZRC.png")
@commands.command(pass_context=True, hidden=True)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/OyjCHNe.png")
@commands.command(pass_context=True, hidden=True)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lw80tT0.gif")
@commands.command(pass_context=True, hidden=True)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/RQeZErU.png")
@commands.command(pass_context=True, hidden=True)
async def dev(self, ctx):
"""Reminds user where they are."""
await self.bot.say("You seem to be in <#196635781798952960>.")
@commands.command(pass_context=True, hidden=True)
async def headrub(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/j6xSoKv.jpg")
@commands.command(pass_context=True, hidden=True)
async def blackalabi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/JzFem4y.png")
@commands.command(pass_context=True, hidden=True)
async def nom(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/p1r53ni.jpg")
# Load the extension
def setup(bot):
bot.add_cog(Memes(bot))
| T3CHNOLOG1C/Kurisu | addons/memes.py | Python | apache-2.0 | 7,307 |
import abc
import json
from future.utils import with_metaclass
from collections import defaultdict
import numpy as np
import tensorflow as tf
from dps import cfg
from dps.utils import Parameterized, Param
from dps.utils.tf import build_gradient_train_op, trainable_variables, get_scheduled_values, ScopedFunction
from dps.datasets.base import Dataset
class Updater(with_metaclass(abc.ABCMeta, Parameterized)):
build_saver = True
def __init__(self, env, scope=None, mpi_context=None, **kwargs):
self.scope = scope
self.env = env
self.mpi_context = mpi_context
self._n_experiences = 0
self.step = 0
self._saver = None
@property
def n_experiences(self):
return self._n_experiences
def build_graph(self):
# with tf.name_scope(self.scope or self.__class__.__name__) as scope:
# self._scope = scope
self._build_graph()
global_step = tf.train.get_or_create_global_step()
self.inc_global_step_op = tf.assign_add(global_step, 1)
global_step_input = tf.placeholder(tf.int64, ())
assign_global_step = tf.assign(global_step, global_step_input)
tf.get_default_session().run(assign_global_step, feed_dict={global_step_input: 0})
if self.build_saver:
updater_variables = {v.name: v for v in self.trainable_variables(for_opt=False)}
self.saver = tf.train.Saver(updater_variables)
@abc.abstractmethod
def _build_graph(self):
raise Exception("NotImplemented")
def update(self, batch_size, step):
update_result = self._update(batch_size)
sess = tf.get_default_session()
sess.run(self.inc_global_step_op)
self._n_experiences += batch_size
return update_result
@abc.abstractmethod
def _update(self, batch_size):
raise Exception("NotImplemented")
def evaluate(self, batch_size, step, mode="val"):
assert mode in "val test".split()
return self._evaluate(batch_size, mode)
@abc.abstractmethod
def _evaluate(self, batch_size, mode):
raise Exception("NotImplemented")
def trainable_variables(self, for_opt):
raise Exception("AbstractMethod")
def save(self, filename):
path = self.saver.save(tf.get_default_session(), filename)
return path
def restore(self, path):
self.saver.restore(tf.get_default_session(), path)
class DummyUpdater(Updater):
""" For when you just want to build datasets. Much faster than most normal updaters. """
build_saver = False
def trainable_variables(self, for_opt):
return []
def _build_graph(self):
pass
def _update(self, batch_size):
return dict()
def _evaluate(self, batch_size, mode):
return dict()
def save(self, session, filename):
return ''
def restore(self, path):
pass
class DifferentiableUpdater(Updater):
""" Update parameters of a differentiable function `f` using gradient-based algorithm.
Must be used in context of a default graph, session and config.
Parameters
----------
env: gym Env
The environment we're trying to learn about.
f: An instance of ScopedFunction
Accepts a tensor (input), returns a tensor (inference).
"""
optimizer_spec = Param()
lr_schedule = Param()
noise_schedule = Param()
max_grad_norm = Param()
l2_weight = Param(None)
stopping_criteria = "loss,min"
def __init__(self, env, f, **kwargs):
assert hasattr(env, 'build'), (
"Environments used with DifferentiableUpdater must possess "
"a method called `build` which builds returns a dictionary of scalar tensors."
)
self.f = f
super(DifferentiableUpdater, self).__init__(env, **kwargs)
def trainable_variables(self, for_opt):
return trainable_variables(self.f.scope, for_opt=for_opt)
def _build_graph(self):
self.recorded_tensors = self.env.build(self.f)
self.loss = self.recorded_tensors['loss']
tvars = self.trainable_variables(for_opt=True)
if self.l2_weight is not None:
self.loss += self.l2_weight * sum(tf.nn.l2_loss(v) for v in tvars if 'weights' in v.name)
self.train_op, self.train_recorded_tensors = build_gradient_train_op(
self.loss, tvars, self.optimizer_spec, self.lr_schedule,
self.max_grad_norm, self.noise_schedule)
self.recorded_tensors.update(get_scheduled_values())
def _update(self, batch_size):
feed_dict = self.env.data_manager.do_train()
sess = tf.get_default_session()
_, record, train_record = sess.run(
[self.train_op, self.recorded_tensors, self.train_recorded_tensors], feed_dict=feed_dict)
record.update(train_record)
return record
def _evaluate(self, batch_size, mode):
if mode == "val":
feed_dict = self.env.data_manager.do_val()
elif mode == "test":
feed_dict = self.env.data_manager.do_test()
else:
raise Exception("Unknown evaluation mode: {}".format(mode))
sess = tf.get_default_session()
return sess.run(self.recorded_tensors, feed_dict=feed_dict)
class VideoUpdater(Updater):
optimizer_spec = Param()
lr_schedule = Param()
noise_schedule = Param()
max_grad_norm = Param()
grad_n_record_groups = Param(None)
def __init__(self, env, scope=None, **kwargs):
self.obs_shape = env.obs_shape
*other, self.image_height, self.image_width, self.image_depth = self.obs_shape
self.n_frames = other[0] if other else 0
self.network = cfg.build_network(env, self, scope="network")
super(VideoUpdater, self).__init__(env, scope=scope, **kwargs)
def trainable_variables(self, for_opt):
return self.network.trainable_variables(for_opt)
def _update(self, batch_size):
if cfg.get('no_gradient', False):
return dict()
feed_dict = self.data_manager.do_train()
sess = tf.get_default_session()
_, record, train_record = sess.run(
[self.train_op, self.recorded_tensors, self.train_records], feed_dict=feed_dict)
record.update(train_record)
return record
def _evaluate(self, _batch_size, mode):
return self.evaluator.eval(self.recorded_tensors, self.data_manager, mode)
def _build_graph(self):
self.data_manager = DataManager(datasets=self.env.datasets)
self.data_manager.build_graph()
data = self.data_manager.iterator.get_next()
self.inp = data["image"]
network_outputs = self.network(data, self.data_manager.is_training)
network_tensors = network_outputs["tensors"]
network_recorded_tensors = network_outputs["recorded_tensors"]
network_losses = network_outputs["losses"]
self.tensors = network_tensors
self.recorded_tensors = recorded_tensors = dict(global_step=tf.train.get_or_create_global_step())
# --- loss ---
self.loss = tf.constant(0., tf.float32)
for name, tensor in network_losses.items():
self.loss += tensor
recorded_tensors['loss_' + name] = tensor
recorded_tensors['loss'] = self.loss
# --- train op ---
if cfg.do_train and not cfg.get('no_gradient', False):
tvars = self.trainable_variables(for_opt=True)
self.train_op, self.train_records = build_gradient_train_op(
self.loss, tvars, self.optimizer_spec, self.lr_schedule,
self.max_grad_norm, self.noise_schedule, grad_n_record_groups=self.grad_n_record_groups)
sess = tf.get_default_session()
        for k, v in getattr(sess, 'scheduled_values', {}).items():
if k in recorded_tensors:
recorded_tensors['scheduled_' + k] = v
else:
recorded_tensors[k] = v
# --- recorded values ---
intersection = recorded_tensors.keys() & network_recorded_tensors.keys()
assert not intersection, "Key sets have non-zero intersection: {}".format(intersection)
recorded_tensors.update(network_recorded_tensors)
intersection = recorded_tensors.keys() & self.network.eval_funcs.keys()
assert not intersection, "Key sets have non-zero intersection: {}".format(intersection)
if self.network.eval_funcs:
eval_funcs = self.network.eval_funcs
else:
eval_funcs = {}
# For running functions, during evaluation, that are not implemented in tensorflow
self.evaluator = Evaluator(eval_funcs, network_tensors, self)
class TensorRecorder(ScopedFunction):
_recorded_tensors = None
def record_tensors(self, **kwargs):
for k, v in kwargs.items():
self.recorded_tensors[k] = tf.reduce_mean(tf.to_float(v))
@property
def recorded_tensors(self):
if self._recorded_tensors is None:
self._recorded_tensors = {}
return self._recorded_tensors
class DataManager(Parameterized):
""" Manages a collection of datasets (of type dps/datasets/base.py:Dataset) and iterators accessing them.
Datasets of type Dataset are passed into the constructor. At least one of those must be called
'train', 'val' or 'test'. When build_graph is called, iterators accessing those datasets
    are created, and a special string-handle iterator is created. (Note: an iterator is a tensorflow
    operation which is used to stream data from a file stored on disk). The string-handle iterator
can switch between datasets; which dataset it accesses is controlled by the value of a string tensor.
This allows us to build a single model (i.e. a single tensorflow graph), but feed it different data.
For example, we can easily switch from feeding the model training data to feeding it evaluation data.
Note: all datasets collected under a single DataManager instance must return data with the same structure.
(i.e. they should have the same set of Features; see dps/datasets/base.py:Dataset).
    Convenience functions do_train, do_val and do_test are provided. When called, they return feed_dicts
    which can be used to set the string handle to the appropriate value for the desired dataset.
Additional iterators can be provided by directly calling `build_iterator`, after `build_graph` has
been called. Indeed, this MUST be done in order to access datasets other than 'train', 'val', 'test',
as `build_graph` does not create iterators for these non-standard datasets.
Example use:
dm = DataManager(
train=MyTrainDataset(),
val=MyValDataset(),
test=MyTestDataset(),
)
input_data = dm.iterator.get_next()
    The form of input_data will depend on the Features of the datasets; most often it will be a dictionary of tensors.
"""
shuffle_buffer_size = Param()
prefetch_buffer_size_in_batches = Param(10)
prefetch_to_device = Param(False)
batch_size = Param()
train_initialized = False
def __init__(self, train=None, val=None, test=None, datasets=None, **kwargs):
self.datasets = {}
self.datasets.update(train=train, val=val, test=test)
self.datasets.update(datasets)
assert (
self.datasets['train'] is not None
or self.datasets['val'] is not None
or self.datasets['test'] is not None), (
'Must provide at least one dataset with name "train", "val", or "test".')
self.iterators_and_handles = {}
def build_graph(self):
tf_dsets = []
train_dataset = self.datasets.get('train', None)
if train_dataset is not None:
train_dset, _, _ = self.build_iterator('train', 'train', self.batch_size, True, self.shuffle_buffer_size)
tf_dsets.append(train_dset)
val_dataset = self.datasets.get('val', None)
if val_dataset is not None:
val_dset, _, _ = self.build_iterator('val', 'val', self.batch_size, False, 0)
tf_dsets.append(val_dset)
test_dataset = self.datasets.get('test', None)
if test_dataset is not None:
test_dset, _, _ = self.build_iterator('test', 'test', self.batch_size, False, 0)
tf_dsets.append(test_dset)
# --- outputs ---
self.handle = tf.placeholder(tf.string, shape=(), name="dataset_handle")
tf_dset = tf_dsets[0]
if cfg.use_gpu and self.prefetch_to_device:
# In tensorflow 1.13 (at least), tf wants to put this op on CPU, not sure why. This results in an error like:
#
# InvalidArgumentError: Attempted create an iterator on device "/job:localhost/replica:0/task:0/device:CPU:0"
# from handle defined on device "/job:localhost/replica:0/task:0/device:GPU:0"
#
# And the error explicitly references IteratorFromStringHandleV2 built here. The reason is that the
# resources that are pointed to by self.handle are all on the GPU, but, unless we are explicit,
# the iterator created from that handle will be on the CPU, which is apparently not allowed.
with tf.device("/gpu:0"):
self.iterator = tf.data.Iterator.from_string_handle(
self.handle, tf_dset.output_types, tf_dset.output_shapes)
else:
self.iterator = tf.data.Iterator.from_string_handle(
self.handle, tf_dset.output_types, tf_dset.output_shapes)
self.is_training = tf.placeholder(tf.bool, shape=(), name="is_training")
def build_iterator(self, name, base_dataset_name, batch_size, repeat, shuffle_buffer_size):
base_dataset = self.datasets[base_dataset_name]
if batch_size is None:
batch_size = self.batch_size
if isinstance(base_dataset, tf.data.Dataset):
dset = base_dataset
elif isinstance(base_dataset, Dataset):
dset = tf.data.TFRecordDataset(base_dataset.filename)
else:
raise Exception("Unknown dataset type: {}.".format(base_dataset))
# --- possibly repeat and/or shuffle --
if repeat and shuffle_buffer_size > 0:
try:
shuffle_and_repeat_func = tf.data.experimental.shuffle_and_repeat
except AttributeError:
shuffle_and_repeat_func = tf.contrib.data.shuffle_and_repeat
shuffle_and_repeat = shuffle_and_repeat_func(self.shuffle_buffer_size)
dset = dset.apply(shuffle_and_repeat)
elif shuffle_buffer_size > 0:
dset = dset.shuffle(self.shuffle_buffer_size)
# --- batch and parse ---
dset = dset.batch(batch_size)
if hasattr(base_dataset, 'parse_example_batch'):
dset = dset.map(base_dataset.parse_example_batch)
# --- possibly prefetch to improve performance ---
if self.prefetch_buffer_size_in_batches > 0:
if cfg.use_gpu and self.prefetch_to_device:
# Suggested here: https://github.com/tensorflow/tensorflow/issues/18947#issuecomment-407778515
dset = (dset.apply(tf.data.experimental.copy_to_device('/gpu:0'))
.prefetch(self.prefetch_buffer_size_in_batches))
else:
dset = dset.prefetch(self.prefetch_buffer_size_in_batches)
# --- finalize ---
iterator = dset.make_initializable_iterator()
sess = tf.get_default_session()
handle = sess.run(iterator.string_handle(name="{}_string_handle".format(name)))
self.iterators_and_handles[name] = (iterator, handle)
return dset, iterator, handle
def do_train(self, is_training=True):
return self.do('train', is_training)
def do_val(self, is_training=False):
return self.do('val', is_training)
def do_test(self, is_training=False):
return self.do('test', is_training)
def do(self, name, is_training=False):
""" Initialize iterator (unless it's the `train` iterator, which is handled slightly differently)
and return a feed_dict populated with the appropriate handle for the requested iterator. """
iterator, handle = self.iterators_and_handles[name]
sess = tf.get_default_session()
if name == 'train':
if not self.train_initialized:
sess.run(iterator.initializer)
self.train_initialized = True
else:
sess.run(iterator.initializer)
return {self.handle: handle, self.is_training: is_training}
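# Illustrative sketch (not executed): driving one graph from several datasets
# through the string-handle iterator. `train_dset`, `val_dset`, `build_model`
# and `sess` are hypothetical; a default tensorflow session is assumed to
# exist, as build_graph()/build_iterator() above require one.
#
#   dm = DataManager(train=train_dset, val=val_dset,
#                    batch_size=32, shuffle_buffer_size=1000)
#   dm.build_graph()
#   data = dm.iterator.get_next()
#   loss = build_model(data, dm.is_training)     # user-defined model function
#
#   sess.run(loss, feed_dict=dm.do_train())      # one training batch
#   sess.run(loss, feed_dict=dm.do_val())        # one validation batch
#
# Datasets passed via `datasets=...` under non-standard names need an explicit
# dm.build_iterator(name, name, batch_size, repeat, shuffle_buffer_size) call
# before dm.do(name) can be used.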
class DummyFunc:
keys_accessed = ""
def __call__(self, fetched, updater):
return {}
class Evaluator:
""" A helper object for running a list of functions on a collection of evaluated tensors.
Parameters
----------
    functions: a dict (name -> function). Each function is assumed to have an attribute `keys_accessed`
listing the keys (into `tensors`) that will be accessed by that function.
tensors: a (possibly nested) dictionary of tensors which will provide the input to the functions
updater: the updater object, passed into the functions at eval time
"""
def __init__(self, functions, tensors, updater):
self._functions = functions
self._tensors = tensors
        # Force evaluation to happen with the default feed_dict
functions["dummy"] = DummyFunc()
self.updater = updater
self.functions = defaultdict(list)
self.feed_dicts = {}
fetch_keys = defaultdict(set)
for name, func in functions.items():
if hasattr(func, 'get_feed_dict'):
feed_dict = func.get_feed_dict(updater)
else:
feed_dict = {}
fd_key = {str(k): str(v) for k, v in feed_dict.items()}
fd_key = json.dumps(fd_key, default=str, indent=4, sort_keys=True)
self.functions[fd_key].append((name, func))
self.feed_dicts[fd_key] = feed_dict
            # record the tensor keys this function will need fetched
keys_accessed = func.keys_accessed
if isinstance(keys_accessed, str):
keys_accessed = keys_accessed.split()
for key in keys_accessed:
fetch_keys[fd_key].add(key)
self.fetches = {}
for fd_key, _fetch_keys in fetch_keys.items():
fetches = self.fetches[fd_key] = {}
for key in _fetch_keys:
dst = fetches
src = tensors
subkeys = key.split(":")
for i, _key in enumerate(subkeys):
if i == len(subkeys)-1:
dst[_key] = src[_key]
else:
if _key not in dst:
dst[_key] = dict()
dst = dst[_key]
src = src[_key]
def _check_continue(self, record):
return True
def eval(self, recorded_tensors, data_manager, mode):
final_record = {}
for key, functions in self.functions.items():
if mode == "val":
feed_dict = data_manager.do_val()
elif mode == "test":
feed_dict = data_manager.do_test()
else:
raise Exception("Unknown evaluation mode: {}".format(mode))
extra_feed_dict = self.feed_dicts[key]
feed_dict.update(extra_feed_dict)
sess = tf.get_default_session()
n_points = 0
record = defaultdict(float)
fetches = self.fetches.get(key, {})
while True:
try:
if extra_feed_dict:
_recorded_tensors = dict(batch_size=recorded_tensors['batch_size'])
_record, fetched = sess.run([_recorded_tensors, fetches], feed_dict=feed_dict)
else:
# Only get values from recorded_tensors when using the default feed dict.
_record, fetched = sess.run([recorded_tensors, fetches], feed_dict=feed_dict)
except tf.errors.OutOfRangeError:
break
for name, func in functions:
result = func(fetched, self.updater)
if isinstance(result, dict):
for k, v in result.items():
_record["{}:{}".format(name, k)] = np.mean(v)
else:
_record[name] = np.mean(result)
batch_size = _record['batch_size']
# Assumes that each record entry is an average over the batch
for k, v in _record.items():
record[k] += batch_size * v
n_points += batch_size
do_continue = self._check_continue(_record)
if not do_continue:
break
record = {k: v / n_points for k, v in record.items()}
intersection = record.keys() & final_record.keys() - set(['batch_size'])
assert not intersection, "Key sets have non-zero intersection: {}".format(intersection)
final_record.update(record)
return final_record
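# Sketch of an evaluation function compatible with Evaluator (illustrative
# only; the tensor keys and the compute_iou helper are hypothetical and would
# have to exist in the `tensors` dict and user code respectively). Like
# DummyFunc above, it exposes `keys_accessed` and receives the fetched values
# plus the updater:
#
#   class IoUEvalFunc(object):
#       keys_accessed = "pred_boxes true_boxes"
#
#       def __call__(self, fetched, updater):
#           iou = compute_iou(fetched["pred_boxes"], fetched["true_boxes"])
#           return {"mean_iou": iou}
#
#   evaluator = Evaluator({"iou": IoUEvalFunc()}, network_tensors, updater)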
| e2crawfo/dps | dps/tf/updater.py | Python | apache-2.0 | 21,553 |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ...sipmessaging import SIPHeaderField
from ...sipmessaging import classproperty
class EventSIPHeaderField(SIPHeaderField):
# noinspection PyNestedDecorators
@classproperty
@classmethod
def canonical_field_name(cls):
return 'Event'
# noinspection PyNestedDecorators
@classproperty
@classmethod
def canonical_compact_field_name(cls):
return 'o'
@classmethod
def new_for_attributes(cls, field_name="Event", field_value_string=""):
return cls.new_for_field_name_and_value_string(field_name=field_name, field_value_string=field_value_string)
@property
def is_event(self):
return True
# http://www.iana.org/assignments/sip-parameters/sip-parameters.xhtml#sip-parameters-2
@property
def adaptive_min_rate(self):
return self.parameter_named('adaptive-min-rate')
@adaptive_min_rate.setter
def adaptive_min_rate(self, a_string):
self.parameter_named_put('adaptive-min-rate', a_string)
@property
def body(self):
return self.parameter_named('body')
@body.setter
def body(self, a_string):
self.parameter_named_put('body', a_string)
@property
def call_id(self):
return self.parameter_named('call-id')
@call_id.setter
def call_id(self, a_string):
self.parameter_named_put('call-id', a_string)
@property
def effective_by(self):
return self.parameter_named('effective-by')
@effective_by.setter
def effective_by(self, a_string):
self.parameter_named_put('effective-by', a_string)
@property
def from_tag(self):
return self.parameter_named('from-tag')
@from_tag.setter
def from_tag(self, a_string):
self.parameter_named_put('from-tag', a_string)
@property
def id(self):
return self.parameter_named('id')
@id.setter
def id(self, a_string):
self.parameter_named_put('id', a_string)
@property
def include_session_description(self):
return self.parameter_named('include-session-description')
@include_session_description.setter
def include_session_description(self, a_string):
self.parameter_named_put('include-session-description', a_string)
@property
def max_rate(self):
return self.parameter_named('max-rate')
@max_rate.setter
def max_rate(self, a_string):
self.parameter_named_put('max-rate', a_string)
@property
def min_rate(self):
return self.parameter_named('min-rate')
@min_rate.setter
def min_rate(self, a_string):
self.parameter_named_put('min-rate', a_string)
@property
def model(self):
return self.parameter_named('model')
@model.setter
def model(self, a_string):
self.parameter_named_put('model', a_string)
@property
def profile_type(self):
return self.parameter_named('profile-type')
@profile_type.setter
def profile_type(self, a_string):
self.parameter_named_put('profile-type', a_string)
@property
def shared(self):
return self.parameter_named('shared')
@shared.setter
def shared(self, a_string):
self.parameter_named_put('shared', a_string)
@property
def to_tag(self):
return self.parameter_named('to-tag')
@to_tag.setter
def to_tag(self, a_string):
self.parameter_named_put('to-tag', a_string)
@property
def vendor(self):
return self.parameter_named('vendor')
@vendor.setter
def vendor(self, a_string):
self.parameter_named_put('vendor', a_string)
@property
def version(self):
return self.parameter_named('version')
@version.setter
def version(self, a_string):
self.parameter_named_put('version', a_string)
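# Illustrative usage sketch (not executed; the value-string format and the
# parameter parsing behaviour are assumptions about the SIPHeaderField base
# class, and the SIP values are placeholders):
#
#   header = EventSIPHeaderField.new_for_attributes(
#       field_value_string='refer;id=93809824')
#   header.id                          # expected to return '93809824'
#   header.call_id = 'a84b4c76e66710'  # sets the call-id parameter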
| bobjects/BobStack | bobstack/sipmessaging/concreteheaderfields/eventSIPHeaderField.py | Python | apache-2.0 | 3,899 |
"""
Copyright 2015 Google, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Implements an HTML documentation emitter for YANG modules
"""
import os
import re
from xml.etree import ElementTree as ET
from jinja2 import Environment, FileSystemLoader
from .doc_emitter import DocEmitter
from .yangdoc_defs import YangDocDefs
from . import html_helper
from . import yangpath
class HTMLEmitter(DocEmitter):
def genModuleDoc(self, mod, ctx):
"""HTML emitter for top-level module documentation given a
ModuleDoc object"""
ht = html_helper.HTMLHelper()
# TODO: this is far too hardcoded
mod_div = ht.open_tag("div", newline=True)
# module name
mod_div += ht.h1(mod.module_name, {"class": "module-name", "id": ("mod-" + ht.gen_html_id(mod.module_name))},2,True)
if 'version' in mod.module.attrs:
mod_div += ht.h4("openconfig-version: " + mod.module.attrs['version'], {"class": "module-header"},2,True)
# module description header
mod_div += ht.h4("Description", {"class": "module-desc-header"},2,True)
# module description text
paragraphs = text_to_paragraphs(mod.module.attrs['desc'])
for para in paragraphs:
mod_div += ht.para(para, {"class": "module-desc-text"},2,True)
mod_div += ht.h4("Imports", {"class": "module-header"},2,True)
mod_div += "<p class=\"module-desc-text\">"
for i in mod.module.attrs['imports']:
mod_div += "%s<br>\n" % i
mod_div += "</p>\n"
mod_div += ht.close_tag(newline=True)
# initialize and store in the module docs
self.moduledocs[mod.module_name] = {}
self.moduledocs[mod.module_name]['module'] = mod_div
self.moduledocs[mod.module_name]['data'] = ""
# handle typedefs
if len(mod.typedefs) > 0:
types_div = ht.open_tag("div", newline=True)
types_div += ht.h3("Defined types", {"class": "module-types-header", "id": mod.module_name + "-defined-types"},2,True)
for (typename, td) in mod.typedefs.items():
types_div += ht.h4(typename,{"class": "module-type-name","id": "type-" + ht.gen_html_id(typename)},2,True)
types_div += ht.para(ht.add_tag("span","description:" + ht.br(newline=True), {"class": "module-type-text-label"}) + td.attrs['desc'],{"class": "module-type-text"},2,True)
types_div += gen_type_info(td.typedoc, 2)
for prop in YangDocDefs.type_leaf_properties:
if prop in td.attrs:
types_div += ht.para(ht.add_tag("span", prop,{"class": "module-type-text-label"}) + ": " + td.attrs[prop],{"class": "module-type-text"},2,True)
types_div += ht.close_tag(newline=True)
else:
# module doesn't have any typedefs
types_div = ""
# store the typedef docs
self.moduledocs[mod.module_name]['typedefs'] = types_div
# handle identities
if len(mod.identities) > 0:
idents_div = ht.open_tag("div", newline=True)
idents_div += ht.h3("Identities", {"class": "module-types-header", "id": mod.module_name + "-identities"},2,True)
for base_id in mod.base_identities:
idents_div += ht.h4("base: " + base_id,{"class": "module-type-name","id":"ident-" + ht.gen_html_id(base_id)},2,True)
idents_div += ht.para(ht.add_tag("span","description:" + ht.br(newline=True), {"class": "module-type-text-label"}) + mod.identities[base_id].attrs['desc'],{"class": "module-type-text"},2,True)
# collect all of the identities that have base_id as
# their base
# TODO(aashaikh): this needs to be updated to handle nested identities / multiple inheritance
derived = { key:value for key,value in mod.identities.items() if value.attrs['base'] == base_id }
# emit the identities derived from the current base
for (idname, id) in derived.items():
idents_div += ht.h4(idname,{"class": "module-type-name","id":"ident-" + ht.gen_html_id(idname)},2,True)
idents_div += ht.para(ht.add_tag("span","description:",{"class": "module-type-text-label"}) + ht.br(newline=True) + id.attrs['desc'],{"class":"module-type-text"},2,True)
idents_div += ht.para(ht.add_tag("span", "base identity: ",{"class": "module-type-text-label"})
+ ht.add_tag("a", id.attrs['base'],{"href":"#ident-"+ht.gen_html_id(id.attrs['base'])}),
{"class":"module-type-text"},2,True)
idents_div += ht.close_tag(newline=True)
else:
# module doesn't have any identities
idents_div = ""
# store the identity docs
self.moduledocs[mod.module_name]['identities'] = idents_div
gen_nav_tree(self, mod, 0)
def genStatementDoc(self, statement, ctx, level=1):
"""HTML emitter for module data node given a StatementDoc
object"""
if ctx.opts.no_structure and statement.keyword in ctx.skip_keywords:
return
ht = html_helper.HTMLHelper()
s_div = ht.open_tag("div", {"class":"statement-section"}, newline=True)
if ctx.opts.strip_namespace:
pathstr = yangpath.strip_namespace(statement.attrs['path'])
else:
pathstr = statement.attrs['path']
# for 'skipped' nodes, just print the path
if statement.keyword in self.path_only:
s_div += ht.h4(pathstr,None,level,True)
s_div += ht.close_tag(newline=True)
return s_div
# statement path and name
(prefix, last) = yangpath.remove_last(pathstr)
prefix_name = ht.add_tag("span", prefix + "/", {"class": "statement-path"})
statement_name = prefix_name + ht.br(level,True) + statement.name
s_div += ht.h4(statement_name, {"class": "statement-name","id":statement.attrs['id']},level,True)
# node description
if 'desc' in statement.attrs:
s_div += ht.para(ht.add_tag("span", "description",{"class": "statement-info-label"}) + ":<br />" + statement.attrs['desc'],{"class": "statement-info-text"},level,True)
s_div += ht.close_tag(newline=True)
# check for additional properties
notes = ""
if statement.attrs['is_key']:
notes += " (list key)"
if statement.attrs['config']:
notes += " (rw)"
else:
notes += " (ro)"
keyword = statement.keyword + notes
s_div += ht.para(ht.add_tag("span", "nodetype",{"class": "statement-info-label"}) + ": " + keyword,{"class": "statement-info-text"},level,True)
# s_div += ht.para(ht.add_tag("span", "path",{"class":"statement-info-label"}) + ": " + pathstr,{"class":"statement-info-text"},level,True)
# handle list nodes
if statement.attrs['is_list']:
list_keys = ""
for key in statement.attrs['keys']:
list_keys += " [" + ht.add_tag("a", key[0], {"href":"#" + key[1]}) + "]"
s_div += ht.para(ht.add_tag("span", "list keys",{"class": "statement-info-label"}) + ": " + list_keys,{"class": "statement-info-text"},level,True)
if statement.typedoc:
s_div += gen_type_info(statement.typedoc, level)
for prop in YangDocDefs.type_leaf_properties:
if prop in statement.attrs:
s_div += ht.para(ht.add_tag("span", prop, {"class": "statement-info-label"}) + ": " + statement.attrs[prop],{"class": "statement-info-text"},level,True)
# add this statement to the collection of data
self.moduledocs[statement.module_doc.module_name]['data'] += s_div
def emitDocs(self, ctx, section=None):
"""Return the HTML output for all modules,
or single section if specified"""
ht = html_helper.HTMLHelper()
docs = []
navs = []
navids = []
# create the documentation elements for each module
for module_name in self.moduledocs:
# check if the module has no data nodes
if 'data' not in self.moduledocs[module_name]:
self.moduledocs[module_name]['data'] = ""
else:
# create the header for the data elements
hdr = ht.h3("Data elements", {"class": "module-types-header", "id": module_name + "-data"},2,True)
self.moduledocs[module_name]['data'] = hdr + self.moduledocs[module_name]['data']
if section is not None:
return self.moduledocs[module_name][section]
else:
docs.append(self.moduledocs[module_name]['module'] +
self.moduledocs[module_name]['typedefs'] +
self.moduledocs[module_name]['identities'] +
self.moduledocs[module_name]['data'])
navs.append(self.moduledocs[module_name]['navlist'])
navids.append(self.moduledocs[module_name]['navid'])
if ctx.opts.doc_title is None:
# just use the name of the first module returned by the dict if no title
# is supplied
doc_title = list(self.moduledocs.keys())[0]
else:
doc_title = ctx.opts.doc_title
s = populate_template(doc_title, docs, navs, navids)
return s
def gen_type_info(typedoc, level=1):
"""Create and return documentation based on the type. Expands compound
types."""
ht = html_helper.HTMLHelper()
s = ""
# emit type-specific attributes
typename = typedoc.typename
s += ht.para(ht.add_tag("span", "type",{"class": "statement-info-label"}) + ": " + typename,{"class": "statement-info-text"},level,True)
if typename == 'enumeration':
s += " "*level + "<ul>\n"
for (enum, desc) in typedoc.attrs['enums'].items():
s += " "*level + "<li>" + enum + "<br />" + desc + "</li>\n"
s += " "*level + "</ul>\n"
elif typename == 'string':
if 'pattern' in typedoc.attrs['restrictions']:
s += " "*level + "<ul>\n"
s += " "*level + "<li>pattern:<br>\n"
s += " "*level + typedoc.attrs['restrictions']['pattern'] + "\n</li>\n"
s += " "*level + "</ul>\n"
elif typename in YangDocDefs.integer_types:
if 'range' in typedoc.attrs['restrictions']:
s += " "*level + "<ul>\n"
s += " "*level + "<li>range:\n"
s += " "*level + typedoc.attrs['restrictions']['range'] + "\n</li>\n"
s += " "*level + "</ul>\n"
elif typename == 'identityref':
s += " "*level + "<ul>\n"
s += " "*level + "<li>base: " + typedoc.attrs['base'] + "</li>\n"
s += " "*level + "</ul>\n"
elif typename == 'leafref':
s += " "*level + "<ul>\n"
s += " "*level + "<li>path reference: " + typedoc.attrs['leafref_path'] + "</li>\n"
s += " "*level + "</ul>\n"
elif typename == 'union':
s += " "*level + "<ul>\n"
for childtype in typedoc.childtypes:
s += " "*level + gen_type_info(childtype)
s += " "*level + "</ul>\n"
else:
pass
return s
def populate_template(title, docs, navs, nav_ids):
"""Populate HTML templates with the documentation content"""
template_path = os.path.dirname(__file__) + "/../templates"
j2_env = Environment(loader=FileSystemLoader(template_path),
trim_blocks=True)
template = j2_env.get_template('yangdoc.html')
return template.render({'title': title,
'htmldocs': docs,
'menus': navs,
'menu_ids': nav_ids })
def gen_nav_tree(emitter, root_mod, level=0):
"""Generate a list structure to serve as navigation for the
module. root_mod is a top-level ModuleDoc object"""
ht = html_helper.HTMLHelper()
nav = "<ul id=\"%s\">\n" % ("tree-" + ht.gen_html_id(root_mod.module_name))
# module link
nav += "<li><a class=\"menu-module-name\" href=\"%s\">%s</a></li>\n" % ("#mod-" + ht.gen_html_id(root_mod.module_name), root_mod.module_name)
# generate links for types and identities
if len(root_mod.typedefs) > 0:
nav += "<li><a href=\"%s\">%s</a>\n" % ("#" + ht.gen_html_id(root_mod.module_name) + "-defined-types", "Defined types")
types = root_mod.typedefs.keys()
nav += " <ul>\n"
for typename in types:
nav += " <li><a href=\"%s\">%s</a></li>\n" % ("#type-"+ht.gen_html_id(typename), typename)
nav += " </ul>\n"
nav += "</li>\n"
if len(root_mod.identities) > 0:
nav += "<li><a href=\"%s\">%s</a>\n" % ("#" + ht.gen_html_id(root_mod.module_name) + "-identities", "Identities")
nav += " <ul>\n"
for base_id in root_mod.base_identities:
derived = { key:value for key,value in root_mod.identities.items() if value.attrs['base'] == base_id }
nav += " <li><a href=\"%s\">%s</a>\n" % ("#ident-" + ht.gen_html_id(base_id), base_id)
nav += " <ul>\n"
for idname in derived.keys():
nav += " <li><a href=\"%s\">%s</a></li>\n" % ("#ident-" + ht.gen_html_id(idname), idname)
nav += " </ul>\n"
nav += " </li>\n"
nav += " </ul>\n"
nav += "</li>\n"
# generate links for data nodes
top = root_mod.module
level = 0
# nav += "<li><a href=\"%s\">%s</a>\n" % ("#" + ht.gen_html_id(root_mod.module_name) + "-data", "Data elements")
if len(top.children) > 0:
nav += "<li><a href=\"#%s-data\">%s</a>\n" % (root_mod.module_name, "Data elements")
nav += "<ul>\n"
for child in top.children:
nav += gen_nav(child, root_mod, level)
nav += "</li>\n"
nav += "</ul>\n"
nav += "</ul>\n"
# store the navigation list
emitter.moduledocs[root_mod.module_name]['navlist'] = nav
emitter.moduledocs[root_mod.module_name]['navid'] = "tree-" + ht.gen_html_id(root_mod.module_name)
#modtop.nav += "</ul>"
# top.nav += "<li>" + statement.name + "</li>\n"
def gen_nav(node, root_mod, level = 0):
"""Add the list item for node (StatementDoc object)"""
# print "nav: %s %s (%d)" % (node.keyword, node.name, len(node.children))
current_level = level
nav = ""
if len(node.children) > 0:
# print the current node (opening li element)
nav += " "*level + " <li>" + "<a href=\"#" + node.attrs['id'] + "\">" + node.name + "</a>\n"
# start new list for the children
nav += " "*level + " <ul>\n"
level += 1
for child in node.children:
nav += gen_nav (child, root_mod, level)
# close list of children
nav += " "*current_level + " </ul>\n"
nav += " "*current_level + "</li>\n"
else:
# no children -- just print the current node and return
nav += " "*current_level + " <li>" "<a href=\"#" + node.attrs['id'] + "\">" + node.name + "</a>\n"
return nav
def text_to_paragraphs(textblock):
"""Simple conversion of text into paragraphs based (naively) on blank
lines -- intended to use with long, multi-paragraph descriptions"""
paras = textblock.split("\n\n")
return paras
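# Illustrative only (hypothetical input, not from any YANG module): with the naive
# blank-line split above,
#   text_to_paragraphs("First paragraph.\n\nSecond paragraph.")
# returns ["First paragraph.", "Second paragraph."].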
| openconfig/oc-pyang | openconfig_pyang/plugins/util/html_emitter.py | Python | apache-2.0 | 14,661 |
# -*- coding: utf-8 -*-
"""OSF mailing utilities.
Email templates go in website/templates/emails
Templates must end in ``.txt.mako`` for plaintext emails or ``.html.mako`` for html emails.
You can then create a `Mail` object given the basename of the template and
the email subject. ::
CONFIRM_EMAIL = Mail(tpl_prefix='confirm', subject="Confirm your email address")
You can then use ``send_mail`` to send the email.
Usage: ::
from website import mails
...
mails.send_mail('foo@bar.com', mails.CONFIRM_EMAIL, user=user)
"""
import os
import logging
from mako.lookup import TemplateLookup, Template
from framework.email import tasks
from website import settings
logger = logging.getLogger(__name__)
EMAIL_TEMPLATES_DIR = os.path.join(settings.TEMPLATES_PATH, 'emails')
_tpl_lookup = TemplateLookup(
directories=[EMAIL_TEMPLATES_DIR],
)
TXT_EXT = '.txt.mako'
HTML_EXT = '.html.mako'
class Mail(object):
"""An email object.
:param str tpl_prefix: The template name prefix.
:param str subject: The subject of the email.
"""
def __init__(self, tpl_prefix, subject):
self.tpl_prefix = tpl_prefix
self._subject = subject
def html(self, **context):
"""Render the HTML email message."""
tpl_name = self.tpl_prefix + HTML_EXT
return render_message(tpl_name, **context)
def text(self, **context):
"""Render the plaintext email message"""
tpl_name = self.tpl_prefix + TXT_EXT
return render_message(tpl_name, **context)
def subject(self, **context):
return Template(self._subject).render(**context)
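# Illustrative sketch, not part of the original module: because subjects are Mako
# templates, context variables interpolate via ${...}, e.g.
#   Mail('test', subject='A test email to ${name}').subject(name='Foo')
# evaluates to 'A test email to Foo'.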
def render_message(tpl_name, **context):
"""Render an email message."""
tpl = _tpl_lookup.get_template(tpl_name)
return tpl.render(**context)
def send_mail(to_addr, mail, mimetype='plain', from_addr=None, mailer=None,
username=None, password=None, mail_server=None, callback=None, **context):
"""Send an email from the OSF.
Example: ::
from website import mails
mails.send_mail('foo@bar.com', mails.TEST, name="Foo")
:param str to_addr: The recipient's email address
:param Mail mail: The mail object
:param str mimetype: Either 'plain' or 'html'
:param function callback: celery task to execute after send_mail completes
:param **context: Context vars for the message template
.. note::
Uses celery if available
"""
from_addr = from_addr or settings.FROM_EMAIL
mailer = mailer or tasks.send_email
subject = mail.subject(**context)
message = mail.text(**context) if mimetype in ('plain', 'txt') else mail.html(**context)
# Don't use ttls and login in DEBUG_MODE
ttls = login = not settings.DEBUG_MODE
logger.debug('Sending email...')
logger.debug(u'To: {to_addr}\nFrom: {from_addr}\nSubject: {subject}\nMessage: {message}'.format(**locals()))
kwargs = dict(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype=mimetype,
ttls=ttls,
login=login,
username=username,
password=password,
mail_server=mail_server
)
if settings.USE_CELERY:
return mailer.apply_async(kwargs=kwargs, link=callback)
else:
ret = mailer(**kwargs)
if callback:
callback()
return ret
# Predefined Emails
TEST = Mail('test', subject='A test email to ${name}')
CONFIRM_EMAIL = Mail('confirm', subject='Confirm your email address')
CONFIRM_MERGE = Mail('confirm_merge', subject='Confirm account merge')
REMOVED_EMAIL = Mail('email_removed', subject='Email address removed from your OSF account')
PRIMARY_EMAIL_CHANGED = Mail('primary_email_changed', subject='Primary email changed')
INVITE = Mail('invite', subject='You have been added as a contributor to an OSF project.')
CONTRIBUTOR_ADDED = Mail('contributor_added', subject='You have been added as a contributor to an OSF project.')
FORWARD_INVITE = Mail('forward_invite', subject='Please forward to ${fullname}')
FORWARD_INVITE_REGiSTERED = Mail('forward_invite_registered', subject='Please forward to ${fullname}')
FORGOT_PASSWORD = Mail('forgot_password', subject='Reset Password')
PENDING_VERIFICATION = Mail('pending_invite', subject="Your account is almost ready!")
PENDING_VERIFICATION_REGISTERED = Mail('pending_registered', subject='Received request to be a contributor')
REQUEST_EXPORT = Mail('support_request', subject='[via OSF] Export Request')
REQUEST_DEACTIVATION = Mail('support_request', subject='[via OSF] Deactivation Request')
CONFERENCE_SUBMITTED = Mail(
'conference_submitted',
subject='Project created on Open Science Framework',
)
CONFERENCE_INACTIVE = Mail(
'conference_inactive',
subject='Open Science Framework Error: Conference inactive',
)
CONFERENCE_FAILED = Mail(
'conference_failed',
subject='Open Science Framework Error: No files attached',
)
DIGEST = Mail('digest', subject='OSF Email Digest')
TRANSACTIONAL = Mail('transactional', subject='OSF: ${subject}')
# Retraction related Mail objects
PENDING_RETRACTION_ADMIN = Mail(
'pending_retraction_admin',
subject='Retraction pending for one of your projects.'
)
PENDING_RETRACTION_NON_ADMIN = Mail(
'pending_retraction_non_admin',
subject='Retraction pending for one of your projects.'
)
# Embargo related Mail objects
PENDING_EMBARGO_ADMIN = Mail(
'pending_embargo_admin',
subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_NON_ADMIN = Mail(
'pending_embargo_non_admin',
subject='Registration pending for one of your projects.'
)
# Registration related Mail Objects
PENDING_REGISTRATION_ADMIN = Mail(
'pending_registration_admin',
subject='Registration pending for one of your projects.'
)
PENDING_REGISTRATION_NON_ADMIN = Mail(
'pending_registration_non_admin',
subject='Registration pending for one of your projects.'
)
FILE_OPERATION_SUCCESS = Mail(
'file_operation_success',
subject='Your ${action} has finished',
)
FILE_OPERATION_FAILED = Mail(
'file_operation_failed',
subject='Your ${action} has failed',
)
UNESCAPE = "<% from website.util.sanitize import unescape_entities %> ${unescape_entities(src.title)}"
PROBLEM_REGISTERING = "Problem registering " + UNESCAPE
ARCHIVE_SIZE_EXCEEDED_DESK = Mail(
'archive_size_exceeded_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_SIZE_EXCEEDED_USER = Mail(
'archive_size_exceeded_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_DESK = Mail(
'archive_copy_error_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_USER = Mail(
'archive_copy_error_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_UNCAUGHT_ERROR_DESK = Mail(
'archive_uncaught_error_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_UNCAUGHT_ERROR_USER = Mail(
'archive_uncaught_error_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_SUCCESS = Mail(
'archive_success',
subject="Registration of " + UNESCAPE + " complete"
)
| jolene-esposito/osf.io | website/mails.py | Python | apache-2.0 | 7,045 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import requests_mock
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.opsgenie.hooks.opsgenie_alert import OpsgenieAlertHook
from airflow.utils import db
class TestOpsgenieAlertHook(unittest.TestCase):
conn_id = 'opsgenie_conn_id_test'
opsgenie_alert_endpoint = 'https://api.opsgenie.com/v2/alerts'
_payload = {
'message': 'An example alert message',
'alias': 'Life is too short for no alias',
'description': 'Every alert needs a description',
'responders': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'NOC', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': 'trinity@opsgenie.com', 'type': 'user'},
{'id': 'aee8a0de-c80f-4515-a232-501c0bc9d715', 'type': 'escalation'},
{'name': 'Nightwatch Escalation', 'type': 'escalation'},
{'id': '80564037-1984-4f38-b98e-8a1f662df552', 'type': 'schedule'},
{'name': 'First Responders Schedule', 'type': 'schedule'}
],
'visibleTo': [
{'id': '4513b7ea-3b91-438f-b7e4-e3e54af9147c', 'type': 'team'},
{'name': 'rocket_team', 'type': 'team'},
{'id': 'bb4d9938-c3c2-455d-aaab-727aa701c0d8', 'type': 'user'},
{'username': 'trinity@opsgenie.com', 'type': 'user'}
],
'actions': ['Restart', 'AnExampleAction'],
'tags': ['OverwriteQuietHours', 'Critical'],
'details': {'key1': 'value1', 'key2': 'value2'},
'entity': 'An example entity',
'source': 'Airflow',
'priority': 'P1',
'user': 'Jesse',
'note': 'Write this down'
}
_mock_success_response_body = {
"result": "Request will be processed",
"took": 0.302,
"requestId": "43a29c5c-3dbf-4fa4-9c26-f4f71023e120"
}
def setUp(self):
db.merge_conn(
Connection(
conn_id=self.conn_id,
host='https://api.opsgenie.com/',
password='eb243592-faa2-4ba2-a551q-1afdf565c889'
)
)
def test_get_api_key(self):
hook = OpsgenieAlertHook(opsgenie_conn_id=self.conn_id)
api_key = hook._get_api_key()
self.assertEqual('eb243592-faa2-4ba2-a551q-1afdf565c889', api_key)
def test_get_conn_defaults_host(self):
hook = OpsgenieAlertHook()
hook.get_conn()
self.assertEqual('https://api.opsgenie.com', hook.base_url)
@requests_mock.mock()
def test_call_with_success(self, m):
hook = OpsgenieAlertHook(opsgenie_conn_id=self.conn_id)
m.post(
self.opsgenie_alert_endpoint,
status_code=202,
json=self._mock_success_response_body
)
resp = hook.execute(payload=self._payload)
self.assertEqual(resp.status_code, 202)
self.assertEqual(resp.json(), self._mock_success_response_body)
@requests_mock.mock()
def test_api_key_set(self, m):
hook = OpsgenieAlertHook(opsgenie_conn_id=self.conn_id)
m.post(
self.opsgenie_alert_endpoint,
status_code=202,
json=self._mock_success_response_body
)
resp = hook.execute(payload=self._payload)
self.assertEqual(resp.request.headers.get('Authorization'),
'GenieKey eb243592-faa2-4ba2-a551q-1afdf565c889')
@requests_mock.mock()
def test_api_key_not_set(self, m):
hook = OpsgenieAlertHook()
m.post(
self.opsgenie_alert_endpoint,
status_code=202,
json=self._mock_success_response_body
)
with self.assertRaises(AirflowException):
hook.execute(payload=self._payload)
@requests_mock.mock()
def test_payload_set(self, m):
hook = OpsgenieAlertHook(opsgenie_conn_id=self.conn_id)
m.post(
self.opsgenie_alert_endpoint,
status_code=202,
json=self._mock_success_response_body
)
resp = hook.execute(payload=self._payload)
self.assertEqual(json.loads(resp.request.body), self._payload)
| wooga/airflow | tests/providers/opsgenie/hooks/test_opsgenie_alert.py | Python | apache-2.0 | 5,041 |
# -*- coding: utf-8 -*-
import flask
import functools
import logging
import requests
from .. import storage
from .. import toolkit
from . import cache
from . import config
DEFAULT_CACHE_TAGS_TTL = 48 * 3600
logger = logging.getLogger(__name__)
def is_mirror():
cfg = config.load()
return bool(cfg.get('mirroring', False))
def _response_headers(base):
headers = {}
if not base:
return headers
for k, v in base.iteritems():
if k.lower() == 'content-encoding':
continue
headers[k.lower()] = v
logger.warn(headers)
return headers
def lookup_source(path, stream=False, source=None):
if not source:
cfg = config.load()
mirroring_cfg = cfg.mirroring
if not mirroring_cfg:
return
source = cfg.mirroring['source']
source_url = '{0}{1}'.format(source, path)
headers = {}
for k, v in flask.request.headers.iteritems():
if k.lower() != 'location' and k.lower() != 'host':
headers[k] = v
logger.debug('Request: GET {0}\nHeaders: {1}'.format(
source_url, headers
))
source_resp = requests.get(
source_url,
headers=headers,
cookies=flask.request.cookies,
stream=stream
)
if source_resp.status_code != 200:
logger.debug('Source responded to request with non-200'
' status')
logger.debug('Response: {0}\n{1}\n'.format(
source_resp.status_code, source_resp.text
))
return None
return source_resp
def source_lookup_tag(f):
@functools.wraps(f)
def wrapper(namespace, repository, *args, **kwargs):
cfg = config.load()
mirroring_cfg = cfg.mirroring
resp = f(namespace, repository, *args, **kwargs)
if not mirroring_cfg:
return resp
source = mirroring_cfg['source']
tags_cache_ttl = mirroring_cfg.get('tags_cache_ttl',
DEFAULT_CACHE_TAGS_TTL)
if resp.status_code != 404:
logger.debug('Status code is not 404, no source '
'lookup required')
return resp
if not cache.redis_conn:
# No tags cache available; proxy the request to the source without caching
logger.warning('mirroring: Tags cache is disabled, please set a '
'valid `cache\' directive in the config.')
source_resp = lookup_source(
flask.request.path, stream=False, source=source
)
if not source_resp:
return resp
headers = _response_headers(source_resp.headers)
return toolkit.response(data=source_resp.content, headers=headers,
raw=True)
store = storage.load()
request_path = flask.request.path
if request_path.endswith('/tags'):
# client GETs a list of tags
tag_path = store.tag_path(namespace, repository)
else:
# client GETs a single tag
tag_path = store.tag_path(namespace, repository, kwargs['tag'])
data = cache.redis_conn.get('{0}:{1}'.format(
cache.cache_prefix, tag_path
))
if data is not None:
return toolkit.response(data=data, raw=True)
source_resp = lookup_source(
flask.request.path, stream=False, source=source
)
if not source_resp:
return resp
data = source_resp.content
headers = _response_headers(source_resp.headers)
cache.redis_conn.setex('{0}:{1}'.format(
cache.cache_prefix, tag_path
), tags_cache_ttl, data)
return toolkit.response(data=data, headers=headers,
raw=True)
return wrapper
def source_lookup(cache=False, stream=False, index_route=False):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
cfg = config.load()
mirroring_cfg = cfg.mirroring
resp = f(*args, **kwargs)
if not mirroring_cfg:
return resp
source = mirroring_cfg['source']
if index_route:
source = mirroring_cfg.get('source_index', source)
logger.debug('Source provided, registry acts as mirror')
if resp.status_code != 404:
logger.debug('Status code is not 404, no source '
'lookup required')
return resp
source_resp = lookup_source(
flask.request.path, stream=stream, source=source
)
if not source_resp:
return resp
store = storage.load()
headers = _response_headers(source_resp.headers)
if index_route and 'x-docker-endpoints' in headers:
headers['x-docker-endpoints'] = toolkit.get_endpoints()
if not stream:
logger.debug('JSON data found on source, writing response')
resp_data = source_resp.content
if cache:
store_mirrored_data(
resp_data, flask.request.url_rule.rule, kwargs,
store
)
return toolkit.response(
data=resp_data,
headers=headers,
raw=True
)
logger.debug('Layer data found on source, preparing to '
'stream response...')
layer_path = store.image_layer_path(kwargs['image_id'])
return _handle_mirrored_layer(source_resp, layer_path, store,
headers)
return wrapper
return decorator
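# Illustrative sketch (hypothetical view function, not part of this module): the
# decorator is intended to wrap registry endpoints so that local 404s fall back to
# the configured mirror source, e.g.
#
#   @source_lookup(cache=True, stream=False)
#   def get_image_json(image_id):
#       ...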
def _handle_mirrored_layer(source_resp, layer_path, store, headers):
sr = toolkit.SocketReader(source_resp)
tmp, hndlr = storage.temp_store_handler()
sr.add_handler(hndlr)
def generate():
for chunk in sr.iterate(store.buffer_size):
yield chunk
# FIXME: this could be done outside of the request context
tmp.seek(0)
store.stream_write(layer_path, tmp)
tmp.close()
return flask.Response(generate(), headers=headers)
def store_mirrored_data(data, endpoint, args, store):
logger.debug('Endpoint: {0}'.format(endpoint))
path_method, arglist = ({
'/v1/images/<image_id>/json': ('image_json_path', ('image_id',)),
'/v1/images/<image_id>/ancestry': (
'image_ancestry_path', ('image_id',)
),
'/v1/repositories/<path:repository>/json': (
'registry_json_path', ('namespace', 'repository')
),
}).get(endpoint, (None, None))
if not path_method:
return
logger.debug('Path method: {0}'.format(path_method))
pm_args = {}
for arg in arglist:
pm_args[arg] = args[arg]
logger.debug('Path method args: {0}'.format(pm_args))
storage_path = getattr(store, path_method)(**pm_args)
logger.debug('Storage path: {0}'.format(storage_path))
store.put_content(storage_path, data)
| glenux/contrib-docker-registry | docker_registry/lib/mirroring.py | Python | apache-2.0 | 7,154 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Change default pool_slots to 1
Revision ID: 8646922c8a04
Revises: 449b4072c2da
Create Date: 2021-02-23 23:19:22.409973
"""
import dill
import sqlalchemy as sa
from alembic import op
from sqlalchemy import Column, Float, Integer, PickleType, String
# revision identifiers, used by Alembic.
from sqlalchemy.ext.declarative import declarative_base
from airflow.models.base import COLLATION_ARGS
from airflow.utils.sqlalchemy import UtcDateTime
revision = '8646922c8a04'
down_revision = '449b4072c2da'
branch_labels = None
depends_on = None
Base = declarative_base()
BATCH_SIZE = 5000
ID_LEN = 250
class TaskInstance(Base): # type: ignore
"""Task instance class."""
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
state = Column(String(20))
_try_number = Column('try_number', Integer, default=0)
max_tries = Column(Integer)
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer)
pool = Column(String(50), nullable=False)
pool_slots = Column(Integer, default=1)
queue = Column(String(256))
priority_weight = Column(Integer)
operator = Column(String(1000))
queued_dttm = Column(UtcDateTime)
queued_by_job_id = Column(Integer)
pid = Column(Integer)
executor_config = Column(PickleType(pickler=dill))
external_executor_id = Column(String(ID_LEN, **COLLATION_ARGS))
def upgrade():
"""Change default pool_slots to 1 and make pool_slots not nullable"""
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
session.query(TaskInstance).filter(TaskInstance.pool_slots.is_(None)).update(
{TaskInstance.pool_slots: 1}, synchronize_session=False
)
session.commit()
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False)
def downgrade():
"""Unapply Change default pool_slots to 1"""
with op.batch_alter_table("task_instance", schema=None) as batch_op:
batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=True)
| dhuang/incubator-airflow | airflow/migrations/versions/8646922c8a04_change_default_pool_slots_to_1.py | Python | apache-2.0 | 3,222 |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# uobjnew Summarize object allocations in high-level languages.
# For Linux, uses BCC, eBPF.
#
# USAGE: uobjnew [-h] [-C TOP_COUNT] [-S TOP_SIZE] [-v] {java,ruby,c} pid [interval]
#
# Copyright 2016 Sasha Goldshtein
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 25-Oct-2016 Sasha Goldshtein Created this.
from __future__ import print_function
import argparse
from bcc import BPF, USDT
from time import sleep
examples = """examples:
./uobjnew java 145 # summarize Java allocations in process 145
./uobjnew c 2020 1 # grab malloc() sizes and print every second
./uobjnew ruby 6712 -C 10 # top 10 Ruby types by number of allocations
./uobjnew ruby 6712 -S 10 # top 10 Ruby types by total size
"""
parser = argparse.ArgumentParser(
description="Summarize object allocations in high-level languages.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("language", choices=["java", "ruby", "c"],
help="language to trace")
parser.add_argument("pid", type=int, help="process id to attach to")
parser.add_argument("interval", type=int, nargs='?',
help="print every specified number of seconds")
parser.add_argument("-C", "--top-count", type=int,
help="number of most frequently allocated types to print")
parser.add_argument("-S", "--top-size", type=int,
help="number of largest types by allocated bytes to print")
parser.add_argument("-v", "--verbose", action="store_true",
help="verbose mode: print the BPF program (for debugging purposes)")
args = parser.parse_args()
program = """
#include <linux/ptrace.h>
struct key_t {
#if MALLOC_TRACING
u64 size;
#else
char name[50];
#endif
};
struct val_t {
u64 total_size;
u64 num_allocs;
};
BPF_HASH(allocs, struct key_t, struct val_t);
""".replace("MALLOC_TRACING", "1" if args.language == "c" else "0")
usdt = USDT(pid=args.pid)
#
# Java
#
if args.language == "java":
program += """
int alloc_entry(struct pt_regs *ctx) {
struct key_t key = {};
struct val_t *valp, zero = {};
u64 classptr = 0, size = 0;
bpf_usdt_readarg(2, ctx, &classptr);
bpf_usdt_readarg(4, ctx, &size);
bpf_probe_read(&key.name, sizeof(key.name), (void *)classptr);
valp = allocs.lookup_or_init(&key, &zero);
valp->total_size += size;
valp->num_allocs += 1;
return 0;
}
"""
usdt.enable_probe("object__alloc", "alloc_entry")
#
# Ruby
#
elif args.language == "ruby":
create_template = """
int THETHING_alloc_entry(struct pt_regs *ctx) {
struct key_t key = { .name = "THETHING" };
struct val_t *valp, zero = {};
u64 size = 0;
bpf_usdt_readarg(1, ctx, &size);
valp = allocs.lookup_or_init(&key, &zero);
valp->total_size += size;
valp->num_allocs += 1;
return 0;
}
"""
program += """
int object_alloc_entry(struct pt_regs *ctx) {
struct key_t key = {};
struct val_t *valp, zero = {};
u64 classptr = 0;
bpf_usdt_readarg(1, ctx, &classptr);
bpf_probe_read(&key.name, sizeof(key.name), (void *)classptr);
valp = allocs.lookup_or_init(&key, &zero);
valp->num_allocs += 1; // We don't know the size, unfortunately
return 0;
}
"""
usdt.enable_probe("object__create", "object_alloc_entry")
for thing in ["string", "hash", "array"]:
program += create_template.replace("THETHING", thing)
usdt.enable_probe("%s__create" % thing, "%s_alloc_entry" % thing)
#
# C
#
elif args.language == "c":
program += """
int alloc_entry(struct pt_regs *ctx, size_t size) {
struct key_t key = {};
struct val_t *valp, zero = {};
key.size = size;
valp = allocs.lookup_or_init(&key, &zero);
valp->total_size += size;
valp->num_allocs += 1;
return 0;
}
"""
if args.verbose:
print(usdt.get_text())
print(program)
bpf = BPF(text=program, usdt_contexts=[usdt])
if args.language == "c":
bpf.attach_uprobe(name="c", sym="malloc", fn_name="alloc_entry",
pid=args.pid)
exit_signaled = False
print("Tracing allocations in process %d (language: %s)... Ctrl-C to quit." %
(args.pid, args.language or "none"))
while True:
try:
sleep(args.interval or 99999999)
except KeyboardInterrupt:
exit_signaled = True
print()
data = bpf["allocs"]
if args.top_count:
data = sorted(data.items(), key=lambda (k, v): v.num_allocs)
data = data[-args.top_count:]
elif args.top_size:
data = sorted(data.items(), key=lambda (k, v): v.total_size)
data = data[-args.top_size:]
else:
data = sorted(data.items(), key=lambda (k, v): v.total_size)
print("%-30s %8s %12s" % ("TYPE", "# ALLOCS", "# BYTES"))
for key, value in data:
if args.language == "c":
obj_type = "block size %d" % key.size
else:
obj_type = key.name
print("%-30s %8d %12d" %
(obj_type, value.num_allocs, value.total_size))
if args.interval and not exit_signaled:
bpf["allocs"].clear()
else:
exit()
| mkacik/bcc | tools/uobjnew.py | Python | apache-2.0 | 5,131 |
# Copyright 2022 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python 3
r"""Example of basic DEFINE_config_dataclass usage.
To run this example:
python define_config_dataclass_basic.py -- --my_config.field1=8 \
--my_config.nested.field=2.1 --my_config.tuple='(1, 2, (1, 2))'
"""
import dataclasses
from typing import Any, Mapping, Sequence
from absl import app
from ml_collections import config_flags
@dataclasses.dataclass
class MyConfig:
field1: int
field2: str
nested: Mapping[str, Any]
tuple: Sequence[int]
config = MyConfig(
field1=1,
field2='tom',
nested={'field': 2.23},
tuple=(1, 2, 3),
)
_CONFIG = config_flags.DEFINE_config_dataclass('my_config', config)
def main(_):
print(_CONFIG.value)
if __name__ == '__main__':
app.run(main)
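# Illustrative expected output (assuming the overrides shown in the module docstring):
#   MyConfig(field1=8, field2='tom', nested={'field': 2.1}, tuple=(1, 2, (1, 2)))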
| google/ml_collections | ml_collections/config_flags/examples/define_config_dataclass_basic.py | Python | apache-2.0 | 1,322 |
from model.group import Group
import pytest
def test_add_group(app, db, json_groups):
group = json_groups
with pytest.allure.step('Given a group list'):
old_groups = db.get_group_list()
with pytest.allure.step('When I add a group %s to the list' % group):
app.group.create(group)
#assert len (old_groups) + 1 == app.group.count()
with pytest.allure.step('Then the new group list is equal to the old list with the added group'):
new_groups = db.get_group_list()
old_groups.append(group)
assert sorted (old_groups, key=Group.id_or_max ) == sorted (new_groups, key=Group.id_or_max)
| potolock/proverca | test/test_add_group.py | Python | apache-2.0 | 690 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
PROD = 'prod'
DEV = 'dev'
PEER = 'peer'
BUNDLE = 'bundle'
OPTIONAL = 'optional'
NO_SAVE = 'not saved'
class PackageInstallationVersionOption(object):
EXACT = 'exact'
TILDE = 'tilde'
class PackageManager(object):
"""Defines node package manager functionalities."""
def __init__(self, name, tool_installations):
self.name = name
self.tool_installations = tool_installations
def _get_installation_args(self, install_optional, production_only, force):
"""Returns command line args for installing package.
:param install_optional: True to request install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:rtype: list of strings
"""
raise NotImplementedError
def _get_run_script_args(self):
"""Returns command line args to run a package.json script.
:rtype: list of strings
"""
raise NotImplementedError
def _get_add_package_args(self, package, type_option, version_option):
"""Returns command line args to add a node pacakge.
:rtype: list of strings
"""
raise NotImplementedError()
def run_command(self, args=None, node_paths=None):
"""Returns a command that when executed will run an arbitury command via package manager."""
return command_gen(
self.tool_installations,
self.name,
args=args,
node_paths=node_paths
)
def install_module(
self,
install_optional=False,
production_only=False,
force=False,
node_paths=None):
"""Returns a command that when executed will install node package.
:param install_optional: True to install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:param node_paths: A list of path that should be included in $PATH when
running installation.
"""
args=self._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force)
return self.run_command(args=args, node_paths=node_paths)
def run_script(self, script_name, script_args=None, node_paths=None):
"""Returns a command to execute a package.json script.
:param script_name: Name of the script to name. Note that script name 'test'
can be used to run node tests.
:param script_args: Args to be passed to package.json script.
:param node_paths: A list of path that should be included in $PATH when
running the script.
"""
# TODO: consider add a pants.util function to manipulate command line.
package_manager_args = self._get_run_script_args()
package_manager_args.append(script_name)
if script_args:
package_manager_args.append('--')
package_manager_args.extend(script_args)
return self.run_command(args=package_manager_args, node_paths=node_paths)
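# Illustrative sketch (hypothetical script and args, not from the original file):
#   pm.run_script('test', script_args=['--grep', 'smoke'])
# expands to `yarnpkg run test -- --grep smoke` for Yarn, or
# `npm run-script test -- --grep smoke` for npm.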
def add_package(
self,
package,
node_paths=None,
type_option=PackageInstallationTypeOption.PROD,
version_option=None):
"""Returns a command that when executed will add a node package to current node module.
:param package: string. A valid npm/yarn package description. The accepted forms are
package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
https://url/to.tgz
:param node_paths: A list of path that should be included in $PATH when
running the script.
:param type_option: A value from PackageInstallationTypeOption that indicates the type
of package to be installed. Default to 'prod', which is a production dependency.
:param version_option: A value from PackageInstallationVersionOption that indicates how
to match version. Default to None, which uses package manager default.
"""
args=self._get_add_package_args(
package,
type_option=type_option,
version_option=version_option)
return self.run_command(args=args, node_paths=node_paths)
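# Illustrative sketch (hypothetical package, not from the original file): adding a
# dev dependency pinned to an exact version,
#   pm.add_package('lodash@4.17.4',
#                  type_option=PackageInstallationTypeOption.DEV,
#                  version_option=PackageInstallationVersionOption.EXACT)
# expands to `yarnpkg add lodash@4.17.4 --dev --exact` for Yarn, or
# `npm install lodash@4.17.4 --save-dev --save-exact` for npm.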
def run_cli(self, cli, args=None, node_paths=None):
"""Returns a command that when executed will run an installed cli via package manager."""
cli_args = [cli]
if args:
cli_args.append('--')
cli_args.extend(args)
return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
def _get_run_script_args(self):
return ['run']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['--non-interactive']
if not install_optional:
return_args.append('--ignore-optional')
if production_only:
return_args.append('--production=true')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['add', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '', # Yarn save production is the default.
PackageInstallationTypeOption.DEV: '--dev',
PackageInstallationTypeOption.PEER: '--peer',
PackageInstallationTypeOption.OPTIONAL: '--optional',
PackageInstallationTypeOption.BUNDLE: None,
PackageInstallationTypeOption.NO_SAVE: None,
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--exact',
PackageInstallationVersionOption.TILDE: '--tilde',
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
class PackageManagerNpm(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
def _get_run_script_args(self):
return ['run-script']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['install']
if not install_optional:
return_args.append('--no-optional')
if production_only:
return_args.append('--production')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['install', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '--save-prod',
PackageInstallationTypeOption.DEV: '--save-dev',
PackageInstallationTypeOption.PEER: None,
PackageInstallationTypeOption.OPTIONAL: '--save-optional',
PackageInstallationTypeOption.BUNDLE: '--save-bundle',
PackageInstallationTypeOption.NO_SAVE: '--no-save',
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--save-exact',
PackageInstallationVersionOption.TILDE: None,
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored.'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
def run_cli(self, cli, args=None, node_paths=None):
raise RuntimeError('npm does not support run cli directly. Please use Yarn instead.')
| UnrememberMe/pants | contrib/node/src/python/pants/contrib/node/subsystems/package_managers.py | Python | apache-2.0 | 8,798 |
#!/usr/bin/python3
ci_version="0.10"
# This script is used to retrieve corpus information. It can be run after the parser
# has finished its work. The corpus information is part of the final report.
# Database connection is configured in the server configuration.
# Include custom libs
import sys
sys.path.append( '../../include/python' )
import serverutils.config as config
import serverutils.mongohelper as mongohelper
import pymongo
mongoClient, mongoDb = mongohelper.getMongoClient(silent = True)
# Initialize all values. To make finding minimal values, we set those
# variables to an extremely high value initially, so that there is at least
# one character that has less...
movieCount = 0
characterCount = 0
minPerMovieCharacterCount = 999
minPerMovieCharacterCountMovie = None
maxPerMovieCharacterCount = 0
maxPerMovieCharacterCountMovie = None
totalWordCount = 0
characterWordCounts = []
minWordCount = 99999
maxWordCount = 0
minWordCountChar = None
maxWordCountChar = None
print("Processing movies ",end="")
# For every movie in our database
for movie in mongoDb.rawMovies.find():
print(".",end="",flush=True)
# Count the movie and (re-)initialize movie-specific variables
movieCount+=1
characters = {}
movieCharacterCount = 0
# For every quote...
for quote in mongoDb.rawQuotes.find({'_id.movie': movie['_id']}):
# Sort the quotes into character-specific lists to be able to generate
# values for the characters
if quote['character'] in characters:
characters[quote['character']] = characters[quote['character']] + " " + quote['text']
else:
characters[quote['character']] = quote['text']
movieCharacterCount += 1
# Calculating word counts for every character
wordCounts = {cid: len(txt.split()) for cid,txt in characters.items()}
for char, wc in wordCounts.items():
totalWordCount += wc
characterWordCounts += [wc]
charname = char + " (" + movie['normalizedMovieId'] + ")"
if minWordCount > wc:
minWordCount = wc
minWordCountChar = charname
elif minWordCount == wc:
minWordCountChar += ", " + charname
if maxWordCount < wc:
maxWordCount = wc
maxWordCountChar = charname
elif maxWordCount == wc:
maxWordCountChar += ", " + charname
# Adding to total Character Count
characterCount += movieCharacterCount
# Counting Characters per Movie
if minPerMovieCharacterCount > movieCharacterCount:
minPerMovieCharacterCount = movieCharacterCount
minPerMovieCharacterCountMovie = movie['normalizedMovieId']
elif minPerMovieCharacterCount == movieCharacterCount:
minPerMovieCharacterCountMovie+= ", " + movie['normalizedMovieId']
if maxPerMovieCharacterCount < movieCharacterCount:
maxPerMovieCharacterCount = movieCharacterCount
maxPerMovieCharacterCountMovie = movie['normalizedMovieId']
elif maxPerMovieCharacterCount == movieCharacterCount:
maxPerMovieCharacterCountMovie += ", " + movie['normalizedMovieId']
# Display results
print("")
print("Movies in DB: ", movieCount)
print("Total characters: ", characterCount)
print("Total words: ", totalWordCount)
print()
print("Characters per movie... ")
print(" ... on avarage: ", float(characterCount)/float(movieCount))
print(" ... max: ", maxPerMovieCharacterCount, "(in "+maxPerMovieCharacterCountMovie+")")
print(" ... min: ", minPerMovieCharacterCount, "(in "+minPerMovieCharacterCountMovie+")")
print()
print("Word count...")
print(" ... avg. per character: ", totalWordCount / characterCount)
print(" ... avg. per movie: ", totalWordCount / movieCount)
print(" ... max: ", maxWordCount, "(for " + maxWordCountChar + ")")
print(" ... min: ", minWordCount, "(for " + minWordCountChar + ")")
print()
print("Word count - amount of characters:")
for i in range(0, maxWordCount + 500, 500):
print(" " + str(i) + " - " + str(i+499) + ": "+str(len(list(filter(lambda a: i <= a < i+500, characterWordCounts)))))
| Thylossus/tud-movie-character-insights | Server/Tools/Parser/corpusInfo.py | Python | apache-2.0 | 3,953 |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.urls import reverse
import logging
import requests
from ci.git_api import GitAPI, GitException, copydoc
import re
import json
try:
from urllib.parse import quote_plus, urljoin
except ImportError:
from urllib import quote_plus
from urlparse import urljoin
logger = logging.getLogger('ci')
class GitLabAPI(GitAPI):
STATUS = ((GitAPI.PENDING, "pending"),
(GitAPI.ERROR, "failed"),
(GitAPI.SUCCESS, "success"),
(GitAPI.FAILURE, "failed"),
(GitAPI.RUNNING, "running"),
(GitAPI.CANCELED, "canceled"),
)
def __init__(self, config, access_user=None, token=None):
super(GitLabAPI, self).__init__(config, access_user=access_user, token=token)
self._api_url = '%s/api/v4' % config.get("api_url", "")
self._hostname = config.get("hostname", "unknown_gitlab")
self._prefix = "%s_" % self._hostname
self._html_url = config.get("html_url", "")
self._ssl_cert = config.get("ssl_cert", False)
self._repos_key = "%s_repos" % self._prefix
self._org_repos_key = "%s_org_repos" % self._prefix
self._user_key= "%s_user" % self._prefix
if access_user is not None and access_user.token:
token = json.loads(access_user.token)
# For backwards compatability, users that haven't signed in
# with the new OAuth2 application, their current token
# is a private token which requires a different http header to be set.
# The OAuth2 token data has the "token_type" key
# while the private token data just has the "access_token" key
if "token_type" in token:
self._session = self._access_user.start_session()
else:
self._headers["PRIVATE-TOKEN"] = token.get("access_token")
self._session = requests
elif self._token is not None:
# We assume the token that is passed in is a personal access token
# or private token
self._headers["PRIVATE-TOKEN"] = self._token
self._session = requests
else:
self._session = requests
@copydoc(GitAPI.sign_in_url)
def sign_in_url(self):
return reverse('ci:gitlab:sign_in', args=[self._hostname])
def _gitlab_id(self, owner, repo):
name = '%s/%s' % (owner, repo)
return quote_plus(name)
def _repo_url(self, path_with_namespace):
return '%s/projects/%s' % (self._api_url, quote_plus(path_with_namespace))
def _project_url(self, project_id):
"""
Get the projects API URL based on project ID.
Input:
project_id[int]: Project ID
"""
return "%s/projects/%s" % (self._api_url, project_id)
def _branch_by_id_url(self, repo_id, branch_id):
"""
Get the branch API URL using IDs instead of owner/repo/branch.
Input:
repo_id[int]: ID of the repo
branch_id[int]: ID of the branch
"""
return "%s/projects/%s/repository/branches/%s" % (self._api_url, repo_id, quote_plus(str(branch_id)))
@copydoc(GitAPI.branch_html_url)
def branch_html_url(self, owner, repo, branch):
return "%s/tree/%s" % (self.repo_html_url(owner, repo), branch)
@copydoc(GitAPI.repo_html_url)
def repo_html_url(self, owner, repo):
return "%s/%s/%s" % (self._html_url, owner, repo)
def _comment_api_url(self, project_id, pr_iid):
"""
Get the API URL for a comment.
Input:
project_id[int]: ID of the project
pr_iid[int]: Repo internal MR ID
"""
return "%s/projects/%s/merge_requests/%s/notes" % (self._api_url, project_id, pr_iid)
@copydoc(GitAPI.commit_html_url)
def commit_html_url(self, owner, repo, sha):
return '%s/commit/%s' % (self.repo_html_url(owner, repo), sha)
def _pr_html_url(self, repo_path, pr_iid):
return '{}/{}/merge_requests/{}'.format(self._html_url, repo_path, pr_iid)
@copydoc(GitAPI.get_all_repos)
def get_all_repos(self, owner):
repos = self._get_user_repos(owner)
repos.extend(self._get_user_org_repos(owner))
return repos
def _get_user_repos(self, username):
"""
Gets a list of repos username owns or is a collaborator on.
"""
url = "%s/projects" % self._api_url
get_data = {"simple": True}
data = self.get_all_pages(url, params=get_data)
owner_repo = []
if not self._bad_response and data:
for repo in data:
r = repo["path_with_namespace"]
if r.startswith("%s/" % username):
owner_repo.append(r)
owner_repo.sort()
return owner_repo
@copydoc(GitAPI.get_repos)
def get_repos(self, session):
if self._repos_key in session:
return session[self._repos_key]
username = session.get(self._user_key, "")
if username:
owner_repo = self._get_user_repos(username)
session[self._repos_key] = owner_repo
return owner_repo
@copydoc(GitAPI.get_branches)
def get_branches(self, path_with_namespace):
url = "%s/repository/branches" % (self._repo_url(path_with_namespace))
data = self.get_all_pages(url)
branches = []
if not self._bad_response and data:
for branch in data:
branches.append(branch['name'])
branches.sort()
return branches
def _get_user_org_repos(self, username):
"""
Get a list of repos in namespaces other than the user's own (i.e. organization/group repos the user has access to).
"""
url = "%s/projects" % self._api_url
get_data = {"simple": True}
data = self.get_all_pages(url, params=get_data)
org_repo = []
if not self._bad_response and data:
for repo in data:
org = repo['path_with_namespace']
if not org.startswith("%s/" % username):
org_repo.append(org)
org_repo.sort()
return org_repo
def _status_str(self, status):
"""
Used to convert a GitAPI status into a string that GitLab wants.
"""
for status_pair in self.STATUS:
if status == status_pair[0]:
return status_pair[1]
return None
@copydoc(GitAPI.update_pr_status)
def update_pr_status(self, base, head, state, event_url, description, context, job_stage):
"""
This updates the status of a paritcular commit associated with a PR.
"""
if not self._update_remote:
return
if job_stage in [self.STATUS_START_RUNNING, self.STATUS_CONTINUE_RUNNING]:
# GitLab doesn't like setting status to "running" multiple times
# and there is no point since we are only updating the description
# and that doesn't show up anywhere
return
path_with_namespace = "%s/%s" % (head.user().name, head.repo().name)
data = {
'id': quote_plus(path_with_namespace),
'sha': head.sha,
'ref': head.branch.name,
'state': self._status_str(state),
'target_url': event_url,
'description': description,
'name': context,
}
url = "%s/statuses/%s?state=%s" % (self._repo_url(path_with_namespace),
head.sha,
self._status_str(state))
response = self.post(url, data=data)
if not self._bad_response and response.status_code not in [200, 201, 202]:
logger.warning("Error setting pr status %s\nSent data:\n%s\nReply:\n%s" % \
(url, self._format_json(data), self._format_json(response.json())))
elif not self._bad_response:
logger.info("Set pr status %s:\nSent Data:\n%s" % (url, self._format_json(data)))
def _is_group_member(self, group_id, username):
"""
Returns whether the user is a member of the group_id
"""
url = "%s/groups/%s/members" % (self._api_url, group_id)
data = self.get_all_pages(url)
if not self._bad_response or data:
for member in data:
if member.get('username') == username:
return True
return False
@copydoc(GitAPI.is_collaborator)
def is_collaborator(self, user, repo):
if repo.user == user:
# the user is the owner
return True
path_with_namespace = '%s/%s' % (repo.user.name, repo.name)
url = "%s/users" % self._repo_url(path_with_namespace)
extra = {"search": user.name}
response = self.get(url, params=extra)
if not self._bad_response:
data = response.json()
for member in data:
if member.get('username') == user.name:
return True
return False
@copydoc(GitAPI.pr_comment)
def pr_comment(self, url, msg):
if not self._update_remote:
return
comment = {'body': msg}
self.post(url, data=comment)
if not self._bad_response:
logger.info("Posted comment to %s.\nComment: %s" %(url, msg))
else:
self._add_error("Failed to leave comment at %s.\nComment: %s" %(url, msg))
@copydoc(GitAPI.last_sha)
def last_sha(self, owner, repo, branch):
path_with_namespace = '%s/%s' % (owner, repo)
url = "%s/repository/branches/%s" % (self._repo_url(path_with_namespace), quote_plus(str(branch)))
response = self.get(url)
if not self._bad_response:
data = response.json()
return data['commit']['id']
@copydoc(GitAPI.install_webhooks)
def install_webhooks(self, user, repo):
"""
        Updates the webhook for this server on GitLab.
Input:
user[models.GitUser]: The user trying to update the web hooks.
repo[models.Repository]: The repository to set the web hook on.
Raises:
GitException if there are any errors.
"""
if not self._install_webhook:
return
path_with_namespace = '%s/%s' % (repo.user.name, repo.name)
hook_url = '%s/hooks' % self._repo_url(path_with_namespace)
callback_url = urljoin(self._civet_url, reverse('ci:gitlab:webhook', args=[user.build_key]))
data = self.get_all_pages(hook_url)
have_hook = False
if not self._bad_response and data:
for hook in data:
if hook.get('merge_requests_events') and hook.get('push_events') and hook.get('url') == callback_url:
have_hook = True
break
if have_hook:
return
add_hook = {
'id': self._gitlab_id(repo.user.name, repo.name),
'url': callback_url,
'push_events': 'true',
'merge_requests_events': 'true',
'issues_events': 'false',
'tag_push_events': 'false',
'note_events': 'false',
'enable_ssl_verification': 'false',
}
response = self.post(hook_url, data=add_hook)
if self._bad_response:
raise GitException(self._format_json(response.json()))
logger.info('Added webhook to %s for user %s' % (repo, user.name))
def _get_pr_changed_files(self, owner, repo, pr_iid):
"""
Gets a list of changed files in this PR.
Input:
owner[str]: name of the owner of the repo
repo[str]: name of the repository
          pr_iid[int]: internal id (iid) of the merge request
Return:
list[str]: Filenames that have changed in the PR
"""
url = "%s/projects/%s/merge_requests/%s/changes" % (self._api_url, self._gitlab_id(owner, repo), pr_iid)
data = self.get_all_pages(url)
filenames = []
if not self._bad_response and data:
filenames = [ f['new_path'] for f in data['changes'] ]
filenames.sort()
if not filenames and not self._bad_response:
self._add_error("Didn't read any PR changed files at URL: %s\nData:\n%s" % (url, self._format_json(data)))
return filenames
def _get_project_access_level(self, path_with_namespace):
"""
Gets the access level for a project for the current authorized user.
Input:
          path_with_namespace[str]: "<owner>/<repo>" path of the project
"""
access_level_map = {10: "Guest", 20: "Reporter", 30: "Developer", 40: "Master", 50: "Owner"}
url = "%s/user" % self._api_url
user_id = None
# This will get the info on the currently authorized user
response = self.get(url)
if self._bad_response:
return "Unknown"
data = response.json()
user_id = data.get("id")
if not user_id:
return "Unknown"
# /projects/<project>/users doesn't seem to give the access level, so use members
url = "%s/members/%s" % (self._repo_url(path_with_namespace), user_id)
response = self.get(url)
if not self._bad_response:
data = response.json()
access_level = data.get("access_level")
return access_level_map.get(access_level, "Unknown")
# If we get here then the signed in user is not in projects/members but could
# be in groups/members. GitLab API sucks. See https://gitlab.com/gitlab-org/gitlab-ce/issues/18672
url = self._repo_url(path_with_namespace)
response = self.get(url)
if self._bad_response:
return "Unknown"
data = response.json()
namespace = data.get("namespace")
group_id = namespace.get("id")
url = "%s/groups/%s/members/%s" % (self._api_url, group_id, user_id)
response = self.get(url)
if self._bad_response:
return "Unknown"
data = response.json()
access_level = data.get("access_level")
return access_level_map.get(access_level, "Unknown")
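    # Illustrative sketch, not part of the original class: the lookup above falls
    # back from the project's /members endpoint to the owning group's /members
    # endpoint, and the raw integer is finally translated with access_level_map,
    # e.g. access_level_map.get(30, "Unknown") -> "Developer", while unmapped
    # levels resolve to "Unknown".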
@copydoc(GitAPI.get_pr_comments)
def get_pr_comments(self, url, username, comment_re):
data = self.get_all_pages(url)
comments = []
if not self._bad_response and data:
for c in data:
if c["author"]["username"] != username:
continue
if re.search(comment_re, c["body"]):
c["url"] = "%s/%s" % (url, c["id"])
comments.append(c)
return comments
@copydoc(GitAPI.remove_pr_comment)
def remove_pr_comment(self, comment):
if not self._update_remote:
return
url = comment.get("url")
self.delete(url)
if not self._bad_response:
logger.info("Removed comment: %s" % url)
@copydoc(GitAPI.edit_pr_comment)
def edit_pr_comment(self, comment, msg):
if not self._update_remote:
return
url = comment.get("url")
self.put(url, data={"body": msg})
if not self._bad_response:
logger.info("Edited PR comment: %s" % url)
@copydoc(GitAPI.is_member)
def is_member(self, team, user):
if user.name == team:
return True
return self._is_group_member(team, user.name)
@copydoc(GitAPI.get_open_prs)
def get_open_prs(self, owner, repo):
path_with_namespace = '%s/%s' % (owner, repo)
url = "%s/merge_requests" % self._repo_url(path_with_namespace)
params = {"state": "opened"}
data = self.get_all_pages(url, params=params)
if not self._bad_response and data is not None:
open_prs = []
for pr in data:
open_prs.append({"number": pr["iid"], "title": pr["title"], "html_url": pr["web_url"]})
return open_prs
return None
def _get_issues(self, path_with_namespace, title):
"""
Get a list of open issues owned by the authenticated user that have the given title
"""
url = "%s/issues" % self._repo_url(path_with_namespace)
params = {"state": "opened", "scope": "created-by-me", "search": title}
data = self.get_all_pages(url, params=params)
matched_issues = []
if not self._bad_response and data:
for i in data:
if i["title"] == title:
matched_issues.append(i)
return matched_issues
def _create_issue(self, path_with_namespace, title, body):
"""
Create an issue on a repo with the given title and body
"""
url = "%s/issues" % self._repo_url(path_with_namespace)
post_data = {"title": title, "description": body}
data = self.post(url, data=post_data)
if not self._bad_response and data:
logger.info("Created issue '%s': %s" % (title, data.json().get("web_url")))
def _edit_issue(self, path_with_namespace, issue_id, title, body):
"""
Modify the given issue on a repo with the given title and body
"""
url = "%s/issues/%s" % (self._repo_url(path_with_namespace), issue_id)
post_data = {"title": title, "description": body}
data = self.put(url, data=post_data)
if not self._bad_response and data:
logger.info("Updated issue '%s': %s" % (title, data.json().get("web_url")))
@copydoc(GitAPI.create_or_update_issue)
def create_or_update_issue(self, owner, repo, title, body, new_comment):
path_with_namespace = '%s/%s' % (owner, repo)
if not self._update_remote:
return
existing_issues = self._get_issues(path_with_namespace, title)
if existing_issues:
issue_id = existing_issues[-1]["iid"]
if new_comment:
url = "%s/issues/%s/notes" % (self._repo_url(path_with_namespace), issue_id)
self.pr_comment(url, body)
else:
self._edit_issue(path_with_namespace, issue_id, title, body)
else:
self._create_issue(path_with_namespace, title, body)
@copydoc(GitAPI.pr_review_comment)
def pr_review_comment(self, url, sha, filepath, position, msg):
self._add_error("GitLab function not implemented: pr_review_comment")
@copydoc(GitAPI.add_pr_label)
def add_pr_label(self, repo, pr_num, label_name):
self._add_error("GitLab function not implemented: add_pr_label")
@copydoc(GitAPI.remove_pr_label)
def remove_pr_label(self, repo, pr_num, label_name):
self._add_error("GitLab function not implemented: remove_pr_label")
@copydoc(GitAPI.automerge)
def automerge(self, repo, pr_num):
return False
| idaholab/civet | ci/gitlab/api.py | Python | apache-2.0 | 19,475 |
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo.config import cfg
from nova import config
from nova.objectstore import s3server
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
LOG = logging.getLogger('nova.all')
utils.monkey_patch()
launcher = service.process_launcher()
# nova-api
for api in CONF.enabled_apis:
try:
should_use_ssl = api in CONF.enabled_ssl_apis
server = service.WSGIService(api, use_ssl=should_use_ssl)
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % '%s-api' % api)
for mod in [s3server, xvp_proxy]:
try:
launcher.launch_service(mod.get_wsgi_server())
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % mod.__name__)
for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
'nova-cert', 'nova-conductor', 'nova-kvmha']:
# FIXME(sirp): Most service configs are defined in nova/service.py, but
# conductor has set a new precedent of storing these configs
# nova/<service>/api.py.
#
# We should update the existing services to use this new approach so we
# don't have to treat conductor differently here.
if binary == 'nova-conductor':
topic = CONF.conductor.topic
manager = CONF.conductor.manager
else:
topic = None
manager = None
try:
launcher.launch_service(service.Service.create(binary=binary,
topic=topic,
manager=manager))
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s'), binary)
launcher.wait()
| leilihh/novaha | nova/cmd/all.py | Python | apache-2.0 | 3,403 |
class NodeType(object):
Base = 'base'
Model = 'model'
Analysis = 'analysis'
Test = 'test'
Archive = 'archive'
Macro = 'macro'
Operation = 'operation'
Seed = 'seed'
Documentation = 'documentation'
@classmethod
def executable(cls):
return [
cls.Model,
cls.Test,
cls.Archive,
cls.Analysis,
cls.Operation,
cls.Seed,
cls.Documentation,
]
@classmethod
def refable(cls):
return [
cls.Model,
cls.Seed,
]
class RunHookType:
Start = 'on-run-start'
End = 'on-run-end'
Both = [Start, End]
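# Illustrative usage sketch, not part of the original module: the classmethods
# above are plain membership helpers over the string constants.
def _example_node_type_usage():
    assert NodeType.Model in NodeType.refable()
    assert NodeType.Macro not in NodeType.executable()
    assert RunHookType.Both == [RunHookType.Start, RunHookType.End]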
| nave91/dbt | dbt/node_types.py | Python | apache-2.0 | 686 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeployModel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_EndpointService_DeployModel_async]
from google.cloud import aiplatform_v1beta1
async def sample_deploy_model():
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1beta1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
deployed_model.model = "model_value"
request = aiplatform_v1beta1.DeployModelRequest(
endpoint="endpoint_value",
deployed_model=deployed_model,
)
# Make the request
operation = client.deploy_model(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_EndpointService_DeployModel_async]
| googleapis/python-aiplatform | samples/generated_samples/aiplatform_v1beta1_generated_endpoint_service_deploy_model_async.py | Python | apache-2.0 | 1,792 |
import logging
import os
import time
from parsl.providers.error import ScaleOutFailed
from parsl.channels import LocalChannel
from parsl.launchers import AprunLauncher
from parsl.providers.cobalt.template import template_string
from parsl.providers.cluster_provider import ClusterProvider
from parsl.utils import RepresentationMixin, wtime_to_minutes
logger = logging.getLogger(__name__)
translate_table = {
'QUEUED': 'PENDING',
'STARTING': 'PENDING',
'RUNNING': 'RUNNING',
'EXITING': 'COMPLETED',
'KILLING': 'COMPLETED'
}
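# Illustrative sketch, not part of the original module: Cobalt job states are
# folded into parsl's coarser states through this table, and states missing from
# it fall back to 'UNKNOWN' where the table is consulted below, e.g.
#   translate_table.get('STARTING', 'UNKNOWN')   # -> 'PENDING'
#   translate_table.get('USER_HOLD', 'UNKNOWN')  # hypothetical state -> 'UNKNOWN'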
class CobaltProvider(ClusterProvider, RepresentationMixin):
""" Cobalt Execution Provider
This provider uses cobalt to submit (qsub), obtain the status of (qstat), and cancel (qdel)
    jobs. The script to be used is created from a template file in this
same module.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~parsl.channels.LocalChannel` (the default),
:class:`~parsl.channels.SSHChannel`, or
:class:`~parsl.channels.SSHInteractiveLoginChannel`.
nodes_per_block : int
Nodes to provision per block.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
walltime : str
Walltime requested per block in HH:MM:SS.
account : str
Account that the job will be charged against.
queue : str
Torque queue to request blocks from.
scheduler_options : str
String to prepend to the submit script to the scheduler.
worker_init : str
Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~parsl.launchers.AprunLauncher` (the default) or,
:class:`~parsl.launchers.SingleNodeLauncher`
"""
def __init__(self,
channel=LocalChannel(),
nodes_per_block=1,
init_blocks=0,
min_blocks=0,
max_blocks=10,
parallelism=1,
walltime="00:10:00",
account=None,
queue=None,
scheduler_options='',
worker_init='',
launcher=AprunLauncher(),
cmd_timeout=10):
label = 'cobalt'
super().__init__(label,
channel=channel,
nodes_per_block=nodes_per_block,
init_blocks=init_blocks,
min_blocks=min_blocks,
max_blocks=max_blocks,
parallelism=parallelism,
walltime=walltime,
launcher=launcher,
cmd_timeout=cmd_timeout)
self.account = account
self.queue = queue
self.scheduler_options = scheduler_options
self.worker_init = worker_init
def _status(self):
""" Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
jobs_missing = list(self.resources.keys())
retcode, stdout, stderr = super().execute_wait("qstat -u $USER")
# Execute_wait failed. Do no update.
if retcode != 0:
return
for line in stdout.split('\n'):
if line.startswith('='):
continue
parts = line.upper().split()
if parts and parts[0] != 'JOBID':
job_id = parts[0]
if job_id not in self.resources:
continue
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
        # qstat does not report on jobs that are no longer queued or running. So we are filling in the
# blanks for missing jobs, we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['RUNNING', 'KILLING', 'EXITING']:
self.resources[missing_job]['status'] = translate_table['EXITING']
def submit(self, command, blocksize, tasks_per_node, job_name="parsl.auto"):
        Submits the command onto a Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
        If tasks_per_node < 1: this is illegal; tasks_per_node must be a positive integer.
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
- tasks_per_node (int) : command invocations to be launched per node
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
if self.provisioned_blocks >= self.max_blocks:
logger.warn("[%s] at capacity, cannot add more blocks now", self.label)
return None
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
account_opt = '-A {}'.format(self.account) if self.account is not None else ''
job_name = "parsl.{0}.{1}".format(job_name, time.time())
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
job_config = {}
job_config["scheduler_options"] = self.scheduler_options
job_config["worker_init"] = self.worker_init
logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s",
blocksize, self.nodes_per_block, tasks_per_node)
# Wrap the command
job_config["user_script"] = self.launcher(command, tasks_per_node, self.nodes_per_block)
queue_opt = '-q {}'.format(self.queue) if self.queue is not None else ''
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
command = 'qsub -n {0} {1} -t {2} {3} {4}'.format(
self.nodes_per_block, queue_opt, wtime_to_minutes(self.walltime), account_opt, channel_script_path)
logger.debug("Executing {}".format(command))
retcode, stdout, stderr = super().execute_wait(command)
# TODO : FIX this block
if retcode != 0:
logger.error("Failed command: {0}".format(command))
logger.error("Launch failed stdout:\n{0} \nstderr:{1}\n".format(stdout, stderr))
logger.debug("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
job_id = None
if retcode == 0:
# We should be getting only one line back
job_id = stdout.strip()
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
else:
logger.error("Submission of command to scale_out failed: {0}".format(stderr))
raise (ScaleOutFailed(self.__class__, "Request to submit job to local scheduler failed"))
logger.debug("Returning job id : {0}".format(job_id))
return job_id
def cancel(self, job_ids):
""" Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
"""
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = super().execute_wait("qdel {0}".format(job_id_list))
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = translate_table['KILLING'] # Setting state to cancelled
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
| swift-lang/swift-e-lab | parsl/providers/cobalt/cobalt.py | Python | apache-2.0 | 8,583 |
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
from .compat import no_limit_int # NOQA
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
class ScalarFloat(float):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
width = kw.pop('width', None) # type: ignore
prec = kw.pop('prec', None) # type: ignore
m_sign = kw.pop('m_sign', None) # type: ignore
m_lead0 = kw.pop('m_lead0', 0) # type: ignore
exp = kw.pop('exp', None) # type: ignore
e_width = kw.pop('e_width', None) # type: ignore
e_sign = kw.pop('e_sign', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
v = float.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._prec = prec
v._m_sign = m_sign
v._m_lead0 = m_lead0
v._exp = exp
v._e_width = e_width
v._e_sign = e_sign
v._underscore = underscore
return v
def __iadd__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
return x
def dump(self, out=sys.stdout):
# type: (Any) -> Any
print('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
self, self._width, self._prec, self._m_sign, self._m_lead0, # type: ignore
self._exp, self._e_width, self._e_sign), file=out) # type: ignore
class ExponentialFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
class ExponentialCapsFloat(ScalarFloat):
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
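# Illustrative sketch, not part of the original module: ScalarFloat keeps the
# round-trip formatting hints (width, precision, exponent, underscores) that a
# plain float would lose; the sample values below are assumptions.
def _example_scalar_float():
    v = ScalarFloat(3.14, width=4, prec=2)
    assert float(v) == 3.14 and v._width == 4
    w = v
    w += 1  # the in-place helpers return a new ScalarFloat that carries _width over
    assert isinstance(w, ScalarFloat) and w._width == 4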
| Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/ruamel/yaml/scalarfloat.py | Python | apache-2.0 | 3,378 |
#!/usr/local/bin/python2.7
# encoding: utf-8
from __future__ import division
from argparse import ArgumentParser
import logging
import sys
from expertfinding import ExpertFinding
from collections import Counter
def main():
'''Command line options.'''
parser = ArgumentParser()
parser.add_argument("-n", "--names", required=True, action="store", nargs="+", help="Names of people to profile")
parser.add_argument("-s", "--storage_db", required=True, action="store", help="Storage DB file")
args = parser.parse_args()
exf = ExpertFinding(args.storage_db, False)
for name in args.names:
a_id = exf.author_id(name)[0]
print a_id, exf.name(a_id), exf.institution(a_id)
for entity, freq, iaf, ef_iaf, max_rho, years in exf.ef_iaf(a_id)[:50]:
years_freq = ", ".join("{}{}".format(y, "(x{})".format(freq) if freq > 1 else "") for y, freq in sorted(Counter(years).items(), key=lambda p: p[0]))
print u"{} freq={:.1%} importance={:.2f} entity_rarity={:.2f} max_rho={:.3f} years={}".format(entity, freq, ef_iaf*100, iaf, max_rho, years_freq)
print
return 0
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
sys.exit(main())
| marcocor/expert-finding | src/main/python/expertfinding/preprocessing/create_profile.py | Python | apache-2.0 | 1,232 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for module management."""
# Do not add any imports to non-standard modules here.
import os
import site
import sys
def _config_modules_directory(root_directory):
"""Get the config modules directory."""
config_dir = os.getenv('CONFIG_DIR_OVERRIDE')
if not config_dir:
config_dir = os.path.join(root_directory, 'src', 'appengine', 'config')
return os.path.join(config_dir, 'modules')
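# Illustrative sketch, not part of the original module; the paths below are
# hypothetical: with CONFIG_DIR_OVERRIDE unset,
#   _config_modules_directory('/srv/clusterfuzz')
# resolves to '/srv/clusterfuzz/src/appengine/config/modules', while exporting
# CONFIG_DIR_OVERRIDE=/tmp/cfg makes the same call resolve to '/tmp/cfg/modules'.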
def _patch_appengine_modules_for_bots():
"""Patch out App Engine reliant behaviour from bots."""
if os.getenv('SERVER_SOFTWARE'):
# Not applicable on App Engine.
return
# google.auth uses App Engine credentials based on importability of
# google.appengine.api.app_identity.
try:
from google.auth import app_engine as auth_app_engine
if auth_app_engine.app_identity:
auth_app_engine.app_identity = None
except ImportError:
pass
def fix_module_search_paths():
"""Add directories that we must be able to import from to path."""
root_directory = os.environ['ROOT_DIR']
source_directory = os.path.join(root_directory, 'src')
python_path = os.getenv('PYTHONPATH', '').split(os.pathsep)
third_party_libraries_directory = os.path.join(source_directory,
'third_party')
config_modules_directory = _config_modules_directory(root_directory)
if (os.path.exists(config_modules_directory) and
config_modules_directory not in sys.path):
sys.path.insert(0, config_modules_directory)
python_path.insert(0, config_modules_directory)
if third_party_libraries_directory not in sys.path:
sys.path.insert(0, third_party_libraries_directory)
python_path.insert(0, third_party_libraries_directory)
if source_directory not in sys.path:
sys.path.insert(0, source_directory)
python_path.insert(0, source_directory)
os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
# Add site directory to make from imports work in google namespace.
site.addsitedir(third_party_libraries_directory)
# TODO(ochang): Remove this once SDK is removed from images.
_patch_appengine_modules_for_bots()
| google/clusterfuzz | src/clusterfuzz/_internal/base/modules.py | Python | apache-2.0 | 2,700 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from foo_receiver import FooReceiver
from foo_listener_bf import FooListenerBfHelper
from PyCFFIlib_cffi import ffi, lib
import gc
class FooListenerBfImpl:
def delete_fl_in_fl(self):
print ("Not to be used")
def on_string_change(self, prs):
print ("FooListenerImpl.py: on_string_change prs", prs)
self._prs = prs
return self._prs
def get_string(self):
return self._prs
def set_listener_bf(self,fl):
self._fl = fl
def get_listener_bf(self):
return self._fl
def set_binary(self,b):
print ("setting Binary in FooListenerBfImpl ", b)
self._b = b
def get_binary(self):
return self._b
def send_return(self,fl):
return fl
def create():
# TODO: decide if we want to have this here or make checks in the helper.frompy for all
# methods to exist as attributes on the class more lenient
print ("I don't use it but the +p +c plus the check in fromPy for having all methods needs me to have this")
def fr_set_get(fr, fl, s):
fr.add_listener_bf(fl)
assert fr.set_private_bf_string(s) == s, "test_interface_back_forth failed"
# assert fl._prs == s, "test_interface_back_forth failed"
assert fr.get_listener_bf_string() == s, "test_interface_back_forth failed"
# back and forth via regular calls from python to cpp
def test_interface_back_forth():
print ("start test len ", len(FooListenerBfHelper.c_data_set))
fr = FooReceiver.create()
fl = FooListenerBfImpl() # python implementation of listener
fl_cpp = fr.get_foo_listener_bf() # cpp implementation of listener
# both direct and indirect test for python impl of FooListenerBf
fr_set_get(fr, fl, "Hello world!")
# both direct and indirect test for cpp impl of FooListenerBf
fr_set_get(fr, fl_cpp, "Goodbye world!")
fr_set_get(fr, fl_cpp, "Goodbye world!")
# send python implementation back and forth and see that it can still be used, and that no wrapper was added
fl_1 = fr.send_return(fl)
fl_2 = fr.send_return(fl_1)
fr_set_get(fr, fl_2, "Hello")
assert fl == fl_1 and fl_1 == fl_2, "test_interface_back_forth failed"
    # send cpp implementation back and forth and see that it can still be used, and handles hold the same implementation
fl_cpp_1 = fr.send_return(fl_cpp)
fl_cpp_2 = fr.send_return(fl_cpp_1)
fr_set_get(fr, fl_cpp_2, "Goodbye")
assert lib.equal_handles_cw__foo_listener_bf(fl_cpp._cpp_impl, fl_cpp_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_cpp_1._cpp_impl, fl_cpp_2._cpp_impl)
fl = fl_1 = fl_2 = fl_cpp = fl_cpp_1 = fl_cpp_2 = None
gc.collect()
fr = None
gc.collect()
assert 0 == len(FooListenerBfHelper.c_data_set)
def fr_fl_set_get(fr, fl_in_fl, b):
fr.set_listener_bf_in_listener_bf(fl_in_fl)
fr.set_binary_in_listener_bf_in_listener_bf(b)
assert b == fr.get_binary_in_listener_bf_in_listener_bf(), "test_interface_back_forth failed"
# back and forth via callbacks cpp to python
def test_interface_callback_back_forth():
fr = FooReceiver.create()
fl = FooListenerBfImpl()
fr.add_listener_bf(fl)
fl_in_fl = FooListenerBfImpl()
b = b'Some Binary 11'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in python, listener 2 in python
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert fl_in_fl == fl_in_fl_1 and fl_in_fl_1 == fl_in_fl_2, "test_interface_back_forth failed"
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in python, listener 2 in python after back&forth
fl_in_fl = fr.get_foo_listener_bf()
b = b'Some Other Binary 12'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in python, listener 2 in cpp
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert lib.equal_handles_cw__foo_listener_bf(fl_in_fl._cpp_impl, fl_in_fl_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_in_fl_1._cpp_impl, fl_in_fl_2._cpp_impl)
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in python, listener 2 in cpp after back&forth
fl = fr.get_foo_listener_bf()
fr.add_listener_bf(fl)
fl_in_fl = FooListenerBfImpl()
b = b'Some Binary 21'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in cpp, listener 2 in python
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert fl_in_fl == fl_in_fl_1 and fl_in_fl_1 == fl_in_fl_2, "test_interface_back_forth failed"
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in cpp, listener 2 in python after back&forth
fl_in_fl = fr.get_foo_listener_bf()
b = b'Some Other Binary 22'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in cpp, listener 2 in cpp
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert lib.equal_handles_cw__foo_listener_bf(fl_in_fl._cpp_impl, fl_in_fl_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_in_fl_1._cpp_impl, fl_in_fl_2._cpp_impl)
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in cpp, listener 2 in cpp after back&forth
fl = fl_in_fl = fl_in_fl_1 = fl_in_fl_2 = None
gc.collect()
fr = None
gc.collect()
assert 0 == len(FooListenerBfHelper.c_data_set)
| trafi/djinni | test-suite/handwritten-src/python/test_proxying.py | Python | apache-2.0 | 5,511 |
"""
Test plugin views.
"""
import os
import glob
from django.test import TestCase
from django.utils import simplejson as json
from django.conf import settings
from django.test.client import Client
from django.contrib.auth.models import User
from ocradmin.core.tests import testutils
from nodetree import script, node
import numpy
from mock import patch
VALID_SCRIPTDIR = "nodelib/scripts/valid"
INVALID_SCRIPTDIR = "nodelib/scripts/invalid"
from ocradmin.nodelib import cache
class ViewsTest(TestCase):
fixtures = [
"presets/fixtures/test_fixtures.json",
"ocrmodels/fixtures/test_fixtures.json"]
def setUp(self):
"""
Setup OCR tests. Creates a test user.
"""
testutils.symlink_model_fixtures()
self.scripts = {}
for fname in os.listdir(VALID_SCRIPTDIR):
if fname.endswith("json"):
with open(os.path.join(VALID_SCRIPTDIR, fname), "r") as f:
self.scripts[fname] = json.load(f)
for fname in os.listdir(INVALID_SCRIPTDIR):
if fname.endswith("json"):
with open(os.path.join(INVALID_SCRIPTDIR, fname), "r") as f:
self.scripts[fname] = json.load(f)
self.testuser = User.objects.create_user("test_user", "test@testing.com", "testpass")
self.client = Client()
self.client.login(username="test_user", password="testpass")
def tearDown(self):
"""
Revert any changes.
"""
#cache.PersistantFileCacher = self.old_cacher
def test_binarise_script(self):
"""
Test a script that should return image data, i.e.
a path to a DZI file.
"""
self._run_script("binarize.json", "SUCCESS", "image", ["output"])
def test_segment_script(self):
"""
Test a script that should return line image geometry.
"""
self._run_script("segment.json", "SUCCESS", "pseg", ["input", "lines"])
def test_ocropus_script(self):
"""
Test a script that should return transcript data.
"""
self._run_script("ocropus.json", "SUCCESS", "hocr", ["data"])
def test_tesseract_native_seg_script(self):
"""
Test a script that should return transcript data.
"""
self._run_script("tesseract_native_seg.json", "SUCCESS", "hocr", ["data"])
def test_tesseract_script(self):
"""
Test a script that should return transcript data.
"""
self._run_script("tesseract.json", "SUCCESS", "hocr", ["data"])
def test_cuneiform_script(self):
"""
Test a script that should return transcript data.
"""
self._run_script("cuneiform.json", "SUCCESS", "hocr", ["data"])
def test_evaluation_script(self):
"""
Test a script that should return transcript data.
"""
self._run_script("evaluation.json", "SUCCESS", "text", ["data"])
def test_invalid_path(self):
"""
Test a script that should return a node error.
"""
script = self.scripts.get("invalid_filein_path.json")
self.assertIsNotNone(script)
r = self.client.post("/presets/run/", dict(
script=json.dumps(script)))
content = json.loads(r.content)
for field in ["status", "errors"]:
self.assertIn(field, content, "No '%s' field in content" % field)
expectedstatus = "VALIDATION"
self.assertEqual(expectedstatus,
content["status"], "Status field is not '%s'" % expectedstatus)
self.assertIn("filein1", content["errors"], "'filein1' not in errors field" )
@patch(settings.NODETREE_PERSISTANT_CACHER, cache.TestMockCacher)
def _run_script(self, scriptname, expectedstatus, expectedtype, expecteddatafields):
"""
Run a script and assert the results resemble what we expect.
"""
script = self.scripts.get(scriptname)
self.assertIsNotNone(script)
r = self.client.post("/presets/run/", dict(script=json.dumps(script)))
content = json.loads(r.content)
for field in ["status", "task_id", "results"]:
self.assertIn(field, content, "No '%s' field in content" % field)
self.assertEqual(expectedstatus,
content["status"], "Status field is not '%s'" % expectedstatus)
for field in ["type"]:
self.assertIn(field, content["results"], "No '%s' field in content results" % field)
self.assertEqual(expectedtype,
content["results"]["type"], "Type field is not '%s'" % expectedtype)
for field in expecteddatafields:
self.assertIn(field, content["results"], "No '%s' field in content results" % field)
return content
| vitorio/ocropodium | ocradmin/presets/tests/test_scripts.py | Python | apache-2.0 | 4,800 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_utils import netutils
from django.core.exceptions import ValidationError
from django.core import validators
from django.utils.translation import gettext_lazy as _
from horizon import conf
def validate_port_range(port):
if not netutils.is_valid_port(port):
raise ValidationError(_("Not a valid port number"))
def validate_icmp_type_range(icmp_type):
if not netutils.is_valid_icmp_type(icmp_type):
if icmp_type == -1:
return
raise ValidationError(_("Not a valid ICMP type"))
def validate_icmp_code_range(icmp_code):
if not netutils.is_valid_icmp_code(icmp_code):
if icmp_code == -1:
return
raise ValidationError(_("Not a valid ICMP code"))
def validate_ip_protocol(ip_proto):
if ip_proto < -1 or ip_proto > 255:
raise ValidationError(_("Not a valid IP protocol number"))
def password_validator():
return conf.HORIZON_CONFIG["password_validator"]["regex"]
def password_validator_msg():
return conf.HORIZON_CONFIG["password_validator"]["help_text"]
def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
validate_port_range(port)
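# Illustrative sketch, not part of the original module: the sample inputs below
# are assumptions showing what the single-colon range validator accepts.
def _example_port_range_usage():
    validate_port_or_colon_separated_port_range("8080")        # single port passes
    validate_port_or_colon_separated_port_range("8000:8080")   # one-colon range passes
    try:
        validate_port_or_colon_separated_port_range("1:2:3")   # more than one colon is rejected
    except ValidationError:
        pass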
def validate_metadata(value):
error_msg = _('Invalid metadata entry. Use comma-separated'
' key=value pairs')
if value:
specs = value.split(",")
for spec in specs:
keyval = spec.split("=")
# ensure both sides of "=" exist, but allow blank value
if not len(keyval) == 2 or not keyval[0]:
raise ValidationError(error_msg)
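# Illustrative sketch, not part of the original module: the sample strings below
# are assumptions showing the comma-separated key=value format expected above.
def _example_metadata_usage():
    validate_metadata("key1=value1,key2=")   # blank values are allowed
    try:
        validate_metadata("key1")            # an entry without '=' is rejected
    except ValidationError:
        pass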
# Same as POSIX [:print:]. Accordingly, diacritics are disallowed.
PRINT_REGEX = re.compile(r'^[\x20-\x7E]*$')
validate_printable_ascii = validators.RegexValidator(
PRINT_REGEX,
_("The string may only contain ASCII printable characters."),
"invalid_characters")
| openstack/horizon | horizon/utils/validators.py | Python | apache-2.0 | 2,698 |
# -*- coding: utf-8 -*-
#
# Copyright 2021 Elliot Jordan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from distutils.version import LooseVersion
# pylint: disable=unused-import
from autopkglib import ProcessorError, URLGetter
__all__ = ["PerforceURLProvider"]
# Base URL for the Perforce downloads host.
BASE_URL = "https://cdist2.perforce.com"
# Information about the directory listing URL, subdirectory hierarchy,
# and desired filename of each product.
PRODUCT_DETAILS = {
"P4V": {
"start": "perforce",
"path": [
r"r[\d\.]+/", # Version number prefixed with "r"
r"bin\.macosx[\w]+/", # For example: "bin.macosx1015x86_64/"
r"P4V\.dmg", # Literal filename "P4V.dmg"
],
},
"HelixALM": {
"start": "alm/helixalm",
"path": [
r"r[\d\.]+/", # Version number prefixed with "r"
r"ttmacclientinstall\.zip", # Literal filename "ttmacclientinstall.zip"
],
},
}
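# Illustrative sketch, not part of the original recipe: each "path" entry above is
# a list of regexes matched against successive directory listings starting at
# BASE_URL + "start"; for P4V a resolved download URL would look like
#   https://cdist2.perforce.com/perforce/r24.1/bin.macosx12x86_64/P4V.dmg
# where "r24.1" and "bin.macosx12x86_64" are hypothetical directory names.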
class PerforceURLProvider(URLGetter):
"""Provides a download URL for Perforce products. Currently only supports
recent versions of P4V and HelixALM.
"""
input_variables = {
"product": {
"required": False,
"default": "P4V",
"description": "Perforce product to provide a download URL for. "
"'P4V' is the only supported value at this time.",
}
}
output_variables = {
"url": {"description": "URL to the latest P4V download."},
"version": {"description": "Latest version of the product."},
}
description = __doc__
def recurse_subdirs(self, path, url):
"""Recursively process web directory listings looking for matching
paths, and when we have a match, return the URL."""
# Add trailing slash to prevent constant 301 redirects.
if not url.endswith("/"):
url = url + "/"
self.output("Searching %s for %s" % (url, path[0]))
# Get content of directory listing and parse for links matching path regex.
html = self.download(url, text=True)
link_pattern = re.compile('<a href="(%s)">' % path[0])
links = re.findall(link_pattern, html)
if len(links) == 0:
# No match, toss back to parent caller and continue recursing.
return None
if len(links) == 1 and len(path) == 1:
# We found a match, return the URL.
return url + links[0]
# Sort "r"-prefixed versions in reverse LooseVersion order before continuing.
if path[0] == r"r[\d\.]+/":
links = sorted(
links, key=lambda x: LooseVersion(x.lstrip("r")), reverse=True
)
# Recursively search each link in the directory listing.
for link in links:
result = self.recurse_subdirs(path[1:], url + link)
if result:
return result
return None
def main(self):
"""Main process."""
info = PRODUCT_DETAILS[self.env["product"]]
url = self.recurse_subdirs(info["path"], BASE_URL + "/" + info["start"])
if not url:
raise ProcessorError(
"Did not find a matching download URL for %s." % self.env["product"]
)
self.env["url"] = url
self.output("Found url: %s" % self.env["url"])
if __name__ == "__main__":
PROCESSOR = PerforceURLProvider()
PROCESSOR.execute_shell()
| autopkg/homebysix-recipes | Perforce/PerforceURLProvider.py | Python | apache-2.0 | 3,976 |
from pykintone.base_api import BaseAPI
import pykintone.user_api.user_api_result as ur
class Export(BaseAPI):
def __init__(self, account, requests_options=()):
super(Export, self).__init__(account=account, requests_options=requests_options)
def get_users(self, ids=(), codes=(), offset=-1, size=0):
url = "https://{0}.cybozu.com/v1/users.json".format(self.account.domain)
params = {}
if len(ids) > 0:
params["ids"] = ids
if len(codes) > 0:
params["codes"] = codes
if offset > -1:
params["offset"] = offset
if size > 0:
params["size"] = size
resp = self._request("GET", url, params_or_data=params)
r = ur.GetUsersResult(resp)
return r
def get_user_organization_titles(self, code):
url = "https://{0}.cybozu.com/v1/user/organizations.json".format(self.account.domain)
params = {
"code": code
}
resp = self._request("GET", url, params_or_data=params)
r = ur.UserOrganizationTitlesResult(resp)
return r
def get_user_groups(self, code):
url = "https://{0}.cybozu.com/v1/user/groups.json".format(self.account.domain)
params = {
"code": code
}
resp = self._request("GET", url, params_or_data=params)
r = ur.GetUserGroupsResult(resp)
return r
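# Illustrative usage sketch, not part of the original module; the account object
# and the user code below are assumptions:
#
#   api = Export(account)                  # account: a pykintone account object
#   users = api.get_users(codes=["sato"], size=10)
#   titles = api.get_user_organization_titles("sato")
#   groups = api.get_user_groups("sato")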
| icoxfog417/pykintone | pykintone/user_api/export.py | Python | apache-2.0 | 1,411 |
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sciond` --- Reference endhost SCION Daemon
================================================
"""
# Stdlib
import logging
import os
import errno
import threading
import time
from itertools import product
# External
from external.expiring_dict import ExpiringDict
# SCION
from lib.app.sciond import get_default_sciond_path
from lib.defines import (
GEN_CACHE_PATH,
PATH_FLAG_SIBRA,
PATH_REQ_TOUT,
SCIOND_API_SOCKDIR,
)
from lib.errors import SCIONBaseError, SCIONParseError, SCIONServiceLookupError
from lib.log import log_exception
from lib.msg_meta import SockOnlyMetadata
from lib.path_seg_meta import PathSegMeta
from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id
from lib.packet.path import SCIONPath
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.rev_info import (
SignedRevInfoCertFetchError,
RevInfoExpiredError,
RevInfoValidationError,
RevocationInfo,
SignedRevInfo,
SignedRevInfoVerificationError
)
from lib.packet.path_mgmt.seg_req import PathSegmentReply, PathSegmentReq
from lib.packet.scion_addr import ISD_AS
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.path_combinator import build_shortcut_paths, tuples_to_full_paths
from lib.path_db import DBResult, PathSegmentDB
from lib.rev_cache import RevCache
from lib.sciond_api.as_req import SCIONDASInfoReply, SCIONDASInfoReplyEntry, SCIONDASInfoRequest
from lib.sciond_api.revocation import SCIONDRevReply, SCIONDRevReplyStatus
from lib.sciond_api.host_info import HostInfo
from lib.sciond_api.if_req import SCIONDIFInfoReply, SCIONDIFInfoReplyEntry, SCIONDIFInfoRequest
from lib.sciond_api.base import SCIONDMsg
from lib.sciond_api.path_meta import FwdPathMeta, PathInterface
from lib.sciond_api.path_req import (
SCIONDPathRequest,
SCIONDPathReplyError,
SCIONDPathReply,
SCIONDPathReplyEntry,
)
from lib.sciond_api.revocation import SCIONDRevNotification
from lib.sciond_api.segment_req import (
SCIONDSegTypeHopReply,
SCIONDSegTypeHopReplyEntry,
SCIONDSegTypeHopRequest,
)
from lib.sciond_api.service_req import (
SCIONDServiceInfoReply,
SCIONDServiceInfoReplyEntry,
SCIONDServiceInfoRequest,
)
from lib.sibra.ext.resv import ResvBlockSteady
from lib.socket import ReliableSocket
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PathSegmentType as PST,
PayloadClass,
LinkType,
SCIONDMsgType as SMT,
ServiceType,
TypeBase,
)
from lib.util import SCIONTime
from sciond.req import RequestState
from scion_elem.scion_elem import SCIONElement
_FLUSH_FLAG = "FLUSH"
class SCIONDaemon(SCIONElement):
"""
The SCION Daemon used for retrieving and combining paths.
"""
MAX_REQS = 1024
# Time a path segment is cached at a host (in seconds).
SEGMENT_TTL = 300
# Empty Path TTL
EMPTY_PATH_TTL = SEGMENT_TTL
def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
"""
Initialize an instance of the class SCIONDaemon.
"""
super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, public=(addr, port))
up_labels = {**self._labels, "type": "up"} if self._labels else None
down_labels = {**self._labels, "type": "down"} if self._labels else None
core_labels = {**self._labels, "type": "core"} if self._labels else None
self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
self.rev_cache = RevCache()
# Keep track of requested paths.
self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
self.req_path_lock = threading.Lock()
self._api_sock = None
self.daemon_thread = None
os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
self.api_addr = (api_addr or get_default_sciond_path())
if delete_sock:
try:
os.remove(self.api_addr)
except OSError as e:
if e.errno != errno.ENOENT:
logging.error("Could not delete socket %s: %s" % (self.api_addr, e))
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PATH: {
PMT.REPLY: self.handle_path_reply,
PMT.REVOCATION: self.handle_revocation,
},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
}
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH:
{SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
}
if run_local_api:
self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
self._socks.add(self._api_sock, self.handle_accept)
@classmethod
def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
"""
Initializes and starts a SCIOND instance.
"""
inst = cls(conf_dir, addr, api_addr, run_local_api, port)
name = "SCIONDaemon.run %s" % inst.addr.isd_as
inst.daemon_thread = threading.Thread(
target=thread_safety_net, args=(inst.run,), name=name, daemon=True)
inst.daemon_thread.start()
logging.debug("sciond started with api_addr = %s", inst.api_addr)
def _get_msg_meta(self, packet, addr, sock):
if sock != self._udp_sock:
return packet, SockOnlyMetadata.from_values(sock) # API socket
else:
return super()._get_msg_meta(packet, addr, sock)
def handle_msg_meta(self, msg, meta):
"""
Main routine to handle incoming SCION messages.
"""
if isinstance(meta, SockOnlyMetadata): # From SCIOND API
try:
sciond_msg = SCIONDMsg.from_raw(msg)
except SCIONParseError as err:
logging.error(str(err))
return
self.api_handle_request(sciond_msg, meta)
return
super().handle_msg_meta(msg, meta)
def handle_path_reply(self, cpld, meta):
"""
Handle path reply from local path server.
"""
pmgt = cpld.union
path_reply = pmgt.union
assert isinstance(path_reply, PathSegmentReply), type(path_reply)
recs = path_reply.recs()
for srev_info in recs.iter_srev_infos():
self.check_revocation(srev_info, lambda x: self.continue_revocation_processing(
srev_info) if not x else False, meta)
req = path_reply.req()
key = req.dst_ia(), req.flags()
with self.req_path_lock:
r = self.requested_paths.get(key)
if r:
r.notify_reply(path_reply)
else:
logging.warning("No outstanding request found for %s", key)
for type_, pcb in recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
meta, type_, params=(r,))
self._process_path_seg(seg_meta, cpld.req_id)
def continue_revocation_processing(self, srev_info):
self.rev_cache.add(srev_info)
self.remove_revoked_segments(srev_info.rev_info())
def continue_seg_processing(self, seg_meta):
"""
        For every path segment (that can be verified) received from the path
        server, this function is called to continue processing the segment.
The segment is added to pathdb and pending requests are checked.
"""
pcb = seg_meta.seg
type_ = seg_meta.type
# Check that segment does not contain a revoked interface.
if not self.check_revoked_interface(pcb, self.rev_cache):
return
map_ = {
PST.UP: self._handle_up_seg,
PST.DOWN: self._handle_down_seg,
PST.CORE: self._handle_core_seg,
}
map_[type_](pcb)
r = seg_meta.params[0]
if r:
r.verified_segment()
def _handle_up_seg(self, pcb):
if self.addr.isd_as != pcb.last_ia():
return None
if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Up segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def _handle_down_seg(self, pcb):
last_ia = pcb.last_ia()
if self.addr.isd_as == last_ia:
return None
if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Down segment added: %s", pcb.short_desc())
return last_ia
return None
def _handle_core_seg(self, pcb):
if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Core segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def api_handle_request(self, msg, meta):
"""
Handle local API's requests.
"""
mtype = msg.type()
if mtype == SMT.PATH_REQUEST:
threading.Thread(
target=thread_safety_net,
args=(self._api_handle_path_request, msg, meta),
daemon=True).start()
elif mtype == SMT.REVOCATION:
self._api_handle_rev_notification(msg, meta)
elif mtype == SMT.AS_REQUEST:
self._api_handle_as_request(msg, meta)
elif mtype == SMT.IF_REQUEST:
self._api_handle_if_request(msg, meta)
elif mtype == SMT.SERVICE_REQUEST:
self._api_handle_service_request(msg, meta)
elif mtype == SMT.SEGTYPEHOP_REQUEST:
self._api_handle_seg_type_request(msg, meta)
else:
logging.warning(
"API: type %s not supported.", TypeBase.to_str(mtype))
def _api_handle_path_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDPathRequest), type(request)
req_id = pld.id
dst_ia = request.dst_ia()
src_ia = request.src_ia()
if not src_ia:
src_ia = self.addr.isd_as
thread = threading.current_thread()
thread.name = "SCIONDaemon API id:%s %s -> %s" % (
thread.ident, src_ia, dst_ia)
paths, error = self.get_paths(dst_ia, flush=request.p.flags.refresh)
if request.p.maxPaths:
paths = paths[:request.p.maxPaths]
reply_entries = []
for path_meta in paths:
fwd_if = path_meta.fwd_path().get_fwd_if()
# Set dummy host addr if path is empty.
haddr, port = None, None
if fwd_if:
br = self.ifid2br[fwd_if]
haddr, port = br.int_addrs.public
addrs = [haddr] if haddr else []
first_hop = HostInfo.from_values(addrs, port)
reply_entry = SCIONDPathReplyEntry.from_values(
path_meta, first_hop)
reply_entries.append(reply_entry)
logging.debug("Replying to api request for %s with %d paths:\n%s",
dst_ia, len(paths), "\n".join([p.short_desc() for p in paths]))
self._send_path_reply(req_id, reply_entries, error, meta)
def _send_path_reply(self, req_id, reply_entries, error, meta):
path_reply = SCIONDMsg(SCIONDPathReply.from_values(reply_entries, error), req_id)
self.send_meta(path_reply.pack(), meta)
def _api_handle_as_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDASInfoRequest), type(request)
req_ia = request.isd_as()
if not req_ia or req_ia.is_zero() or req_ia == self.addr.isd_as:
# Request is for the local AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(
self.addr.isd_as, self.is_core_as(), self.topology.mtu)
else:
# Request is for a remote AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(req_ia, self.is_core_as(req_ia))
as_reply = SCIONDMsg(SCIONDASInfoReply.from_values([reply_entry]), pld.id)
self.send_meta(as_reply.pack(), meta)
def _api_handle_if_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDIFInfoRequest), type(request)
all_brs = request.all_brs()
if_list = []
if not all_brs:
if_list = list(request.iter_ids())
if_entries = []
for if_id, br in self.ifid2br.items():
if all_brs or if_id in if_list:
br_addr, br_port = br.int_addrs.public
info = HostInfo.from_values([br_addr], br_port)
reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info)
if_entries.append(reply_entry)
if_reply = SCIONDMsg(SCIONDIFInfoReply.from_values(if_entries), pld.id)
self.send_meta(if_reply.pack(), meta)
def _api_handle_service_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDServiceInfoRequest), type(request)
all_svcs = request.all_services()
svc_list = []
if not all_svcs:
svc_list = list(request.iter_service_types())
svc_entries = []
for svc_type in ServiceType.all():
if all_svcs or svc_type in svc_list:
lookup_res = self.dns_query_topo(svc_type)
host_infos = []
for addr, port in lookup_res:
host_infos.append(HostInfo.from_values([addr], port))
reply_entry = SCIONDServiceInfoReplyEntry.from_values(
svc_type, host_infos)
svc_entries.append(reply_entry)
svc_reply = SCIONDMsg(SCIONDServiceInfoReply.from_values(svc_entries), pld.id)
self.send_meta(svc_reply.pack(), meta)
def _api_handle_rev_notification(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDRevNotification), type(request)
self.handle_revocation(CtrlPayload(PathMgmt(request.srev_info())), meta, pld)
def _api_handle_seg_type_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDSegTypeHopRequest), type(request)
segmentType = request.p.type
db = []
if segmentType == PST.CORE:
db = self.core_segments
elif segmentType == PST.UP:
db = self.up_segments
elif segmentType == PST.DOWN:
db = self.down_segments
else:
logging.error("Requesting segment type %s unrecognized.", segmentType)
seg_entries = []
for segment in db(full=True):
if_list = []
for asm in segment.iter_asms():
isd_as = asm.isd_as()
hof = asm.pcbm(0).hof()
egress = hof.egress_if
ingress = hof.ingress_if
if ingress:
if_list.append(PathInterface.from_values(isd_as, ingress))
if egress:
if_list.append(PathInterface.from_values(isd_as, egress))
reply_entry = SCIONDSegTypeHopReplyEntry.from_values(
if_list, segment.get_timestamp(), segment.get_expiration_time())
seg_entries.append(reply_entry)
seg_reply = SCIONDMsg(
SCIONDSegTypeHopReply.from_values(seg_entries), pld.id)
self.send_meta(seg_reply.pack(), meta)
def handle_scmp_revocation(self, pld, meta):
srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
self.handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)
def handle_revocation(self, cpld, meta, pld=None):
pmgt = cpld.union
srev_info = pmgt.union
rev_info = srev_info.rev_info()
assert isinstance(rev_info, RevocationInfo), type(rev_info)
logging.debug("Received revocation: %s from %s", srev_info.short_desc(), meta)
self.check_revocation(srev_info,
lambda e: self.process_revocation(e, srev_info, meta, pld), meta)
def process_revocation(self, error, srev_info, meta, pld):
rev_info = srev_info.rev_info()
status = None
if error is None:
status = SCIONDRevReplyStatus.VALID
self.rev_cache.add(srev_info)
self.remove_revoked_segments(rev_info)
else:
if type(error) == RevInfoValidationError:
logging.error("Failed to validate RevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.INVALID
elif type(error) == RevInfoExpiredError:
logging.info("Ignoring expired RevInfo %s from %s", srev_info.short_desc(), meta)
status = SCIONDRevReplyStatus.STALE
elif type(error) == SignedRevInfoCertFetchError:
logging.error("Failed to fetch certificate for SignedRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
elif type(error) == SignedRevInfoVerificationError:
logging.error("Failed to verify SRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.SIGFAIL
elif type(error) == SCIONBaseError:
logging.error("Revocation check failed for %s from %s:\n%s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
if pld:
rev_reply = SCIONDMsg(SCIONDRevReply.from_values(status), pld.id)
self.send_meta(rev_reply.pack(), meta)
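# Illustrative sketch (not part of the original sciond.py): the exact type
# checks above map an error class to a SCIONDRevReplyStatus. The same mapping
# can be written as a lookup table; only names already used in this module
# appear below, while the table name and the UNKNOWN fallback for unrecognized
# errors are assumptions (the original leaves status unset in that case).
#
#     _REV_ERROR_STATUS = {
#         RevInfoValidationError: SCIONDRevReplyStatus.INVALID,
#         RevInfoExpiredError: SCIONDRevReplyStatus.STALE,
#         SignedRevInfoCertFetchError: SCIONDRevReplyStatus.UNKNOWN,
#         SignedRevInfoVerificationError: SCIONDRevReplyStatus.SIGFAIL,
#         SCIONBaseError: SCIONDRevReplyStatus.UNKNOWN,
#     }
#     status = _REV_ERROR_STATUS.get(type(error), SCIONDRevReplyStatus.UNKNOWN)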
def remove_revoked_segments(self, rev_info):
# Go through all segment databases and remove affected segments.
removed_up = removed_core = removed_down = 0
if rev_info.p.linkType == LinkType.CORE:
removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
elif rev_info.p.linkType in [LinkType.PARENT, LinkType.CHILD]:
removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
elif rev_info.p.linkType != LinkType.PEER:
logging.error("Bad RevInfo link type: %s", rev_info.p.linkType)
logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." %
(removed_up, removed_core, removed_down))
def _remove_revoked_pcbs(self, db, rev_info):
"""
Removes all segments from 'db' that have a revoked upstream PCBMarking.
:param db: The PathSegmentDB.
:type db: :class:`lib.path_db.PathSegmentDB`
:param rev_info: The revocation info
:type rev_info: RevocationInfo
:returns: The number of deletions.
:rtype: int
"""
to_remove = []
for segment in db(full=True):
for asm in segment.iter_asms():
if self._check_revocation_for_asm(rev_info, asm, verify_all=False):
logging.debug("Removing segment: %s" % segment.short_desc())
to_remove.append(segment.get_hops_hash())
return db.delete_all(to_remove)
def _flush_path_dbs(self):
self.core_segments.flush()
self.down_segments.flush()
self.up_segments.flush()
def get_paths(self, dst_ia, flags=(), flush=False):
"""Return a list of paths."""
logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s",
dst_ia, flags, flush)
if flush:
logging.info("Flushing PathDBs.")
self._flush_path_dbs()
if self.addr.isd_as == dst_ia or (
self.addr.isd_as.any_as() == dst_ia and
self.topology.is_core_as):
# Either the destination is the local AS, or the destination is any
# core AS in this ISD, and the local AS is in the core
empty = SCIONPath()
exp_time = int(time.time()) + self.EMPTY_PATH_TTL
empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu, exp_time)
return [empty_meta], SCIONDPathReplyError.OK
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
key = dst_ia, flags
with self.req_path_lock:
r = self.requested_paths.get(key)
if r is None:
# No previous outstanding request
req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags)
r = RequestState(req.copy())
self.requested_paths[key] = r
self._fetch_segments(req)
# Wait until event gets set.
timeout = not r.e.wait(PATH_REQ_TOUT)
with self.req_path_lock:
if timeout:
r.done()
if key in self.requested_paths:
del self.requested_paths[key]
if timeout:
logging.error("Query timed out for %s", dst_ia)
return [], SCIONDPathReplyError.PS_TIMEOUT
# Check if we can fulfill the path request.
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
logging.error("No paths found for %s", dst_ia)
return [], SCIONDPathReplyError.NO_PATHS
return paths, SCIONDPathReplyError.OK
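# Illustrative call sketch (not part of the original file): this is how the
# API path-request handler earlier in this class consumes get_paths(); the
# variable names below are hypothetical.
#
#     paths, error = self.get_paths(dst_ia, flags=(), flush=False)
#     if error != SCIONDPathReplyError.OK:
#         logging.warning("Path lookup for %s failed with %s", dst_ia, error)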
def path_resolution(self, dst_ia, flags=()):
# A destination AS of 0 means any core AS in the specified ISD.
dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
sibra = PATH_FLAG_SIBRA in flags
if self.topology.is_core_as:
if dst_is_core:
ret = self._resolve_core_core(dst_ia, sibra=sibra)
else:
ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
elif dst_is_core:
ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
elif sibra:
ret = self._resolve_not_core_not_core_sibra(dst_ia)
else:
ret = self._resolve_not_core_not_core_scion(dst_ia)
if not sibra:
return ret
# FIXME(kormat): Strip off PCBs, and just return sibra reservation
# blocks
return self._sibra_strip_pcbs(self._strip_nones(ret))
def _resolve_core_core(self, dst_ia, sibra=False):
"""Resolve path from core to core."""
res = set()
for cseg in self.core_segments(last_ia=self.addr.isd_as, sibra=sibra,
**dst_ia.params()):
res.add((None, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_core_not_core(self, dst_ia, sibra=False):
"""Resolve path from core to non-core."""
res = set()
# First check whether there is a direct path.
for dseg in self.down_segments(
first_ia=self.addr.isd_as, last_ia=dst_ia, sibra=sibra):
res.add((None, None, dseg))
# Check core-down combination.
for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
dseg_ia = dseg.first_ia()
if self.addr.isd_as == dseg_ia:
# Direct down-segments were already added above; skip them here.
continue
for cseg in self.core_segments(
first_ia=dseg_ia, last_ia=self.addr.isd_as, sibra=sibra):
res.add((None, cseg, dseg))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_core(self, dst_ia, sibra=False):
"""Resolve path from non-core to core."""
res = set()
params = dst_ia.params()
params["sibra"] = sibra
if dst_ia[0] == self.addr.isd_as[0]:
# Dst in local ISD. First check whether DST is a (super)-parent.
for useg in self.up_segments(**params):
res.add((useg, None, None))
# Check whether dst is known core AS.
for cseg in self.core_segments(**params):
# Check whether we have an up-segment that connects to this core segment.
for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
res.add((useg, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_not_core_scion(self, dst_ia):
"""Resolve SCION path from non-core to non-core."""
up_segs = self.up_segments()
down_segs = self.down_segments(last_ia=dst_ia)
core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
full_paths = build_shortcut_paths(
up_segs, down_segs, self.rev_cache)
tuples = []
for up_seg in up_segs:
for down_seg in down_segs:
tuples.append((up_seg, None, down_seg))
for core_seg in core_segs:
tuples.append((up_seg, core_seg, down_seg))
full_paths.extend(tuples_to_full_paths(tuples))
return full_paths
def _resolve_not_core_not_core_sibra(self, dst_ia):
"""Resolve SIBRA path from non-core to non-core."""
res = set()
up_segs = set(self.up_segments(sibra=True))
down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
for up_seg, down_seg in product(up_segs, down_segs):
src_core_ia = up_seg.first_ia()
dst_core_ia = down_seg.first_ia()
if src_core_ia == dst_core_ia:
res.add((up_seg, down_seg))
continue
for core_seg in self.core_segments(first_ia=dst_core_ia,
last_ia=src_core_ia, sibra=True):
res.add((up_seg, core_seg, down_seg))
return res
def _strip_nones(self, set_):
"""Strip None entries from a set of tuples"""
res = []
for tup in set_:
res.append(tuple(filter(None, tup)))
return res
def _sibra_strip_pcbs(self, paths):
ret = []
for pcbs in paths:
resvs = []
for pcb in pcbs:
resvs.append(self._sibra_strip_pcb(pcb))
ret.append(resvs)
return ret
def _sibra_strip_pcb(self, pcb):
assert pcb.is_sibra()
pcb_ext = pcb.sibra_ext
resv_info = pcb_ext.info
resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
asms = pcb.iter_asms()
if pcb_ext.p.up:
asms = reversed(list(asms))
iflist = []
for sof, asm in zip(pcb_ext.iter_sofs(), asms):
resv.sofs.append(sof)
iflist.extend(self._sibra_add_ifs(
asm.isd_as(), sof, resv_info.fwd_dir))
assert resv.num_hops == len(resv.sofs)
return pcb_ext.p.id, resv, iflist
def _sibra_add_ifs(self, isd_as, sof, fwd):
def _add(ifid):
if ifid:
ret.append((isd_as, ifid))
ret = []
if fwd:
_add(sof.ingress)
_add(sof.egress)
else:
_add(sof.egress)
_add(sof.ingress)
return ret
def _wait_for_events(self, events, deadline):
"""
Wait on a set of events, but only until the specified deadline. Returns
the number of events that happened while waiting.
"""
count = 0
for e in events:
if e.wait(max(0, deadline - SCIONTime.get_time())):
count += 1
return count
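# Illustrative usage sketch (not part of the original file): give a batch of
# outstanding requests up to five seconds to complete. The `requests` list is
# hypothetical; each entry is assumed to carry a threading.Event in `.e`, as
# RequestState does in get_paths() above.
#
#     deadline = SCIONTime.get_time() + 5.0
#     done = self._wait_for_events([r.e for r in requests], deadline)
#     logging.debug("%d/%d requests finished before the deadline",
#                   done, len(requests))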
def _fetch_segments(self, req):
"""
Send the path segment request 'req' to the local path service.
"""
try:
addr, port = self.dns_query_topo(ServiceType.PS)[0]
except SCIONServiceLookupError:
log_exception("Error querying path service:")
return
req_id = mk_ctrl_req_id()
logging.debug("Sending path request (%s) to [%s]:%s [id: %016x]",
req.short_desc(), addr, port, req_id)
meta = self._build_meta(host=addr, port=port)
self.send_meta(CtrlPayload(PathMgmt(req), req_id=req_id), meta)
def _calc_core_segs(self, dst_isd, up_segs, down_segs):
"""
Calculate all possible core segments joining the provided up and down
segments. Returns a list of all known core segments for the resulting
AS pairs.
"""
src_core_ases = set()
dst_core_ases = set()
for seg in up_segs:
src_core_ases.add(seg.first_ia()[1])
for seg in down_segs:
dst_core_ases.add(seg.first_ia()[1])
# Generate all possible AS pairs
as_pairs = list(product(src_core_ases, dst_core_ases))
return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs)
def _find_core_segs(self, src_isd, dst_isd, as_pairs):
"""
Given a set of AS pairs across two ISDs, return the core segments
connecting those pairs.
"""
core_segs = []
for src_core_as, dst_core_as in as_pairs:
src_ia = ISD_AS.from_values(src_isd, src_core_as)
dst_ia = ISD_AS.from_values(dst_isd, dst_core_as)
if src_ia == dst_ia:
continue
seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia)
if seg:
core_segs.extend(seg)
return core_segs
def run(self):
"""
Run an instance of the SCION daemon.
"""
threading.Thread(
target=thread_safety_net, args=(self._check_trc_cert_reqs,),
name="Elem.check_trc_cert_reqs", daemon=True).start()
super().run()
| klausman/scion | python/sciond/sciond.py | Python | apache-2.0 | 30,793 |
import asyncio
import enum
import io
import json
import mimetypes
import os
import warnings
from abc import ABC, abstractmethod
from itertools import chain
from typing import (
IO,
TYPE_CHECKING,
Any,
ByteString,
Dict,
Iterable,
Optional,
TextIO,
Tuple,
Type,
Union,
)
from multidict import CIMultiDict
from typing_extensions import Final
from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import (
_SENTINEL,
content_disposition_header,
guess_filename,
parse_mimetype,
sentinel,
)
from .streams import StreamReader
from .typedefs import JSONEncoder, _CIMultiDict
__all__ = (
"PAYLOAD_REGISTRY",
"get_payload",
"payload_type",
"Payload",
"BytesPayload",
"StringPayload",
"IOBasePayload",
"BytesIOPayload",
"BufferedReaderPayload",
"TextIOPayload",
"StringIOPayload",
"JsonPayload",
"AsyncIterablePayload",
)
TOO_LARGE_BYTES_BODY: Final[int] = 2 ** 20 # 1 MB
if TYPE_CHECKING: # pragma: no cover
from typing import List
class LookupError(Exception):
pass
class Order(str, enum.Enum):
normal = "normal"
try_first = "try_first"
try_last = "try_last"
def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(
factory: Type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
PAYLOAD_REGISTRY.register(factory, type, order=order)
class payload_type:
def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
self.type = type
self.order = order
def __call__(self, factory: Type["Payload"]) -> Type["Payload"]:
register_payload(factory, self.type, order=self.order)
return factory
PayloadType = Type["Payload"]
_PayloadRegistryItem = Tuple[PayloadType, Any]
class PayloadRegistry:
"""Payload registry.
note: we need zope.interface for more efficient adapter search
"""
def __init__(self) -> None:
self._first = [] # type: List[_PayloadRegistryItem]
self._normal = [] # type: List[_PayloadRegistryItem]
self._last = [] # type: List[_PayloadRegistryItem]
def get(
self,
data: Any,
*args: Any,
_CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain,
**kwargs: Any,
) -> "Payload":
if isinstance(data, Payload):
return data
for factory, type in _CHAIN(self._first, self._normal, self._last):
if isinstance(data, type):
return factory(data, *args, **kwargs)
raise LookupError()
def register(
self, factory: PayloadType, type: Any, *, order: Order = Order.normal
) -> None:
if order is Order.try_first:
self._first.append((factory, type))
elif order is Order.normal:
self._normal.append((factory, type))
elif order is Order.try_last:
self._last.append((factory, type))
else:
raise ValueError(f"Unsupported order {order!r}")
class Payload(ABC):
_default_content_type = "application/octet-stream" # type: str
_size = None # type: Optional[int]
def __init__(
self,
value: Any,
headers: Optional[
Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]]
] = None,
content_type: Union[None, str, _SENTINEL] = sentinel,
filename: Optional[str] = None,
encoding: Optional[str] = None,
**kwargs: Any,
) -> None:
self._encoding = encoding
self._filename = filename
self._headers = CIMultiDict() # type: _CIMultiDict
self._value = value
if content_type is not sentinel and content_type is not None:
assert isinstance(content_type, str)
self._headers[hdrs.CONTENT_TYPE] = content_type
elif self._filename is not None:
content_type = mimetypes.guess_type(self._filename)[0]
if content_type is None:
content_type = self._default_content_type
self._headers[hdrs.CONTENT_TYPE] = content_type
else:
self._headers[hdrs.CONTENT_TYPE] = self._default_content_type
self._headers.update(headers or {})
@property
def size(self) -> Optional[int]:
"""Size of the payload."""
return self._size
@property
def filename(self) -> Optional[str]:
"""Filename of the payload."""
return self._filename
@property
def headers(self) -> _CIMultiDict:
"""Custom item headers"""
return self._headers
@property
def _binary_headers(self) -> bytes:
return (
"".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode(
"utf-8"
)
+ b"\r\n"
)
@property
def encoding(self) -> Optional[str]:
"""Payload encoding"""
return self._encoding
@property
def content_type(self) -> str:
"""Content type"""
return self._headers[hdrs.CONTENT_TYPE]
def set_content_disposition(
self,
disptype: str,
quote_fields: bool = True,
_charset: str = "utf-8",
**params: Any,
) -> None:
"""Sets ``Content-Disposition`` header."""
self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
disptype, quote_fields=quote_fields, _charset=_charset, **params
)
@abstractmethod
async def write(self, writer: AbstractStreamWriter) -> None:
"""Write payload.
writer is an AbstractStreamWriter instance:
"""
class BytesPayload(Payload):
def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, (bytes, bytearray, memoryview)):
raise TypeError(f"value argument must be byte-ish, not {type(value)!r}")
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
if isinstance(value, memoryview):
self._size = value.nbytes
else:
self._size = len(value)
if self._size > TOO_LARGE_BYTES_BODY:
warnings.warn(
"Sending a large body directly with raw bytes might"
" lock the event loop. You should probably pass an "
"io.BytesIO object instead",
ResourceWarning,
source=self,
)
async def write(self, writer: AbstractStreamWriter) -> None:
await writer.write(self._value)
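# A minimal sketch (not part of the original module) illustrating the
# ResourceWarning above: for bodies larger than TOO_LARGE_BYTES_BODY, hand
# aiohttp a file-like object instead of raw bytes so the data is streamed in
# chunks rather than written in one call. The helper name and the 2 MiB size
# are hypothetical.
def _example_large_body_payload() -> "Payload":
    big_body = b"x" * (2 * 1024 * 1024)
    # get_payload() consults PAYLOAD_REGISTRY (populated at the bottom of this
    # module), which maps io.BytesIO to BytesIOPayload.
    return get_payload(io.BytesIO(big_body))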
class StringPayload(BytesPayload):
def __init__(
self,
value: str,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
real_encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
real_encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
real_encoding = encoding
super().__init__(
value.encode(real_encoding),
encoding=real_encoding,
content_type=content_type,
*args,
**kwargs,
)
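# Illustrative sketch (not part of the original module): StringPayload derives
# its encoding from the explicit ``encoding`` argument or from the charset
# parameter of ``content_type``, falling back to UTF-8. The helper name is
# hypothetical.
def _example_string_payload_charsets() -> None:
    p_default = StringPayload("héllo")
    assert p_default.encoding == "utf-8"
    assert p_default.content_type == "text/plain; charset=utf-8"
    # An explicit charset in the content type is used to encode the value.
    p_latin = StringPayload("héllo", content_type="text/plain; charset=latin-1")
    assert p_latin.encoding == "latin-1"
    assert p_latin.size == len("héllo".encode("latin-1"))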
class StringIOPayload(StringPayload):
def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None:
super().__init__(value.read(), *args, **kwargs)
class IOBasePayload(Payload):
_value: IO[Any]
def __init__(
self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any
) -> None:
if "filename" not in kwargs:
kwargs["filename"] = guess_filename(value)
super().__init__(value, *args, **kwargs)
if self._filename is not None and disposition is not None:
if hdrs.CONTENT_DISPOSITION not in self.headers:
self.set_content_disposition(disposition, filename=self._filename)
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
while chunk:
await writer.write(chunk)
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
finally:
await loop.run_in_executor(None, self._value.close)
class TextIOPayload(IOBasePayload):
_value: TextIO
def __init__(
self,
value: TextIO,
*args: Any,
encoding: Optional[str] = None,
content_type: Optional[str] = None,
**kwargs: Any,
) -> None:
if encoding is None:
if content_type is None:
encoding = "utf-8"
content_type = "text/plain; charset=utf-8"
else:
mimetype = parse_mimetype(content_type)
encoding = mimetype.parameters.get("charset", "utf-8")
else:
if content_type is None:
content_type = "text/plain; charset=%s" % encoding
super().__init__(
value,
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
return None
async def write(self, writer: AbstractStreamWriter) -> None:
loop = asyncio.get_event_loop()
try:
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
while chunk:
data = (
chunk.encode(encoding=self._encoding)
if self._encoding
else chunk.encode()
)
await writer.write(data)
chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
finally:
await loop.run_in_executor(None, self._value.close)
class BytesIOPayload(IOBasePayload):
@property
def size(self) -> int:
position = self._value.tell()
end = self._value.seek(0, os.SEEK_END)
self._value.seek(position)
return end - position
class BufferedReaderPayload(IOBasePayload):
@property
def size(self) -> Optional[int]:
try:
return os.fstat(self._value.fileno()).st_size - self._value.tell()
except OSError:
# data.fileno() is not supported, e.g.
# io.BufferedReader(io.BytesIO(b'data'))
return None
class JsonPayload(BytesPayload):
def __init__(
self,
value: Any,
encoding: str = "utf-8",
content_type: str = "application/json",
dumps: JSONEncoder = json.dumps,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(
dumps(value).encode(encoding),
content_type=content_type,
encoding=encoding,
*args,
**kwargs,
)
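# Illustrative sketch (not part of the original module): JsonPayload serializes
# the value eagerly and behaves like a BytesPayload with an application/json
# content type. The helper name is hypothetical.
def _example_json_payload() -> None:
    payload = JsonPayload({"answer": 42})
    assert payload.content_type == "application/json"
    assert payload.size == len(json.dumps({"answer": 42}).encode("utf-8"))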
if TYPE_CHECKING: # pragma: no cover
from typing import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator[bytes]
_AsyncIterable = AsyncIterable[bytes]
else:
from collections.abc import AsyncIterable, AsyncIterator
_AsyncIterator = AsyncIterator
_AsyncIterable = AsyncIterable
class AsyncIterablePayload(Payload):
_iter = None # type: Optional[_AsyncIterator]
def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
if not isinstance(value, AsyncIterable):
raise TypeError(
"value argument must support "
"collections.abc.AsyncIterablebe interface, "
"got {!r}".format(type(value))
)
if "content_type" not in kwargs:
kwargs["content_type"] = "application/octet-stream"
super().__init__(value, *args, **kwargs)
self._iter = value.__aiter__()
async def write(self, writer: AbstractStreamWriter) -> None:
if self._iter:
try:
# The check that self._iter is not None prevents rare cases
# where the same iterable is consumed twice.
while True:
chunk = await self._iter.__anext__()
await writer.write(chunk)
except StopAsyncIteration:
self._iter = None
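# Illustrative sketch (not part of the original module): AsyncIterablePayload
# pulls chunks from an async iterable and forwards them to the writer. The
# collector below is a hypothetical stand-in that implements only the single
# ``write`` coroutine this payload actually calls; real callers pass an
# AbstractStreamWriter.
class _ExampleChunkCollector:
    def __init__(self) -> None:
        self.chunks = []  # type: List[bytes]
    async def write(self, chunk: bytes) -> None:
        self.chunks.append(chunk)
async def _example_async_iterable_payload() -> bytes:
    async def gen():
        yield b"hello, "
        yield b"world"
    collector = _ExampleChunkCollector()
    await AsyncIterablePayload(gen()).write(collector)
    # e.g. asyncio.run(_example_async_iterable_payload()) == b"hello, world"
    return b"".join(collector.chunks)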
class StreamReaderPayload(AsyncIterablePayload):
def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
super().__init__(value.iter_any(), *args, **kwargs)
PAYLOAD_REGISTRY = PayloadRegistry()
PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
PAYLOAD_REGISTRY.register(StringPayload, str)
PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
# try_last gives more specialized async iterables, such as the multipart
# module's BodyPartReaderPayload, a chance to override the default.
PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
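# Illustrative sketch (not part of the original module): how third-party code
# can plug its own type into the registry assembled above. The Greeting class,
# the payload subclass and the function name are hypothetical; note that
# registration is global, so a real application would do this once at import
# time rather than per call.
def _example_register_custom_payload() -> "Payload":
    class Greeting:
        def __init__(self, name: str) -> None:
            self.name = name
    @payload_type(Greeting, order=Order.try_first)
    class GreetingPayload(StringPayload):
        def __init__(self, value: Greeting, *args: Any, **kwargs: Any) -> None:
            super().__init__("Hello, %s!" % value.name, *args, **kwargs)
    # PAYLOAD_REGISTRY.get() / get_payload() now dispatch Greeting instances
    # to GreetingPayload.
    return get_payload(Greeting("world"))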
| KeepSafe/aiohttp | aiohttp/payload.py | Python | apache-2.0 | 13,689 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.base import * # noqa
from keystoneclient.auth.cli import * # noqa
from keystoneclient.auth.conf import * # noqa
__all__ = [
# auth.base
'AUTH_INTERFACE',
'BaseAuthPlugin',
'get_plugin_class',
'PLUGIN_NAMESPACE',
# auth.cli
'load_from_argparse_arguments',
'register_argparse_arguments',
# auth.conf
'get_common_conf_options',
'get_plugin_options',
'load_from_conf_options',
'register_conf_options',
]
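# Illustrative sketch (not part of the original module): this package simply
# re-exports the plugin helpers listed above. A typical CLI flow built from
# those names looks roughly like the following; exact argument details may
# differ between keystoneclient releases.
#
#     parser = argparse.ArgumentParser()
#     register_argparse_arguments(parser, sys.argv[1:], default='password')
#     namespace = parser.parse_args()
#     plugin = load_from_argparse_arguments(namespace)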
| alexpilotti/python-keystoneclient | keystoneclient/auth/__init__.py | Python | apache-2.0 | 1,031 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate body.
"""
from ._node import Node
class GateBody(Node):
"""Node for an OPENQASM custom gate body.
children is a list of gate operation nodes.
These are one of barrier, custom_unitary, U, or CX.
"""
def __init__(self, children):
"""Create the gatebody node."""
Node.__init__(self, 'gate_body', children, None)
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
string = ""
for child in self.children:
string += " " + child.qasm(prec) + "\n"
return string
def calls(self):
"""Return a list of custom gate names in this gate body."""
lst = []
for child in self.children:
if child.type == "custom_unitary":
lst.append(child.name)
return lst
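# Illustrative sketch (not part of the original module): GateBody only relies
# on each child exposing ``qasm(prec)``, ``type`` and, for custom gates,
# ``name``. The stub class and helper below are hypothetical stand-ins for the
# real barrier/custom_unitary/U/CX node classes.
class _StubGateOp:
    def __init__(self, type_, text, name=None):
        self.type = type_
        self.name = name
        self._text = text
    def qasm(self, prec=15):
        return self._text
def _example_gate_body():
    body = GateBody([
        _StubGateOp("custom_unitary", "h q;", name="h"),
        _StubGateOp("barrier", "barrier q;"),
    ])
    print(body.qasm())   # one indented line per child operation
    print(body.calls())  # ['h']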
| atilag/qiskit-sdk-py | qiskit/qasm/_node/_gatebody.py | Python | apache-2.0 | 1,570 |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for creating character recognition tasks on EMNIST."""
import enum
from typing import Optional, Union
import tensorflow as tf
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.simulation.baselines import baseline_task
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines import task_data
from tensorflow_federated.python.simulation.baselines.emnist import emnist_models
from tensorflow_federated.python.simulation.baselines.emnist import emnist_preprocessing
from tensorflow_federated.python.simulation.datasets import client_data
from tensorflow_federated.python.simulation.datasets import emnist
class CharacterRecognitionModel(enum.Enum):
"""Enum for EMNIST character recognition models."""
CNN_DROPOUT = 'cnn_dropout'
CNN = 'cnn'
TWO_LAYER_DNN = '2nn'
_CHARACTER_RECOGNITION_MODELS = [e.value for e in CharacterRecognitionModel]
def _get_character_recognition_model(model_id: Union[str,
CharacterRecognitionModel],
only_digits: bool) -> tf.keras.Model:
"""Constructs a `tf.keras.Model` for character recognition."""
try:
model_enum = CharacterRecognitionModel(model_id)
except ValueError:
raise ValueError('The model argument must be one of {}, found {}'.format(
_CHARACTER_RECOGNITION_MODELS, model_id))
if model_enum == CharacterRecognitionModel.CNN_DROPOUT:
keras_model = emnist_models.create_conv_dropout_model(
only_digits=only_digits)
elif model_enum == CharacterRecognitionModel.CNN:
keras_model = emnist_models.create_original_fedavg_cnn_model(
only_digits=only_digits)
elif model_enum == CharacterRecognitionModel.TWO_LAYER_DNN:
keras_model = emnist_models.create_two_hidden_layer_model(
only_digits=only_digits)
else:
raise ValueError('The model id must be one of {}, found {}'.format(
_CHARACTER_RECOGNITION_MODELS, model_id))
return keras_model
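# Illustrative sketch (not part of the original module): the helper above
# accepts either the string identifier or the enum member, so both calls below
# build the same dropout CNN architecture, while an unknown id raises
# ValueError.
#
#     _get_character_recognition_model('cnn_dropout', only_digits=True)
#     _get_character_recognition_model(
#         CharacterRecognitionModel.CNN_DROPOUT, only_digits=True)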
def create_character_recognition_task_from_datasets(
train_client_spec: client_spec.ClientSpec,
eval_client_spec: Optional[client_spec.ClientSpec],
model_id: Union[str, CharacterRecognitionModel], only_digits: bool,
train_data: client_data.ClientData,
test_data: client_data.ClientData) -> baseline_task.BaselineTask:
"""Creates a baseline task for character recognition on EMNIST.
Args:
train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how to
preprocess train client data.
eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
specifying how to preprocess evaluation client data. If set to `None`, the
evaluation datasets will use a batch size of 64 with no extra
preprocessing.
model_id: A string identifier for a character recognition model. Must be one
of 'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a CNN
model with dropout, a CNN model with no dropout, and a densely connected
network with two hidden layers of width 200.
only_digits: A boolean indicating whether to use the full EMNIST-62 dataset
containing 62 alphanumeric classes (`True`) or the smaller EMNIST-10
dataset with only 10 numeric classes (`False`).
train_data: A `tff.simulation.datasets.ClientData` used for training.
test_data: A `tff.simulation.datasets.ClientData` used for testing.
Returns:
A `tff.simulation.baselines.BaselineTask`.
"""
emnist_task = 'character_recognition'
if eval_client_spec is None:
eval_client_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=64, shuffle_buffer_size=1)
train_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
train_client_spec, emnist_task=emnist_task)
eval_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
eval_client_spec, emnist_task=emnist_task)
task_datasets = task_data.BaselineTaskDatasets(
train_data=train_data,
test_data=test_data,
validation_data=None,
train_preprocess_fn=train_preprocess_fn,
eval_preprocess_fn=eval_preprocess_fn)
def model_fn() -> model.Model:
return keras_utils.from_keras_model(
keras_model=_get_character_recognition_model(model_id, only_digits),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=task_datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return baseline_task.BaselineTask(task_datasets, model_fn)
def create_character_recognition_task(
train_client_spec: client_spec.ClientSpec,
eval_client_spec: Optional[client_spec.ClientSpec] = None,
model_id: Union[str, CharacterRecognitionModel] = 'cnn_dropout',
only_digits: bool = False,
cache_dir: Optional[str] = None,
use_synthetic_data: bool = False) -> baseline_task.BaselineTask:
"""Creates a baseline task for character recognition on EMNIST.
The goal of the task is to minimize the sparse categorical crossentropy
between the output labels of the model and the true label of the image. When
`only_digits = True`, there are 10 possible labels (the digits 0-9), while
when `only_digits = False`, there are 62 possible labels (both numbers and
letters).
This classification can be done using a number of different models, specified
using the `model_id` argument. Below we give a list of the different models
that can be used:
* `model_id = cnn_dropout`: A moderately sized convolutional network. Uses
two convolutional layers, a max pooling layer, and dropout, followed by two
dense layers.
* `model_id = cnn`: A moderately sized convolutional network, without any
dropout layers. Matches the architecture of the convolutional network used
by (McMahan et al., 2017) for the purposes of testing the FedAvg algorithm.
* `model_id = 2nn`: A densely connected network with 2 hidden layers, each
with 200 hidden units and ReLU activations.
Args:
train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how to
preprocess train client data.
eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
specifying how to preprocess evaluation client data. If set to `None`, the
evaluation datasets will use a batch size of 64 with no extra
preprocessing.
model_id: A string identifier for a character recognition model. Must be one
of 'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a CNN
model with dropout, a CNN model with no dropout, and a densely connected
network with two hidden layers of width 200.
only_digits: A boolean indicating whether to use the full EMNIST-62 dataset
containing 62 alphanumeric classes (`True`) or the smaller EMNIST-10
dataset with only 10 numeric classes (`False`).
cache_dir: An optional directory to cache the downloaded datasets. If
`None`, they will be cached to `~/.tff/`.
use_synthetic_data: A boolean indicating whether to use synthetic EMNIST
data. This option should only be used for testing purposes, in order to
avoid downloading the entire EMNIST dataset.
Returns:
A `tff.simulation.baselines.BaselineTask`.
"""
if use_synthetic_data:
synthetic_data = emnist.get_synthetic()
emnist_train = synthetic_data
emnist_test = synthetic_data
else:
emnist_train, emnist_test = emnist.load_data(
only_digits=only_digits, cache_dir=cache_dir)
return create_character_recognition_task_from_datasets(
train_client_spec, eval_client_spec, model_id, only_digits, emnist_train,
emnist_test)
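# Illustrative sketch (not part of the original module): build a small EMNIST
# character recognition task backed by the synthetic dataset, suitable for
# smoke tests. The function name and the client spec values are hypothetical;
# the ClientSpec arguments mirror the ones used for evaluation above.
def _example_synthetic_emnist_task() -> baseline_task.BaselineTask:
    train_spec = client_spec.ClientSpec(
        num_epochs=1, batch_size=32, shuffle_buffer_size=1)
    return create_character_recognition_task(
        train_spec,
        model_id='cnn_dropout',
        only_digits=False,
        use_synthetic_data=True)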
| tensorflow/federated | tensorflow_federated/python/simulation/baselines/emnist/char_recognition_tasks.py | Python | apache-2.0 | 8,376 |