content stringlengths 5 1.05M |
|---|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import tensorflow as tf
import xlnet
import numpy as np
import model_utils
import random
import json
import collections
import re
import sentencepiece as spm
from sklearn.utils import shuffle
from prepro_utils import preprocess_text, encode_ids
from malaya.text.function import transformer_textcleaning as cleaning
from tensorflow.python.estimator.run_config import RunConfig
# Load the closed set of candidate topics; the keys of topics.json are the
# topic strings used for positive/negative pair sampling in XY() below.
with open('topics.json') as fopen:
    topics = set(json.load(fopen).keys())
list_topics = list(topics)  # NOTE(review): built but not referenced below — confirm it is needed

# Shared SentencePiece tokenizer (cased, 32k vocab) used by tokenize_fn.
sp_model = spm.SentencePieceProcessor()
sp_model.Load('sp10m.cased.v9.model')
def tokenize_fn(text):
    """Normalize *text* (case preserved) and encode it to SentencePiece ids."""
    return encode_ids(sp_model, preprocess_text(text, lower=False))
# XLNet segment ids (token-type labels fed to the encoder).
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4

# Reserved-token ids of the sp10m.cased.v9 SentencePiece vocabulary.
special_symbols = {
    '<unk>': 0,
    '<s>': 1,
    '</s>': 2,
    '<cls>': 3,
    '<sep>': 4,
    '<pad>': 5,
    '<mask>': 6,
    '<eod>': 7,
    '<eop>': 8,
}

VOCAB_SIZE = 32000

# Convenience aliases for the frequently used special ids.
UNK_ID = special_symbols['<unk>']
CLS_ID = special_symbols['<cls>']
SEP_ID = special_symbols['<sep>']
MASK_ID = special_symbols['<mask>']
EOD_ID = special_symbols['<eod>']
def F(left_train):
    """Encode one text into XLNet inputs.

    Returns (token_ids, segment_ids, input_mask): the SentencePiece ids of
    *left_train* followed by <sep> and <cls>, matching segment labels
    (SEG_ID_A everywhere except SEG_ID_CLS on the final <cls>), and an
    all-zero mask (0 = real token).
    """
    token_ids = tokenize_fn(left_train) + [SEP_ID, CLS_ID]
    segment_ids = [SEG_ID_A] * (len(token_ids) - 1) + [SEG_ID_CLS]
    input_mask = [0] * len(token_ids)
    return token_ids, segment_ids, input_mask
def XY(data):
    """Build one training pair from (text, keyphrase-list).

    With 80% probability (when the example's keyphrases overlap the known
    topic set) a keyphrase of the example itself is used → label 1;
    otherwise a topic NOT attached to the example is sampled → label 0.
    Returns (encoded_text, encoded_topic, label).
    """
    text, keyphrases = data[0], data[1]
    if len(set(keyphrases) & topics) and random.random() > 0.2:
        chosen = random.choice(keyphrases)
        label = 1
    else:
        excluded = set(keyphrases) | set()
        chosen = random.choice(list(topics - excluded))
        label = 0
    return F(cleaning(text)), F(chosen), label
def generate():
    """Infinite example generator for tf.data.Dataset.from_generator.

    Yields dicts of variable-length int lists: the encoded text ('X',
    'segment', 'mask'), the encoded candidate topic ('X_b', 'segment_b',
    'mask_b') and a single-element 'label' list (1 = topic matches text).
    """
    with open('trainset-keyphrase.json') as fopen:
        data = json.load(fopen)
    while True:
        # Reshuffle once per pass (sklearn.utils.shuffle returns a new list).
        data = shuffle(data)
        for i in range(len(data)):
            X, Y, label = XY(data[i])
            yield {
                'X': X[0],
                'segment': X[1],
                'mask': X[2],
                'X_b': Y[0],
                'segment_b': Y[1],
                'mask_b': Y[2],
                'label': [label],
            }
def get_dataset(
    batch_size = 60, shuffle_size = 20, thread_count = 24, maxlen_feature = 1800
):
    """Return a closure that builds the padded-batch training pipeline.

    NOTE(review): shuffle_size, thread_count and maxlen_feature are accepted
    but never used in the body — confirm whether they were meant to be wired
    into the pipeline.
    """
    def get():
        # Every feature is a variable-length int32 vector produced by
        # generate() above.
        dataset = tf.data.Dataset.from_generator(
            generate,
            {
                'X': tf.int32,
                'segment': tf.int32,
                'mask': tf.int32,
                'X_b': tf.int32,
                'segment_b': tf.int32,
                'mask_b': tf.int32,
                'label': tf.int32,
            },
            output_shapes = {
                'X': tf.TensorShape([None]),
                'segment': tf.TensorShape([None]),
                'mask': tf.TensorShape([None]),
                'X_b': tf.TensorShape([None]),
                'segment_b': tf.TensorShape([None]),
                'mask_b': tf.TensorShape([None]),
                'label': tf.TensorShape([None]),
            },
        )
        dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
        # Pad every feature to the longest element in the batch.
        # NOTE(review): 'segment' pads with 1 (SEG_ID_B) and 'mask' pads
        # with 4 (SEG_ID_PAD) while real mask positions are 0 — these two
        # values look swapped relative to the SEG_ID_* constants above;
        # confirm against the xlnet.XLNetModel input contract.
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes = {
                'X': tf.TensorShape([None]),
                'segment': tf.TensorShape([None]),
                'mask': tf.TensorShape([None]),
                'X_b': tf.TensorShape([None]),
                'segment_b': tf.TensorShape([None]),
                'mask_b': tf.TensorShape([None]),
                'label': tf.TensorShape([None]),
            },
            padding_values = {
                'X': tf.constant(0, dtype = tf.int32),
                'segment': tf.constant(1, dtype = tf.int32),
                'mask': tf.constant(4, dtype = tf.int32),
                'X_b': tf.constant(0, dtype = tf.int32),
                'segment_b': tf.constant(1, dtype = tf.int32),
                'mask_b': tf.constant(4, dtype = tf.int32),
                'label': tf.constant(0, dtype = tf.int32),
            },
        )
        return dataset

    return get
class Parameter:
    """Value object holding the optimizer/schedule hyper-parameters read by
    model_utils.get_train_op (decay method, warmup, clipping, ...).

    Extra keyword arguments are accepted and silently discarded so one
    config dict can be shared with xlnet.RunConfig.
    """

    _FIELDS = (
        'decay_method', 'warmup_steps', 'weight_decay', 'adam_epsilon',
        'num_core_per_host', 'lr_layer_decay_rate', 'use_tpu',
        'learning_rate', 'train_steps', 'min_lr_ratio', 'clip',
    )

    def __init__(
        self,
        decay_method,
        warmup_steps,
        weight_decay,
        adam_epsilon,
        num_core_per_host,
        lr_layer_decay_rate,
        use_tpu,
        learning_rate,
        train_steps,
        min_lr_ratio,
        clip,
        **kwargs
    ):
        # Bind each named argument as an attribute of the same name; the
        # remaining **kwargs (dropout, init, ...) are intentionally ignored.
        values = locals()
        for field in self._FIELDS:
            setattr(self, field, values[field])
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Checkpoint variable names lack the outer 'xlnet/' scope prefix used by
    this graph, so each checkpoint name is matched against 'xlnet/' + name.
    Returns (assignment_map, initialized_variable_names).
    """
    # Strip the ':0'-style device suffix from every trainable variable name.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        match = re.match('^(.*):\\d+$', var.name)
        clean_name = match.group(1) if match is not None else var.name
        name_to_variable[clean_name] = var

    assignment_map = collections.OrderedDict()
    initialized_variable_names = {}
    for ckpt_name, _ in tf.train.list_variables(init_checkpoint):
        scoped_name = 'xlnet/' + ckpt_name
        if scoped_name not in name_to_variable:
            continue
        assignment_map[ckpt_name] = name_to_variable[scoped_name]
        initialized_variable_names[ckpt_name] = 1
        initialized_variable_names[ckpt_name + ':0'] = 1
    return (assignment_map, initialized_variable_names)
# Optimization schedule: 300k steps with a 10% linear warmup and a small
# fine-tuning learning rate.
num_train_steps = 300000
warmup_proportion = 0.1
num_warmup_steps = int(num_train_steps * warmup_proportion)
initial_learning_rate = 2e-5
def model_fn(features, labels, mode, params):
    """Estimator model_fn: siamese ALXLNet matcher for (text, topic) pairs.

    Both encoders share the 'xlnet' variable scope (the second call passes
    reuse=True), their pooled outputs u and v are combined SBERT-style as
    [u, v, |u - v|] and classified into 2 classes (topic matches / not).

    NOTE(review): there is no PREDICT branch — estimator_spec is unbound if
    mode is PREDICT; confirm the estimator is only ever trained/evaluated.
    """
    # XLNet runtime configuration; is_training is hard-coded to True, so
    # dropout is active even in EVAL mode — presumably intentional for this
    # script, but worth confirming.
    kwargs = dict(
        is_training = True,
        use_tpu = False,
        use_bfloat16 = False,
        dropout = 0.1,
        dropatt = 0.1,
        init = 'normal',
        init_range = 0.1,
        init_std = 0.05,
        clamp_len = -1,
    )
    xlnet_parameters = xlnet.RunConfig(**kwargs)
    xlnet_config = xlnet.XLNetConfig(
        json_path = 'alxlnet-base-2020-04-10/config.json'
    )
    # Hyper-parameters consumed by model_utils.get_train_op via Parameter.
    training_parameters = dict(
        decay_method = 'poly',
        train_steps = num_train_steps,
        learning_rate = initial_learning_rate,
        warmup_steps = num_warmup_steps,
        min_lr_ratio = 0.0,
        weight_decay = 0.00,
        adam_epsilon = 1e-8,
        num_core_per_host = 1,
        lr_layer_decay_rate = 1,
        use_tpu = False,
        use_bfloat16 = False,
        dropout = 0.1,
        dropatt = 0.1,
        init = 'normal',
        init_range = 0.1,
        init_std = 0.05,
        clip = 1.0,
        clamp_len = -1,
    )
    training_parameters = Parameter(**training_parameters)
    X = features['X']
    segment_ids = features['segment']
    input_masks = tf.cast(features['mask'], tf.float32)

    X_b = features['X_b']
    segment_ids_b = features['segment_b']
    input_masks_b = tf.cast(features['mask_b'], tf.float32)

    # Labels arrive shaped [batch, 1] from the dataset; take column 0.
    Y = features['label'][:, 0]

    # First tower. The transposes feed time-major [seq_len, batch] tensors,
    # which is the layout XLNetModel expects.
    with tf.compat.v1.variable_scope('xlnet', reuse = False):
        xlnet_model = xlnet.XLNetModel(
            xlnet_config = xlnet_config,
            run_config = xlnet_parameters,
            input_ids = tf.transpose(X, [1, 0]),
            seg_ids = tf.transpose(segment_ids, [1, 0]),
            input_mask = tf.transpose(input_masks, [1, 0]),
        )
        summary = xlnet_model.get_pooled_out('last', True)

    # Second tower shares every weight with the first (reuse = True).
    with tf.compat.v1.variable_scope('xlnet', reuse = True):
        xlnet_model = xlnet.XLNetModel(
            xlnet_config = xlnet_config,
            run_config = xlnet_parameters,
            input_ids = tf.transpose(X_b, [1, 0]),
            seg_ids = tf.transpose(segment_ids_b, [1, 0]),
            input_mask = tf.transpose(input_masks_b, [1, 0]),
        )
        summary_b = xlnet_model.get_pooled_out('last', True)

    # SBERT-style combination of the two pooled vectors.
    vectors_concat = [summary, summary_b, tf.abs(summary - summary_b)]
    vectors_concat = tf.concat(vectors_concat, axis = 1)
    logits = tf.layers.dense(vectors_concat, 2)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits = logits, labels = Y
        )
    )
    # Name the tensors so the LoggingTensorHook in the driver can find them.
    tf.identity(loss, 'train_loss')
    accuracy = tf.metrics.accuracy(
        labels = Y, predictions = tf.argmax(logits, axis = 1)
    )
    tf.identity(accuracy[1], name = 'train_accuracy')

    # Warm-start every matching variable from the pretrained ALXLNet
    # checkpoint.
    tvars = tf.trainable_variables()
    init_checkpoint = 'alxlnet-base-2020-04-10/model.ckpt-300000'
    assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(
        tvars, init_checkpoint
    )
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op, learning_rate, _ = model_utils.get_train_op(
            training_parameters, loss
        )
        tf.summary.scalar('learning_rate', learning_rate)
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL,
            loss = loss,
            eval_metric_ops = {'accuracy': accuracy},
        )
    return estimator_spec
def run_training(
    train_fn,
    model_fn,
    model_dir: str,
    gpu_mem_fraction: float = 0.96,
    log_step: int = 100,
    summary_step: int = 100,
    save_checkpoint_step: int = 1000,
    max_steps: int = 10000,
    eval_step: int = 10,
    eval_throttle: int = 120,
    train_batch_size: int = 128,
    train_hooks = None,
    eval_fn = None,
):
    """Drive tf.estimator training (and optional periodic evaluation).

    :param train_fn: input_fn returning the (already batched) training dataset
    :param model_fn: estimator model function
    :param model_dir: directory for checkpoints and summaries
    :param gpu_mem_fraction: per-process GPU memory cap
    :param eval_fn: optional eval input_fn; when given, train_and_evaluate
        is used with eval_step steps at most every eval_throttle seconds

    NOTE(review): train_batch_size is accepted but unused — batching happens
    inside the dataset itself; confirm it can be dropped from callers.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    dist_strategy = None  # single-device training; no distribution strategy

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction = gpu_mem_fraction
    )
    config = tf.ConfigProto(
        allow_soft_placement = True, gpu_options = gpu_options
    )
    run_config = RunConfig(
        train_distribute = dist_strategy,
        eval_distribute = dist_strategy,
        log_step_count_steps = log_step,
        model_dir = model_dir,
        save_checkpoints_steps = save_checkpoint_step,
        save_summary_steps = summary_step,
        session_config = config,
    )
    estimator = tf.estimator.Estimator(
        model_fn = model_fn, params = {}, config = run_config
    )
    if eval_fn:
        train_spec = tf.estimator.TrainSpec(
            input_fn = train_fn, max_steps = max_steps, hooks = train_hooks
        )
        eval_spec = tf.estimator.EvalSpec(
            input_fn = eval_fn, steps = eval_step, throttle_secs = eval_throttle
        )
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    else:
        estimator.train(
            input_fn = train_fn, max_steps = max_steps, hooks = train_hooks
        )
# Log the tensors named 'train_accuracy'/'train_loss' (created in model_fn)
# on every single step.
train_hooks = [
    tf.train.LoggingTensorHook(
        ['train_accuracy', 'train_loss'], every_n_iter = 1
    )
]
train_dataset = get_dataset()
save_directory = 'alxlnet-base-keyphrase'
# Kick off training for the full 300k-step schedule; checkpoints every 10k
# steps into save_directory.
run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    log_step = 1,
    save_checkpoint_step = 10000,
    max_steps = num_train_steps,
    train_hooks = train_hooks,
)
|
import numpy as np
import scipy.sparse as sp
def bwdmean(center_array, w):
    """Backward (one-cell) average of cell-centered values along a direction.

    Parameters
    ----------
    center_array : numpy.ndarray
        Array of values defined at cell centers.
    w : str
        'x', 'y' or 'z' — the direction along which the average is taken
        (any other value falls back to 'x', matching the original default).

    Returns
    -------
    numpy.ndarray
        Array of the same shape where entry i along the chosen axis is the
        mean of cells i-1 and i; the first entry wraps around periodically
        (np.roll semantics).
    """
    # Fix: the original rolled the *flattened* array by 1/2/3 elements,
    # which does not shift along a single axis for multi-dimensional input
    # (the original author flagged this with a "CHECK!" comment). Rolling
    # by one cell along the direction's own axis is the intended backward
    # shift, and it generalizes to any dimensionality.
    axis = {'y': 1, 'z': 2}.get(w, 0)
    center_shifted = np.roll(center_array, 1, axis=axis)
    return (center_shifted + center_array) / 2
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import abc
import copy
import six
from oslo_utils import reflection
from oslo_utils import strutils
from six.moves.urllib import parse
from adjutantclient._i18n import _
from adjutantclient import exc as exceptions
def getid(obj):
    """Return id if argument is a Resource.

    Abstracts the common pattern of allowing both an object or an object's
    ID (UUID) as a parameter when dealing with relationships: a truthy
    `uuid` attribute wins, then an `id` attribute, otherwise the argument
    itself is returned unchanged.
    """
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    return getattr(obj, 'id', obj)
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
    """Mixin so classes can register and run hooks.

    Hooks are stored per hook type in a class-level registry, so they are
    shared by every class using the mixin.
    """

    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Add a new hook of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param hook_func: hook function
        """
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Run all hooks of specified type.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param args: args to be passed to every hook function
        :param kwargs: kwargs to be passed to every hook function
        """
        for registered in cls._hooks_map.get(hook_type) or []:
            registered(*args, **kwargs)
class BaseManager(HookableMixin):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors, images,
    etc.) and provide CRUD operations for them.
    """

    # Subclasses set this to the Resource subclass they materialize.
    resource_class = None

    def __init__(self, client):
        """Initializes BaseManager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(BaseManager, self).__init__()
        self.client = client

    def _list(self, url, response_key=None, obj_class=None, json=None):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        """
        # A json payload switches the request from GET to POST.
        if json:
            body = self.client.post(url, json=json).json()
        else:
            body = self.client.get(url).json()

        if obj_class is None:
            obj_class = self.resource_class

        data = body[response_key] if response_key is not None else body
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass

        # Falsy entries (None, {}) are silently dropped.
        return [obj_class(self, res, loaded=True) for res in data if res]

    def _get(self, url, response_key=None):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        """
        body = self.client.get(url).json()
        data = body[response_key] if response_key is not None else body
        return self.resource_class(self, data, loaded=True)

    def _head(self, url):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        """
        resp = self.client.head(url)
        # Only a "204 No Content" response counts as success here.
        return resp.status_code == 204

    def _post(self, url, json, response_key=None, return_raw=False):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        """
        body = self.client.post(url, json=json).json()
        data = body[response_key] if response_key is not None else body
        if return_raw:
            return data
        return self.resource_class(self, data)

    def _put(self, url, json=None, response_key=None):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        resp = self.client.put(url, json=json)
        # PUT requests may not return a body
        if resp.content:
            body = resp.json()
            if response_key is not None:
                return self.resource_class(self, body[response_key])
            else:
                return self.resource_class(self, body)
        # Implicitly returns None when the response body is empty.

    def _patch(self, url, json=None, response_key=None):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        body = self.client.patch(url, json=json).json()
        if response_key is not None:
            return self.resource_class(self, body[response_key])
        else:
            return self.resource_class(self, body)

    def _delete(self, url):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        """
        return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
    """Manager with additional `find()`/`findall()` methods."""

    @abc.abstractmethod
    def list(self):
        pass

    def find(self, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        matches = self.findall(**kwargs)
        if not matches:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch()
        return matches[0]

    def findall(self, **kwargs):
        """Find all items with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        wanted = kwargs.items()
        results = []
        for candidate in self.list():
            try:
                if all(getattr(candidate, attr) == value
                       for (attr, value) in wanted):
                    results.append(candidate)
            except AttributeError:
                # Items missing one of the attributes simply don't match.
                continue
        return results
class CrudManager(BaseManager):
    """Base manager class for manipulating entities.

    Children of this class are expected to define a `collection_key` and `key`.

    - `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
      objects containing a list of member resources (e.g. `{'entities': [{},
      {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.
    """

    collection_key = None
    key = None

    def build_url(self, base_url=None, **kwargs):
        """Builds a resource URL for the given kwargs.

        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.

        By default, the URL will represent a collection of entities, e.g.::

            /entities

        If kwargs contains an `entity_id`, then the URL will represent a
        specific member, e.g.::

            /entities/{entity_id}

        :param base_url: if provided, the generated URL will be appended to it
        """
        url = base_url if base_url is not None else ''
        url += '/%s' % self.collection_key

        # do we have a specific entity?
        entity_id = kwargs.get('%s_id' % self.key)
        if entity_id is not None:
            url += '/%s' % entity_id

        return url

    def _filter_kwargs(self, kwargs):
        """Drop null values and handle ids."""
        # Iterate over a copy because `kwargs` is mutated inside the loop.
        for key, ref in (kwargs.copy().items()):
            if ref is None:
                kwargs.pop(key)
            else:
                if isinstance(ref, Resource):
                    # Replace a Resource value with a '<key>_id' entry.
                    kwargs.pop(key)
                    kwargs['%s_id' % key] = getid(ref)
        return kwargs

    def create(self, **kwargs):
        # POST /collection with {key: attrs}; unwrap the response under key.
        kwargs = self._filter_kwargs(kwargs)
        return self._post(
            self.build_url(**kwargs),
            {self.key: kwargs},
            self.key)

    def get(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._get(
            self.build_url(**kwargs),
            self.key)

    def head(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._head(self.build_url(**kwargs))

    def list(self, base_url=None, **kwargs):
        """List the collection.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)

        # Remaining kwargs double as query-string filters.
        return self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)

    def put(self, base_url=None, **kwargs):
        """Update an element.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)

        # NOTE(review): no request body is sent (_put defaults json=None) —
        # confirm this bodiless PUT is the intended contract.
        return self._put(self.build_url(base_url=base_url, **kwargs))

    def update(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        params = kwargs.copy()
        # The id belongs in the URL, not in the PATCH payload.
        params.pop('%s_id' % self.key)

        return self._patch(
            self.build_url(**kwargs),
            {self.key: params},
            self.key)

    def delete(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)

        return self._delete(
            self.build_url(**kwargs))

    def find(self, base_url=None, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)

        rl = self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)
        num = len(rl)

        if num == 0:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        elif num > 1:
            # NOTE(review): raises the exception class itself (no parens),
            # unlike ManagerWithFind.find which instantiates it.
            raise exceptions.NoUniqueMatch
        else:
            return rl[0]
class Extension(HookableMixin):
    """Extension descriptor.

    Inspects an extension module, registering any supported hook callables
    it defines and remembering its BaseManager subclass (if any).
    """

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
    manager_class = None

    def __init__(self, name, module):
        super(Extension, self).__init__()
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        """Scan the module for hooks and a manager class."""
        self.manager_class = None
        for member_name, member in self.module.__dict__.items():
            if member_name in self.SUPPORTED_HOOKS:
                self.add_hook(member_name, member)
                continue
            try:
                is_manager = issubclass(member, BaseManager)
            except TypeError:
                # Not a class at all; skip it.
                continue
            if is_manager:
                self.manager_class = member

    def __repr__(self):
        return "<Extension '%s'>" % self.name
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes.
    """

    # When True, human_id returns a slug of the NAME_ATTR attribute.
    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.

        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._loaded = loaded

    def __repr__(self):
        # Show every public attribute except the manager backreference.
        reprkeys = sorted(k
                          for k in self.__dict__.keys()
                          if k[0] != '_' and k != 'manager')
        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
        class_name = reflection.get_class_name(self, fully_qualified=False)
        return "<%s %s>" % (class_name, info)

    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion. """
        if self.HUMAN_ID:
            name = getattr(self, self.NAME_ATTR, None)
            if name is not None:
                return strutils.to_slug(name)
        return None

    def _add_details(self, info):
        # Mirror every key of `info` as both an attribute and an _info entry.
        for (k, v) in (info).items():
            try:
                setattr(self, k, v)
                self._info[k] = v
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass

    def __getattr__(self, k):
        # Only reached when normal attribute lookup fails; used to trigger
        # a lazy load of the full resource details.
        if k not in self.__dict__:
            # NOTE(bcwaldon): disallow lazy-loading if already loaded once
            if not self.is_loaded():
                self.get()
                # Re-enter: get() may have populated the attribute by now;
                # if it didn't, the second pass raises AttributeError.
                return self.__getattr__(k)

            raise AttributeError(k)
        else:
            return self.__dict__[k]

    def get(self):
        """Support for lazy loading details.

        Some clients, such as novaclient have the option to lazy load the
        details, details which can be loaded with this function.
        """
        # set_loaded() first ... so if we have to bail, we know we tried.
        self.set_loaded(True)
        if not hasattr(self.manager, 'get'):
            return

        new = self.manager.get(self.id)
        if new:
            self._add_details(new._info)
            self._add_details(
                {'x_request_id': self.manager.client.last_request_id})

    def __eq__(self, other):
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        # Equality compares the raw server-supplied attribute dicts.
        return self._info == other._info

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_same_obj(self, other):
        """Identify the two objects are same one with same id."""
        if isinstance(other, self.__class__):
            if hasattr(self, 'id') and hasattr(other, 'id'):
                return self.id == other.id
        return False

    def is_loaded(self):
        return self._loaded

    def set_loaded(self, val):
        self._loaded = val

    def to_dict(self):
        # Deep copy so callers cannot mutate the cached state.
        return copy.deepcopy(self._info)
|
# Legacy:
def save_to_disfeval_file(p, g, w, f, filename, incremental=False):
    '''Write predictions to a disfluency-evaluation formatted file.

    INPUT:
    p :: predictions
    g :: groundtruth
    w :: corresponding words
    f :: original input gold standard file (unused; kept for call
         compatibility with existing callers)
    filename :: path of the output file
    incremental :: when True, write increco-style output with one block per
        utterance prefix instead of one block per utterance

    OUTPUT:
    filename :: name of the file where the predictions are written, in the
        right format for disfluency evaluation (precision/recall/f1)
    '''
    out = ''
    if not incremental:
        # One "word label prediction" line per token, each utterance wrapped
        # in BOS/EOS sentinel rows.
        for sent_labels, sent_preds, sent_words in zip(g, p, w):
            out += 'BOS O O\n'
            for label, pred, word in zip(sent_labels, sent_preds, sent_words):
                out += word + ' ' + label + ' ' + pred + '\n'
            out += 'EOS O O\n\n'
    else:
        # increco-style: for every utterance, emit one block per prefix.
        # The first increment always has a start symbol and the first tag;
        # the last always has the end-of-utt tag, which may differ from the
        # penultimate prediction over the same words.
        # NOTE: this branch mutates its inputs (appends sentinel entries to
        # the per-utterance word/label/prediction lists), matching the
        # original behaviour.
        for sent_labels, sent_preds, sent_words in zip(g, p, w):
            prefix = []  # accumulated "word label " pairs for this utterance
            sent_words.append('EOS')  # extra end-of-sequence position
            sent_labels.append('O')
            sent_preds[-1].append('O')  # trivially not evaluated
            for label, prefix_preds, word in zip(sent_labels, sent_preds, sent_words):
                # prefix_preds covers the whole prefix, not just the latest word.
                prefix.append(word + ' ' + label + ' ')
                assert len(prefix) == len(prefix_preds), str(prefix) + str(prefix_preds)
                out += 'BOS O O\n'
                for my_prefix, my_prediction in zip(prefix, prefix_preds):
                    out += my_prefix + " " + my_prediction + "\n"
                # last one is final for the prefix; the last block carries EOS
                out += "\n"
    # Fix: open via a context manager (the original leaked the handle on
    # error) and write the string in one call instead of writelines(), which
    # iterates a str character by character.
    with open(filename, 'w') as out_file:
        out_file.write(out)
    return filename
|
import sqlite3
import logging
import datetime
class Model(object):
    """SQLite-backed persistence layer for user records.

    Exposes simple CRUD helpers over a single `user_table`; every failure
    is logged and the sqlite3 error re-raised to the caller.
    """

    CREATE_USER_TABLE = '''CREATE TABLE IF NOT EXISTS user_table(
        created_ts TIMESTAMP NOT NULL,
        id INTEGER PRIMARY KEY,
        email VARCHAR(200) NOT NULL UNIQUE,
        phone_number VARCHAR(20) NOT NULL UNIQUE,
        full_name VARCHAR(200),
        password VARCHAR(100) NOT NULL,
        key VARCHAR(100) NOT NULL UNIQUE,
        account_key VARCHAR(100) UNIQUE,
        metadata VARCHAR(2000)
        );
    '''
    DROP_USER_TABLES = ''' DROP TABLE IF EXISTS user_table;'''
    GET_USERS = '''SELECT email, phone_number, full_name, key, account_key, metadata
        FROM user_table ORDER BY created_ts DESC;
    '''
    GET_USERS_BY_NAME = '''SELECT email, phone_number, full_name, key, account_key, metadata
        FROM user_table WHERE full_name like ? or email like ? or metadata like ? ORDER BY created_ts DESC;
    '''
    INSERT_USER_ROW = '''INSERT INTO user_table(created_ts, email, phone_number, full_name, password, key, metadata)
        VALUES(?, ?, ?, ?, ?, ?, ?); '''
    UPDATE_USER_ROW = '''UPDATE user_table SET account_key = ? WHERE id = ?;'''
    DB_NAME = '/user.db'

    def __init__(self):
        logging.basicConfig(filename='database.log', level=logging.INFO)
        self.connection = sqlite3.connect(Model.DB_NAME, timeout=5)
        self.logger = logging.getLogger(__name__)
        self.cursor = self.connection.cursor()
        self.__create_tables()

    def __create_tables(self):
        """Create the user table if it does not yet exist."""
        try:
            self.logger.info("Creating user table")
            self.cursor = self.cursor.execute(Model.CREATE_USER_TABLE)
        except sqlite3.Error as error:
            self.logger.error("Error creating user table " + str(error))
            raise error

    def add_users(self, user_payload):
        """Insert a new user row; returns the rowid of the inserted user."""
        row = (
            datetime.datetime.now(),
            user_payload.email,
            user_payload.phone_number,
            user_payload.full_name,
            user_payload.password,
            user_payload.key,
            user_payload.metadata,
        )
        try:
            self.cursor.execute(Model.INSERT_USER_ROW, row)
            self.connection.commit()
            return self.cursor.lastrowid
        except sqlite3.Error as error:
            self.logger.error("Error adding user to user table " + str(error))
            raise error

    def update_user(self, ID, account_key):
        """Attach an account key to the user row identified by *ID*."""
        try:
            self.cursor.execute(Model.UPDATE_USER_ROW, (account_key, ID))
            self.connection.commit()
        except sqlite3.Error as error:
            self.logger.error("Error updating the user record with account key" + str(error))
            raise error

    def get_users(self):
        """Return every user row, newest first."""
        try:
            self.logger.info("Fetching all users from user table")
            self.cursor = self.cursor.execute(Model.GET_USERS)
            return self.cursor.fetchall()
        except sqlite3.Error as error:
            self.logger.error("Error getting users from user table" + str(error))
            raise error

    def get_users_by_query_str(self, query):
        """Return users whose name, email or metadata contains *query*."""
        pattern = '%' + query + '%'
        try:
            self.logger.info("Getting users from user table that matches :" + query)
            self.cursor = self.cursor.execute(
                Model.GET_USERS_BY_NAME, (pattern, pattern, pattern))
            return self.cursor.fetchall()
        except sqlite3.Error as error:
            self.logger.error("Error getting users from user table that matches query " + query + str(error))
            raise error

    def __drop_tables(self):
        """Drop the user table (private maintenance helper)."""
        try:
            self.logger.info("Deleting user table")
            self.cursor = self.cursor.execute(Model.DROP_USER_TABLES)
        except sqlite3.Error as error:
            self.logger.error("Error deleting user table" + str(error))
            raise error
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import sys
import torch
from image_synthesis.utils.io import write_args, save_config_to_yaml
from image_synthesis.distributed.distributed import is_primary
import torch.utils.tensorboard as tensorboard
# USE_TENSORBOARD = True
# try:
# import tensorboard
# except:
# USE_TENSORBOARD = False
class Logger(object):
    """Rank-aware experiment logger.

    Only the primary (rank-0) process writes anything: the args snapshot,
    the config yaml, a plain-text log file and — when enabled — tensorboard
    summaries. On non-primary ranks every method is a no-op.
    """

    def __init__(self, args):
        self.args = args
        self.save_dir = args.save_dir
        self.is_primary = is_primary()

        if self.is_primary:
            os.makedirs(self.save_dir, exist_ok=True)

            # save the args and config
            self.config_dir = os.path.join(self.save_dir, 'configs')
            os.makedirs(self.config_dir, exist_ok=True)
            file_name = os.path.join(self.config_dir, 'args.txt')
            write_args(args, file_name)

            log_dir = os.path.join(self.save_dir, 'logs')
            if not os.path.exists(log_dir):
                os.makedirs(log_dir, exist_ok=True)
            # Open in append mode so a restarted run extends the same file.
            self.text_writer = open(os.path.join(log_dir, 'log.txt'), 'a')
            if args.tensorboard:
                self.log_info('using tensorboard')
                self.tb_writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir)
            else:
                self.tb_writer = None

    def save_config(self, config):
        """Persist the config as configs/config.yaml (primary rank only)."""
        if self.is_primary:
            save_config_to_yaml(config, os.path.join(self.config_dir, 'config.yaml'))

    def log_info(self, info, check_primary=True):
        """Print *info* and append a timestamped copy to the text log.

        :param info: message (stringified if not already a str)
        :param check_primary: when True, non-primary ranks stay silent
        """
        if self.is_primary or (not check_primary):
            print(info)
            if self.is_primary:
                info = str(info)
                time_str = time.strftime('%Y-%m-%d-%H-%M')
                info = '{}: {}'.format(time_str, info)
                if not info.endswith('\n'):
                    info += '\n'
                self.text_writer.write(info)
                self.text_writer.flush()

    def add_scalar(self, **kargs):
        """Forward to SummaryWriter.add_scalar (primary + tensorboard only)."""
        if self.is_primary:
            if self.tb_writer is not None:
                self.tb_writer.add_scalar(**kargs)

    def add_scalars(self, **kargs):
        """Forward to SummaryWriter.add_scalars (primary + tensorboard only)."""
        if self.is_primary:
            if self.tb_writer is not None:
                self.tb_writer.add_scalars(**kargs)

    def add_image(self, **kargs):
        """Forward to SummaryWriter.add_image (primary + tensorboard only)."""
        if self.is_primary:
            if self.tb_writer is not None:
                self.tb_writer.add_image(**kargs)

    def add_images(self, **kargs):
        """Forward to SummaryWriter.add_images (primary + tensorboard only)."""
        if self.is_primary:
            if self.tb_writer is not None:
                self.tb_writer.add_images(**kargs)

    def close(self):
        """Close the text log and, if one was created, the tensorboard writer."""
        if self.is_primary:
            self.text_writer.close()
            # Fix: tb_writer is None when tensorboard logging is disabled;
            # the original called close() unconditionally and raised
            # AttributeError in that case.
            if self.tb_writer is not None:
                self.tb_writer.close()
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
import app.views.post_views
import app.views.user_views
from app.views import views, post_views, user_views
app_name = 'app'

# Route table for the social-posting app; the `name=` values are referenced
# by templates and reverse(). Media files are served directly via static()
# (development-style serving).
urlpatterns = [
    path('api_test', views.api_test, name='api_test'),
    # path('api/', include(router.urls)),
    path('', views.index),
    path('index', views.index, name="index"),
    # Authentication / account management
    path('accounts/login/', views.login_view, name='login'),
    path('accounts/logout', views.logout_view, name="logout"),
    path('register', views.register_view, name="register"),
    # Posts and images
    path('create-image', app.views.post_views.create_image_view, name="create_image"),
    path('images/<str:filename>', views.get_image, name="images"),
    path('images/<str:filename>/<str:encoding>', views.get_image, name="images"),
    path('my-posts', app.views.post_views.my_posts_view, name="my_posts"),
    path('create-post', app.views.post_views.create_post_view, name='create_post'),
    path('delete/<uuid:id>/', app.views.post_views.delete_post, name="delete"),
    # Profiles
    path('author/<uuid:id>', views.profile_view, name="profile"),
    path('profile', views.my_profile_view, name="my_profile"),
    path('profile/edit/', views.edit_profile, name="edit_profile"),
    path('post/edit/<uuid:id>/', post_views.edit_post, name="edit_post"),
    # Social graph: follow/unfollow and friend lists
    path('authors', user_views.all_author_view, name="all_authors"),
    path('authors/follow/<uuid:id>/', user_views.follow_view, name="follow"),
    # path('followers/follow/<uuid:id>/', user_views.follow_view, name="followers_follow"),
    path('authors/unfollow/<uuid:id>/', user_views.unfollow_view, name="unfollow"),
    path('authors/unfollow-mutual/<uuid:id>/', user_views.unfollow_mutual_view, name="unfollow_mutual"),
    # Post feeds by visibility
    path('public-posts', app.views.post_views.public_post_view, name="public_posts"),
    path('private-posts', app.views.post_views.private_friends_posts_view, name="private_posts"),
    path('search/', views.search_view, name="search_author"),
    path('new_followers/', user_views.new_followers_view, name="new_followers"),
    path('followers/', user_views.all_followers_view, name="followers"),
    path('mutual-friends/', user_views.mutual_friends_view, name="mutual_friends"),
    path('post-detail/<uuid:id>/', post_views.create_comment_view, name="post_detail"),
    path('foaf-posts', post_views.foaf_posts_view, name="foaf_posts"),
    path('mutual-friend-posts', post_views.mutual_friends_posts_view, name="mutual_friend_posts"),
    path('post/<uuid:id>/', post_views.unlisted_post_view, name="unlisted_post"),
    # Remote (federated) authors and posts
    path('author/remote/', app.views.user_views.profile_remote_view, name="author_remote"),
    path('post-detail/remote/<str:post>', post_views.remote_post_view, name="remote_post"),
    # Friend requests (local and remote)
    path('friend-request/send/<uuid:id>', user_views.send_friend_request, name='add_friend'),
    path('friend-request/cancel/<uuid:id>', user_views.cancel_friend_request, name='cancel_friend'),
    path('friend-request/accept/<uuid:id>', user_views.accept_friend_request, name='accept_friend'),
    path('friendrequest', user_views.all_requests_view, name="requests"),
    path('remote-friend-request/accept/', user_views.accept_remote_friend_request, name='accept_remote_friend'),
    path('remote-friend-request/cancel/', user_views.cancel_remote_friend_request, name='cancel_remote_friend'),
    path('remote-friend-request/send/<str:uuid>', user_views.send_remote_friend_request, name='add_remote_friend'),
    path('unfollow-remote-mutual/<str:uuid>', user_views.unfriend_remote_mutual_view, name="unfriend_remote_mutual"),
    #path('friendrequest', api_views.friendrequest, name="friendrequest"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import numpy as np
from PIL import Image
import caffe
# FCN-8s semantic-segmentation inference on a single VOC2007 image, plotting
# one class's raw score map as a heatmap and saving it to out.npy.
# load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
# im = Image.open('pascal/VOC2010/JPEGImages/2007_000129.jpg')
im = Image.open('/home/inoue/external_data/VOCdevkit/VOCdevkit/VOC2007/JPEGImages/000001.jpg')
in_ = np.array(im,dtype=np.float64)
in_ = in_[:,:,::-1]
# per-channel BGR mean pixel values subtracted before feeding the net
in_ -= np.array((104.00698793,116.66876762,122.67891434))
in_ = in_.transpose((2,0,1))
# load net
#net = caffe.Net('deploy.prototxt', 'fcn-32s-pascalcontext.caffemodel', caffe.TEST)
net = caffe.Net('deploy.prototxt', 'fcn8s-heavy-pascal.caffemodel', caffe.TEST)
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, *in_.shape)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
# channel 15 of the raw 'score_fr' blob -- presumably the 'person' class,
# given the plot title below; TODO confirm the class index for this model
out = net.blobs['score_fr'].data[0][15]
import matplotlib.pyplot as plt
# NOTE(review): 'agg' is a non-interactive backend, so plt.show() below
# will not open a window; only the saved array survives this run
plt.switch_backend('agg')
from matplotlib import cm
fig, ax = plt.subplots()
cax = ax.imshow(out, interpolation='nearest', cmap=cm.coolwarm)
ax.set_title('Person\'s Heatmap')
cbar = fig.colorbar(cax, ticks=[0, 30.69, 31])
cbar.ax.set_yticklabels(['0', '30.69', '31'])
# NOTE(review): drops the first row of the score map before saving -- the
# intent is unclear; verify this is deliberate
out = out[1:,::]
#print(data[0])
#out = net.blobs['score'].data[0].argmax(axis=0)
# save prediction
np.save('out.npy', out)
plt.show()
# save as image
#palette_base = [i for i in xrange(0, 256, 255 / 3)]
#palette = [(palette_base[i], palette_base[j] , palette_base[k]) for i in xrange(4) for j in xrange(4) for k in xrange(4)]
#colors = np.array(palette, dtype=np.uint8)[out]
#Image.fromarray(colors).save('out.png', 'PNG')
|
import socket
import time
import random

if __name__ == "__main__":
    # Block until the "postgres" host accepts TCP connections on 5432.
    # Each attempt uses a fresh socket; failures back off 0.5-0.6 s (jittered).
    postgres_up = False
    while not postgres_up:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(("postgres", 5432))
            print("Postgres had started")
            postgres_up = True
        except socket.error:
            print("Waiting for postgres")
            time.sleep(0.5 + (random.randint(0, 100) / 1000))
        finally:
            # mirror the original `with` block: the socket is closed every pass
            sock.close()
|
#!/usr/bin/python3
# Wheel-speed meter: a GPIO sensor fires edge interrupts, and the callback
# (calculate_elapse) derives speed from the time taken per rotation.
import RPi.GPIO as GPIO
from time import sleep
import time, math
# Shared module-level state mutated by the interrupt callback below.
dist_meas = 0.00  # distance measured (declared but never updated in this script)
km_per_hour = 0  # speed in km/h (declared but never updated in this script)
rpm = 0  # revolutions per minute (declared but never updated in this script)
elapse = 0  # seconds taken for the most recent complete rotation
sensor = 17  # BCM pin number the rotation sensor is wired to
pulse = 0  # edge-interrupt counter; 4 pulses == one complete rotation
start_timer = time.perf_counter()  # timestamp marking the start of the current rotation
speed = 0  # latest computed speed in km/h (written by calculate_elapse)
def init_GPIO(): # initialize GPIO
    """Configure the sensor pin as a BCM-numbered input with internal pull-up."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(sensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def calculate_elapse(channel): # callback function
    """GPIO edge-interrupt callback: count pulses and recompute wheel speed.

    Every 4 pulses are treated as one complete rotation: `elapse` becomes the
    time that rotation took and `speed` (km/h) is recomputed from it.
    `channel` is the GPIO channel passed in by RPi.GPIO (unused here).
    Mutates the module globals pulse, start_timer, elapse and speed.
    """
    global pulse, start_timer, elapse, speed
    pulse += 1 # increase pulse by 1 whenever interrupt occurred
    if pulse == 4:
        elapse = time.perf_counter() - start_timer # elapse for every 1 complete rotation made!
        print(elapse)
        start_timer = time.perf_counter() # let current time equals to start_timer
        print(start_timer)
        # BUG FIX: guard the division -- if two interrupt bursts land close
        # enough together that elapse is 0 the original raised
        # ZeroDivisionError inside the GPIO callback; keep the previous speed.
        if elapse > 0:
            speed = pulse * 2.4 / elapse
        pulse = 0
# def calculate_speed():
# global pulse,elapse,rpm,dist_km,dist_meas,km_per_sec,km_per_hour
# if elapse !=0: # to avoid DivisionByZero error
# rpm = 1/elapse * 60
# # return km_per_hour
def init_interrupt():
    """Register calculate_elapse on both edges of the sensor pin, debounced to 20 ms."""
    GPIO.add_event_detect(sensor, GPIO.BOTH, callback = calculate_elapse, bouncetime = 20)
if __name__ == '__main__':
    init_GPIO()
    init_interrupt()
    # `speed` is updated asynchronously by the GPIO interrupt callback; this
    # loop merely reports the latest value ten times per second, forever.
    while True:
        print("Current speed is {} km/h".format(speed))
        # calculate_speed() # call this function with wheel radius as parameter
        # print('rpm:{0:.0f}-RPM kmh:{1:.0f}-KMH dist_meas:{2:.2f}m pulse:{3}'.format(rpm,km_per_hour,dist_meas,pulse))
        sleep(0.1)
from ..abstractvector import DocumentVector
class EmptyVector(DocumentVector):
    """DocumentVector subclass that adds no behaviour of its own.

    Presumably a null-object stand-in for documents with no vector
    representation -- confirm against the DocumentVector base class.
    """
    pass
|
import tensorflow as tf
import numpy as np
import time
import os
from utils import decode_captions, write_bleu
from bleu import evaluate
from math import ceil
class Solver(object):
    """Multi-GPU trainer/evaluator for a video-caption generating model.

    Written for TensorFlow 1.x graph mode and Python 2 (print statements).
    train() builds one model "tower" per GPU, averages losses/gradients on
    the CPU, checkpoints to model_path, and scores every split per epoch.
    """
    def __init__(self, model, data, **kwargs):
        """
        Required Arguments:
        - model: caption generating model
        - data: Data object
        Optional Arguments:
        - n_epochs: The number of epochs to run for training.
        - batch_size: Mini batch size.
        - update_rule: A string giving the name of an update rule
        - learning_rate: Learning rate; default value is 0.0001.
        - print_every: Integer; training losses will be printed every print_every iterations.
        - save_every: Integer; model variables will be saved every save_every epoch.
        - model_path: String; model path for saving
        """
        # model and data
        self.model = model
        self.data = data
        # train related params
        self.n_epochs = kwargs.pop('n_epochs', 100)
        self.batch_size = kwargs.pop('batch_size', 64)
        self.update_rule = kwargs.pop('update_rule', 'adam')
        self.learning_rate = kwargs.pop('learning_rate', 0.0001)
        self.print_bleu = kwargs.pop('print_bleu', True)
        self.print_every = kwargs.pop('print_every', 100)
        self.save_every = kwargs.pop('save_every', 1)
        self.log_path = kwargs.pop('log_path', './log/')
        self.model_path = kwargs.pop('model_path', './model/')
        self.test_model = kwargs.pop('test_model', './model/lstm/model-1')
        # model related params
        self.max_words = kwargs.pop('max_words', 30)
        # input shape
        self.L, self.D = kwargs.pop('dim_feature', [28, 2048])
        # number of gpus
        self.num_gpus = kwargs.pop('num_gpus', 8)
        # set an optimizer by update rule
        # NOTE(review): an unrecognised update_rule leaves self.optimizer unset,
        # which only surfaces later inside train() -- verify callers pass a valid rule.
        if self.update_rule == 'adam':
            self.optimizer = tf.train.AdamOptimizer
        elif self.update_rule == 'momentum':
            self.optimizer = tf.train.MomentumOptimizer
        elif self.update_rule == 'rmsprop':
            self.optimizer = tf.train.RMSPropOptimizer
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        if not os.path.exists(self.log_path):
            os.makedirs(self.log_path)
    def average_loss(self, tower_loss):
        """Return the arithmetic mean of the per-tower loss tensors."""
        total_loss = tf.add_n(tower_loss)
        return total_loss / len(tower_loss)
    def average_gradients(self, tower_grad):
        """Average each variable's gradient across all towers.

        tower_grad is a list (one entry per tower) of per-variable gradient
        lists; the result is one gradient list aligned with trainable vars.
        """
        average_grad = []
        for grad_list in zip(*tower_grad):
            expand_grad = []
            for grad in grad_list:
                expand_grad.append(tf.expand_dims(grad, 0))
            average_grad.append(tf.reduce_mean(tf.concat(expand_grad, 0), 0))
        return average_grad
    def train(self):
        """Build the multi-tower training graph and run the training loop.

        Writes TensorBoard summaries to log_path, checkpoints to model_path,
        and evaluates the train/val/test splits after every epoch.
        """
        # train/val dataset
        train_caps, train_lengths, train_ids = self.data.captions['train'], self.data.lengths['train'], \
                                               self.data.video_ids['train']
        n_examples = len(train_caps)
        n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))
        tags = ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'CIDEr', 'ROUGE_L']
        # build graphs for training model and sampling captions
        with tf.Graph().as_default():
            with tf.device('/cpu:0'):
                with tf.variable_scope(tf.get_variable_scope()) as vscope:
                    tower_loss = []
                    tower_grad = []
                    tower_generated_cap = []
                    # create multi gpu train_op, loss_op and generated_captions_op
                    # create placeholder
                    self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
                    self.captions = tf.placeholder(tf.int32, [None, self.max_words + 2])
                    # create train_op, loss_op and generated_captions_op
                    for i in range(self.num_gpus):
                        # on each gpu
                        with tf.device('/gpu:%d' % i):
                            with tf.name_scope('tower_%d' % i) as scope:
                                # create batch input for each gpu
                                # NOTE(review): this slicing relies on Python 2 integer
                                # division (batch_size / num_gpus); under Python 3 the
                                # float result would make the slice indices invalid.
                                _feat_batch = self.features[
                                    self.batch_size / self.num_gpus * i:
                                    self.batch_size / self.num_gpus * (i + 1), :, :]
                                _cap_batch = self.captions[
                                    self.batch_size / self.num_gpus * i:
                                    self.batch_size / self.num_gpus * (i + 1), :]
                                # compute loss
                                one_loss = self.model.build_model(_feat_batch, _cap_batch)
                                tower_loss.append(one_loss)
                                # reuse variables
                                tf.get_variable_scope().reuse_variables()
                                alphas, betas, generated_cap = self.model.build_sampler(_feat_batch,
                                                                                        max_len=self.max_words)
                                tf.get_variable_scope().reuse_variables()
                                tower_generated_cap.append(generated_cap)
                                # compute grad
                                var_list = tf.trainable_variables()
                                grad = tf.gradients(one_loss, var_list)
                                tower_grad.append(grad)
                    # multi gpu loss operation: average loss
                    loss_op = self.average_loss(tower_loss)
                    # caption operation
                    generated_caption_op = tf.concat(tower_generated_cap, 0)
                    # average grad
                    average_grad = self.average_gradients(tower_grad)
                    # initialize optimizer
                    # global_step is incremented once per EPOCH (see end of loop),
                    # so boundaries=[10] drops the learning rate 10x after epoch 10.
                    global_step = tf.Variable(0, trainable=False)
                    increase_global_step_op = tf.assign(global_step, global_step + 1)
                    boundaries = [10]
                    values = [self.learning_rate, 0.1 * self.learning_rate]
                    piecewise_learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
                    learning_rate = piecewise_learning_rate
                    # NOTE(review): beta1=0.1 / beta2=0.001 are far from Adam's usual
                    # 0.9 / 0.999 defaults -- confirm this is intentional.
                    optimizer = self.optimizer(learning_rate=learning_rate, beta1=0.1, beta2=0.001)
                    # train operation: apply gradients
                    train_op = optimizer.apply_gradients(zip(average_grad, tf.trainable_variables()))
                    # summary op
                    tf.summary.scalar('learning_rate', learning_rate)
                    tf.summary.scalar('batch_loss', loss_op)
                    for var in tf.trainable_variables():
                        tf.summary.histogram(var.op.name, var)
                    for grad, var in zip(average_grad, tf.trainable_variables()):
                        tf.summary.histogram(var.op.name + '/gradient', grad)
                    summary_op = tf.summary.merge_all()
                    # create session
                    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
                    summary_writer = tf.summary.FileWriter(self.log_path, sess.graph)
                    saver = tf.train.Saver(tf.global_variables())
                    # initialized variables
                    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
                    for epoch in range(self.n_epochs):
                        # shuffle train data
                        rand_idxs = np.random.permutation(n_examples)
                        train_caps = train_caps[rand_idxs]
                        train_ids = train_ids[rand_idxs]
                        train_lengths = train_lengths[rand_idxs]
                        for it in range(n_iters_per_epoch):
                            captions_batch = train_caps[it * self.batch_size:(it + 1) * self.batch_size]
                            image_idxs_batch = train_ids[it * self.batch_size:(it + 1) * self.batch_size]
                            # pad a short final batch up to batch_size by wrapping
                            # around to the start of the (shuffled) training set
                            if len(captions_batch) < self.batch_size:
                                l = len(captions_batch)
                                captions_batch = np.concatenate((captions_batch, train_caps[:self.batch_size - l]), axis=0)
                                image_idxs_batch = np.concatenate((image_idxs_batch, train_ids[:self.batch_size - l]),
                                                                  axis=0)
                            features_batch = [self.data.feature(vid) for vid in image_idxs_batch]
                            feed_dict = {self.features: features_batch, self.captions: captions_batch}
                            _, loss, summary_str = sess.run((train_op, loss_op, summary_op), feed_dict=feed_dict)
                            # print epoch, it, loss
                            summary_writer.add_summary(summary_str, epoch * n_iters_per_epoch + it)
                            if (it + 1) % self.print_every == 0:
                                print "\nTrain loss at epoch %d & iteration %d (mini-batch): %.5f" % (
                                    epoch + 1, it + 1, loss)
                                ground_truths = train_caps[train_ids == image_idxs_batch[0]]
                                decoded = decode_captions(ground_truths[:, 1:], self.data.vocab.idx2word)
                                for j, gt in enumerate(decoded):
                                    print "Ground truth %d: %s" % (j + 1, gt.encode('utf-8'))
                                gen_caps = sess.run(generated_caption_op, feed_dict)
                                decoded = decode_captions(gen_caps, self.data.vocab.idx2word)
                                print "Generated caption: %s\n" % decoded[0]
                        # end-of-epoch evaluation on all three splits; the val-split
                        # scores are also persisted via write_bleu
                        self.evaluate_on_split(sess=sess, generated_captions=generated_caption_op,
                                               summary_writer=summary_writer,
                                               epoch=epoch, tags=tags, split='train')
                        scores = self.evaluate_on_split(sess=sess, generated_captions=generated_caption_op,
                                                        summary_writer=summary_writer,
                                                        epoch=epoch, tags=tags, split='val')
                        write_bleu(scores=scores, path=self.model_path, epoch=epoch)
                        self.evaluate_on_split(sess=sess, generated_captions=generated_caption_op,
                                               summary_writer=summary_writer,
                                               epoch=epoch, tags=tags, split='test')
                        # save model
                        saver.save(sess, os.path.join(self.model_path, 'model'), global_step=epoch + 1)
                        print "model-%s saved." % (epoch + 1)
                        # increase global step, which is used to decay learning rate
                        sess.run(increase_global_step_op)
    def evaluate_on_split(self, sess, generated_captions, summary_writer, epoch, tags, split='train'):
        """Generate captions for every unique video in *split* and score them.

        Writes one TensorBoard scalar per metric (tagged `<split><tag>`),
        dumps candidate/reference dicts under result/, and returns the
        score dict produced by `evaluate`.
        """
        caps = self.data.captions[split]
        ids = self.data.video_ids[split]
        unique_ids = list(set(ids))
        num_iter = int(ceil(len(unique_ids) / float(self.batch_size)))
        # pad unique_ids by repetition so they divide evenly into full batches
        while len(unique_ids) < num_iter * self.batch_size:
            unique_ids += unique_ids
        unique_ids = unique_ids[:num_iter * self.batch_size]
        # NOTE(review): np.int is removed in NumPy >= 1.24; int or np.int64
        # would be needed if this code is ever modernised.
        all_gen_cap = np.ndarray((len(unique_ids), self.max_words), dtype=np.int)
        for i in range(num_iter):
            features_batch = [self.data.feature(vid) for vid in
                              unique_ids[i * self.batch_size:(i + 1) * self.batch_size]]
            # if len(features_batch) < self.batch_size:
            #     l = len(features_batch)
            #     features_batch += [self.data.feature(vid) for vid in unique_ids[:self.batch_size - l]]
            features_batch = np.asarray(features_batch)
            feed_dict = {self.features: features_batch}
            gen_cap = sess.run(generated_captions, feed_dict=feed_dict)
            all_gen_cap[i * self.batch_size:(i + 1) * self.batch_size] = gen_cap
        all_decoded = decode_captions(all_gen_cap, self.data.vocab.idx2word)
        # create cand dict
        cand = {}
        for vid, sentence in zip(unique_ids, all_decoded):
            cand[vid] = [sentence]
        # create ref dict
        ref = {}
        for vid in unique_ids:
            ref[vid] = decode_captions(caps[ids == vid][:, 1:], self.data.vocab.idx2word)
        with open('result/cand_%s_%d.txt' % (split, epoch), 'w') as file:
            file.write(str(cand))
        with open('result/ref_%s_%d.txt' % (split, epoch), 'w') as file:
            file.write(str(ref))
        # evaluate
        scores = evaluate(ref=ref, cand=cand, get_scores=True)
        for tag in tags:
            summary = tf.Summary()
            summary.value.add(tag=split + tag, simple_value=scores[tag])
            summary_writer.add_summary(summary, epoch)
        return scores
    def test(self, split='train', save_sampled_captions=True):
        '''
        Args:
        - data: dictionary with the following keys:
        - features: Feature vectors of shape (5000, 196, 512)
        - file_names: Image file names of shape (5000, )
        - captions: Captions of shape (24210, 17)
        - image_idxs: Indices for mapping caption to image of shape (24210, )
        - features_to_captions: Mapping feature to captions (5000, 4~5)
        - split: 'train', 'val' or 'test'
        - attention_visualization: If True, visualize attention weights with images for each sampled word. (ipthon notebook)
        - save_sampled_captions: If True, save sampled captions to pkl file for computing BLEU scores.
        '''
        # NOTE(review): save_sampled_captions is never read in this method --
        # verify whether the pkl-saving behaviour described above was dropped.
        caps = self.data.captions[split]
        ids = self.data.video_ids[split]
        unique_ids = list(set(ids))
        n_examples = len(unique_ids)
        n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))
        # build a graph to sample captions
        alphas, betas, sampled_captions = self.model.build_sampler(
            max_len=self.max_words)  # (N, max_len, L), (N, max_len)
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        all_decoded = []
        with tf.Session(config=config) as sess:
            saver = tf.train.Saver()
            saver.restore(sess, self.test_model)
            for i in range(n_iters_per_epoch):
                ids_batch = unique_ids[i * self.batch_size: (i + 1) * self.batch_size]
                features_batch = [self.data.feature(vid) for vid in ids_batch]
                features_batch = np.asarray(features_batch)
                feed_dict = {self.model.features: features_batch}
                alps, bts, sam_cap = sess.run([alphas, betas, sampled_captions],
                                              feed_dict)  # (N, max_len, L), (N, max_len)
                decoded = decode_captions(sam_cap, self.data.vocab.idx2word)
                all_decoded.extend(decoded)
        # generate ref and cand
        ref = {}
        cand = {}
        for vid, dec in zip(unique_ids, all_decoded):
            gts = decode_captions(caps[ids == vid][:, 1:], self.data.vocab.idx2word)
            ref[vid] = gts
            cand[vid] = [dec]
        # print ground truths and generated sentences
        for vid in unique_ids:
            print '---' * 10
            for i, gt in enumerate(ref[vid]):
                print i + 1, ':', gt
            print 'generated :', cand[vid][0]
        scores = evaluate(ref, cand, get_scores=True)
        tags = ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'CIDEr', 'ROUGE_L']
        for tag in tags:
            print tag, ':', scores[tag]
        print split, len(unique_ids), len(all_decoded)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
__config__ = pulumi.Config('ecl')
auth_url = __config__.get('authUrl') or utilities.get_env('OS_AUTH_URL')
"""
The Identity authentication URL.
"""
cacert_file = __config__.get('cacertFile') or utilities.get_env('OS_CACERT')
"""
A Custom CA certificate.
"""
cert = __config__.get('cert') or utilities.get_env('OS_CERT')
"""
A client certificate to authenticate with.
"""
cloud = __config__.get('cloud') or utilities.get_env('OS_CLOUD')
"""
An entry in a `clouds.yaml` file to use.
"""
default_domain = __config__.get('defaultDomain') or (utilities.get_env('OS_DEFAULT_DOMAIN') or 'default')
"""
The name of the Domain ID to scope to if no other domain is specified. Defaults to `default` (Identity v3).
"""
domain_id = __config__.get('domainId') or utilities.get_env('OS_DOMAIN_ID')
"""
The ID of the Domain to scope to (Identity v3).
"""
domain_name = __config__.get('domainName') or utilities.get_env('OS_DOMAIN_NAME')
"""
The name of the Domain to scope to (Identity v3).
"""
endpoint_type = __config__.get('endpointType') or utilities.get_env('OS_ENDPOINT_TYPE')
force_sss_endpoint = __config__.get('forceSssEndpoint')
"""
The SSS Endpoint URL to send API.
"""
insecure = __config__.get('insecure') or utilities.get_env_bool('OS_INSECURE')
"""
Trust self-signed certificates.
"""
key = __config__.get('key') or utilities.get_env('OS_KEY')
"""
A client private key to authenticate with.
"""
password = __config__.get('password') or utilities.get_env('OS_PASSWORD')
"""
Password to login with.
"""
project_domain_id = __config__.get('projectDomainId') or utilities.get_env('OS_PROJECT_DOMAIN_ID')
"""
The ID of the domain where the proejct resides (Identity v3).
"""
project_domain_name = __config__.get('projectDomainName') or utilities.get_env('OS_PROJECT_DOMAIN_NAME')
"""
The name of the domain where the project resides (Identity v3).
"""
region = __config__.get('region') or utilities.get_env('OS_REGION_NAME')
"""
The Enterprise Cloud region to connect to.
"""
tenant_id = __config__.get('tenantId') or utilities.get_env('OS_TENANT_ID', 'OS_PROJECT_ID')
"""
The ID of the Tenant (Identity v2) or Project (Identity v3) to login with.
"""
tenant_name = __config__.get('tenantName') or utilities.get_env('OS_TENANT_NAME', 'OS_PROJECT_NAME')
"""
The name of the Tenant (Identity v2) or Project (Identity v3) to login with.
"""
token = __config__.get('token') or utilities.get_env('OS_TOKEN', 'OS_AUTH_TOKEN')
"""
Authentication token to use as an alternative to username/password.
"""
user_domain_id = __config__.get('userDomainId') or utilities.get_env('OS_USER_DOMAIN_ID')
"""
The ID of the domain where the user resides (Identity v3).
"""
user_domain_name = __config__.get('userDomainName') or utilities.get_env('OS_USER_DOMAIN_NAME')
"""
The name of the domain where the user resides (Identity v3).
"""
user_id = __config__.get('userId') or utilities.get_env('OS_USER_ID')
"""
Username to login with.
"""
user_name = __config__.get('userName') or utilities.get_env('OS_USERNAME')
"""
Username to login with.
"""
|
import datetime
DATE_FORMAT = '%Y-%m-%d'  # ISO-style date, e.g. 2024-01-31 (NOTE: date_format's default duplicates this literal)
DATE_FOLDER_FORMAT = '%Y%m%d'  # compact date used for folder names, e.g. 20240131
def date_format(date, f='%Y-%m-%d'):
    """Render *date* using the strftime pattern *f* (defaults to ISO year-month-day)."""
    # format() delegates to date.__format__, which applies strftime for a
    # non-empty pattern -- identical result to date.strftime(f).
    return format(date, f)
def date_folder_format(date):
    """Render *date* as a compact folder-name string per DATE_FOLDER_FORMAT."""
    # format() delegates to date.__format__ (strftime) -- same result as
    # date.strftime(DATE_FOLDER_FORMAT).
    return format(date, DATE_FOLDER_FORMAT)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from irc3.plugins.command import command
from irc3.compat import asyncio
from irc3.compat import Queue
from datetime import datetime
import irc3
import requests
import objectpath
"""
Helsinki area public transport route planning IRC bot.
"""
class HSL:
    """
    Helsinki area public transport route planning client.

    Wraps the Digitransit HSL GraphQL routing endpoint and the Digitransit
    geocoding endpoint. Raises HSL.BadAddress for addresses that are empty
    or cannot be geocoded inside the HSL service area.
    """
    def __init__(self):
        self.route_endpoint = "https://api.digitransit.fi/routing/v1/routers/hsl/index/graphql"
        self.map_endpoint = "https://api.digitransit.fi/geocoding/v1/search"
        # Service-area boundary polygon as [lon, lat] pairs (first == last), from:
        # https://github.com/HSLdevcom/digitransit-ui/blob/master/app/configurations/config.hsl.js#L121
        self.AREA_POLYGON = [
            [25.5345, 60.2592],
            [25.3881, 60.1693],
            [25.3559, 60.103],
            [25.3293, 59.9371],
            [24.2831, 59.78402],
            [24.2721, 59.95501],
            [24.2899, 60.00895],
            [24.3087, 60.01947],
            [24.1994, 60.12753],
            [24.1362, 60.1114],
            [24.1305, 60.12847],
            [24.099, 60.1405],
            [24.0179, 60.1512],
            [24.0049, 60.1901],
            [24.0445, 60.1918],
            [24.0373, 60.2036],
            [24.0796, 60.2298],
            [24.1652, 60.2428],
            [24.3095, 60.2965],
            [24.3455, 60.2488],
            [24.428, 60.3002],
            [24.5015, 60.2872],
            [24.4888, 60.3306],
            [24.5625, 60.3142],
            [24.5957, 60.3242],
            [24.6264, 60.3597],
            [24.666, 60.3638],
            [24.7436, 60.3441],
            [24.9291, 60.4523],
            [24.974, 60.5253],
            [24.9355, 60.5131],
            [24.8971, 60.562],
            [25.0388, 60.5806],
            [25.1508, 60.5167],
            [25.1312, 60.4938],
            [25.0385, 60.512],
            [25.057, 60.4897],
            [25.0612, 60.4485],
            [25.1221, 60.4474],
            [25.1188, 60.4583],
            [25.149, 60.4621],
            [25.1693, 60.5062],
            [25.2242, 60.5016],
            [25.3661, 60.4118],
            [25.3652, 60.3756],
            [25.5345, 60.2592]
        ]
    def _get_coords(self, address):
        """
        Get coordinates for given address string.
        Returns a 2-tuple of floating point lon, lat coordinates.
        Returns None when no coordinates found.
        Raises BadAddress when `address` is empty.
        """
        if len(address) == 0:
            raise self.BadAddress("Argument 'address' is empty")
        # Flatten the polygon into the "lon lat,lon lat,..." form the API expects.
        poly = ",".join(str(x) for x in [" ".join([str(y[0]),str(y[1])]) for y in self.AREA_POLYGON])
        params = {'text': address, 'size': 1, 'boundary.polygon': poly}
        response = requests.get(self.map_endpoint, params=params)
        tree = objectpath.Tree(response.json())
        try:
            return tuple(tree.execute("$.features[0].geometry.coordinates"))
        except TypeError:
            return None
    def get_route(self, from_address, to_address, arrive = False, time = None):
        """
        Plan a route between two addresses; returns the parsed GraphQL response.

        `arrive` and `time` are currently unused by the query (the time clauses
        below are commented out) but kept for interface compatibility.
        Raises BadAddress when either address cannot be geocoded.
        """
        # BUG FIX: the default used to be `time = int(datetime.now().timestamp()
        # * 1000)`, which Python evaluates ONCE at class-definition time,
        # freezing the timestamp for the life of the process. Compute per call.
        if time is None:
            time = int(datetime.now().timestamp() * 1000)
        query_text = \
        """
        {{
        plan(
        fromPlace: "{from_address}",
        """
        query_text += "from: {{lat: {from_lat}, lon: {from_lon}"
        # if not arrive:
        #     query_text += ", departureTime: {time}"
        query_text += "}},"
        query_text += " toPlace: \"{to_address}\","
        query_text += "to: {{lat: {to_lat}, lon: {to_lon}"
        # if arrive:
        #     query_text += ", arrivalTime: {time}"
        query_text += "}},"
        query_text += \
        """
        numItineraries: 1
        ) {{
        itineraries{{
        walkDistance,
        duration,
        legs {{
        mode
        startTime
        endTime
        from {{
        name
        stop {{
        name
        code
        }}
        }},
        to {{
        name
        stop {{
        name
        code
        }}
        }},
        distance
        route {{
        shortName
        }}
        }}
        }}
        }}
        }}
        """
        # BUG FIX: _get_coords returns None when nothing is found, and
        # unpacking None raises TypeError, not ValueError -- catch both so a
        # failed lookup surfaces as BadAddress instead of an unhandled crash.
        try:
            from_lon, from_lat = self._get_coords(from_address)
        except (TypeError, ValueError) as e:
            raise self.BadAddress("No coordinates found for '%s'" % from_address)
        try:
            to_lon, to_lat = self._get_coords(to_address)
        except (TypeError, ValueError) as e:
            raise self.BadAddress("No coordinates found for '%s'" % to_address)
        query_populated = query_text.format(from_address = from_address,
                                            to_address = to_address,
                                            from_lat = from_lat,
                                            from_lon = from_lon,
                                            to_lat = to_lat,
                                            to_lon = to_lon)
        graphql_payload = {"query": query_populated}
        r = requests.post(self.route_endpoint, json=graphql_payload)
        return r.json()
    class BadAddress(Exception):
        """Raised for empty addresses or addresses that cannot be geocoded."""
        pass
@irc3.plugin
class HSLbot:
    """
    Helsinki area public transport route planning IRC bot.
    """
    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): this queue is never read or written in this class --
        # verify whether it is still needed.
        self.queue = Queue()
        self.HSL = HSL()
    def _format_distance(self, meters):
        """
        Input is meters (a float).
        Output is a string representing either meters or kilometers,
        nicely rounded.
        """
        if meters < 1000:
            return str(round(meters)) + " m"
        else:
            return str(round(meters/1000, 1)) + " km"
    def _format_milliseconds(self, ms):
        """
        Print milliseconds as hours, minutes and seconds
        """
        seconds = round(ms/1000)
        hours, seconds = seconds // 3600, seconds % 3600
        minutes, seconds = seconds // 60, seconds % 60
        timestr = "{} s".format(seconds)
        if minutes > 0:
            timestr = "{} min ".format(minutes) + timestr
        if hours > 0:
            timestr = "{} h ".format(hours) + timestr
        return timestr
    def _format_seconds(self, s):
        # Same as _format_milliseconds but the input is in seconds.
        return self._format_milliseconds(s * 1000)
    def _ms_to_time(self, ms):
        """
        Print millisecond precision UNIX timestamp as hours:minutes.
        """
        dt = datetime.fromtimestamp(ms // 1000)
        return dt.strftime("%H:%M")
    @command
    def route(self, mask, target, args):
        """Find a public transportation route. Place addresses in quotation marks if they contain spaces.
        %%route <from_address> to <to_address>
        """
        # TODO: support for departure and arrival time
        try:
            r = self.HSL.get_route(args['<from_address>'], args['<to_address>'])
        except self.HSL.BadAddress as e:
            self.bot.notice(mask.nick, str(e))
            return
        itineraries = objectpath.Tree(r).execute("$.data.plan.itineraries")
        if len(itineraries) == 0:
            self.bot.notice(mask.nick, "No routes found :(")
            return
        # NOTE(review): totaldistance is not reset between itineraries; this is
        # harmless while the query requests numItineraries: 1, but verify if
        # that is ever raised.
        totaldistance = 0
        for i, itin in enumerate(itineraries):
            for leg in itin["legs"]:
                # Prefer "Stop name (code)" when the leg endpoint is a transit stop.
                if "stop" in leg["from"] and leg["from"]["stop"] is not None:
                    from_text = "{} ({})".format(leg["from"]["stop"]["name"],
                                                 leg["from"]["stop"]["code"])
                else:
                    from_text = leg["from"]["name"]
                if "stop" in leg["to"] and leg["to"]["stop"] is not None:
                    to_text = "{} ({})".format(leg["to"]["stop"]["name"],
                                               leg["to"]["stop"]["code"])
                else:
                    to_text = leg["to"]["name"]
                if "route" in leg and leg["route"] is not None:
                    modename = " " + leg["route"]["shortName"]
                else:
                    modename = ""
                self.bot.notice(mask.nick, "Route #{num} :: {mode}{modename} at {start} from {from_} to {to} :: arrive at {end} :: distance {distance}" \
                    .format(start = self._ms_to_time(leg["startTime"]),
                            end = self._ms_to_time(leg["endTime"]),
                            mode = leg["mode"],
                            modename = modename,
                            from_ = from_text,
                            to = to_text,
                            distance = self._format_distance(leg["distance"]),
                            num = i+1))
                totaldistance += leg["distance"]
            self.bot.notice(mask.nick, "Route #{num} :: Total distance {dist}, total duration {dur}" \
                .format(num = i+1,
                        dist = self._format_distance(totaldistance),
                        dur = self._format_seconds(itin["duration"])))
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AppsV1alpha1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Use the given ApiClient, or fall back to (and lazily create) the shared default client."""
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def create_apps_v1alpha1_namespaced_stateful_set(self, namespace, body, **kwargs):
        """
        create a StatefulSet
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.create_apps_v1alpha1_namespaced_stateful_set(namespace, body, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1alpha1StatefulSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1alpha1StatefulSet
        If the method is called asynchronously,
        returns the request thread.
        """
        # Always unwrap the (data, status, headers) tuple down to just the data.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.create_apps_v1alpha1_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
        else:
            (data) = self.create_apps_v1alpha1_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
            return data
    def create_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs):
        """
        create a StatefulSet
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.create_apps_v1alpha1_namespaced_stateful_set_with_http_info(namespace, body, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1alpha1StatefulSet body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1alpha1StatefulSet
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['namespace', 'body', 'pretty']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # reject any keyword argument this endpoint does not recognise
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_apps_v1alpha1_namespaced_stateful_set" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_apps_v1alpha1_namespaced_stateful_set`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_apps_v1alpha1_namespaced_stateful_set`")
        collection_formats = {}
        resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets'.replace('{format}', 'json')
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = ['BearerToken']
        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1alpha1StatefulSet',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
    def delete_apps_v1alpha1_collection_namespaced_stateful_set(self, namespace, **kwargs):
        """
        delete collection of StatefulSet
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.delete_apps_v1alpha1_collection_namespaced_stateful_set(namespace, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
        :param int timeout_seconds: Timeout for the list/watch call.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :return: UnversionedStatus
        If the method is called asynchronously,
        returns the request thread.
        """
        # Always unwrap the (data, status, headers) tuple down to just the data.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.delete_apps_v1alpha1_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
        else:
            (data) = self.delete_apps_v1alpha1_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
            return data
def delete_apps_v1alpha1_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
    """
    delete collection of StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.delete_apps_v1alpha1_collection_namespaced_stateful_set_with_http_info(namespace, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: UnversionedStatus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint (snake_case forms), plus
    # the two client-internal extras recognised by the generated code.
    all_params = ['namespace', 'pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(): `params` starts as {self, namespace, kwargs,
    # all_params}; validated kwargs are merged in below. Later reads such as
    # params.get('callback') depend on this exact construction order.
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_apps_v1alpha1_collection_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_apps_v1alpha1_collection_namespaced_stateful_set`")
    collection_formats = {}
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets'.replace('{format}', 'json')
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map the snake_case kwargs onto the camelCase query parameters the
    # Kubernetes API expects; absent kwargs are simply omitted.
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='UnversionedStatus',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def delete_apps_v1alpha1_namespaced_stateful_set(self, name, namespace, body, **kwargs):
    """
    delete a StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.delete_apps_v1alpha1_namespaced_stateful_set(name, namespace, body, callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1DeleteOptions body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: UnversionedStatus (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.delete_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
def delete_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
    """
    delete a StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.delete_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1DeleteOptions body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: UnversionedStatus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments plus the client-internal extras.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback') — order matters here.
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `delete_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    # DELETE carries the V1DeleteOptions payload in the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='UnversionedStatus',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def get_apps_v1alpha1_api_resources(self, **kwargs):
    """
    get available resources
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_apps_v1alpha1_api_resources(callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :return: UnversionedAPIResourceList (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.get_apps_v1alpha1_api_resources_with_http_info(**kwargs)
def get_apps_v1alpha1_api_resources_with_http_info(self, **kwargs):
    """
    get available resources
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.get_apps_v1alpha1_api_resources_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: UnversionedAPIResourceList
             If the method is called asynchronously,
             returns the request thread.
    """
    # This endpoint takes no API parameters — only the client-internal extras.
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback').
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_apps_v1alpha1_api_resources" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    # Discovery endpoint for the apps/v1alpha1 API group.
    resource_path = '/apis/apps/v1alpha1/'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='UnversionedAPIResourceList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def list_apps_v1alpha1_namespaced_stateful_set(self, namespace, **kwargs):
    """
    list or watch objects of kind StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.list_apps_v1alpha1_namespaced_stateful_set(namespace, callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1alpha1StatefulSetList (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.list_apps_v1alpha1_namespaced_stateful_set_with_http_info(namespace, **kwargs)
def list_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
    """
    list or watch objects of kind StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.list_apps_v1alpha1_namespaced_stateful_set_with_http_info(namespace, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1alpha1StatefulSetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments plus the client-internal extras.
    all_params = ['namespace', 'pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback') — order matters here.
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets'.replace('{format}', 'json')
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map the snake_case kwargs onto the camelCase query parameters the
    # Kubernetes API expects; absent kwargs are simply omitted.
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` — includes the streaming media types used by watch.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSetList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def list_apps_v1alpha1_stateful_set_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.list_apps_v1alpha1_stateful_set_for_all_namespaces(callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1alpha1StatefulSetList (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.list_apps_v1alpha1_stateful_set_for_all_namespaces_with_http_info(**kwargs)
def list_apps_v1alpha1_stateful_set_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.list_apps_v1alpha1_stateful_set_for_all_namespaces_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1alpha1StatefulSetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments plus the client-internal extras.
    all_params = ['field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback').
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_apps_v1alpha1_stateful_set_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    # Cluster-wide listing: no {namespace} segment in the path.
    resource_path = '/apis/apps/v1alpha1/statefulsets'.replace('{format}', 'json')
    path_params = {}
    # Map the snake_case kwargs onto the camelCase query parameters the
    # Kubernetes API expects; absent kwargs are simply omitted.
    query_params = {}
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` — includes the streaming media types used by watch.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSetList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def patch_apps_v1alpha1_namespaced_stateful_set(self, name, namespace, body, **kwargs):
    """
    partially update the specified StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.patch_apps_v1alpha1_namespaced_stateful_set(name, namespace, body, callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UnversionedPatch body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.patch_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
def patch_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
    """
    partially update the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.patch_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UnversionedPatch body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments plus the client-internal extras.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback') — order matters here.
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `patch_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `patch_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `patch_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    # PATCH carries the patch document in the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type` — the three patch strategies Kubernetes accepts.
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP PATCH to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def patch_apps_v1alpha1_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
    """
    partially update status of the specified StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.patch_apps_v1alpha1_namespaced_stateful_set_status(name, namespace, body, callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UnversionedPatch body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.patch_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
def patch_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
    """
    partially update status of the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.patch_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UnversionedPatch body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments plus the client-internal extras.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    # Snapshot of locals(); validated kwargs are merged in below and read
    # back later via params.get('callback') — order matters here.
    params = locals()
    # `iteritems` is presumably six.iteritems, imported above — TODO confirm.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_apps_v1alpha1_namespaced_stateful_set_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `patch_apps_v1alpha1_namespaced_stateful_set_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `patch_apps_v1alpha1_namespaced_stateful_set_status`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `patch_apps_v1alpha1_namespaced_stateful_set_status`")
    collection_formats = {}
    # Status subresource path — patches only the object's /status.
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}/status'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    # PATCH carries the patch document in the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type` — the three patch strategies Kubernetes accepts.
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP PATCH to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def read_apps_v1alpha1_namespaced_stateful_set(self, name, namespace, **kwargs):
    """
    read the specified StatefulSet
    Synchronous by default; supply a `callback` keyword argument to make the
    request asynchronously, in which case the request thread is returned and
    the callback is invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.read_apps_v1alpha1_namespaced_stateful_set(name, namespace, callback=callback_function)
    :param callback function: The callback function for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1alpha1StatefulSet (or the request thread when called asynchronously)
    """
    # Single delegation covers both modes: with a callback the helper
    # returns the request thread, otherwise it returns the response data.
    kwargs['_return_http_data_only'] = True
    return self.read_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
def read_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
    """
    read the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.read_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1alpha1StatefulSet
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    # Swagger-generated path template; the legacy '{format}' placeholder is fixed to json.
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'exact' in params:
        query_params['exact'] = params['exact']
    if 'export' in params:
        query_params['export'] = params['export']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def read_apps_v1alpha1_namespaced_stateful_set_status(self, name, namespace, **kwargs):
    """
    read status of the specified StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.read_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs)
def read_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs):
    """
    read status of the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.read_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['name', 'namespace', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_apps_v1alpha1_namespaced_stateful_set_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `read_apps_v1alpha1_namespaced_stateful_set_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `read_apps_v1alpha1_namespaced_stateful_set_status`")
    collection_formats = {}
    # Swagger-generated path template for the /status subresource.
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}/status'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def replace_apps_v1alpha1_namespaced_stateful_set(self, name, namespace, body, **kwargs):
    """
    replace the specified StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1StatefulSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.replace_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
def replace_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.replace_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1StatefulSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    # Swagger-generated path template; the legacy '{format}' placeholder is fixed to json.
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # The replacement object is sent as the request body.
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # PUT replaces the whole object, unlike the PATCH variants elsewhere in this class.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def replace_apps_v1alpha1_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
    """
    replace status of the specified StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1StatefulSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.replace_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
def replace_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
    """
    replace status of the specified StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.replace_apps_v1alpha1_namespaced_stateful_set_status_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1alpha1StatefulSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1alpha1StatefulSet
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_apps_v1alpha1_namespaced_stateful_set_status" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `replace_apps_v1alpha1_namespaced_stateful_set_status`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `replace_apps_v1alpha1_namespaced_stateful_set_status`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `replace_apps_v1alpha1_namespaced_stateful_set_status`")
    collection_formats = {}
    # Swagger-generated path template for the /status subresource.
    resource_path = '/apis/apps/v1alpha1/namespaces/{namespace}/statefulsets/{name}/status'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # The replacement status object is sent as the request body.
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1alpha1StatefulSet',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def watch_apps_v1alpha1_namespaced_stateful_set(self, name, namespace, **kwargs):
    """
    watch changes to an object of kind StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.watch_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
def watch_apps_v1alpha1_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
    """
    watch changes to an object of kind StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.watch_apps_v1alpha1_namespaced_stateful_set_with_http_info(name, namespace, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the StatefulSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['name', 'namespace', 'field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method watch_apps_v1alpha1_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `watch_apps_v1alpha1_namespaced_stateful_set`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `watch_apps_v1alpha1_namespaced_stateful_set`")
    collection_formats = {}
    # Watch endpoint: note the '/watch/' path prefix.
    resource_path = '/apis/apps/v1alpha1/watch/namespaces/{namespace}/statefulsets/{name}'.replace('{format}', 'json')
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    # Map snake_case Python parameters to the API's camelCase query keys.
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` — includes streaming media types for watch responses.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedEvent',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def watch_apps_v1alpha1_namespaced_stateful_set_list(self, namespace, **kwargs):
    """
    watch individual changes to a list of StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.watch_apps_v1alpha1_namespaced_stateful_set_list_with_http_info(namespace, **kwargs)
def watch_apps_v1alpha1_namespaced_stateful_set_list_with_http_info(self, namespace, **kwargs):
    """
    watch individual changes to a list of StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.watch_apps_v1alpha1_namespaced_stateful_set_list_with_http_info(namespace, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['namespace', 'field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method watch_apps_v1alpha1_namespaced_stateful_set_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `watch_apps_v1alpha1_namespaced_stateful_set_list`")
    collection_formats = {}
    # Watch endpoint over the whole StatefulSet collection in one namespace.
    resource_path = '/apis/apps/v1alpha1/watch/namespaces/{namespace}/statefulsets'.replace('{format}', 'json')
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    # Map snake_case Python parameters to the API's camelCase query keys.
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` — includes streaming media types for watch responses.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedEvent',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def watch_apps_v1alpha1_stateful_set_list_for_all_namespaces(self, **kwargs):
    """
    watch individual changes to a list of StatefulSet

    Synchronous by default; pass a `callback` keyword argument to make the
    request asynchronously — the request thread is returned and the callback
    receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
        If the method is called asynchronously, returns the request thread.
    """
    # Both the sync and async paths forward to the *_with_http_info variant
    # and return its result unchanged, so a single call covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.watch_apps_v1alpha1_stateful_set_list_for_all_namespaces_with_http_info(**kwargs)
def watch_apps_v1alpha1_stateful_set_list_for_all_namespaces_with_http_info(self, **kwargs):
    """
    watch individual changes to a list of StatefulSet
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.watch_apps_v1alpha1_stateful_set_list_for_all_namespaces_with_http_info(callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
    If the method is called asynchronously,
    returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else raises TypeError.
    all_params = ['field_selector', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    params = locals()
    # Merge **kwargs into `params`, rejecting unknown keys early.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method watch_apps_v1alpha1_stateful_set_list_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    # Cluster-wide watch: no namespace segment, so no required parameters.
    resource_path = '/apis/apps/v1alpha1/watch/statefulsets'.replace('{format}', 'json')
    path_params = {}
    query_params = {}
    # Map snake_case Python parameters to the API's camelCase query keys.
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept` — includes streaming media types for watch responses.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # Drop the Accept header entirely when no acceptable type was negotiated.
    if not header_params['Accept']:
        del header_params['Accept']
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the HTTP round trip (and async callback handling) to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedEvent',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
|
#!/usr/bin/python
"""Print a single build argument ('cmakeFlags' or 'branch') from BuildSpec.json.

Exits non-zero (and prints to stderr) on wrong usage or when the requested
argument cannot be read from BuildSpec.json.
"""
import sys
import json

if len(sys.argv) != 2:
    # BUG FIX: the original used Python-2-only `print >> sys.stderr`, which is a
    # SyntaxError under Python 3; sys.stderr.write works on both.
    sys.stderr.write(" Usage: python ExtractBuildArgs.py <ArgName>\n")
    sys.exit(-1)
try:
    # `with` guarantees the spec file is closed (the original leaked the handle).
    with open('BuildSpec.json') as spec_file:
        data = json.load(spec_file)
    if sys.argv[1] == "cmakeFlags" and data["cmakeFlags"] != "":
        print(data["cmakeFlags"])
    elif sys.argv[1] == "branch" and data["branch"] != "":
        print(data["branch"])
except (EnvironmentError, ValueError, KeyError):
    # Narrowed from a bare `except`: missing file, malformed JSON, or a
    # missing key — anything else (e.g. KeyboardInterrupt) propagates.
    sys.stderr.write("No related args found in BuildSpec.json\n")
    sys.exit(-1)
|
from typing import Tuple
import numpy as np
from numpy.random import Generator
from delayed_bandit.policies.policy import Policy
class UniformRandom(Policy):
    """Bandit policy that selects arms uniformly at random."""

    def __init__(self, num_arms: int, rng: Generator):
        """
        Create a policy choosing an arm uniformly at random.
        """
        self._num_arms = num_arms
        self._rng = rng
        self._current_arm = -1
        # Per-arm running totals: summed rewards and number of pulls.
        self.cumulative_rewards = np.zeros(num_arms, dtype=np.float32)
        self.arms_stats = np.zeros(num_arms, dtype=np.int32)

    def choice(self, t: int) -> int:
        """Draw the next arm uniformly at random and remember it."""
        arm = self._rng.choice(self._num_arms)
        self._current_arm = arm
        return arm

    def feed_reward(self, t: int, arm: int, reward: float):
        """Record the reward for the arm chosen by the last call to choice()."""
        if arm != self._current_arm:
            raise ValueError(f"Expected the reward for arm {self._current_arm}, but got for {arm}")
        self.cumulative_rewards[arm] += reward
        self.arms_stats[arm] += 1
        return

    def empirically_best_arm(self) -> Tuple[int, float]:
        """Return (arm, mean reward) for the arm with the best empirical mean."""
        if np.count_nonzero(self.cumulative_rewards) == 0:
            # Nothing observed yet: fall back to a random arm with a zero estimate.
            return self._rng.choice(self._num_arms), 0.0
        pulled = np.where(self.arms_stats != 0)
        best = np.argmax(self.cumulative_rewards[pulled] / self.arms_stats[pulled])
        arm = pulled[0][best]
        return arm, self.cumulative_rewards[arm] / self.arms_stats[arm]

    def name(self) -> str:
        return "Uniform Random"
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE(review): everything from here down to the matching closing quotes is a
# triple-quoted string literal — the whole test module below is disabled, not
# executed. Confirm whether it should be re-enabled or removed.
'''
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
from functools import partial
import random
import numpy as np
class TestCollectFpnProposalsOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
# it doesn't support std::vector<tensor>
return False
def sample_program_configs(self, draw):
rois_shape = draw(
st.sampled_from([[30, 4], [80, 4], [70, 4], [66, 4]]))
scores_shape = draw(st.sampled_from([[30, 1], [65, 1], [70, 1]]))
post_nms_topN = draw(st.integers(min_value=1, max_value=10))
lod_data = [[1, 1, 1, 1]]
def generate_rois(*args, **kwargs):
return np.random.random(rois_shape).astype(np.float32)
def generate_scores(*args, **kwargs):
return np.random.random(scores_shape).astype(np.float32)
def generate_rois_num(*args, **kwargs):
return np.random.random(rois_shape).astype(np.int32)
collect_fpn_proposals_op = OpConfig(
type="collect_fpn_proposals",
inputs={
"MultiLevelRois": ["multi_level_rois_data"],
"MultiLevelScores": ["multi_level_scores_data"],
"MultiLevelRoIsNum": ["multi_level_rois_num_data"]
},
outputs={
"FpnRois": ["fpn_rois_data"],
"RoisNum": ["rois_num_data"]
},
attrs={"post_nms_topN": post_nms_topN})
program_config = ProgramConfig(
ops=[collect_fpn_proposals_op],
weights={},
inputs={
"multi_level_rois_data":
TensorConfig(data_gen=partial(generate_rois)),
"multi_level_scores_data":
TensorConfig(data_gen=partial(generate_scores)),
"multi_level_rois_num_data":
TensorConfig(data_gen=partial(generate_rois_num))
},
outputs=["fpn_rois_data", "rois_num_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["collect_fpn_proposals"], (1e-5,
1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
'''
|
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
from ..feature_importance_test import TestPermutationImportance as _TestPermutationImportance
from . import if_cuml_ready, is_cuml_installed
if is_cuml_installed:
import cudf
@if_cuml_ready
class TestCumlPermutationImportance(_TestPermutationImportance):
    """Permutation-importance test suite run against cuDF-backed data."""

    @staticmethod
    def load_data():
        # Reuse the pandas fixture from the base suite, then move it onto the GPU.
        pandas_frame = _TestPermutationImportance.load_data()
        return cudf.from_pandas(pandas_frame)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from __future__ import division, print_function
'''
Image features detection.
All angles shalt be described in degrees with zero pointing east in the
plane of the image with all positive rotations going counter-clockwise.
Therefore a rotation from the x-axis to the y-axis is positive and follows
the right hand rule.
'''
from PhloxAR.core.image import *
from PhloxAR.core.color import *
from PhloxAR.features.feature import Feature, FeatureSet
from ..base import lazy_property
import numpy as np
__all__ = [
'Corner', 'Line', 'Barcode', 'HaarFeature', 'Chessboard', 'TemplateMatch',
'Circle', 'KeyPoint', 'Motion', 'KeyPointMatch', 'ShapeContextDescriptor',
'ROI'
]
class Corner(Feature):
    """
    A Corner feature is a point returned by the find_corners function.
    Corners are used in machine vision as a very computationally efficient
    way to find unique features in an image. These corners can be used in
    conjunction with many other algorithms.
    """

    def __init__(self, img, x, y):
        # Bounding quad: the four diagonal neighbours of the corner point.
        quad = [(x - 1, y - 1), (x - 1, y + 1), (x + 1, y + 1), (x + 1, y - 1)]
        super(Corner, self).__init__(img, x, y, quad)

    def draw(self, color=Color.RED, width=1):
        """
        Draw a small circle around the corner. Color tuple is single parameter,
        default is Color.RED.

        :param color: an RGB color triplet
        :param width: if width is less than zero draw the feature filled in,
         otherwise draw the contour using the specified width.
        :return: None
        """
        self._image.draw_circle(self._x, self._y, 4, color, width)
class Line(Feature):
    """
    The line features is returned by the find_lines function, also can be
    initialized with any two points.

    >>> line = Line(Image, (point1, point2))

    where point1 and point2 are (x, y) coordinate tuples

    >> line.points

    Returns a tuple of the two points.
    """
    # TODO: calculate the endpoints of the line

    def __init__(self, img, line):
        self._image = img
        self._vector = None
        self._y_intercept = None
        self._end_pts = copy(line)
        # A vertical line has no finite slope.
        if self._end_pts[1][0] - self._end_pts[0][0] == 0:
            self._slope = float('inf')
        else:
            self._slope = (self._end_pts[1][1] - self._end_pts[0][1]) / (
                self._end_pts[1][0] - self._end_pts[0][0]
            )
        # coordinate of the line object is the midpoint
        at_x = (line[0][0] + line[1][0]) / 2
        at_y = (line[0][1] + line[1][1]) / 2
        xmin = int(np.min([line[0][0], line[1][0]]))
        xmax = int(np.max([line[0][0], line[1][0]]))
        ymin = int(np.min([line[0][1], line[1][1]]))
        ymax = int(np.max([line[0][1], line[1][1]]))
        # FIX: the last corner was a list while the other three were tuples.
        points = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
        super(Line, self).__init__(img, at_x, at_y, points)

    def draw(self, color=Color.BLUE, width=1):
        """
        Draw the line, default _color is Color.BLUE

        :param color: a RGB _color triplet
        :param width: draw the line using specified width
        :return: None - modify the source image drawing layer
        """
        self._image.draw_line(self._end_pts[0], self._end_pts[1], color, width)

    @property
    def length(self):
        """
        Returns the length of the line.

        :return: a floating point length value
        :Example:
        >>> img = Image('lena.jpg')
        >>> lines = img.find_lines()
        >>> for l in lines:
        >>>     if l.length > 100:
        >>>         print("Oh my!, what a big line you have!")
        """
        return float(spsd.euclidean(self._end_pts[0], self._end_pts[1]))

    def crop(self):
        """
        Crops the source image to the location of the features and returns
        a new Image.

        :return: an Image that is cropped to the features position and size
        :Example:
        >>> img = Image('edge_test2.png')
        >>> l = img.find_lines()
        >>> line = l[0].crop()
        """
        tl = self.top_left_corner()
        return self._image.crop(tl[0], tl[1], self._width, self._height)

    def mean_color(self):
        """
        Returns the mean _color of pixels under the line.
        Note that when the line falls "between" pixels, each pixels _color
        contributes to the weighted average.

        :return: a RGB triplet corresponding to the mean _color of the features
        :Example:
        >>> img = Image('lena')
        >>> l = img.find_lines()
        >>> c = l[0].mean_color()
        """
        pt1, pt2 = self._end_pts
        # we are going to walk the line, and take the mean _color from all the px
        # points -- there's probably a much more optimal way to do this
        xmax, xmin, ymax, ymin = self.extents()
        dx = xmax - xmin
        dy = ymax - ymin
        # orient the line so it is going in the positive direction
        # if it's a straight line, we can just get mean _color on the slice
        if dx == 0.0:
            return self._image[pt1[0]:pt1[0] + 1, ymin:ymax].mean_color()
        if dy == 0.0:
            return self._image[xmin:xmax, pt1[1]:pt1[1] + 1].mean_color()
        error = 0.0
        derr = dy / dx  # this is how much 'error' will increase in every step
        px = []
        weights = []
        if derr < 1:
            y = ymin
            # iterate over x
            for x in range(xmin, xmax):
                # this is the pixel we would draw on, check the _color at that px
                # weight is reduced from 1.0 by the abs amount of error
                px.append(self._image[x, y])
                weights.append(1.0 - abs(error))
                # if we have error in either direction, we're going to use
                # the px above or below
                if error > 0:
                    px.append(self._image[x, y + 1])
                    weights.append(error)
                if error < 0:
                    px.append(self._image[x, y - 1])
                    weights.append(abs(error))
                error = error + derr
                if error >= 0.5:
                    y += 1
                    error -= 1.0
        else:
            # this is a 'steep' line, so we iterate over y instead
            x = xmin
            for y in range(ymin, ymax):
                # this is the pixel we would draw on, check the _color at that px
                # weight is reduced from 1.0 by the abs amount of error
                px.append(self._image[x, y])
                weights.append(1.0 - abs(error))
                if error > 0:
                    px.append(self._image[x + 1, y])
                    weights.append(error)
                if error < 0:
                    px.append(self._image[x - 1, y])
                    weights.append(abs(error))
                error += 1.0 / derr  # we use the reciprocal of error
                if error >= 0.5:
                    x += 1
                    error -= 1.0
        # once we have iterated over every pixel in the line, we avg the weights
        clr_arr = np.array(px)
        weight_arr = np.array(weights)
        # multiply each _color tuple by its weight
        weighted_clrs = np.transpose(np.transpose(clr_arr) * weight_arr)
        tmp = sum(weighted_clrs / sum(weight_arr))
        return float(tmp[0]), float(tmp[1]), float(tmp[2])

    def find_intersection(self, line):
        """
        Returns the intersection point of two lines.

        :param line: the other line to compute intersection
        :return: a point tuple
        :Example:
        >>> img = Image('lena')
        >>> l = img.find_lines()
        >>> c = l[0].find_intersection(l[1])
        """
        # TODO: NEEDS TO RETURN A TUPLE OF FLOATS
        # NOTE(review): parallel lines (m1 == m2) raise ZeroDivisionError below;
        # callers should check is_parallel() first.
        if self._slope == float('inf'):
            x = self._end_pts[0][0]
            y = line.slope * (x - line._end_pts[1][0]) + line._end_pts[1][1]
            return x, y
        if line.slope == float("inf"):
            x = line._end_pts[0][0]
            y = self.slope * (x - self._end_pts[1][0]) + self._end_pts[1][1]
            return x, y
        m1 = self._slope
        x12, y12 = self._end_pts[1]
        m2 = line.slope
        x22, y22 = line._end_pts[1]
        x = (m1 * x12 - m2 * x22 + y22 - y12) / float(m1 - m2)
        y = (m1 * m2 * (x12 - x22) - m2 * y12 + m1 * y22) / float(m1 - m2)
        return x, y

    def is_parallel(self, line):
        """
        Checks whether two lines are parallel or not.

        :param line: the other line to be compared
        :return: Bool
        :Example:
        >>> img = Image('lena')
        >>> l = img.find_lines()
        >>> c = l[0].is_parallel(l[1])
        """
        # float('inf') == float('inf') holds, so two verticals compare parallel.
        return self._slope == line.slope

    def is_perpendicular(self, line):
        """
        Checks whether two lines are perpendicular or not.

        :param line: the other line to be compared
        :return: Bool.
        :Example:
        >>> img = Image('lena')
        >>> l = img.find_lines()
        >>> c = l[0].is_perpendicular(l[1])
        """
        if self._slope == float('inf'):
            return line.slope == 0
        if line.slope == float('inf'):
            return self.slope == 0
        return self._slope * line.slope == -1

    def image_intersections(self, img):
        """
        Returns a set of pixels where the line intersects with the binary image.

        :param img: binary image
        :return: a list of points
        :Example:
        >>> img = Image('lena')
        >>> l = img.find_lines()
        >>> c = l[0].image_intersections(img.binarize())
        """
        pixels = []
        if self._slope == float('inf'):
            for y in range(self._end_pts[0][1], self._end_pts[1][1] + 1):
                pixels.append((self._end_pts[0][0], y))
        else:
            # rasterize along x, then along y, to avoid gaps on steep lines
            for x in range(self._end_pts[0][0], self._end_pts[1][0] + 1):
                pixels.append((x, int(self._end_pts[1][1] +
                                      self._slope * (x - self._end_pts[1][0]))))
            for y in range(self._end_pts[0][1], self._end_pts[1][1] + 1):
                pixels.append((int(((y - self._end_pts[1][1]) / self._slope) +
                                   self._end_pts[1][0]), y))
            pixels = list(set(pixels))
        matched_pixels = []
        for pixel in pixels:
            if img[pixel[0], pixel[1]] == (255.0, 255.0, 255.0):
                matched_pixels.append(pixel)
        matched_pixels.sort()
        return matched_pixels

    def angle(self):
        """
        Angle of the line, from the left most point to right most point.
        Returns angle (theta), with 0 = horizontal,
        -pi/2 = vertical positive slope, pi/2 = vertical negative slope.

        :return: an angle value in degrees.
        :Example:
        >>> img = Image('lena')
        >>> ls = img.find_lines()
        >>> for l in ls:
        >>>     if l.angle() == 0:
        >>>         print("Horizontal!")
        """
        # first find left most point
        a = 0
        b = 1
        if self._end_pts[a][0] > self._end_pts[b][0]:
            b = 0
            a = 1
        dx = self._end_pts[b][0] - self._end_pts[a][0]
        dy = self._end_pts[b][1] - self._end_pts[a][1]
        # internal standard is degrees
        return float(360.0 * (atan2(dy, dx) / (2 * np.pi)))

    def crop2image_edges(self):
        """
        **SUMMARY**
        Returns the line with endpoints on edges of image. If some endpoints lies inside image
        then those points remain the same without extension to the edges.

        **RETURNS**
        Returns a :py:class:`Line` object. If line does not cross the image's edges or cross at one point returns None.

        **EXAMPLE**
        >>> img = Image("lenna")
        >>> l = Line(img, ((-100, -50), (1000, 25))
        >>> cr_l = l.crop2image_edges()
        """
        pt1, pt2 = self._end_pts
        pt1, pt2 = min(pt1, pt2), max(pt1, pt2)
        x1, y1 = pt1
        x2, y2 = pt2
        w, h = self._image.width - 1, self._image.height - 1
        slope = self.slope
        ep = []
        if slope == float('inf'):
            if 0 <= x1 <= w and 0 <= x2 <= w:
                ep.append((x1, 0))
                ep.append((x2, h))
        elif slope == 0:
            # FIX: y-coordinates were bounds-checked against the width (w)
            # instead of the height (h).
            if 0 <= y1 <= h and 0 <= y2 <= h:
                ep.append((0, y1))
                ep.append((w, y2))
        else:
            x = (slope * x1 - y1) / slope  # top edge y = 0
            if 0 <= x <= w:
                ep.append((int(round(x)), 0))
            x = (slope * x1 + h - y1) / slope  # bottom edge y = h
            if 0 <= x <= w:
                ep.append((int(round(x)), h))
            y = -slope * x1 + y1  # left edge x = 0
            if 0 <= y <= h:
                ep.append((0, (int(round(y)))))
            y = slope * (w - x1) + y1  # right edge x = w
            if 0 <= y <= h:
                ep.append((w, (int(round(y)))))
        ep = list(set(
            ep))  # remove duplicates of points if line cross image at corners
        ep.sort()
        if len(ep) == 2:
            # if points lies outside image then change them
            if not (0 < x1 < w and 0 < y1 < h):
                pt1 = ep[0]
            if not (0 < x2 < w and 0 < y2 < h):
                pt2 = ep[1]
        elif len(ep) == 1:
            logger.warning("Line cross the image only at one point")
            return None
        else:
            logger.warning("Line does not cross the image")
            return None
        return Line(self._image, (pt1, pt2))

    @lazy_property
    def vector(self):
        # Direction vector from endpoint 0 to endpoint 1, cached after first use.
        if self._vector is None:
            self._vector = [float(self._end_pts[1][0] - self._end_pts[0][0]),
                            float(self._end_pts[1][1] - self._end_pts[0][1])]
        return self._vector

    def dot(self, other):
        """Dot product of the two lines' direction vectors."""
        return np.dot(self.vector, other.vector)

    def cross(self, other):
        """Cross product of the two lines' direction vectors."""
        return np.cross(self.vector, other.vector)

    def get_y_intercept(self):
        """
        **SUMMARY**
        Returns the y intercept based on the lines equation. Note that this point is potentially not contained in the image itself

        **RETURNS**
        Returns a floating point intersection value

        **EXAMPLE**
        >>> img = Image("lena")
        >>> l = Line(img, ((50, 150), (2, 225))
        >>> b = l.get_y_intercept()
        """
        # NOTE(review): for a vertical line (slope == inf) this yields +/-inf.
        if self._y_intercept is None:
            pt1, pt2 = self._end_pts
            m = self.slope
            # y = mx + b | b = y-mx
            self._y_intercept = pt1[1] - m * pt1[0]
        return self._y_intercept

    def extend2image_edges(self):
        """
        **SUMMARY**
        Returns the line with endpoints on edges of image.

        **RETURNS**
        Returns a :py:class:`Line` object. If line does not lies entirely inside image then returns None.

        **EXAMPLE**
        >>> img = Image("lena")
        >>> l = Line(img, ((50, 150), (2, 225))
        >>> cr_l = l.extend2image_edges()
        """
        pt1, pt2 = self._end_pts
        pt1, pt2 = min(pt1, pt2), max(pt1, pt2)
        x1, y1 = pt1
        x2, y2 = pt2
        w, h = self._image.width - 1, self._image.height - 1
        slope = self.slope
        # FIX: y-coordinates were bounds-checked against the width (w)
        # instead of the height (h).
        if not 0 <= x1 <= w or not 0 <= x2 <= w or not 0 <= y1 <= h or not 0 <= y2 <= h:
            logger.warning("At first the line should be cropped")
            return None
        ep = []
        if slope == float('inf'):
            if 0 <= x1 <= w and 0 <= x2 <= w:
                return Line(self._image, ((x1, 0), (x2, h)))
        elif slope == 0:
            # FIX: same w-vs-h bounds bug as above.
            if 0 <= y1 <= h and 0 <= y2 <= h:
                return Line(self._image, ((0, y1), (w, y2)))
        else:
            x = (slope * x1 - y1) / slope  # top edge y = 0
            if 0 <= x <= w:
                ep.append((int(round(x)), 0))
            x = (slope * x1 + h - y1) / slope  # bottom edge y = h
            if 0 <= x <= w:
                ep.append((int(round(x)), h))
            y = -slope * x1 + y1  # left edge x = 0
            if 0 <= y <= h:
                ep.append((0, (int(round(y)))))
            y = slope * (w - x1) + y1  # right edge x = w
            if 0 <= y <= h:
                ep.append((w, (int(round(y)))))
        # remove duplicates of points if line cross image at corners
        ep = list(set(ep))
        ep.sort()
        return Line(self._image, ep)

    @property
    def slope(self):
        """Slope of the line; float('inf') for vertical lines."""
        return self._slope
class Barcode(Feature):
    """
    **SUMMARY**
    The Barcode Feature wraps the object returned by find_barcode(),
    a zbar symbol

    * The x,y coordinate is the center of the code.
    * points represents the four boundary points of the features. Note: for
    QR codes, these points are the reference rectangles, and are quadrangular,
    rather than rectangular with other datamatrix types.
    * data is the parsed data of the code.
    """
    # decoded payload of the barcode; filled in by __init__
    _data = ''

    def __init__(self, img, zbsymbol):
        # zbsymbol is a zbar symbol; its .location is a sequence of (x, y)
        # boundary points and .data the decoded string.
        locs = zbsymbol.location
        if len(locs) > 4:
            # more than four boundary points: collapse to the axis-aligned
            # bounding box of all of them
            xs = [l[0] for l in locs]
            ys = [l[1] for l in locs]
            xmax = np.max(xs)
            xmin = np.min(xs)
            ymax = np.max(ys)
            ymin = np.min(ys)
            points = ((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin))
        else:
            points = copy(locs) # hopefully this is in tl clockwise order
        self._data = zbsymbol.data
        self._points = copy(points)
        numpoints = len(self._points)
        self._x = 0
        self._y = 0
        # centroid of the boundary points
        for p in self._points:
            self._x += p[0]
            self._y += p[1]
        if numpoints:
            self._x /= numpoints
            self._y /= numpoints
        # NOTE(review): the centroid computed above is NOT passed here — the
        # super call receives 0, 0. If Feature.__init__ assigns _x/_y from its
        # arguments, the centroid is overwritten. Confirm against Feature.
        super(Barcode, self).__init__(img, 0, 0, points)

    def __repr__(self):
        # e.g. "package.Barcode at (10, 20), read data: hello"
        return "{}.{} at ({}, {}), read data: {}".format(
            self.__class__.__module__, self.__class__.__name__,
            self._x, self._y, self._data
        )

    def draw(self, color=Color.GREEN, width=1):
        """
        **SUMMARY**
        Draws the bounding area of the barcode, given by points. Note that for
        QR codes, these points are the reference boxes, and so may "stray" into
        the actual code.

        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
        contour using the specified width.

        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        # draw the quadrilateral edge by edge
        self._image.draw_line(self._points[0], self._points[1], color, width)
        self._image.draw_line(self._points[1], self._points[2], color, width)
        self._image.draw_line(self._points[2], self._points[3], color, width)
        self._image.draw_line(self._points[3], self._points[0], color, width)

    def length(self):
        """
        **SUMMARY**
        Returns the longest side of the quandrangle formed by the boundary points.

        **RETURNS**
        A floating point length value.

        **EXAMPLE**
        >>> img = Image("mycode.jpg")
        >>> bc = img.findBarcode()
        >>> print(bc[-1].length())
        """
        sqform = spsd.squareform(spsd.pdist(self._points, "euclidean"))
        # get pairwise distances for all points
        # note that the code is a quadrilateral
        return max(sqform[0][1], sqform[1][2], sqform[2][3], sqform[3][0])

    def area(self):
        """
        **SUMMARY**
        Returns the area defined by the quandrangle formed by the boundary points

        **RETURNS**
        An integer area value.

        **EXAMPLE**
        >>> img = Image("mycode.jpg")
        >>> bc = img.findBarcode()
        >>> print(bc[-1].area())
        """
        # calc the length of each side in a square distance matrix
        sqform = spsd.squareform(spsd.pdist(self._points, "euclidean"))
        # squareform returns a N by N matrix
        # boundry line lengths
        a = sqform[0][1]
        b = sqform[1][2]
        c = sqform[2][3]
        d = sqform[3][0]
        # diagonals
        p = sqform[0][2]
        q = sqform[1][3]
        # perimeter / 2
        s = (a + b + c + d) / 2.0
        # i found the formula to do this on wikihow. Yes, I am that lame.
        # http://www.wikihow.com/Find-the-Area-of-a-Quadrilateral
        return sqrt((s - a) * (s - b) * (s - c) * (s - d) -
                    (a * c + b * d + p * q) * (a * c + b * d - p * q) / 4)
class HaarFeature(Feature):
    """
    The HaarFeature is a rectangle returned by the find_feature function.

    - The x,y coordinates are defined by the center of the bounding rectangle.
    - The classifier property refers to the cascade file used for detection.
    - Points are the clockwise points of the bounding rectangle, starting in
      upper left.
    """
    classifier = None
    _width = None
    _height = None
    neighbors = None
    feature_name = 'None'

    def __init__(self, img, haar_obj, haar_classifier=None, cv2flag=True):
        # cv2-style results carry (x, y, w, h); legacy results also carry the
        # neighbor count.
        if cv2flag is False:
            x, y, width, height, self.neighbors = haar_obj
        elif cv2flag is True:
            x, y, width, height = haar_obj
        # the feature's location is the center of its bounding rectangle
        at_x = x + width / 2
        at_y = y + height / 2
        corners = ((x, y), (x + width, y),
                   (x + width, y + height), (x, y + height))
        self.classifier = haar_classifier
        if haar_classifier is not None:
            self.feature_name = haar_classifier.get_name()
        super(HaarFeature, self).__init__(img, at_x, at_y, corners)

    def draw(self, color=Color.GREEN, width=1):
        """
        Draw the bounding rectangle, default _color is Color.GREEN

        :param color: a RGB _color tuple
        :param width: if width is less than zero we draw the features filled in,
         otherwise we draw the contour using the specified width.
        :return: None, modify the source images drawing layer.
        """
        # connect each corner to the next, wrapping back to the first
        for i in range(4):
            self._image.draw_line(self._points[i], self._points[(i + 1) % 4],
                                  color, width)

    def __getstate__(self):
        # the classifier holds an unpicklable cascade handle; drop it
        state = dict(self.__dict__)
        state.pop('classifier', None)
        return state

    def mean_color(self):
        """
        Find the mean _color of the boundary rectangle

        :return: a RGB tuple that corresponds to the mean _color of the features.
        :Example:
        >>> img = Image('lena')
        >>> face = HaarCascade('face.xml')
        >>> faces = img.find_haar_features(face)
        >>> print(faces[-1].mean_color())
        """
        region = self._image[self._points[0][0]:self._points[1][0],
                             self._points[0][1]:self._points[2][1]]
        return region.mean_color()

    def area(self):
        """
        Returns the area of the features in pixels

        :return: area of features in pixels.
        :Example:
        >>> img = Image('lena')
        >>> face = HaarCascade('face.xml')
        >>> faces = img.find_haar_features(face)
        >>> print(faces[-1].area())
        """
        return self.width * self.height
class Chessboard(Feature):
    """
    Used for Calibration, it uses a chessboard to calibrate from pixels
    to real world measurements.
    """
    _spcorners = None
    _dims = None

    def __init__(self, img, dim, subpixelscorners):
        self._dims = dim
        self._spcorners = subpixelscorners
        # feature center: mean of the sub-pixel corner coordinates
        x = np.average(np.array(self._spcorners)[:, 0])
        y = np.average(np.array(self._spcorners)[:, 1])
        # sort along the x + y diagonal and the x - y diagonal to pick the
        # four extreme corners of the board
        by_sum = sorted(self._spcorners,
                        key=lambda corner: corner[0] + corner[1])
        by_diff = sorted(self._spcorners,
                         key=lambda corner: corner[0] - corner[1])
        extremes = (by_sum[0], by_diff[-1], by_sum[-1], by_diff[0])
        super(Chessboard, self).__init__(img, x, y, extremes)

    def draw(self, no_need_color=None):
        """
        Draws the chessboard corners onto the image bitmap.

        :param no_need_color: ignored; kept for interface compatibility
        :return: None
        """
        cv.DrawChessboardCorners(self._image.bitmap, self._dims,
                                 self._spcorners, 1)

    def area(self):
        """
        **SUMMARY**
        Returns the mean of the distance between corner points in the chessboard
        Given that the chessboard is of a known size, this can be used as a
        proxy for distance from the camera

        :return: the mean distance between the corners.
        :Example:
        >>> img = Image("corners.jpg")
        >>> feats = img.findChessboardCorners()
        >>> print(feats[-1].area())
        """
        # note, copying this from barcode means we probably need a subclass of
        # features called "quandrangle"
        dist = spsd.squareform(spsd.pdist(self._points, "euclidean"))
        # side lengths of the quadrilateral ...
        a, b, c, d = dist[0][1], dist[1][2], dist[2][3], dist[3][0]
        # ... and its diagonals
        p, q = dist[0][2], dist[1][3]
        s = (a + b + c + d) / 2.0  # semi-perimeter
        return 2 * sqrt((s - a) * (s - b) * (s - c) * (s - d) -
                        (a * c + b * d + p * q) * (a * c + b * d - p * q) / 4)
class TemplateMatch(Feature):
    """
    **SUMMARY**
    This class is used for template (pattern) matching in images.
    The template matching cannot handle scale or rotation.
    """
    _template_image = None
    _quality = 0
    _w = 0
    _h = 0

    def __init__(self, img, template, location, quality):
        self._template_image = template  # -- KAT - TRYING SOMETHING
        self._image = img
        self._quality = quality
        at_x, at_y = location[0], location[1]
        w, h = template.width, template.height
        # bounding rectangle of the match, clockwise from the top-left
        corners = [(at_x, at_y), (at_x + w, at_y), (at_x + w, at_y + h),
                   (at_x, at_y + h)]
        super(TemplateMatch, self).__init__(img, at_x, at_y, corners)

    def _template_overlaps(self, other):
        """
        Returns true if this features overlaps another template features.
        """
        (maxx, minx, maxy, miny) = self.extents()
        for px, py in other.points:
            # any corner of the other feature inside our extents means overlap
            if minx <= px <= maxx and miny <= py <= maxy:
                return True
        return False

    def consume(self, other):
        """
        Given another template features, make this features the size of the two features combined.
        """
        (maxx, minx, maxy, miny) = self.extents()
        (omaxx, ominx, omaxy, ominy) = other.extents()
        maxx = max(maxx, omaxx)
        minx = min(minx, ominx)
        maxy = max(maxy, omaxy)
        miny = min(miny, ominy)
        self._x = minx
        self._y = miny
        self._points = [(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)]
        self._update_extents()

    def rescale(self, w, h):
        """
        This method keeps the features's center the same but sets a new width and height
        """
        (maxx, minx, maxy, miny) = self.extents()
        center_x = minx + ((maxx - minx) / 2)
        center_y = miny + ((maxy - miny) / 2)
        x = center_x - (w / 2)
        y = center_y - (h / 2)
        self._x = x
        self._y = y
        self._points = [(x, y),
                        (x + w, y),
                        (x + w, y + h),
                        (x, y + h)]
        self._update_extents()

    def crop(self):
        """Return the sub-image covered by this feature's extents."""
        (maxx, minx, maxy, miny) = self.extents()
        return self._image.crop(minx, miny, maxx - minx, maxy - miny)

    def draw(self, color=Color.GREEN, width=1):
        """
        **SUMMARY**
        Draw the bounding rectangle, default _color green.

        **PARAMETERS**
        * *color* - An RGB _color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
        contour using the specified width.

        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        # NOTE(review): width/height are invoked as methods here, while Circle
        # in this module exposes them as properties — confirm Feature's API.
        self._image.dl().rectangle((self._x, self._y),
                                   (self.width(), self.height()),
                                   color=color, width=width)
class Circle(Feature):
    """
    Class for a general circle features with a center at (x,y) and a radius r
    """
    # radius in pixels
    _radius = 0.00
    # cached mean color, computed lazily by mean_color()
    _avg_color = None
    # polygonal approximation of the circle outline
    _contour = None

    def __init__(self, img, at_x, at_y, r):
        self._radius = r
        # axis-aligned bounding box corners of the circle
        points = [(at_x - r, at_y - r), (at_x + r, at_y - r),
                  (at_x + r, at_y + r), (at_x - r, at_y + r)]
        self._avg_color = None
        super(Circle, self).__init__(img, at_x, at_y, points)
        # approximate the outline with an 18-segment polygon
        segments = 18
        rng = range(1, segments + 1)
        self._contour = []
        for theta in rng:
            rp = 2.0 * np.pi * float(theta) / float(segments)
            x = (r * np.sin(rp)) + at_x
            y = (r * np.cos(rp)) + at_y
            self._contour.append((x, y))

    def draw(self, color=Color.GREEN, width=1):
        """
        **SUMMARY**
        With no dimension information, _color the x,y point for the features.

        **PARAMETERS**
        * *color* - An RGB _color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
        contour using the specified width.

        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        self._image.dl().circle((self._x, self._y), self._radius, color, width)

    def show(self, color=Color.GREEN):
        """
        **SUMMARY**
        This function will automatically draw the features on the image and show it.
        It is a basically a shortcut function for development and is the same as:

        **PARAMETERS**
        * *color* - the _color of the features as an rgb triplet.

        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.

        **EXAMPLE**
        >>> img = Image("logo")
        >>> feat = img.findCircle()
        >>> feat[0].show()
        """
        self.draw(color)
        self._image.show()

    def distance_from(self, point=(-1, -1)):
        """
        **SUMMARY**
        Given a point (default to center of the image), return the euclidean distance of x,y from this point.

        **PARAMETERS**
        * *point* - The point, as an (x,y) tuple on the image to measure distance from.

        **RETURNS**
        The distance as a floating point value in pixels.

        **EXAMPLE**
        >>> img = Image("OWS.jpg")
        >>> blobs = img.findCircle()
        >>> blobs[-1].distance_from(blobs[-2].coordinates())
        """
        # (-1, -1) is the sentinel for "use the image center"
        if point[0] == -1 or point[1] == -1:
            point = np.array(self._image.size()) / 2
        return spsd.euclidean(point, [self.x, self.y])

    def mean_color(self):
        """
        **SUMMARY**
        Returns the average _color within the circle.

        **RETURNS**
        Returns an RGB triplet that corresponds to the mean _color of the features.

        **EXAMPLE**
        >>> img = Image("lenna")
        >>> c = img.find_circle()
        >>> c[-1].mean_color()
        """
        # generate the mask
        if self._avg_color is None:
            # legacy cv API: the mask is mutated in place — order matters
            # (zero it, then paint a filled white circle as the mask region)
            mask = self._image.zeros(1)
            cv.Zero(mask)
            cv.Circle(mask, (self._x, self._y), self._radius,
                      color=(255, 255, 255), thickness=-1)
            temp = cv.Avg(self._image.bitmap, mask)
            self._avg_color = (temp[0], temp[1], temp[2])
        return self._avg_color

    @property
    def area(self):
        """
        Area covered by the features -- for a pixel, 1

        **SUMMARY**
        Returns a numpy array of the area of each features in pixels.

        **RETURNS**
        A numpy array of all the positions in the featureset.

        **EXAMPLE**
        >>> img = Image("lenna")
        >>> feats = img.find_blobs()
        >>> xs = feats.coordinates()
        >>> print(xs)
        """
        return self._radius * self._radius * np.pi

    @property
    def perimeter(self):
        """
        Returns the perimeter of the circle features in pixels.
        """
        return 2 * np.pi * self._radius

    @property
    def width(self):
        """
        Returns the width of the features -- for compliance just r*2
        """
        return self._radius * 2

    @property
    def height(self):
        """
        Returns the height of the features -- for compliance just r*2
        """
        return self._radius * 2

    @property
    def radius(self):
        """
        Returns the radius of the circle in pixels.
        """
        return self._radius

    @property
    def diameter(self):
        """
        Returns the diameter of the circle in pixels.
        """
        return self._radius * 2

    def crop(self, no_mask=False):
        """
        **SUMMARY**
        This function returns the largest bounding box for an image.

        **PARAMETERS**
        * *no_mask* - if no_mask=True we return the bounding box image of the circle.
          if no_mask=False (default) we return the masked circle with the rest of the area set to black

        **RETURNS**
        The masked circle image.
        """
        if no_mask:
            return self._image.crop(self.x, self.y, self.width, self.height,
                                    centered=True)
        else:
            # legacy cv API: build a filled-circle mask, copy only the masked
            # pixels, then crop to the circle's bounding box
            mask = self._image.zeros(1)
            result = self._image.zeros()
            cv.Zero(mask)
            cv.Zero(result)
            # if you want to shave a bit of time we go do the crop before the blit
            cv.Circle(mask, (self._x, self._y), self._radius,
                      color=(255, 255, 255), thickness=-1)
            cv.Copy(self._image.bitmap, result, mask)
            ret = Image(result)
            ret = ret.crop(self._x, self._y, self.width, self.height,
                           centered=True)
            return ret
class KeyPoint(Feature):
    """
    The class is a place holder for SURF/SIFT/ORB/STAR keypoints.
    It wraps a raw OpenCV keypoint object and exposes it through the
    Feature interface.
    """
    _radius = 0.00
    _avg_color = None
    _angle = 0
    _octave = 0
    _response = 0.00
    _flavor = ''
    _descriptor = None
    _keypoint = None

    def __init__(self, img, keypoint, descriptor=None, flavor='SURF'):
        self._keypoint = keypoint
        # NOTE(review): OpenCV KeyPoint.pt is (x, y), so indexing pt[1]
        # into x looks swapped. The original flags it with "# KAT", so it
        # may be deliberate -- confirm against upstream before changing.
        x = keypoint.pt[1]  # KAT
        y = keypoint.pt[0]
        self._radius = keypoint.size / 2.0
        self._avg_color = None
        self._image = img
        self._angle = keypoint.angle
        self._octave = keypoint.octave
        self._response = keypoint.response
        self._flavor = flavor
        self._descriptor = descriptor
        r = self._radius
        points = ((x + r, y + r), (x + r, y - r),
                  (x - r, y - r), (x - r, y + r))
        super(KeyPoint, self).__init__(img, x, y, points)
        # replace the square corner points with an 18-gon approximating
        # the keypoint's circle
        segments = 18
        rng = range(1, segments + 1)
        self._points = []
        for theta in rng:
            rp = 2.0 * np.pi * float(theta) / float(segments)
            x = (r * np.sin(rp)) + self.x
            y = (r * np.cos(rp)) + self.y
            self._points.append((x, y))

    @property
    def keypoint(self):
        """
        Returns the raw keypoint object
        """
        return self._keypoint

    @property
    def descriptor(self):
        """
        Returns the raw keypoint descriptor
        """
        return self._descriptor

    @property
    def quality(self):
        """
        Returns the quality metric for the keypoint object.
        """
        return self._response

    @property
    def octave(self):
        """
        Returns the raw keypoint's octave (if it has)
        """
        return self._octave

    @property
    def flavor(self):
        """
        Returns the type of keypoint as a string (e.g. SURF/MSER/ETC)
        """
        return self._flavor

    def angle(self):
        """
        Return the angle (theta) in degrees of the features. The default is 0 (horizontal).
        """
        return self._angle

    def draw(self, color=Color.GREEN, width=1):
        """
        **SUMMARY**
        Draw a circle around the features. Color tuple is single parameter, default is Green.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
                    contour using the specified width.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        self._image.dl().circle((self._x, self._y), self._radius, color, width)
        pt1 = (int(self._x), int(self._y))
        # BUGFIX: use the numeric _angle attribute -- self.angle is a
        # method, so radians(self.angle) raised a TypeError.
        pt2 = (int(self._x + (self.radius * sin(radians(self._angle)))),
               int(self._y + (self.radius * cos(radians(self._angle)))))
        self._image.dl().line(pt1, pt2, color, width)

    def show(self, color=Color.GREEN):
        """
        **SUMMARY**
        This function will automatically draw the features on the image and show it.
        It is a basically a shortcut function for development and is the same as:
        >>> img = Image("logo")
        >>> feat = img.find_blobs()
        >>> if feat: feat.draw()
        >>> img.show()
        """
        self.draw(color)
        self._image.show()

    def distance_from(self, point=(-1, -1)):
        """
        **SUMMARY**
        Given a point (default to center of the image), return the euclidean distance of x,y from this point
        """
        if point[0] == -1 or point[1] == -1:
            point = np.array(self._image.size()) / 2
        return spsd.euclidean(point, [self._x, self._y])

    def mean_color(self):
        """
        **SUMMARY**
        Return the average color within the features's radius
        **RETURNS**
        Returns an RGB triplet that corresponds to the mean color of the features.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> kp = img.findKeypoints()
        >>> c = kp[0].mean_color()
        """
        # generate the mask lazily and cache the result
        if self._avg_color is None:
            mask = self._image.zeros(1)
            cv.Zero(mask)
            cv.Circle(mask, (int(self._x), int(self._y)), int(self._radius),
                      color=(255, 255, 255), thickness=-1)
            temp = cv.Avg(self._image.bitmap, mask)
            self._avg_color = (temp[0], temp[1], temp[2])
        return self._avg_color

    def color_distance(self, color=(0, 0, 0)):
        """
        Return the euclidean color distance of the color tuple at x,y from a given color (default black)
        """
        return spsd.euclidean(np.array(color), np.array(self.mean_color()))

    @property
    def perimeter(self):
        """
        **SUMMARY**
        Returns the perimeter of the circle features in pixels.
        """
        return 2 * np.pi * self._radius

    @property
    def width(self):
        """
        Returns the width of the features -- for compliance just r*2
        """
        return self._radius * 2

    # BUGFIX: @property was missing here (width has it, and crop() below
    # reads self.height as a value, which previously yielded a bound method).
    @property
    def height(self):
        """
        Returns the height of the features -- for compliance just r*2
        """
        return self._radius * 2

    @property
    def radius(self):
        """Returns the radius of the keypoint's circle in pixels."""
        return self._radius

    @property
    def diameter(self):
        """Returns the diameter of the keypoint's circle in pixels."""
        return self._radius * 2

    def crop(self, no_mask=False):
        """
        **SUMMARY**
        This function returns the largest bounding box for an image.
        **PARAMETERS**
        * *no_mask* - if no_mask=True we return the bounding box image of the circle.
          if no_mask=False (default) we return the masked circle with the rest of the area set to black
        **RETURNS**
        The masked circle image.
        """
        if no_mask:
            return self._image.crop(self._x, self._y, self.width, self.height,
                                    centered=True)
        else:
            mask = self._image.zeros(1)
            result = self._image.zeros()
            cv.Zero(mask)
            cv.Zero(result)
            # if you want to shave a bit of time we go do the crop before the blit
            cv.Circle(mask, (int(self._x), int(self._y)), int(self._radius),
                      color=(255, 255, 255), thickness=-1)
            cv.Copy(self._image.bitmap, result, mask)
            ret = Image(result)
            ret = ret.crop(self._x, self._y, self.width, self.height,
                           centered=True)
            return ret
class Motion(Feature):
    """
    The motion features is used to encapsulate optical flow vectors. The features
    holds the length and direction of the vector.
    """
    dx = 0.00
    dy = 0.00
    norm_dx = 0.00
    norm_dy = 0.00
    window = 7

    def __init__(self, img, at_x, at_y, dx, dy, wndw):
        """
        img - the source image.
        at_x - the sample x pixel position on the image.
        at_y - the sample y pixel position on the image.
        dx - the x component of the optical flow vector.
        dy - the y component of the optical flow vector.
        wndw - the size of the sample window (we assume it is square).
        """
        self.dx = dx  # the direction of the vector
        self.dy = dy
        self.window = wndw  # the size of the sample window
        # NOTE(review): integer wndw truncates here under Python 3 // vs /
        # semantics differences -- confirm target interpreter.
        sz = wndw / 2
        # so we center at the flow vector
        # BUGFIX: the original listed (at_x + sz, at_y + sz) twice and
        # omitted the (at_x - sz, at_y - sz) corner, producing a degenerate
        # bounding polygon.
        points = [(at_x + sz, at_y + sz), (at_x - sz, at_y + sz),
                  (at_x - sz, at_y - sz), (at_x + sz, at_y - sz)]
        super(Motion, self).__init__(img, at_x, at_y, points)

    def draw(self, color=Color.GREEN, width=1, normalize=True):
        """
        **SUMMARY**
        Draw the optical flow vector going from the sample point along the length of the motion vector.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
                    contour using the specified width.
        * *normalize* - normalize the vector size to the size of the block (i.e. the biggest optical flow
                        vector is scaled to the size of the block, all other vectors are scaled relative to
                        the longest vector.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        new_x = 0
        new_y = 0
        if normalize:
            win = self.window / 2
            w = math.sqrt((win * win) * 2)
            new_x = self.norm_dx * w + self.x
            new_y = self.norm_dy * w + self.y
        else:
            new_x = self._x + self.dx
            new_y = self._y + self.dy
        self._image.dl().line((self.x, self.y), (new_x, new_y), color, width)

    def normalize2(self, max_mag):
        """
        **SUMMARY**
        This helper method normalizes the vector give an input magnitude.
        This is helpful for keeping the flow vector inside the sample window.
        """
        if max_mag == 0:
            self.norm_dx = 0
            self.norm_dy = 0
            return None
        mag = self.magnitude
        new_mag = mag / max_mag
        unit = self.unit_vec
        self.norm_dx = unit[0] * new_mag
        self.norm_dy = unit[1] * new_mag

    @property
    def magnitude(self):
        """
        Returns the magnitude of the optical flow vector.
        """
        return np.sqrt((self.dx * self.dx) + (self.dy * self.dy))

    @property
    def unit_vec(self):
        """
        Returns the unit vector direction of the flow vector as an (x,y) tuple.
        """
        mag = self.magnitude
        if mag != 0.00:
            return float(self.dx) / mag, float(self.dy) / mag
        else:
            return 0.00, 0.00

    @property
    def vector(self):
        """
        Returns the raw direction vector as an (x,y) tuple.
        """
        return self.dx, self.dy

    @property
    def window_size(self):
        """
        Return the window size that we sampled over.
        """
        return self.window

    def mean_color(self):
        """
        **SUMMARY**
        Return the average color of the sample-window area around the
        flow vector.
        **RETURNS**
        An RGB triplet corresponding to the mean color of the features.
        **EXAMPLE**
        >>> img = Image("lenna")
        >>> kp = img.findKeypoints()
        >>> c = kp.mean_color()
        """
        x = int(self.x - (self.window / 2))
        y = int(self.y - (self.window / 2))
        return self._image.crop(x, y, int(self.window),
                                int(self.window)).mean_color()

    def crop(self):
        """
        This function returns the image in the sample window around the flow vector.
        Returns Image
        """
        x = int(self._x - (self.window / 2))
        y = int(self._y - (self.window / 2))
        return self._image.crop(x, y, int(self.window), int(self.window))
class KeyPointMatch(Feature):
    """
    This class encapsulates a keypoint match between images of an object.
    It is used to record a template of one image as it appears in another image
    """
    _min_rect = []
    _avg_color = None
    _homography = []
    _template = None

    def __init__(self, img, template, min_rect, homography):
        self._template = template
        self._min_rect = min_rect
        self._homography = homography
        # derive the axis-aligned bounding box of the min rect
        xmax = 0
        ymax = 0
        xmin = img.width
        ymin = img.height
        for p in min_rect:
            if p[0] > xmax:
                xmax = p[0]
            if p[0] < xmin:
                xmin = p[0]
            if p[1] > ymax:
                ymax = p[1]
            if p[1] < ymin:
                ymin = p[1]
        width = xmax - xmin
        height = ymax - ymin
        at_x = xmin + width / 2
        at_y = ymin + height / 2
        points = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
        super(KeyPointMatch, self).__init__(img, at_x, at_y, points)

    def draw(self, color=Color.GREEN, width=1):
        """
        The default drawing operation is to draw the min bounding
        rectangle in an image.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
                    contour using the specified width.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        self._image.dl().line(self._min_rect[0], self._min_rect[1], color, width)
        self._image.dl().line(self._min_rect[1], self._min_rect[2], color, width)
        self._image.dl().line(self._min_rect[2], self._min_rect[3], color, width)
        self._image.dl().line(self._min_rect[3], self._min_rect[0], color, width)

    def draw_rect(self, color=Color.GREEN, width=1):
        """
        This method draws the axes alligned square box of the template
        match. This box holds the minimum bounding rectangle that describes
        the object. If the minimum bounding rectangle is axes aligned
        then the two bounding rectangles will match.
        """
        self._image.dl().line(self._points[0], self._points[1], color, width)
        self._image.dl().line(self._points[1], self._points[2], color, width)
        self._image.dl().line(self._points[2], self._points[3], color, width)
        self._image.dl().line(self._points[3], self._points[0], color, width)

    def crop(self):
        """
        Returns a cropped image of the features match. This cropped version is the
        axes aligned box masked to just include the image data of the minimum bounding
        rectangle.
        """
        tl = self.top_left_corner()
        # crop the minbounding rect
        raw = self._image.crop(tl[0], tl[1], self.width, self.height)
        return raw

    def mean_color(self):
        """
        **SUMMARY**
        Return the average color of the area covered by the minimum
        bounding rectangle (computed lazily and cached).
        **RETURNS**
        An RGB triplet corresponding to the mean color of the features.
        **EXAMPLE**
        >>> img = Image("lena")
        >>> kp = img.find_keypoints()
        >>> c = kp.mean_color()
        """
        if self._avg_color is None:
            tl = self.top_left_corner()
            # BUGFIX: the y coordinate used tl[0] (the x value) -- crop the
            # min bounding rect from the correct corner.
            raw = self._image.crop(tl[0], tl[1], self.width, self.height)
            mask = Image((self.width, self.height))
            # BUGFIX: TRUE was an undefined name; Python's literal is True.
            mask.dl().polygon(self._min_rect, color=Color.WHITE, filled=True)
            mask = mask.apply_layers()
            # consistency: the rest of this module uses the .bitmap property,
            # not the legacy getBitmap() accessor
            ret = cv.Avg(raw.bitmap, mask._get_gray_narray())
            self._avg_color = ret
        else:
            ret = self._avg_color
        return ret

    @property
    def min_rect(self):
        """
        Returns the minimum bounding rectangle of the features as a list
        of (x,y) tuples.
        """
        return self._min_rect

    @property
    def homography(self):
        """
        Returns the homography matrix used to calulate the minimum bounding
        rectangle.
        """
        return self._homography
class ShapeContextDescriptor(Feature):
    """
    Wraps a single shape-context descriptor sample point taken from a
    source blob, exposed through the Feature interface as a tiny
    2x2-pixel region around the sample point.
    """
    _min_rect = []
    _avg_color = None
    _descriptor = None
    _src_blob = None

    def __init__(self, img, point, descriptor, blob):
        self._descriptor = descriptor
        # BUGFIX: store the blob on the declared attribute _src_blob; the
        # stale camelCase name _sourceBlob left _src_blob permanently None.
        self._src_blob = blob
        x = point[0]
        y = point[1]
        points = [(x - 1, y - 1), (x + 1, y - 1), (x + 1, y + 1),
                  (x - 1, y + 1)]
        super(ShapeContextDescriptor, self).__init__(img, x, y, points)

    def draw(self, color=Color.GREEN, width=1):
        """
        Draw a small circle at the descriptor's sample point.
        **PARAMETERS**
        * *color* - An RGB color triplet.
        * *width* - if width is less than zero we draw the features filled in, otherwise we draw the
                    contour using the specified width.
        **RETURNS**
        Nothing - this is an inplace operation that modifies the source images drawing layer.
        """
        self._image.dl().circle((self._x, self._y), 3, color, width)
class ROI(Feature):
"""
This class creates a region of interest that inherit from one
or more features or no features at all.
"""
w = 0
h = 0
xtl = 0 # top left x
ytl = 0 # top left y
# we are going to assume x,y,w,h is our canonical form
_sub_features = []
_mean_color = None
def __init__(self, x, y=None, w=None, h=None, img=None):
"""
**SUMMARY**
This function can handle just about whatever you throw at it
and makes a it into a features. Valid input items are tuples and lists
of x,y points, features, featuresets, two x,y points, and a
set of x,y,width,height values.
**PARAMETERS**
* *x* - this can be just about anything, a list or tuple of x points,
a corner of the image, a list of (x,y) points, a Feature, a FeatureSet
* *y* - this is usually a second point or set of y values.
* *w* - a width
* *h* - a height.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image('lenna')
>>> x,y = np.where(img.threshold(230).getGrayNumpy() > 128 )
>>> roi = ROI(zip(x,y),img)
>>> roi = ROI(x,y,img)
"""
# After forgetting to set img=Image I put this catch
# in to save some debugging headache.
if isinstance(y, Image):
self._image = y
y = None
elif isinstance(w, Image):
self._image = w
w = None
elif isinstance(h, Image):
self._image = h
h = None
else:
self._image = img
if img is None and isinstance(x, (Feature, FeatureSet)):
if isinstance(x, Feature):
self._image = x.image
if isinstance(x, FeatureSet) and len(x) > 0:
self._image = x[0].image
if isinstance(x, Feature):
self._sub_features = FeatureSet([x])
elif isinstance(x, (list, tuple) and len(x) > 0 and
isinstance(x, Feature)):
self._sub_features = FeatureSet(x)
result = self._standardize(x, y, w, h)
if result is None:
logger.warning("Could not create an ROI from your data.")
return
self._rebase(result)
super(ROI, self).__init__(img, 0, 0, None)
def resize(self, w, h=None, percentage=True):
"""
**SUMMARY**
Contract/Expand the roi. By default use a percentage, otherwise use pixels.
This is all done relative to the center of the roi
**PARAMETERS**
* *w* - the percent to grow shrink the region is the only parameter, otherwise
it is the new ROI width
* *h* - The new roi height in terms of pixels or a percentage.
* *percentage* - If true use percentages (e.g. 2 doubles the size), otherwise
use pixel values.
* *h* - a height.
**RETURNS**
Nothing.
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.resize(2)
>>> roi.show()
"""
if h is None and isinstance(w, (tuple, list)):
h = w[1]
w = w[0]
if percentage:
if h is None:
h = w
nw = self.w * w
nh = self.h * h
nx = self.xtl + ((self.w - nw) / 2.0)
ny = self.ytl + ((self.h - nh) / 2.0)
self._rebase([nx, ny, nw, nh])
else:
nw = self.w + w
nh = self.h + h
nx = self.xtl + ((self.w - nw) / 2.0)
ny = self.ytl + ((self.h - nh) / 2.0)
self._rebase([nx, ny, nw, nh])
def overlaps(self, other):
for p in other.points:
if (self.max_x() >= p[0] >= self.min_x() and
self.max_y() >= p[1] >= self.min_y()):
return True
return False
def translate(self, x=0, y=0):
"""
Move the roi.
**PARAMETERS**
* *x* - Move the ROI horizontally.
* *y* - Move the ROI vertically
**RETURNS**
Nothing.
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> roi.show()
"""
if x == 0 and y == 0:
return
if y == 0 and isinstance(x, (tuple, list)):
y = x[1]
x = x[0]
if isinstance(x, (float, int)) and isinstance(y, (float, int)):
self._rebase([self.xtl + x, self.ytl + y, self.w, self.h])
def to_xywh(self):
"""
Get the ROI as a list of the top left corner's x and y position
and the roi's width and height in pixels.
**RETURNS**
A list of the form [x,y,w,h]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print(roi.to_xywh())
"""
return [self.xtl, self.ytl, self.w, self.h]
def to_tl_br(self):
"""
Get the ROI as a list of tuples of the ROI's top left
corner and bottom right corner.
**RETURNS**
A list of the form [(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print(roi.to_tl_br())
"""
return [(self.xtl, self.ytl), (self.xtl + self.w, self.ytl + self.h)]
def to_points(self):
"""
Get the ROI as a list of four points that make up the bounding rectangle.
**RETURNS**
A list of the form [(x,y),(x,y),(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print(roi.to_points())
"""
tl = (self.xtl, self.ytl)
tr = (self.xtl + self.w, self.ytl)
br = (self.xtl + self.w, self.ytl + self.h)
bl = (self.xtl, self.ytl + self.h)
return [tl, tr, br, bl]
def to_unit_xywh(self):
"""
Get the ROI as a list, the values are top left x, to left y,
width and height. These values are scaled to unit values with
respect to the source image..
**RETURNS**
A list of the form [x,y,w,h]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print(roi.to_unit_xywh())
"""
if self._image is None:
return None
srcw = float(self._image.width)
srch = float(self._image.height)
x, y, w, h = self.to_xywh()
nx = 0
ny = 0
if x != 0:
nx = x / srcw
if y != 0:
ny = y / srch
return [nx, ny, w / srcw, h / srch]
def to_unit_tl_br(self):
"""
Get the ROI as a list of tuples of the ROI's top left
corner and bottom right corner. These coordinates are in unit
length values with respect to the source image.
**RETURNS**
A list of the form [(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> roi.translate(30,30)
>>> print(roi.to_unit_tl_br())
"""
if self._image is None:
return None
srcw = float(self._image.width)
srch = float(self._image.height)
x, y, w, h = self.to_xywh()
nx = 0
ny = 0
nw = w / srcw
nh = h / srch
if x != 0:
nx = x / srcw
if y != 0:
ny = y / srch
return [(nx, ny), (nx + nw, ny + nh)]
def to_unit_points(self):
"""
Get the ROI as a list of four points that make up the bounding rectangle.
Each point is represented in unit coordinates with respect to the
souce image.
**RETURNS**
A list of the form [(x,y),(x,y),(x,y),(x,y)]
**EXAMPLE**
>>> roi = ROI(10,10,100,100,img)
>>> print(roi.to_unit_points())
"""
if self._image is None:
return None
srcw = float(self._image.width)
srch = float(self._image.height)
pts = self.to_points()
ret = []
for p in pts:
x, y = p
if x != 0:
x /= srcw
if y != 0:
y /= srch
ret.append((x, y))
return ret
def coord_transform_x(self, x, intype='ROI', output='SRC'):
"""
Transform a single or a set of x values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *x* - A list of x values or a single x value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
>>> x = roi.crop()..... /find some x values in the crop region
>>> xt = roi.coord_transform_x(x)
>>> #xt are no in the space of the original image.
"""
if self._image is None:
logger.warning("No image to perform that calculation")
return None
if isinstance(x, (float, int)):
x = [x]
intype = intype.upper()
output = output.upper()
if intype == output:
return x
return self._transform(x, self._image.width, self.w, self.xtl, intype,
output)
def coord_transform_y(self, y, intype='ROI', output='SRC'):
"""
Transform a single or a set of y values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *y* - A list of y values or a single y value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
>>> y = roi.crop()..... /find some y values in the crop region
>>> yt = roi.coord_transform_y(y)
>>> #yt are no in the space of the original image.
"""
if self._image is None:
logger.warning("No image to perform that calculation")
return None
if isinstance(y, (float, int)):
y = [y]
intype = intype.upper()
output = output.upper()
if intype == output:
return y
return self._transform(y, self._image.height, self.h, self.ytl, intype,
output)
def coord_transform_pts(self, pts, intype='ROI', output='SRC'):
"""
Transform a set of (x,y) values from one reference frame to another.
Options are:
SRC - the coordinates of the source image.
ROI - the coordinates of the ROI
ROI_UNIT - unit coordinates in the frame of reference of the ROI
SRC_UNIT - unit coordinates in the frame of reference of source image.
**PARAMETERS**
* *_track_pts* - A list of (x,y) values or a single (x,y) value.
* *intype* - A string indicating the input format of the data.
* *output* - A string indicating the output format of the data.
**RETURNS**
A list of the transformed values.
**EXAMPLE**
>>> img = Image('lenna')
>>> blobs = img.findBlobs()
>>> roi = ROI(blobs[0])
>>> _track_pts = roi.crop()..... /find some x,y values in the crop region
>>> _track_pts = roi.coord_transform_pts(_track_pts)
>>> #yt are no in the space of the original image.
"""
if self._image is None:
logger.warning("No image to perform that calculation")
return None
if isinstance(pts, tuple) and len(pts) == 2:
pts = [pts]
intype = intype.upper()
output = output.upper()
x = [pt[0] for pt in pts]
y = [pt[1] for pt in pts]
if intype == output:
return pts
x = self._transform(x, self._image.width, self.w, self.xtl, intype,
output)
y = self._transform(y, self._image.height, self.h, self.ytl, intype,
output)
return zip(x, y)
def _transform(self, x, img_size, roi_size, offset, intype, output):
xtemp = []
# we are going to go to src unit coordinates
# and then we'll go back.
if intype == "SRC":
xtemp = [xt / float(img_size) for xt in x]
elif intype == "ROI":
xtemp = [(xt + offset) / float(img_size) for xt in x]
elif intype == "ROI_UNIT":
xtemp = [((xt * roi_size) + offset) / float(img_size) for xt in x]
elif intype == "SRC_UNIT":
xtemp = x
else:
logger.warning("Bad Parameter to CoordTransformX")
return None
ret = []
if output == "SRC":
ret = [int(xt * img_size) for xt in xtemp]
elif output == "ROI":
ret = [int((xt * img_size) - offset) for xt in xtemp]
elif output == "ROI_UNIT":
ret = [int(((xt * img_size) - offset) / float(roi_size)) for xt in
xtemp]
elif output == "SRC_UNIT":
ret = xtemp
else:
logger.warning("Bad Parameter to CoordTransformX")
return None
return ret
def split_x(self, x, unit_vals=False, src_vals=False):
"""
**SUMMARY**
Split the ROI at an x value.
x can be a list of sequentianl tuples of x split points e.g [0.3,0.6]
where we assume the top and bottom are also on the list.
Use units to split as a percentage (e.g. 30% down).
The srcVals means use coordinates of the original image.
**PARAMETERS**
* *x*-The split point. Can be a single point or a list of points. the type is determined by the flags.
* *unitVals* - Use unit vals for the split point. E.g. 0.5 means split at 50% of the ROI.
* *srcVals* - Use x values relative to the source image rather than relative to the ROI.
**RETURNS**
Returns a features set of ROIs split from the source ROI.
**EXAMPLE**
>>> roi = ROI(0,0,100,100,img)
>>> splits = roi.split_x(50) # create two ROIs
"""
retVal = FeatureSet()
if unit_vals and src_vals:
logger.warning("Not sure how you would like to split the features")
return None
if not isinstance(x, (list, tuple)):
x = [x]
if unit_vals:
x = self.coord_transform_x(x, intype="ROI_UNIT", output="SRC")
elif not src_vals:
x = self.coord_transform_x(x, intype="ROI", output="SRC")
for xt in x:
if xt < self.xtl or xt > self.xtl + self.w:
logger.warning("Invalid split point.")
return None
x.insert(0, self.xtl)
x.append(self.xtl + self.w)
for i in range(0, len(x) - 1):
xstart = x[i]
xstop = x[i + 1]
w = xstop - xstart
retVal.append(ROI(xstart, self.ytl, w, self.h, self._image))
return retVal
def split_y(self, y, unit_vals=False, src_vals=False):
"""
Split the ROI at an x value.
y can be a list of sequentianl tuples of y split points e.g [0.3,0.6]
where we assume the top and bottom are also on the list.
Use units to split as a percentage (e.g. 30% down).
The srcVals means use coordinates of the original image.
**PARAMETERS**
* *y*-The split point. Can be a single point or a list of points. the type is determined by the flags.
* *unitVals* - Use unit vals for the split point. E.g. 0.5 means split at 50% of the ROI.
* *srcVals* - Use x values relative to the source image rather than relative to the ROI.
**RETURNS**
Returns a features set of ROIs split from the source ROI.
**EXAMPLE**
>>> roi = ROI(0,0,100,100,img)
>>> splits = roi.split_y(50) # create two ROIs
"""
ret = FeatureSet()
if unit_vals and src_vals:
logger.warning("Not sure how you would like to split the features")
return None
if not isinstance(y, (list, tuple)):
y = [y]
if unit_vals:
y = self.coord_transform_y(y, intype="ROI_UNIT", output="SRC")
elif not src_vals:
y = self.coord_transform_y(y, intype="ROI", output="SRC")
for yt in y:
if yt < self.ytl or yt > self.ytl + self.h:
logger.warning("Invalid split point.")
return None
y.insert(0, self.ytl)
y.append(self.ytl + self.h)
for i in range(0, len(y) - 1):
ystart = y[i]
ystop = y[i + 1]
h = ystop - ystart
ret.append(ROI(self.xtl, ystart, self.w, h, self._image))
return ret
def merge(self, regions):
"""
**SUMMARY**
Combine another region, or regions with this ROI. Everything must be
in the source image coordinates. Regions can be a ROIs, [ROI], features,
FeatureSets, or anything that can be cajoled into a region.
**PARAMETERS**
* *regions* - A region or list of regions. Regions are just about anything that has position.
**RETURNS**
Nothing, but modifies this region.
**EXAMPLE**
>>> blobs = img.find_blobs()
>>> roi = ROI(blobs[0])
>>> print(roi.to_xywh())
>>> roi.merge(blobs[2])
>>> print(roi.to_xywh())
"""
result = self._standardize(regions)
if result is not None:
xo, yo, wo, ho = result
x = np.min([xo, self.xtl])
y = np.min([yo, self.ytl])
w = np.max([self.xtl + self.w, xo + wo]) - x
h = np.max([self.ytl + self.h, yo + ho]) - y
if self._image is not None:
x = np.clip(x, 0, self._image.width)
y = np.clip(y, 0, self._image.height)
w = np.clip(w, 0, self._image.width - x)
h = np.clip(h, 0, self._image.height - y)
self._rebase([x, y, w, h])
if isinstance(regions, ROI):
self._sub_features += regions
elif isinstance(regions, Feature):
self.subFeatures.append(regions)
elif isinstance(regions, (list, tuple)):
if isinstance(regions[0], ROI):
for r in regions:
self._sub_features += r._sub_features
elif isinstance(regions[0], Feature):
for r in regions:
self._sub_features.append(r)
def rebase(self, x, y=None, w=None, h=None):
"""
Completely alter roi using whatever source coordinates you wish.
"""
if isinstance(x, Feature):
self._sub_features.append(x)
elif (isinstance(x, (list, tuple)) and
len(x) > 0 and isinstance(x, Feature)):
self._sub_features += list(x)
result = self._standardize(x, y, w, h)
if result is None:
logger.warning("Could not create an ROI from your data.")
return
self._rebase(result)
def draw(self, color=Color.GREEN, width=3):
"""
**SUMMARY**
This method will draw the features on the source image.
**PARAMETERS**
* *_color* - The _color as an RGB tuple to render the image.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("RedDog2.jpg")
>>> blobs = img.find_blobs()
>>> blobs[-1].draw()
>>> img.show()
"""
x, y, w, h = self.to_xywh()
self._image.drawRectangle(x,y,w,h,width=width,color=color)
def show(self, color=Color.GREEN, width=2):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("logo")
>>> feat = img.find_blobs()
>>> feat[-1].show()
"""
self.draw(color, width)
self._image.show()
def mean_color(self):
"""
**SUMMARY**
Return the average _color within the features as a tuple.
**RETURNS**
An RGB _color tuple.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.find_blobs(128)
>>> for b in blobs:
>>> if b.mean_color() == Color.WHITE:
>>> print("Found a white thing")
"""
x, y, w, h = self.to_xywh()
return self._image.crop(x, y, w, h).mean_color()
def _rebase(self, roi):
x, y, w, h = roi
self._max_x = None
self._max_y = None
self._min_x = None
self._min_y = None
self._width = None
self._height = None
self._extents = None
self._bbox = None
self.xtl = x
self.ytl = y
self.w = w
self.h = h
self._points = [(x, y), (x + w, y), (x, y + h), (x + w, y + h)]
# WE MAY WANT TO DO A SANITY CHECK HERE
self._update_extents()
def _standardize(self, x, y=None, w=None, h=None):
if isinstance(x, np.ndarray):
x = x.tolist()
if isinstance(y, np.ndarray):
y = y.tolist()
# make the common case fast
if (isinstance(x, (int, float)) and isinstance(y, (int, float)) and
isinstance(w, (int, float)) and isinstance(h, (int, float))):
if self._image is not None:
x = np.clip(x, 0, self._image.width)
y = np.clip(y, 0, self._image.height)
w = np.clip(w, 0, self._image.width - x)
h = np.clip(h, 0, self._image.height - y)
return [x, y, w, h]
elif isinstance(x, ROI):
x, y, w, h = x.to_xywh()
# If it's a features extract what we need
elif isinstance(x, FeatureSet) and len(x) > 0:
# double check that everything in the list is a features
features = [feat for feat in x if isinstance(feat, Feature)]
xmax = np.max([feat.maxX() for feat in features])
xmin = np.min([feat.minX() for feat in features])
ymax = np.max([feat.maxY() for feat in features])
ymin = np.min([feat.minY() for feat in features])
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
elif isinstance(x, Feature):
feature = x
x = feature.points[0][0]
y = feature.points[0][1]
w = feature.width()
h = feature.height()
# [x,y,w,h] (x,y,w,h)
elif (isinstance(x, (tuple, list)) and len(x) == 4 and
isinstance(x[0], (int, long, float)) and
y is None and w is None and h is None):
x, y, w, h = x
# x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)]
# x of the form [[x,y],[x1,y1],[x2,y2],[x3,y3]]
# x of the form ([x,y],[x1,y1],[x2,y2],[x3,y3])
# x of the form ((x,y),(x1,y1),(x2,y2),(x3,y3))
elif (isinstance(x, (list, tuple)) and
isinstance(x[0], (list, tuple)) and
len(x) == 4 and len(x[0]) == 2 and
y is None is w is None is h is None):
if (len(x[0]) == 2 and len(x[1]) == 2 and
len(x[2]) == 2 and len(x[3]) == 2):
xmax = np.max([x[0][0], x[1][0], x[2][0], x[3][0]])
ymax = np.max([x[0][1], x[1][1], x[2][1], x[3][1]])
xmin = np.min([x[0][0], x[1][0], x[2][0], x[3][0]])
ymin = np.min([x[0][1], x[1][1], x[2][1], x[3][1]])
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
else:
logger.warning(
"x should be in the form ((x,y),(x1,y1),(x2,y2),(x3,y3))")
return None
# x,y of the form [x1,x2,x3,x4,x5....] and y similar
elif (isinstance(x, (tuple, list)) and
isinstance(y, (tuple, list)) and
len(x) > 4 and len(y) > 4):
if (isinstance(x[0], (int, long, float)) and
isinstance(y[0], (int, long, float))):
xmax = np.max(x)
ymax = np.max(y)
xmin = np.min(x)
ymin = np.min(y)
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
else:
logger.warning("x should be in the form x = [1,2,3,4,5] "
"y =[0,2,4,6,8]")
return None
# x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]
elif isinstance(x, (list, tuple) and len(x) > 4 and
len(x[0]) == 2 and y is None and
w is None and h is None):
if isinstance(x[0][0], (int, long, float)):
xs = [pt[0] for pt in x]
ys = [pt[1] for pt in x]
xmax = np.max(xs)
ymax = np.max(ys)
xmin = np.min(xs)
ymin = np.min(ys)
x = xmin
y = ymin
w = xmax - xmin
h = ymax - ymin
else:
logger.warning("x should be in the form [(x,y),(x,y),(x,y),"
"(x,y),(x,y),(x,y)]")
return None
# x of the form [(x,y),(x1,y1)]
elif (isinstance(x, (list, tuple)) and len(x) == 2 and
isinstance(x[0], (list, tuple)) and
isinstance(x[1], (list, tuple)) and
y is None and w is None and h is None):
if (len(x[0]) == 2 and len(x[1]) == 2):
xt = np.min([x[0][0], x[1][0]])
yt = np.min([x[0][0], x[1][0]])
w = np.abs(x[0][0] - x[1][0])
h = np.abs(x[0][1] - x[1][1])
x = xt
y = yt
else:
logger.warning("x should be in the form [(x1,y1),(x2,y2)]")
return None
# x and y of the form (x,y),(x1,y2)
elif (isinstance(x, (tuple, list)) and isinstance(y, (tuple, list)) and
w is None and h is None):
if len(x) == 2 and len(y) == 2:
xt = np.min([x[0], y[0]])
yt = np.min([x[1], y[1]])
w = np.abs(y[0] - x[0])
h = np.abs(y[1] - x[1])
x = xt
y = yt
else:
logger.warning("if x and y are tuple it should be in the form "
"(x1,y1) and (x2,y2)")
return None
if y is None or w is None or h is None:
logger.warning('Not a valid roi')
elif w <= 0 or h <= 0:
logger.warning("ROI can't have a negative dimension")
return None
if self._image is not None:
x = np.clip(x, 0, self._image.width)
y = np.clip(y, 0, self._image.height)
w = np.clip(w, 0, self._image.width - x)
h = np.clip(h, 0, self._image.height - y)
return [x, y, w, h]
def crop(self):
    """Crop the bound image to this ROI.

    Returns the result of the image's ``crop`` call for the stored
    (xtl, ytl, w, h) rectangle, or ``None`` when no image is attached.
    """
    # Guard clause: nothing to crop without a backing image.
    if self._image is None:
        return None
    return self._image.crop(self.xtl, self.ytl, self.w, self.h)
|
from rpython.rtyper.module.support import OOSupport
from rpython.rtyper.module.ll_os_path import BaseOsPath
class Implementation(BaseOsPath, OOSupport):
    """Concrete os.path implementation combining the two mix-in bases.

    All behaviour comes from ``BaseOsPath`` and ``OOSupport``; this class
    adds nothing of its own.
    """
    pass
|
# coding=utf-8
from flask import Flask
# Import the flask_sqlalchemy extension package
from flask_sqlalchemy import SQLAlchemy
from config import Config
app = Flask(__name__)
# Database configuration info (inline style kept here for reference):
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:mysql@localhost/python24'
# Use the configuration file instead
app.config.from_object(Config)
# Create the SQLAlchemy object; two instantiation styles exist:
db = SQLAlchemy(app)
# db.init_app(app)
# Model classes Role and User below form a one-to-many relationship mapping
class Role(db.Model):
    # The table name is optional; by default a table named after the class
    # (lowercase, not pluralised) would be created, hence the explicit name.
    __tablename__ = 'roles'
    id = db.Column(db.Integer,primary_key=True)
    name = db.Column(db.String(32),unique=True)
    # Relationship mapping with a back-reference; no physical column is
    # created in the database for it. The first argument is the "many"
    # side's class name; backref='role' enables many-to-one lookups
    # (user.role), while `us` enables one-to-many lookups (role.us).
    us = db.relationship('User',backref='role')

    # Readable string for database query output.
    def __repr__(self):
        return 'role:%s' % self.name
class User(db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32))
    email = db.Column(db.String(32),unique=True)
    # NOTE(review): password stored in plain text — consider hashing.
    pswd = db.Column(db.String(32))
    # Foreign key to the "one" side (roles.id).
    role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))

    def __repr__(self):
        return 'user:%s' % self.name
if __name__ == '__main__':
    # Rebuild the schema from scratch: drop every table, then recreate.
    db.drop_all()
    db.create_all()

    # Seed the roles and commit first so their primary keys are assigned
    # before the users reference them through role_id.
    ro1 = Role(name='admin')
    ro2 = Role(name='user')
    # The session is the database conversation object; add_all() stages
    # several rows at once (add() stages a single one).
    db.session.add_all([ro1, ro2])
    db.session.commit()

    # Seed the users, each wired to one of the committed roles.
    seed_users = [
        User(name='wang', email='wang@163.com', pswd='123456', role_id=ro1.id),
        User(name='zhang', email='zhang@189.com', pswd='201512', role_id=ro2.id),
        User(name='chen', email='chen@126.com', pswd='987654', role_id=ro2.id),
        User(name='zhou', email='zhou@163.com', pswd='456789', role_id=ro1.id),
    ]
    db.session.add_all(seed_users)
    db.session.commit()

    app.run()
# -*- coding: utf-8 -*-
from .datasets.rgb_sod import rgb_sod_data
from .methods import (
rgb_sod_methods,
)
# Registry keyed by task name; each entry bundles the task's datasets with
# its method groups (one group used for drawing, one for selecting).
total_info = {
    'rgb_sod': {
        'dataset': rgb_sod_data,
        'method': {
            'drawing': rgb_sod_methods.methods_info_for_drawing,
            'selecting': rgb_sod_methods.methods_info_for_selecting,
        },
    },
}
|
from libmyrepo.spam.models import Usuario
def test_salvar_usuario(sessao):
    """Persisting a user must assign it an integer primary key."""
    novo = Usuario(nome='Enos', email='enos@test.com')
    sessao.salvar(novo)
    assert isinstance(novo.id, int)
def test_listar_usuario(sessao):
    """Every saved user must come back from listar()."""
    esperados = [
        Usuario(nome='Enos', email='enos@test.com'),
        Usuario(nome='Teteo', email='teteo@test.com'),
    ]
    for usuario in esperados:
        sessao.salvar(usuario)
    assert esperados == sessao.listar()
|
# coding: utf-8
import typing
import functools
import logging
import uuid
import json
# Header / SQS message-attribute key used to propagate a request trace id
# across Lambda invocations.
X_APPLICATION_TRACE_ID: str = 'x-application-trace-id'
class LamBlackBox():
    """Small helper for Lambda handlers: trace-id generation plus a
    JSON logger that stamps every record with that trace id."""

    class Logger():
        """Wraps the root logger; every payload is serialised to JSON
        after the trace id has been merged in."""

        def __init__(self, trace_id: str, debug: bool = False) -> None:
            self.trace_id = trace_id
            self.logger = logging.getLogger()
            # Root-logger level: DEBUG when requested, INFO otherwise.
            self.logger.setLevel(logging.DEBUG if debug else logging.INFO)

        def set_trace_id(self, obj: typing.Dict) -> typing.Dict:
            """Add the trace-id key to *obj* (mutates it) and return it."""
            obj[X_APPLICATION_TRACE_ID] = self.trace_id
            return obj

        def _emit(self, method_name: str, obj: typing.Dict) -> None:
            # Shared path for all levels: stamp, serialise, dispatch.
            getattr(self.logger, method_name)(
                json.dumps(self.set_trace_id(obj)))

        def debug(self, obj: typing.Dict) -> None:
            self._emit('debug', obj)

        def info(self, obj: typing.Dict) -> None:
            self._emit('info', obj)

        def warning(self, obj: typing.Dict) -> None:
            self._emit('warning', obj)

        def error(self, obj: typing.Dict) -> None:
            self._emit('error', obj)

        def critical(self, obj: typing.Dict) -> None:
            self._emit('critical', obj)

        def exception(self, obj: typing.Dict) -> None:
            self._emit('exception', obj)

    def __init__(self) -> None:
        pass

    def gen_uuid(self) -> str:
        """Return a fresh random UUID4 as a string."""
        return str(uuid.uuid4())
def apigateway(func):
    """Decorator for API Gateway-triggered Lambda handlers.

    Ensures a trace id exists on the incoming event's headers (reusing the
    caller-supplied one when present), propagates it to the handler and the
    response headers, logs request/response as JSON, and converts any
    uncaught exception into a 500 response carrying the same trace id.
    """
    @functools.wraps(func)
    def wrapper(event, context):
        lbb = LamBlackBox()
        # BUG FIX: the event is not guaranteed to carry a 'headers' key
        # (API Gateway may also send it as null). The original read
        # event['headers'] here — before the try-block normalisation —
        # and raised KeyError for such events.
        if 'headers' not in event or event['headers'] is None:
            event['headers'] = {}
        trace_id = event['headers'].get(X_APPLICATION_TRACE_ID, lbb.gen_uuid())
        logger = LamBlackBox.Logger(trace_id)
        logger.info({'event': event})
        try:
            # Propagate the trace id into the event seen by the handler.
            event['headers'][X_APPLICATION_TRACE_ID] = trace_id
            ret = func(event, context)
            # Stamp the trace id onto the response headers as well.
            if 'headers' not in ret:
                ret['headers'] = {}
            ret['headers'][X_APPLICATION_TRACE_ID] = trace_id
            logger.info({'result': ret})
        except Exception as e:
            error_string = str(e)
            logger.exception({'result': error_string})
            ret = {
                'statusCode': 500,
                'headers': {
                    X_APPLICATION_TRACE_ID: trace_id,
                },
                'body': json.dumps({'result': error_string})
            }
        return ret
    return wrapper
def sqs(func):
    """Decorator for SQS-triggered Lambda handlers.

    Calls *func* once per record in the batch, logging each record and its
    result as JSON stamped with a trace id taken from the record's message
    attributes (or freshly generated). A failing record logs the error and
    re-raises, aborting the rest of the batch.
    """
    @functools.wraps(func)
    def wrapper(event, context):
        ret: typing.List = []
        for record in event['Records']:
            lbb = LamBlackBox()
            attrs = record['messageAttributes']
            trace_id = attrs.get(X_APPLICATION_TRACE_ID, {}).get(
                'Value', lbb.gen_uuid())
            logger = LamBlackBox.Logger(trace_id)
            logger.info({'record': record})
            try:
                result = func(record, context)
                logger.info({'result': result})
                ret.append(result)
            except Exception as e:
                logger.exception({'result': str(e)})
                raise e
        return ret
    return wrapper
|
# django imports
from django import forms
from django.db import models
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
# portlets imports
from portlets.models import Portlet
# lfs imports
from lfs.catalog.models import Category
from lfs.catalog.utils import get_current_top_category
from lfs.caching.utils import lfs_get_object
from lfs.marketing.models import FeaturedProduct
class FeaturedPortlet(Portlet):
    """A portlet for displaying featured products.

    Either shows featured products restricted to the current category (and
    its descendants) or, by default, featured products site-wide, capped
    at ``limit`` entries.
    """
    class Meta:
        app_label = 'portlet'

    # Display name of this portlet type in the management UI.
    name = _("Featured products")

    # Maximum number of featured products rendered.
    limit = models.IntegerField(_(u"Limit"), default=5)
    # When set, restrict products to the category of the current page.
    current_category = models.BooleanField(_(u"Use current category"), default=False)
    # When set, the template renders the products as a slideshow.
    slideshow = models.BooleanField(_(u"Slideshow"), default=False)

    @property
    def rendered_title(self):
        # Falls back to the type name when no explicit title is set
        # (``title`` presumably comes from the Portlet base — confirm).
        return self.title or self.name

    def render(self, context):
        """Renders the portlet as html.
        """
        request = context.get("request")
        if self.current_category:
            # Prefer the category from the context; otherwise derive it
            # from the product currently shown.
            obj = context.get("category") or context.get("product")
            if obj:
                category = obj if isinstance(obj, Category) else obj.get_current_category(request)
                # Include the category itself plus all of its descendants.
                categories = [category]
                categories.extend(category.get_all_children())
                filters = {"product__categories__in": categories}
                products = [x.product for x in FeaturedProduct.objects.filter(**filters)[:self.limit]]
            else:
                # No category/product in context: nothing to show.
                products = None
        else:
            # Site-wide featured products, capped at the limit.
            products = [x.product for x in FeaturedProduct.objects.all()[:self.limit]]
        return render_to_string("lfs/portlets/featured.html", RequestContext(request, {
            "title": self.rendered_title,
            "slideshow": self.slideshow,
            "products": products,
            "MEDIA_URL": context.get("MEDIA_URL"),
        }))

    def form(self, **kwargs):
        """Return the edit form for this portlet instance.
        """
        return FeaturedForm(instance=self, **kwargs)

    def __unicode__(self):
        # Python 2 string representation; just the database id.
        return "%s" % self.id
class FeaturedForm(forms.ModelForm):
    """Edit form for FeaturedPortlet.

    NOTE(review): Meta declares neither ``fields`` nor ``exclude``; on old
    Django versions this exposes every model field (newer versions refuse
    to start) — confirm this is intended.
    """
    class Meta:
        model = FeaturedPortlet
|
import pentacene as pen
import numpy as np
import math
import os
def main(aggWeight, aggDistance, aggCount, aggChance):
    """Sweep pentacene simulation parameters and save one plot per combination.

    Parameters are the upper bounds of the swept ranges:
    aggWeight   -- nominally the aggregation-weight bound; NOTE(review): it
                   is currently unused, the weight sweep is hard-coded to
                   0.10-0.30 — confirm whether that is intended.
    aggDistance -- upper bound for the aggregation-distance sweep
    aggCount    -- upper bound for the aggregate-count sweep
    aggChance   -- upper bound for the aggregation-chance sweep

    Plots are written to ./plots/distance=<b>/ with the remaining swept
    values encoded in the file name.
    """
    # After some testing, it seems like the desired effect does not happen
    # until aggDistance > 0.20, so that is why this is our lower bound.
    for b in np.linspace(0.20, aggDistance, 5):
        folder = "distance=%s/" % (b)
        path = os.path.join('./plots/', folder)
        # BUG FIX: os.mkdir raised FileNotFoundError when ./plots/ did not
        # exist yet and FileExistsError on re-runs; makedirs with
        # exist_ok=True handles both cases.
        os.makedirs(path, exist_ok=True)
        for a in np.linspace(0.10, 0.30, 5):
            for c in np.linspace(1, aggCount, 5):
                for d in np.linspace(1 / 10 * aggChance, aggChance, 5):
                    print(a, b, c, d)
                    # pen.main returns the plotting handle used to save
                    # and close the figure (presumably pyplot — confirm).
                    plt = pen.main(a, b, math.floor(c), d)
                    plt.savefig(os.path.join(
                        path,
                        "aggWeight=%s,count=%s,chance=%s.png" % (a, c, d)))
                    plt.close()


if __name__ == '__main__':
    main(1, 0.40, 25, 1)
|
"""
flare profiles for paper Bisikalo et al. 2018
https://doi.org/10.3847/1538-4357/aaed21
import warnings
warnings.filterwarnings('ignore')
"""
import matplotlib.pyplot as plt
from sunpy.timeseries import TimeSeries
from sunpy.time import TimeRange, parse_time
from sunpy.net import hek, Fido, attrs as a
import numpy as np
###############################################################################
# Let's first grab GOES XRS data for a particular time of interest
tr = TimeRange(['2011-06-07 06:00', '2011-06-07 10:00'])
results = Fido.search(a.Time(tr), a.Instrument('XRS'))
results  # bare expression: echoes in a notebook/REPL, no effect as a script
###############################################################################
# Then download the data and load it into a TimeSeries
files = Fido.fetch(results)
goes = TimeSeries(files)
###############################################################################
# Next lets grab the HEK data for this time from the NOAA Space Weather
# Prediction Center (SWPC)
client = hek.HEKClient()
flares_hek = client.search(hek.attrs.Time(tr.start, tr.end),
                           hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')
###############################################################################
# Finally lets plot everything together: the GOES curves with the first
# flare's peak time marked and its start-to-end interval shaded.
plt.close('all')
goes.peek()
plt.axvline(parse_time(flares_hek[0].get('event_peaktime')))
plt.axvspan(parse_time(flares_hek[0].get('event_starttime')),
            parse_time(flares_hek[0].get('event_endtime')),
            alpha=0.6, label=flares_hek[0].get('fl_goescls'))
plt.legend(loc=2)
plt.show()
#################convert data to np.array
# https://www.ngdc.noaa.gov/stp/satellite/goes/doc/GOES_XRS_readme.pdf
AU_planet = 0.0  # NOTE(review): unused below; 0.04747 is hard-coded — confirm
# Rescale the 1-AU fluxes by 1/d^2 for a planet at 0.04747 AU (presumably).
xrsa = np.array(goes.data.xrsa.tolist()) * 1. / (0.04747 ** 2)
xrsb = np.array(goes.data.xrsb.tolist()) * 1. / (0.04747 ** 2)
print(goes.units)
print('seconds per datapoint')
print(goes.time_range.seconds / len(xrsb))
# Time axis in hours, one entry per original datapoint.
time_hours = np.arange(0, round(goes.time_range.seconds.to_value()) - 1, goes.time_range.seconds.to_value() / len(xrsb)) / 3600
plt.figure(3)
# Resample both channels onto a uniform one-minute grid.
time_hours_minres = np.arange(0, time_hours[-1], 1 / 60)
xrsa_interp = np.interp(time_hours_minres, time_hours, xrsa)
xrsb_interp = np.interp(time_hours_minres, time_hours, xrsb)
plt.plot(time_hours_minres, np.log10(xrsa_interp))
plt.plot(time_hours_minres, np.log10(xrsb_interp))
plt.xlabel('Time in hours')
plt.ylabel('log10 W / m^2')
# Persist time + both interpolated channels as three text columns.
forsaving = np.column_stack((time_hours_minres, xrsa_interp, xrsb_interp))
np.savetxt('exo_flare_plots/flare_m_2_5.txt', forsaving, fmt='%10.10f', delimiter=" ")
|
# Given an array of integers arr, return true if and only if it is a valid mountain array.
#
# Recall that arr is a mountain array if and only if:
# arr.length >= 3
# There exists some i with 0 < i < arr.length - 1 such that:
# arr[0] < arr[1] < ... < arr[i - 1] < arr[i]
# arr[i] > arr[i + 1] > ... > arr[arr.length - 1]
#
# Example 1:
# Input: arr = [2, 1]
# Output: false
#
# Example 2:
# Input: arr = [3, 5, 5]
# Output: false
#
# Example 3:
# Input: arr = [0, 3, 2, 1]
# Output: true
#
# Constraints:
#
# 1 <= arr.length <= 10^4
# 0 <= arr[i] <= 10^4
class Solution:
    def validMountainArray(self, arr: List[int]) -> bool:
        """Return True iff arr strictly increases to a peak and then
        strictly decreases, with both sides non-empty (length >= 3)."""
        n = len(arr)
        if n < 3:
            return False
        # Walk up the strictly increasing slope.
        i = 0
        while i + 1 < n and arr[i] < arr[i + 1]:
            i += 1
        # The peak may not be the first or last element.
        if i == 0 or i == n - 1:
            return False
        # Walk down the strictly decreasing slope.
        while i + 1 < n and arr[i] > arr[i + 1]:
            i += 1
        # Valid only if the descent consumed the rest of the array
        # (a plateau or re-ascent would have stopped it early).
        return i == n - 1
|
"""
Django settings for horseradish project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
# Project root: two directories up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Recipients of error e-mails (see the mail_admins handler in LOGGING).
ADMINS = (
    ('Jeremy', 'jcarbaugh@sunlightfoundation.com'),
)
MANAGERS = ADMINS

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): None when the env var is unset, which makes Django refuse
# to start — presumably intentional to force configuration; confirm.
SECRET_KEY = os.environ.get('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', '').lower() == 'true'
TEMPLATE_DEBUG = DEBUG

# NOTE(review): with the env var unset this yields [''], which matches no
# host — confirm ALLOWED_HOSTS is always provided in production.
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '').split(",")

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_markup',
    'googleauth',
    'haystack',
    'imagekit',
    'pagination_bootstrap',
    'raven.contrib.django.raven_compat',
    'taggit',
    'photolib',
    'debug_toolbar',
)

MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'pagination_bootstrap.middleware.PaginationMiddleware',
)

ROOT_URLCONF = 'horseradish.urls'
WSGI_APPLICATION = 'horseradish.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Parsed from the DATABASE_URL environment variable by dj-database-url.
DATABASES = {'default': dj_database_url.config()}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.environ.get('STATIC_ROOT')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'horseradish', 'static'),)

# templates
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'horseradish', 'templates'),)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    "django.core.context_processors.static",
    "django.core.context_processors.tz",
    "django.contrib.messages.context_processors.messages",
)

# auth — Google OAuth first, falling back to Django's model backend.
AUTHENTICATION_BACKENDS = (
    'googleauth.backends.GoogleAuthBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = "/login/"
LOGOUT_URL = "/logout/"
LOGIN_REDIRECT_URL = "/"

# NOTE(review): pickle-based sessions execute arbitrary code if SECRET_KEY
# is ever compromised — consider the JSON serializer.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'

#
# custom stuff
#

DEBUG_TOOLBAR_PATCH_SETTINGS = False
INTERNAL_IPS = ('127.0.0.1', '::1')

# Media uploads go to S3 via django-storages.
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_KEY')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_BUCKET')
AWS_LOCATION = os.environ.get('AWS_LOCATION', '')
# Serve public URLs without signed query strings.
AWS_QUERYSTRING_AUTH = False

# Search: Elasticsearch via Haystack, indexing on every save.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': os.environ.get('ELASTICSEARCH_URL'),
        'INDEX_NAME': os.environ.get('ELASTICSEARCH_INDEX'),
    },
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'

PHOTOS_PER_PAGE = 20
# Choices list for the photo-source field (value, label) pairs.
PHOTO_SOURCES = [(s, s) for s in sorted((
    'Flickr',
    'iStockphoto',
))]

GOOGLEAUTH_IS_STAFF = False
GOOGLEAUTH_USE_HTTPS = os.environ.get('GOOGLEAUTH_USE_HTTPS', 'True') == 'True'
GOOGLEAUTH_CLIENT_ID = os.environ.get('GOOGLEAUTH_CLIENT_ID')
GOOGLEAUTH_CLIENT_SECRET = os.environ.get('GOOGLEAUTH_CLIENT_SECRET')
GOOGLEAUTH_CALLBACK_DOMAIN = os.environ.get('GOOGLEAUTH_CALLBACK_DOMAIN')
GOOGLEAUTH_APPS_DOMAIN = os.environ.get('GOOGLEAUTH_APPS_DOMAIN')

IMAGEKIT_SPEC_CACHEFILE_NAMER = 'photolib.namers.size_aware'

# Sentry error reporting.
RAVEN_CONFIG = {
    'dsn': os.environ.get('RAVEN_DSN'),
}

# logging: e-mail unhandled request errors to ADMINS when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
from func import set_discovery_issuer
from func import set_webfinger_resource
from jwkest import BadSignature
from oic.exception import IssuerMismatch
from oic.exception import PyoidcError
from otest.func import set_op_args
from otest.func import set_request_args
from oidctest.op.oper import AccessToken
from oidctest.op.oper import Discovery
from oidctest.op.oper import Registration
from oidctest.op.oper import SubjectMismatch
from oidctest.op.oper import SyncAuthn
from oidctest.op.oper import UpdateProviderKeys
from oidctest.op.oper import UserInfo
from oidctest.op.oper import Webfinger
from oidctest.testfunc import expect_exception
from oidctest.testfunc import resource
from oidctest.testfunc import set_jwks_uri
__author__ = 'roland'

# Ordering of the test-flow groups (presumably as presented by the tool).
ORDDESC = ["rp-discovery", "rp-registration",
           "rp-response_type", "rp-response_mode",
           "rp-token_endpoint", "rp-id_token",
           "rp-userinfo", "rp-claims", "rp-claims-request",
           "rp-request_uri", "rp-scope", "rp-nonce", "rp-key-rotation"]

# FLOWS maps a test id to its definition:
#   "sequence": operations run in order; each entry is an oper class or a
#               (class, {config_func: args}) pair applied to that step,
#   "profile":  response-type profile pattern the test applies to,
#   "desc":     human-readable description,
#   "tests":    (optional) extra per-step assertions.
FLOWS = {
    # --- Discovery -------------------------------------------------------
    "rp-discovery-webfinger-url": {
        "sequence": [(Webfinger, {set_webfinger_resource: {}})],
        "desc": "Can Discover Identifiers using URL Syntax",
        "profile": ".T..",
    },
    "rp-discovery-webfinger-acct": {
        "sequence": [(Webfinger, {resource: {"pattern": "acct:{}@{}"},
                                  set_webfinger_resource: {}})],
        "desc": "Can Discover Identifiers using acct Syntax",
        "profile": ".T..",
    },
    "rp-discovery-openid-configuration": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}})
        ],
        "profile": "..T.",
        "desc": "Uses openid-configuration Discovery Information"
    },
    # "rp-discovery-issuer-not-matching-config": {
    #     "sequence": [
    #         (Webfinger, {set_webfinger_resource: {}}),
    #         (Discovery, {expect_exception: IssuerMismatch})
    #     ],
    #     "profile": "..T.",
    #     "desc": "Retrieve openid-configuration information for OpenID
    # Provider from the .well-known/openid-configuration path. Verify that
    # the issuer in the openid-configuration matches the one returned by
    # WebFinger"
    # },
    "rp-discovery-jwks_uri-keys": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}})
        ],
        "profile": "..T.",
        "desc": "Can read and understand jwks_uri",
        "tests": {
            "providerinfo-has-jwks_uri": {},
            "bare-keys": {}
        }
    },
    "rp-discovery-issuer-not-matching-config": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {},
                         expect_exception: IssuerMismatch})
        ],
        "profile": "..T.",
        "desc": "Will detect a faulty issuer claim in OP config"
    },
    # --- Dynamic client registration ------------------------------------
    "rp-registration-dynamic": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration
        ],
        "profile": "...T",
        "desc": "Uses Dynamic Registration"
    },
    "rp-registration-redirect_uris": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {set_request_args: {"redirect_uris": [""]},
                            expect_exception: PyoidcError}),
            Registration
        ],
        "profile": "...T",
        "desc": "Sends redirect_uris value which only contains a empty string "
                "while doing a registration request. Then send a valid "
                "redirect_uris list"
    },
    "rp-registration-uses-https-endpoints": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {"redirect_uris": ["http://test.com"]},
              expect_exception: PyoidcError}),
        ],
        "profile": "I,IT,CI,CT,CIT...T",
        "desc": "Sends a redirect_uri endpoint which does not use https. The "
                "a valid redirect_uri is sent to the OP"
    },
    "rp-registration-well-formed-jwk": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {set_request_args: {"jwks": {
                "keys": [{
                    "use": "sig",
                    "n":
                        "tAAzYdbiWDAKI8Q3s1crQRuVp0QXpyGgnzx_sGItC2rhdug68gE9v5mfK-7SJCBpuZXzX1YevJ25B0LhNQSWqvb6gYwlNHs33G8VmSzjpqFazItnhKMPnEehCXmPl7iFi8VV0NCC5_uH9xP61TClWsE8B7i4CV6y9B0hZI22p2M",
                    "e": "AQAB",
                    "kty": "RSA",
                    "kid": "a1"
                }]},
                "id_token_signed_response_alg": "RS256",}}),
            (SyncAuthn, {set_op_args: {"response_type": ["id_token"]}}),
        ],
        "profile": "...T",
        "desc": ""
    },
    # --- Response types / modes -----------------------------------------
    "rp-response_type-code": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn
        ],
        "profile": "C...",
        "desc": "Can Make Request with 'code' Response Type"
    },
    "rp-response_type-id_token": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {"id_token_signed_response_alg": "RS256"}}),
            SyncAuthn
        ],
        "desc": "Can Make Request with 'id_token' Response Type",
        "profile": "I...",
    },
    "rp-response_type-id_token+token": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {"id_token_signed_response_alg": "RS256"}}),
            SyncAuthn
        ],
        "profile": "IT...",
        "desc": "Can Make Request with 'id_token token' Response Type"
    },
    "rp-response_mode-form_post": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {"id_token_signed_response_alg": "RS256"}}),
            (SyncAuthn, {set_request_args: {"response_mode": ["form_post"]}})
        ],
        "profile": "I,IT,CI,CT,CIT,...",
        "desc": "Can Make Request with response_mode=form_post"
    },
    # --- Token-endpoint client authentication ---------------------------
    "rp-authentication-client_secret_basic": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            (AccessToken,
             {set_op_args: {"authn_method": "client_secret_basic"}})
        ],
        "profile": "C,CI,CIT...",
        "desc": "Can Make Access Token Request with 'client_secret_basic' "
                "Authentication"
    },
    "rp-authentication-client_secret_post": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {
                 "token_endpoint_auth_method": "client_secret_post"}}),
            SyncAuthn,
            (AccessToken,
             {set_op_args: {"authn_method": "client_secret_post"}})
        ],
        "profile": "C,CI,CIT...",
        "desc": "Can Make Access Token Request with 'client_secret_post' "
                "Authentication"
    },
    "rp-authentication-client_secret_jwt": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration,
             {set_request_args: {
                 "token_endpoint_auth_method": "client_secret_jwt"}}),
            SyncAuthn,
            (AccessToken,
             {set_op_args: {"authn_method": "client_secret_jwt"}})
        ],
        "profile": "C,CI,CIT...",
        "desc": "Can Make Access Token Request with 'client_secret_jwt' "
                "Authentication"
    },
    "rp-authentication-private_key_jwt": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "token_endpoint_auth_method": "private_key_jwt"},
                set_jwks_uri: None
            }),
            SyncAuthn,
            (AccessToken,
             {set_request_args: {"authn_method": "private_key_jwt"}})
        ],
        "profile": "C,CI,CIT...",
        "desc": "Can Make Access Token Request with 'private_key_jwt' "
                "Authentication"
    },
    # --- ID Token signature / encryption handling -----------------------
    "rp-id_token-sig-rs256": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "RS256"
                }
            }),
            (SyncAuthn, {set_op_args: {"response_type": ["id_token"]}}),
        ],
        "profile": "I...T",
        "desc": "Accept Valid Asymmetric ID Token Signature"
    },
    "rp-id_token-sig-hs256": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "HS256"
                }
            }),
            (SyncAuthn, {set_op_args: {"response_type": ["id_token"]}})
        ],
        "profile": "I...T",
        "desc": "Accept Valid Symmetric ID Token Signature"
    },
    "rp-id_token-bad-sig-rs256": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "RS256"
                }
            }),
            (SyncAuthn, {
                set_op_args: {"response_type": ["id_token"]},
                expect_exception: BadSignature
            }),
        ],
        "profile": "I...T",
        "desc": "Reject Invalid Asymmetric ID Token Signature"
    },
    "rp-id_token-bad-sig-es256": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "ES256"
                }
            }),
            (SyncAuthn, {
                set_op_args: {"response_type": ["id_token"]},
                expect_exception: BadSignature
            })
        ],
        "profile": "I...T",
        "desc": "Reject Invalid Asymmetric ID Token Signature"
    },
    "rp-id_token-bad-sig-hs256": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "HS256"
                }
            }),
            (SyncAuthn, {
                set_op_args: {"response_type": ["id_token"]},
                expect_exception: BadSignature
            })
        ],
        "profile": "I...T",
        "desc": "Reject Invalid Symmetric ID Token Signature"
    },
    "rp-id_token-sig+enc": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "id_token_signed_response_alg": "HS256",
                    "id_token_encrypted_response_alg": "RSA1_5",
                    "id_token_encrypted_response_enc": "A128CBC-HS256"},
                set_jwks_uri: None
            }),
            (SyncAuthn, {set_op_args: {"response_type": ["id_token"]}}),
        ],
        "profile": "I...T",
        "desc": "Can Request and Use Signed and Encrypted ID Token Response",
    },
    "rp-id_token-sig-none": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {"id_token_signed_response_alg": "none"}
            }),
            (SyncAuthn, {set_op_args: {"response_type": ["code"]}}),
            AccessToken
        ],
        "profile": "C,CT,CIT...T",
        "desc": "Can Request and Use unSigned ID Token Response"
    },
    # --- UserInfo endpoint ----------------------------------------------
    "rp-userinfo-bad-sub-claim": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            AccessToken,
            (UserInfo, {expect_exception: SubjectMismatch})
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Reject UserInfo with Invalid Sub claim"
    },
    # --- Claims requests -------------------------------------------------
    "rp-claims-request-id_token": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {set_request_args: {
                "claims": {
                    "id_token": {
                        "auth_time": {
                            "essential": True
                        },
                        "email": {
                            "essential": True
                        },
                    }
                }
            }}),
            AccessToken
        ],
        "profile": "...",
        "desc": "The Relying Party can ask for a specific claim using the "
                "'claims' request parameter. The claim should be returned in "
                "an ID Token"
    },
    "rp-claims-request-userinfo": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {
                set_request_args: {
                    "claims": {
                        "userinfo": {
                            "email": {
                                "essential": True
                            },
                        }
                    }
                },
            }),
            AccessToken,
            UserInfo
        ],
        "profile": "C,IT,CI,CT,CIT...",
        "desc": "The Relying Party can ask for a specific claim using the "
                "'claims' request parameter. The claim should be returned in "
                "a UserInfo response",
    },
    # --- Scope handling --------------------------------------------------
    "rp-scope-openid": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {set_request_args: {"scope": ["wrong"]}}),
        ],
        "profile": "...",
        "desc": "The Relying Party should always add the openid scope value "
                "while sending an Authorization Request.",
    },
    "rp-scope-userinfo-claims": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn,
             {set_request_args: {"scope": ["openid", "email", "profile"]}}),
            AccessToken,
            UserInfo
        ],
        "profile": "IT,CT,CIT...",
        "desc": "The Relying Party should be able to request claims using "
                "Scope Values",
    },
    "rp-userinfo-bearer-body": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            AccessToken,
            (UserInfo, {
                set_request_args: {
                    "behavior": "token_in_message_body"
                }
            })
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Accesses UserInfo Endpoint with form-encoded body method"
    },
    "rp-userinfo-bearer-header": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            AccessToken,
            (UserInfo, {
                set_request_args: {
                    "behavior": "use_authorization_header"
                }
            })
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Accesses UserInfo Endpoint with Header Method "
    },
    "rp-userinfo-enc": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "userinfo_encrypted_response_alg": "RSA1_5",
                    "userinfo_encrypted_response_enc": "A128CBC-HS256"
                },
                set_jwks_uri: None
            }),
            SyncAuthn,
            AccessToken,
            UserInfo
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Can Request and Use Encrypted UserInfo Response "
    },
    "rp-userinfo-sig+enc": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "userinfo_signed_response_alg": "RS256",
                    "userinfo_encrypted_response_alg": "RSA1_5",
                    "userinfo_encrypted_response_enc": "A128CBC-HS256"
                },
                set_jwks_uri: None
            }),
            SyncAuthn,
            AccessToken,
            UserInfo
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Can Request and Use Signed and Encrypted UserInfo Response"
    },
    "rp-userinfo-sign": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            (Registration, {
                set_request_args: {
                    "userinfo_signed_response_alg": "RS256",
                },
                set_jwks_uri: None
            }),
            SyncAuthn,
            AccessToken,
            UserInfo
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Can Request and Use Signed UserInfo Response"
    },
    "rp-claims-aggregated": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            AccessToken,
            UserInfo
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Can handle aggregated user information"
    },
    "rp-claims-distributed": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            AccessToken,
            UserInfo
        ],
        "profile": "C,CI,CT,CIT...",
        "desc": "Handles distributed user information"
    },
    # --- Nonce handling --------------------------------------------------
    "rp-nonce-invalid": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn
        ],
        "profile": "I,IT,CI,CIT...",
        "desc": "If a nonce value was sent in the Authentication Request the "
                "Relying Party must validate the nonce returned in the ID "
                "Token."
    },
    "rp-nonce-unless-code-flow": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {set_request_args: {"nonce": None}})
        ],
        "profile": "I,IT,CI,CIT...",
        "desc": "The Relying Party should always send a nonce as a request "
                "parameter while using implicit or hybrid flow. "
                "Since the server is suppose to return the nonce in the ID "
                "Token return from Authorization Endpoint, "
                "see ID Token required claims in hybrid flow or implicit "
                "flow. When using Code flow the the nonce is not "
                "required, see ID Token validation for code flow"
    },
    # --- request_uri and key rotation -----------------------------------
    "rp-request_uri-enc": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {set_op_args: {"request_method": "file",
                                       "request_object_encryption_alg":
                                           "RSA1_5",
                                       "request_object_encryption_enc":
                                           "A128CBC-HS256",
                                       "local_dir": "./request_objects",
                                       "base_path":
                                           "https://localhost:8088/request_objects/"
                                       }})
        ],
        "profile": "...",
        "desc": "The Relying Party can pass a Request Object by reference "
                "using the request_uri parameter. "
                "Encrypt the Request Object using RSA1_5 and A128CBC-HS256 "
                "algorithms"
    },
    "rp-key-rotation-op-enc-key": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            (SyncAuthn, {
                set_op_args: {
                    "request_method": "request",
                    "request_object_encryption_alg": "RSA1_5",
                    "request_object_encryption_enc": "A128CBC-HS256"
                }
            }),
            UpdateProviderKeys,
            (SyncAuthn, {
                set_op_args: {
                    "request_method": "request",
                    "request_object_encryption_alg": "RSA1_5",
                    "request_object_encryption_enc": "A128CBC-HS256"
                }
            }),
        ],
        "profile": "...",
        "desc": "Support OP Encryption Key Rollover"
    },
    "rp-key-rotation-op-sign-key": {
        "sequence": [
            (Webfinger, {set_webfinger_resource: {}}),
            (Discovery, {set_discovery_issuer: {}}),
            Registration,
            SyncAuthn,
            SyncAuthn
        ],
        "profile": "I,IT,CI,CIT...",
        "desc": "Support OP Signing Key Rollover"
    }
}
|
"""Flag for whether a date falls on a public holiday in Singapore."""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
import pandas as pd
# https://github.com/rjchow/singapore_public_holidays
def make_holiday_frame():
    """Load the embedded Singapore public-holiday table (2016-2020).

    Returns a pandas DataFrame with columns Date, Name, Day, Observance
    (the date the holiday is actually observed) and "Observance Strategy"
    (actual_day, or next_monday when the holiday falls on a Sunday).
    Data source: https://github.com/rjchow/singapore_public_holidays
    """
    return dt.fread(
        """
Date,Name,Day,Observance,Observance Strategy
2016-01-01,New Year's Day,Friday,2016-01-01,actual_day
2016-02-08,Chinese New Year Day 1,Monday,2016-02-08,actual_day
2016-02-09,Chinese New Year Day 2,Tuesday,2016-02-09,actual_day
2016-03-25,Good Friday,Friday,2016-03-25,actual_day
2016-05-01,Labour Day,Sunday,2016-05-01,next_monday
2016-05-21,Vesak Day,Saturday,2016-05-21,actual_day
2016-07-06,Hari Raya Puasa,Wednesday,2016-07-06,actual_day
2016-08-09,National Day,Tuesday,2016-08-09,actual_day
2016-09-12,Hari Raya Haji,Monday,2016-09-12,actual_day
2016-10-29,Deepavali,Saturday,2016-10-29,actual_day
2016-12-25,Christmas Day,Sunday,2016-12-26,next_monday
2017-01-01,New Year's Day,Sunday,2017-01-02,next_monday
2017-01-28,Chinese New Year Day 1,Saturday,2017-01-28,actual_day
2017-01-29,Chinese New Year Day 2,Sunday,2017-01-30,next_monday
2017-04-14,Good Friday,Friday,2017-04-14,actual_day
2017-05-01,Labour Day,Monday,2017-05-01,actual_day
2017-05-10,Vesak Day,Wednesday,2017-05-10,actual_day
2017-06-25,Hari Raya Puasa,Sunday,2017-06-26,next_monday
2017-08-09,National Day,Wednesday,2017-08-09,actual_day
2017-09-01,Hari Raya Haji,Friday,2017-09-01,actual_day
2017-10-18,Deepavali,Wednesday,2017-10-18,actual_day
2017-12-25,Christmas Day,Monday,2017-12-25,actual_day
2018-01-01,New Year's Day,Monday,2018-01-01,actual_day
2018-02-16,Chinese New Year Day 1,Friday,2018-02-16,actual_day
2018-02-17,Chinese New Year Day 2,Saturday,2018-02-17,actual_day
2018-03-30,Good Friday,Friday,2018-03-30,actual_day
2018-05-01,Labour Day,Tuesday,2018-05-01,actual_day
2018-05-29,Vesak Day,Tuesday,2018-05-29,actual_day
2018-06-15,Hari Raya Puasa,Friday,2018-06-15,actual_day
2018-08-09,National Day,Thursday,2018-08-09,actual_day
2018-08-22,Hari Raya Haji,Wednesday,2018-08-22,actual_day
2018-11-06,Deepavali,Tuesday,2018-11-06,actual_day
2018-12-25,Christmas Day,Tuesday,2018-12-25,actual_day
2019-01-01,New Year's Day,Monday,2019-01-01,actual_day
2019-02-05,Chinese New Year Day 1,Tuesday,2019-02-05,actual_day
2019-02-06,Chinese New Year Day 2,Wednesday,2019-02-06,actual_day
2019-04-19,Good Friday,Friday,2019-04-19,actual_day
2019-05-01,Labour Day,Wednesday,2019-05-01,actual_day
2019-05-19,Vesak Day,Sunday,2019-05-20,next_monday
2019-06-05,Hari Raya Puasa,Wednesday,2019-06-05,actual_day
2019-08-09,National Day,Friday,2019-08-09,actual_day
2019-08-11,Hari Raya Haji,Sunday,2019-08-12,next_monday
2019-10-27,Deepavali,Sunday,2019-10-27,next_monday
2019-12-25,Christmas Day,Wednesday,2019-12-25,actual_day
2020-01-01,New Year's Day,Wednesday,2020-01-01,actual_day
2020-01-25,Chinese New Year Day 1,Saturday,2020-01-25,actual_day
2020-01-26,Chinese New Year Day 2,Sunday,2020-01-27,next_monday
2020-04-10,Good Friday,Friday,2020-04-10,actual_day
2020-05-01,Labour Day,Friday,2020-05-01,actual_day
2020-05-07,Vesak Day,Thursday,2020-05-07,actual_day
2020-05-24,Hari Raya Puasa,Sunday,2020-05-25,next_monday
2020-07-31,Hari Raya Haji,Friday,2020-07-31,actual_day
2020-08-09,National Day,Sunday,2020-08-10,next_monday
2020-11-14,Deepavali,Saturday,2020-11-14,actual_day
2020-12-25,Christmas Day,Friday,2020-12-25,actual_day
""").to_pandas()
class SingaporePublicHolidayTransformer(CustomTransformer):
    """Flag whether a date column value falls on a Singapore public holiday."""

    @staticmethod
    def get_default_properties():
        # Operates on exactly one date-typed input column.
        return dict(col_type="date", min_cols=1, max_cols=1, relative_importance=1)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.time_column = self.input_feature_names[0]
        # Use the 'Observance' dates (a holiday falling on Sunday is
        # observed on the following Monday).
        hdays = make_holiday_frame()['Observance']
        self.memo = pd.DataFrame(hdays, columns=[self.time_column], dtype='datetime64[ns]')
        # Key the lookup table by (year, day-of-year) for the merge in transform().
        self.memo['year'] = self.memo[self.time_column].dt.year
        self.memo['doy'] = self.memo[self.time_column].dt.dayofyear
        self.memo.drop(self.time_column, axis=1, inplace=True)

    def fit_transform(self, X: dt.Frame, y: np.array = None):
        # Stateless per-row lookup, so fitting is the same as transforming.
        return self.transform(X)

    def transform(self, X: dt.Frame):
        """Return a single int column 'is_holiday' (1/0), row-aligned with X."""
        X = X[:, self.time_column]
        if X[:, self.time_column].ltypes[0] != dt.ltype.str:
            # Numeric datetime encodings: stringify so pd.to_datetime can
            # parse them with the stored format below.
            assert self.datetime_formats[self.time_column] in ["%Y%m%d", "%Y%m%d%H%M", "%Y", "%Y%m"]
            X[:, self.time_column] = dt.stype.str32(dt.stype.int64(dt.f[0]))
        X.replace(['', 'None'], None)
        X = X.to_pandas()
        X.loc[:, self.time_column] = pd.to_datetime(X[self.time_column],
                                                    format=self.datetime_formats[self.time_column])
        X['year'] = X[self.time_column].dt.year
        X['doy'] = X[self.time_column].dt.dayofyear
        X.drop(self.time_column, axis=1, inplace=True)
        feat = 'is_holiday'
        # Temporarily add the indicator column to the memo for the merge,
        # then remove it so repeated calls start clean.
        # NOTE(review): mutating self.memo makes this non-reentrant — confirm
        # transform() is never called concurrently on the same instance.
        self.memo[feat] = 1
        X = X.merge(self.memo, how='left', on=['year', 'doy']).fillna(0)
        self.memo.drop(feat, axis=1, inplace=True)
        X = X[[feat]].astype(int)
        return X
|
#!/usr/bin/python
# -*- coding: utf-8 -*-f
#Import modules for CGI handling
import cgi, cgitb
import Cookie, os, time
try:
import pigpio
except:
pass
#============================ config ALERT ===================== #
import ConfigParser
Config = ConfigParser.ConfigParser()
setting = Config.read('setting/config.ini')
settingSec = Config.sections()
#print settingSec
def ConfigSectionMap(section):
    """Return all options of *section* from the module-level Config as a dict."""
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            # NOTE(review): Config.get returns a string, so this comparison
            # with int -1 can never be true, and DebugPrint is undefined —
            # effectively dead code, kept as-is (doc-only change).
            if dict1[option] == -1:
                DebugPrint("skip: %s" % option)
        except:
            # Broad except: any lookup failure maps the option to None.
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
#Reading config ALERT
def readALERT():
    # Return the 'status' value of the [ALERT] section, or print a message
    # and implicitly return None when the section/key is missing.
    try:
        ALERT = ConfigSectionMap('ALERT')
        return ALERT['status']
    except:
        # Python 2 print statement; broad except swallows all errors.
        print 'Data not found.'
# Update config ALERT
def writeALERT(value):
    """Persist *value* as [ALERT] status back to setting/config.ini."""
    # lets create that config file for next time...
    # NOTE(review): the handle is not closed if Config.write raises, and the
    # whole file is rewritten from the in-memory Config object.
    cfgfile = open("setting/config.ini",'wb')
    # add update the settings to the structure of the file, and lets write it out...
    Config.set('ALERT','status', value)
    Config.write(cfgfile)
    cfgfile.close()
#==================================================================================
cgitb.enable()  # show tracebacks in the browser while debugging the CGI
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
AlertStatus = form.getvalue('AlertStatus')
if AlertStatus is None:
    # No form value submitted: fall back to the persisted config value.
    AlertStatus = readALERT()
else:
    AlertStatus = form.getvalue('AlertStatus')
    # Persist the submitted value so it survives the next request.
    writeALERT(AlertStatus)
#==================================================================================
cookie = Cookie.SimpleCookie()
# Raw Cookie header sent by the browser (None when absent).
cookie_string = os.environ.get('HTTP_COOKIE')
def getCookies():
    """Return True when the browser's 'login' cookie equals 'success'.

    Reads the module-level HTTP_COOKIE string. Returns False when no
    cookie header is present, the 'login' cookie is missing, or its
    value is anything other than 'success'.
    """
    if not cookie_string:
        return False
    # load() parses the raw cookie header into the SimpleCookie object.
    cookie.load(cookie_string)
    # Bug fix: a request without a 'login' cookie used to raise an
    # uncaught KeyError and crash the CGI script; treat it as logged out.
    if 'login' not in cookie:
        return False
    return str(cookie['login'].value) == 'success'
if getCookies() == False:
    # Not authenticated: emit a minimal page that immediately redirects
    # the browser to the public home page.
    print 'Content-Type: text/html\n'
    print '<html><head>'
    homeIP = 'siczones.coe.psu.ac.th'
    print ('''<meta http-equiv="refresh" content="0.1;http://%s">'''%(homeIP))
    print '</head></html>'
else:
    # Authenticated: render the full Alert configuration page.
    print ("Content-type:text/html\r\n\r\n")
print ('''<!DOCTYPE html>
<html lang="en">
<head>
<title>Alert</title>
<meta charset="utf-8">
<link href="../favicon.ico" rel="icon" type="image/x-icon"/>
<link href="../favicon.ico" rel="shortcut icon" type="image/x-icon"/>
<!-- This file has been downloaded from Bootsnipp.com. Enjoy! -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Kaushan+Script' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic,700italic' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Roboto+Slab:400,100,300,700' rel='stylesheet' type='text/css'>
<!-- Theme CSS -->
<link href="/css/agency.css" rel="stylesheet">
<link href="/css/siczones.css" rel="stylesheet">
<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
<script>
$(document).ready(function(){
$(window).scroll(function () {
if ($(this).scrollTop() > 50) {
$('#back-to-top').fadeIn();
} else {
$('#back-to-top').fadeOut();
}
});
// scroll body to 0px on click
$('#back-to-top').click(function () {
$('#back-to-top').tooltip('hide');
$('body,html').animate({
scrollTop: 0
}, 800);
return false;
});
$('#back-to-top').tooltip('show');
});
</script>
</head>''')
print ('''
<body>
<!-- ==================== Nav Tabs ======================= -->
<nav class="nav nav-tabs navbar-default navbar-fixed-top">
<div class = "container">
<ul class="nav nav-tabs">
<li role="presentation"><a href="index.py"><span class="glyphicon glyphicon-home"/> Home</a></li>
<li role="presentation"><a href="mode.py">Mode</a></li>
<li role="presentation" class="dropdown active">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button" aria-haspopup="true" aria-expanded="false">
Other<span class="caret"></span>
</a>
<ul class="dropdown-menu">
<li><a href="status.py">Status</a></li>
<li><a href="device.py">Device</a></li>
<li><a href="alert.py">Alert</a></li>
<li role="separator" class="divider"></li>
<li><a href="logout.py" onmouseover="style.color='red'" onmouseout="style.color='black'">Log out</a></li>
</ul>
</li>
</ul>
</div>
</nav>
<br/><br/><br>
<div class="container-fluid">
<div class="container">
<div class="row">
<div class="col-sm-3 col-md-3 col-xs-5">
<!-- <img src="/img/brand.png" width="50px" height="50px" alt="Brand" style="display: block; margin-left: auto; margin-right: auto;"> -->
<img src="/img/brand/Brand.png" style="max-height: 100px; display: block; margin-left: auto; margin-right: auto;" class="img-responsive" alt="Header">
<br>
</div>
<div class="col-sm-9 col-md-9 col-xxs-7">
<br>
<brand style="display: block; margin-left: auto; margin-right: auto;">
Safety in residential system
</brand>
<hr>
</div>
</div>
</div>
</div>
<!-- ========================== Nav Tabs ======================= -->
<div class = "container bg-all">
<div class="wrapper">
<center><fieldset>
<h4 class="form-signin-heading">Alert configuration</h4>
<hr class="colorgraph"><br>
<!-- ////////////// Data //////////////// -->''')
print('''
<div class="form-signin">
<!-- ================= Enable | Disable ======================= -->
<fieldset class="form-control btn-form"><label onmouseover="style.color='red'" onmouseout="style.color='black'">Alert Status</label><br>
''')
    pin=4  # GPIO pin driving the alarm output
    try:
        pi = pigpio.pi() # Connect to local Pi.
        pi.set_mode(pin, pigpio.OUTPUT)
        if AlertStatus == 'ON':
            print """<p class="form-control" >Status : <span class="label label-success ">ON</span></p>"""
            pi.write(pin, 1)
        elif AlertStatus == 'OFF':
            print """<p class="form-control" >Status : <span class="label label-default ">OFF</span></p>"""
            pi.write(pin, 0)
        pi.stop() # Disconnect from local Pi.
    except:
        # pigpio not importable or daemon unreachable (not running on a Pi).
        print "<label>Are you sure your server that is running on RPi?</label>"
        print "<p>Alert not working!</p>"
        pass
print('''
<form action="alert.py" method="GET" class="btn-group btn-group-justified" role="group" aria-label="...">
<div class="btn-group" role="group">
<button name="AlertStatus" VALUE="OFF" Type="submit" class="btn btn-default"><span class="label label-default ">OFF</span></button>
</div>
<div class="btn-group" role="group">
<button name="AlertStatus" VALUE="ON" Type="submit" class="btn btn-default"><span class="label label-danger">Alarm !!</span></button>
</div>
</form>
</fieldset>
<br />
<form action="lineAlert.py" class="btn-form"><button class="btn btn-lg btn-info btn-block" Type="submit" VALUE="Status" onmouseover="style.color='yellow'" onmouseout="style.color='white'">LINE Alert</button></form>
<form action="history.py" class="btn-form"><button class="btn btn-lg btn-info btn-block" Type="submit" VALUE="Status" onmouseover="style.color='yellow'" onmouseout="style.color='white'">History</button></form>
''')
print('''
</div>
<!-- ////////////// End Data //////////////// -->
<br><input class="btn btn-lg btn-primary btn-block" Type="button" VALUE="Back" onClick="history.go(-1);return true;">
</fieldset></center>
</div>
</div>
<!-- ============== Footer ============ -->
<br/><br/><div class="navbar navbar-default navbar-fixed-bottom">
<div class="container">
<p class="navbar-text pull-left">Copyright © 2016-2017 Siczones.</p>
<!-- a id="back-to-top" href="#" class="navbar-btn btn-danger btn pull-right" role="button" data-toggle="tooltip" data-placement="left"><span class="glyphicon glyphicon-chevron-up"></span></a -->
<!-- Split button -->
<div class="navbar-btn btn-group dropup pull-right">
<button id="back-to-top" href="#" type="button" class="btn btn-warning"><span class="glyphicon glyphicon-chevron-up"></span> Top</button>
</div>
</div>
</div>
<!-- ============== End Footer ============ -->''')
print('''
</body>''')
print ("</html>")
|
# Weight matrix: inverse of per-pixel values used as variances
# (presumably Poisson-like noise where variance ~ intensity — TODO confirm).
Sigma_1 = np.diag(1 / img_flat)
# Closed-form generalized-least-squares estimate (X^T W X)^-1 X^T W y.
# NOTE(review): the result is neither assigned nor returned here —
# presumably evaluated interactively; X and img_flat are defined elsewhere.
np.linalg.inv(X.T @ Sigma_1 @ X) @ X.T @ Sigma_1 @ img_flat
|
import logging
import numpy as np
import os
import re
from pystella.rf import band
from pystella.rf.lc import SetLightCurve, LightCurve
__author__ = 'bakl'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class StellaTt:
    """Accessor for a Stella model's tabular output files (.tt / .gri)."""

    def __init__(self, name, path='./'):
        """Creates a StellaTt model instance. Required parameters: name."""
        self.name = name
        self.path = path  # path to model files

    def __str__(self):
        return "%s, path: %s" % (self.name, self.path)

    def __repr__(self):
        return "%s, path: %s" % (self.name, self.path)
        # return "%s" % self.name

    @property
    def Info(self):
        # Re-parses the .tt header on every access.
        return StellaTtInfo(self.name, self.path)
        # return "%s" % self.name

    def load(self, ext='tt', line_header=40):
        """
        Read tt-data
        Columns: time Tbb rbb Teff Rlast_sc Rph Mbol MU MB MV MI MR Mbolavg gdepos
        :type ext: extension of tt-file, default: 'tt'
        :param line_header: skip_rows - 1
        :return: data in np.array (structured dtype keyed by column names), or None
        """
        fname = os.path.join(self.path, self.name + '.' + ext)
        header = ''
        i = 0
        # Scan forward to the column-name line: the first line at or after
        # line_header whose stripped text starts with "time".
        with open(fname, "r") as f:
            for line in f:
                i += 1
                if i < line_header:
                    continue
                if line.lstrip().startswith("time"):
                    header = line
                    line_header = i  # remember where the header actually was
                    break
        # time Tbb rbb Teff Rlast_sc R(tau2/3) Mbol MU MB MV MI MR Mbolavg gdepos
        # print(i, header)
        if header != '':
            names = map(str.strip, header.split())
            # Rename the photospheric-radius column to a simpler name.
            names = [w.replace('R(tau2/3)', 'Rph') for w in names]
            dtype = np.dtype({'names': names, 'formats': [np.float64] * len(names)})
            block = np.loadtxt(fname, skiprows=line_header + 1, dtype=dtype)
            return block
        else:
            return None

    def read_curves(self):
        """Build a SetLightCurve of the broad-band magnitudes in the .tt file."""
        block = self.load()
        header = 'Mbol MU MB MV MI MR'.split()
        curves = SetLightCurve(self.name)
        time = block['time']
        for col in header:
            # Column 'MB' maps to band 'B', etc.
            b = band.band_by_name(col.replace('M', ''))
            mag = block[col]
            lc = LightCurve(b, time, mag)
            curves.add(lc)
        return curves

    def read_curves_gri(self):
        """Build a SetLightCurve from the .gri file (header on line 1)."""
        block = self.load(ext='gri', line_header=1)
        # header = 'L_bol Mu MB Mg Mr Mi'.split()
        # header = 'MB MV'.split()
        header = 'L_bol Mu MB MV Mg Mr Mi J H K '.split()
        # header = 'MB MV '.split()
        # header = 'L_bol L_ubvgri Mu MB MV Mg Mr Mi'.split()
        curves = SetLightCurve(self.name)
        time = block['time']
        for col in header:
            # Strip the 'M'/'L_' prefixes to obtain the band name.
            b = band.band_by_name(col.replace('M', '').replace('L_', ''))
            mag = block[col]
            lc = LightCurve(b, time, mag)
            curves.add(lc)
        return curves
class StellaTtInfo:
    """Parsed header metadata ('key = value' pairs) of a Stella .tt file."""

    def __init__(self, name, path='./'):
        """Creates a StellaTtInfo model instance. Required parameters: name."""
        self._dict = None
        self._name = name
        self._path = path  # path to model files
        self.parse()

    def parse(self, header_end=29):
        """Scan the first *header_end* lines of the .tt file and collect
        all 'key = number' pairs into self._dict (values as floats).

        :param header_end: number of header lines to scan.
        :return: self
        """
        fname = os.path.join(self._path, self._name + ".tt")
        self._dict = {'fname': fname}
        # prepare pattern: key, '=', then a signed int/float with optional exponent
        pattern = re.compile(r"(.*?)\s*=\s*([+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?)")
        # run pattern over the header lines only
        i = 0
        with open(fname) as f:
            for line in f:
                i += 1
                if i == header_end:
                    break
                if '= ' not in line:
                    continue
                res = pattern.findall(line)
                if len(res) > 0:
                    for k, v in res:
                        logger.debug("key: %s v: %f " % (k, float(v)))
                        self._dict[str.strip(k)] = float(v)
        return self

    @property
    def Name(self):
        return self._name

    @property
    def Path(self):
        return self._path

    @property
    def Data(self):
        # Full key/value dict parsed from the header.
        return self._dict

    @property
    def R(self):
        # Progenitor radius in solar radii.
        return self._dict['RADIUS(SOLAR)']

    @property
    def M(self):
        # Progenitor mass in solar masses.
        return self._dict['MASS(SOLAR)']

    @property
    def E(self):
        return self._dict['Ebstht'] / 10.  # to FOE

    @property
    def Mni(self):
        return self._dict['XMNI']

    @property
    def TcurA(self):
        return self._dict['TcurA']

    @property
    def TcurB(self):
        return self._dict['TcurB']

    @property
    def Rce(self):
        return self._dict['Rce']

    def show(self, o=None, comment=''):
        """Format the model summary (name, R, M, E) as a table row.

        :param o: when truthy, return the string; otherwise print it.
        :param comment: free-form text appended to the row.
        """
        s = "| %40s | %7.2f | %6.2f | %6.2f | %s" % (self._name, self.R, self.M, self.E, comment)
        if o is not None and o:
            return s
        # Bug fix: previously printed the flag `o` instead of the summary `s`.
        print(s)

    def tex(self, o=None, lend=''):
        """Format the model summary as a LaTeX table row.

        :param o: when truthy, return the string; otherwise print it.
        :param lend: text appended after the LaTeX row terminator.
        """
        # '\\\\' in source renders as the LaTeX row terminator '\\'
        # (the original '\\\ ' produced the same bytes via an invalid escape).
        s = " \\mbox{%s} & %7.2f & %6.2f & %6.2f \\\\ %s " % (self._name, self.R, self.M, self.E, lend)
        if o is not None and o:
            return s
        # Bug fix: previously printed the flag `o` instead of the summary `s`.
        print(s)
|
#pytest_getattr_mwe.py
def __getattr__(key):
    # Module-level __getattr__ (PEP 562): any missing module attribute
    # resolves to None instead of raising AttributeError.
    return None
    # raise AttributeError(key)
def test_1():
    # Minimal pytest test case; returns (rather than asserts) a value.
    return 1
|
from time import time
def main():
    """CPU micro-workload: evaluate a no-op expression 999,999 times."""
    remaining = 999_999
    while remaining:
        1 + 1
        remaining -= 1
if __name__ == '__main__':
    # Wall-clock the workload; %f prints seconds with six decimal places.
    start = time()
    main()
    end = time()
    print("main method executed in %fs" % (end-start))
__version__ = "v1.0"
__copyright__ = "Copyright 2021"
__license__ = "MIT"
__lab__ = "Adam Cribbs lab"
import time
import pysam
import pandas as pd
from mclumi.util.Console import console
class read(object):
    """Reader turning a BAM file's alignments into pandas DataFrames."""

    def __init__(self, bam_fpn, verbose=False):
        """Open the BAM file for reading and report timing.

        Parameters
        ----------
        bam_fpn
            File path of the BAM file.
        verbose
            Whether progress messages are printed to the console.
        """
        self.console = console()
        self.console.verbose = verbose
        self.console.print('===>reading the bam file... {}'.format(bam_fpn))
        read_bam_stime = time.time()
        self.pysam_bam = pysam.AlignmentFile(bam_fpn, "rb")
        self.console.print('===>reading BAM time: {:.2f}s'.format(time.time() - read_bam_stime))

    def bycol(self, col='sname'):
        """Extract a single per-read attribute as a one-column DataFrame.

        Parameters
        ----------
        col
            Attribute to extract; only 'sname' (query name) is supported.

        Returns
        -------
        pd.DataFrame
            One query name per mapped read (unmapped reads are skipped).
        """
        t = []
        if col == 'sname':
            for id, read in enumerate(self.pysam_bam):
                # reference_id == -1 marks an unmapped read.
                if read.reference_id != -1:
                    t.append(read.query_name)
        return pd.DataFrame(t)

    def todf(self, tags=None):
        """Convert all alignments to a DataFrame, one row per read.

        Each row holds 12 fixed alignment fields, the pysam read object
        itself, and one extra column per requested BAM tag (the string
        'None' marks a read lacking that tag).

        See Also
        --------
        https://pysam.readthedocs.io/en/latest/usage.html#creating-bam-cram-sam-files-from-scratch
        https://pysam.readthedocs.io/_/downloads/en/v0.12.0/pdf/

        Parameters
        ----------
        tags
            List of BAM tag names (e.g. ['XS', 'XT']) to add as columns.

        Returns
        -------
        pd.DataFrame
        """
        # Avoid a mutable default argument; None means "no extra tags".
        tags = [] if tags is None else tags
        l = []
        self.console.print('=========>start converting bam to df...')
        stime = time.time()
        for id, read in enumerate(self.pysam_bam):
            rt_dict = {k: v for k, v in read.get_tags()}
            # dict lookup instead of list membership: O(1) per tag.
            tag_vals = [rt_dict.get(k, 'None') for k in tags]
            l.append([
                id,
                read.query_name,
                read.flag,
                read.reference_id,
                read.pos,
                read.mapping_quality,
                read.cigar,
                read.query_sequence,
                read.next_reference_id,
                read.next_reference_start,
                read.template_length,
                read.query_qualities,
                read,
            ] + tag_vals)
        df = pd.DataFrame(
            l,
            columns=[
                'id',
                'query_name',
                'flag',
                'reference_id',
                'pos',
                'mapping_quality',
                'cigar',
                'query_sequence',
                'next_reference_id',
                'next_reference_start',
                'template_length',
                'query_qualities',
                'read',
            ] + tags,
        )
        if 'XS' in tags and 'XT' in tags:
            # If every read's XS tag is absent, derive the assignment
            # status from the XT tag instead.
            stat_XT = df['XS'].value_counts()
            if 'None' in stat_XT.keys():
                if stat_XT['None'] == df.shape[0]:
                    df['XS'] = df['XT'].apply(lambda x: 'Assigned' if x != 'None' else 'Unassigned')
        self.console.print('=========>time to df: {:.3f}s'.format(time.time() - stime))
        return df

    def todf11(self, ):
        """Convert all alignments to a DataFrame of the 11 standard
        alignment fields (plus a running id), without tags.

        See Also
        --------
        https://pysam.readthedocs.io/en/latest/usage.html#creating-bam-cram-sam-files-from-scratch
        https://pysam.readthedocs.io/_/downloads/en/v0.12.0/pdf/

        Returns
        -------
        pd.DataFrame
        """
        l = []
        self.console.print('=========>start converting bam to df...')
        stime = time.time()
        for id, read in enumerate(self.pysam_bam):
            l.append([
                id,
                read.query_name,
                read.flag,
                read.reference_id,
                read.reference_start,
                read.mapping_quality,
                read.cigar,
                read.query_sequence,
                read.next_reference_id,
                read.next_reference_start,
                read.template_length,
                read.query_qualities,
            ])
        # Bug fix: pd.DataFrame.from_dict() raises ValueError when given a
        # list together with `columns` (orient='columns'); build directly.
        df = pd.DataFrame(
            l,
            columns=[
                'id',
                'query_name',
                'flag',
                'reference_id',
                'reference_start',
                'mapping_quality',
                'cigar',
                'query_sequence',
                'next_reference_id',
                'next_reference_start',
                'template_length',
                'query_qualities',
            ],
        )
        self.console.print('=========>time to df: {:.3f}s'.format(time.time() - stime))
        return df

    def todf11_depr(self, ):
        """Deprecated dict-per-row variant of todf11().

        Returns
        -------
        Dataframe of a bam file
        """
        l = []
        self.console.print('=========>start converting bam to df')
        stime = time.time()
        for id, read in enumerate(self.pysam_bam):
            read_piece = {
                'id': id,
                'query_name': read.query_name,
                'flag': read.flag,
                'reference_id': read.reference_id,
                'reference_start': read.reference_start,
                'mapping_quality': read.mapping_quality,
                'cigar': read.cigar,
                'query_sequence': read.query_sequence,
                'next_reference_id': read.next_reference_id,
                'next_reference_start': read.next_reference_start,
                'template_length': read.template_length,
                'query_qualities': read.query_qualities,
            }
            l.append(read_piece)
        df = pd.DataFrame.from_dict(l)
        self.console.print('=========>time to df: {:.3f}s'.format(time.time() - stime))
        return df
if __name__ == "__main__":
    # Ad-hoc smoke test; requires a local example BAM file.
    from mclumi.Path import to
    umikit = read(
        # bam_fpn=to('example/data/example.bam'),
        # bam_fpn=to('example/data/example_buddle.bam'),
        # to('example/data/assigned_sorted.bam')
        # to('example/data/assigned_sorted_dedup.bam')
        # bam_fpn=to('example/data/deduplicated.bam'),
        # bam_fpn=to('example/data/RM82CLK1_S3_featurecounts_gene_sorted.bam'),
        bam_fpn=to('example/data/RM82_CLK1_DMSO_2_XT.bam'),
    )
    # df = umikit.todf(tags=['PO'])
    # df = umikit.todf(tags=['XS', 'XT'])
    # print(df)
    #
    # df = df.loc[df['XS'] == 'Assigned']
    # print(df)
    df = umikit.todf(tags=['XT', 'XS'])
    print(df)
|
#!/usr/bin/python
import time
from time import sleep
import datetime
import os
import sys
import subprocess
from subprocess import Popen
import pynma
# NMA (Notify My Android) client keyed with the account API token.
# NOTE(review): hard-coded API key checked into source — move to config.
p = pynma.PyNMA( "12842c4d5f6061eb9543674248c3518edda9dd83343ebe19" )
application="alertpi boot"
event="DoorBell OnBoot"
description="doorbell just turned on"
priority=2  # NOTE(review): defined but never passed to p.push below
p.push(application, event, description)
# Fire-and-forget SMS notification; output pipes are attached but never read.
subprocess.Popen([sys.executable, "/opt/doorbell/sendsms.py BootedUpJustNow" ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
|
# -*- coding: utf-8 -*-
"""Module grouping utility functions for DOV XML services."""
import os
from owslib.etree import etree
from pydov.util.errors import XmlParseError
from pydov.util.hooks import HookRunner
from pydov.util.net import SessionFactory
def build_dov_url(path):
    """Build the DOV url consisting of the fixed DOV base url, appended with
    the given path.

    Parameters
    ----------
    path : str
        Path to append to the base url, with or without a leading slash.

    Returns
    -------
    str
        The absolute DOV url.
    """
    if 'PYDOV_BASE_URL' in os.environ:
        base_url = os.environ['PYDOV_BASE_URL']
    else:
        base_url = 'https://www.dov.vlaanderen.be/'
    # Normalise the join so a PYDOV_BASE_URL without a trailing slash
    # (which previously yielded e.g. 'https://hostpath') still produces
    # a valid url; the slash-terminated default is unaffected.
    return base_url.rstrip('/') + '/' + path.lstrip('/')
def get_remote_url(url, session=None):
    """Request the URL from the remote service and return its contents.

    Parameters
    ----------
    url : str
        URL to download.
    session : requests.Session
        Session to use to perform HTTP requests for data. Defaults to None,
        which means a new session will be created for each request.

    Returns
    -------
    xml : bytes
        The raw XML data as bytes.
    """
    http = SessionFactory.get_session() if session is None else session
    response = http.get(url)
    # Force UTF-8 decoding of the payload before re-encoding to bytes.
    response.encoding = 'utf-8'
    return response.text.encode('utf8')
def get_xsd_schema(url):
    """Request the XSD schema from DOV webservices and return it.

    Parameters
    ----------
    url : str
        URL of the XSD schema to download.

    Returns
    -------
    xml : bytes
        The raw XML data of this XSD schema as bytes.
    """
    # Give registered hooks a chance to serve the schema (e.g. a cache).
    xml = HookRunner.execute_inject_meta_response(url)

    if xml is None:
        xml = get_remote_url(url)

    HookRunner.execute_meta_received(url, xml)
    return xml
def get_dov_xml(url, session=None):
    """Request the XML from the remote DOV webservices and return it.

    Parameters
    ----------
    url : str
        URL of the DOV object to download.
    session : requests.Session
        Session to use to perform HTTP requests for data. Defaults to None,
        which means a new session will be created for each request.

    Returns
    -------
    xml : bytes
        The raw XML data of this DOV object as bytes.
    """
    # Give registered hooks a chance to serve the document (e.g. a cache).
    xml = HookRunner.execute_inject_xml_response(url)

    if xml is None:
        xml = get_remote_url(url, session)

    HookRunner.execute_xml_received(url, xml)
    return xml
def parse_dov_xml(xml_data):
    """Parse the given XML data into an ElementTree.

    Parameters
    ----------
    xml_data : bytes
        The raw XML data of a DOV object as bytes.

    Returns
    -------
    tree : etree.ElementTree
        Parsed XML tree of the DOV object.

    Raises
    ------
    pydov.util.errors.XmlParseError
        When the given data could not be parsed as XML.
    """
    try:
        # lxml backend: clean up namespaces and recover from minor errors;
        # the pure-Python ElementTree parser accepts neither option.
        parser = etree.XMLParser(ns_clean=True, recover=True)
    except TypeError:
        parser = etree.XMLParser()

    try:
        tree = etree.fromstring(xml_data, parser=parser)
        return tree
    except Exception as exc:
        # Bug fix: chain the original exception so the root cause of the
        # parse failure stays visible in tracebacks.
        raise XmlParseError("Failed to parse XML record.") from exc
|
from . import auth
from flask_login import login_user,logout_user,login_required
from flask import request,redirect,url_for,flash,render_template
from .forms import RegistrationForm,LoginForm
from app.models import User
from app import db
@auth.route("/login",methods=['GET','POST'])
def login():
    """Authenticate a user from the posted email/password form.

    GET renders the login form; POST validates the credentials and, on
    success, logs the user in and redirects to the main index.
    """
    if request.method == "POST":
        # Bug fix: removed the unused `username` variable which wrongly
        # read the "email" form field.
        email = request.form.get("email", None)
        password = request.form.get("pass", None)
        if email is None or password is None:
            flash("There is an error in your fields")
            return redirect(url_for('auth.login'))
        u = User.query.filter_by(email=email).first()
        if u is not None and u.verify_password(password):
            login_user(u)
            flash("Login successfull")
            return redirect(url_for('main.index'))
        # Robustness: previously a failed login fell through silently.
        flash("Invalid email or password")
    return render_template("auth/login.html")
@auth.route("/register",methods=['GET','POST'])
def register():
    """Create a new user account from the posted registration form.

    GET renders the registration form; POST validates the fields,
    rejects duplicate emails, persists the new user and redirects
    to the login page.
    """
    if request.method == 'POST':
        username = request.form.get("username", None)
        email = request.form.get("email", None)
        password = request.form.get("pass", None)
        cpass = request.form.get("cpass", None)
        # Bug fix: compare against the confirmation field — the original
        # `password == password` was always true.
        if username is not None and email is not None and password is not None and password == cpass:
            u = User.query.filter_by(email=email).first()
            if u is not None:
                flash("User with that email is already registered")
                return redirect(url_for('auth.register'))
            u = User(email=email, name=username, password=password)
            db.session.add(u)
            # Bug fix: commit so the new user is actually persisted;
            # add() alone leaves the row pending in the session.
            db.session.commit()
            flash("User created successfully")
            return redirect(url_for('auth.login'))
        flash("all fields are required")
        return redirect(url_for('auth.register'))
    return render_template("auth/register.html")
@auth.route("/logout")
@login_required
def logout():
    """Log the current user out and redirect to the login page."""
    logout_user()
    return redirect(url_for('auth.login'))
__version__ = '1.2.0'
__author__ = 'Aashutosh Rathi <aashutoshrathi@gmail.com>'
__all__ = []
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import datetime
from collections import namedtuple
from functools import partial
from aniso8601.builders import (
BaseTimeBuilder,
DatetimeTuple,
DateTuple,
Limit,
TimeTuple,
TupleBuilder,
cast,
range_check,
)
from aniso8601.exceptions import (
DayOutOfBoundsError,
HoursOutOfBoundsError,
ISOFormatError,
LeapSecondError,
MidnightBoundsError,
MinutesOutOfBoundsError,
MonthOutOfBoundsError,
SecondsOutOfBoundsError,
WeekOutOfBoundsError,
YearOutOfBoundsError,
)
from aniso8601.utcoffset import UTCOffset
# Calendar conversion factors used when normalising ISO 8601 durations.
# Years and months use the nominal ISO lengths (365 / 30 days).
DAYS_PER_YEAR = 365
DAYS_PER_MONTH = 30
DAYS_PER_WEEK = 7

HOURS_PER_DAY = 24

MINUTES_PER_HOUR = 60
MINUTES_PER_DAY = MINUTES_PER_HOUR * HOURS_PER_DAY

SECONDS_PER_MINUTE = 60
SECONDS_PER_DAY = MINUTES_PER_DAY * SECONDS_PER_MINUTE

MICROSECONDS_PER_SECOND = int(1e6)

MICROSECONDS_PER_MINUTE = 60 * MICROSECONDS_PER_SECOND
MICROSECONDS_PER_HOUR = 60 * MICROSECONDS_PER_MINUTE
MICROSECONDS_PER_DAY = 24 * MICROSECONDS_PER_HOUR
MICROSECONDS_PER_WEEK = 7 * MICROSECONDS_PER_DAY
MICROSECONDS_PER_MONTH = DAYS_PER_MONTH * MICROSECONDS_PER_DAY
MICROSECONDS_PER_YEAR = DAYS_PER_YEAR * MICROSECONDS_PER_DAY

# Upper bound on days representable by datetime.timedelta.
TIMEDELTA_MAX_DAYS = datetime.timedelta.max.days

# An integer principal value plus its fractional part expressed as a
# whole number of microseconds.
FractionalComponent = namedtuple(
    "FractionalComponent", ["principal", "microsecondremainder"]
)
def year_range_check(valuestr, limit):
    """Range-check a (possibly truncated) year string against *limit*.

    Truncated dates, like '19', refer to 1900-1999 inclusive; we simply
    parse to 1900 by right-padding the string with zeros.
    """
    if len(valuestr) >= 4:
        return range_check(valuestr, limit)
    return range_check(valuestr.ljust(4, "0"), limit)
def fractional_range_check(conversion, valuestr, limit):
    """Cast and range-check a value string that may carry a decimal fraction.

    Strings containing '.' are cast to a FractionalComponent (integer
    principal plus a microsecond remainder scaled by *conversion*);
    whole numbers are cast to int. Raises limit.rangeexception when the
    numeric value falls outside [limit.min, limit.max].
    """
    if valuestr is None:
        return None

    if "." in valuestr:
        caster = partial(_cast_to_fractional_component, conversion)
    else:
        caster = int

    value = cast(valuestr, caster, thrownmessage=limit.casterrorstring)

    # Bounds are checked on the full numeric value, fraction included.
    checkvalue = float(valuestr) if type(value) is FractionalComponent else int(valuestr)

    if limit.min is not None and checkvalue < limit.min:
        raise limit.rangeexception(limit.rangeerrorstring)

    if limit.max is not None and checkvalue > limit.max:
        raise limit.rangeexception(limit.rangeerrorstring)

    return value
def _cast_to_fractional_component(conversion, floatstr):
    """Split 'int.frac' into a FractionalComponent.

    The fractional digits are interpreted as a base-10 fraction and
    scaled by *conversion* (units-per-whole, e.g. microseconds per hour)
    using integer arithmetic, truncating toward zero.
    """
    wholestr, fracstr = floatstr.split(".")
    remainder = (int(fracstr) * conversion) // (10 ** len(fracstr))
    return FractionalComponent(int(wholestr), remainder)
class PythonTimeBuilder(BaseTimeBuilder):
# 0000 (1 BC) is not representable as a Python date
DATE_YYYY_LIMIT = Limit(
"Invalid year string.",
datetime.MINYEAR,
datetime.MAXYEAR,
YearOutOfBoundsError,
"Year must be between {0}..{1}.".format(datetime.MINYEAR, datetime.MAXYEAR),
year_range_check,
)
TIME_HH_LIMIT = Limit(
"Invalid hour string.",
0,
24,
HoursOutOfBoundsError,
"Hour must be between 0..24 with " "24 representing midnight.",
partial(fractional_range_check, MICROSECONDS_PER_HOUR),
)
TIME_MM_LIMIT = Limit(
"Invalid minute string.",
0,
59,
MinutesOutOfBoundsError,
"Minute must be between 0..59.",
partial(fractional_range_check, MICROSECONDS_PER_MINUTE),
)
TIME_SS_LIMIT = Limit(
"Invalid second string.",
0,
60,
SecondsOutOfBoundsError,
"Second must be between 0..60 with " "60 representing a leap second.",
partial(fractional_range_check, MICROSECONDS_PER_SECOND),
)
DURATION_PNY_LIMIT = Limit(
"Invalid year duration string.",
None,
None,
YearOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_YEAR),
)
DURATION_PNM_LIMIT = Limit(
"Invalid month duration string.",
None,
None,
MonthOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_MONTH),
)
DURATION_PNW_LIMIT = Limit(
"Invalid week duration string.",
None,
None,
WeekOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_WEEK),
)
DURATION_PND_LIMIT = Limit(
"Invalid day duration string.",
None,
None,
DayOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_DAY),
)
DURATION_TNH_LIMIT = Limit(
"Invalid hour duration string.",
None,
None,
HoursOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_HOUR),
)
DURATION_TNM_LIMIT = Limit(
"Invalid minute duration string.",
None,
None,
MinutesOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_MINUTE),
)
DURATION_TNS_LIMIT = Limit(
"Invalid second duration string.",
None,
None,
SecondsOutOfBoundsError,
None,
partial(fractional_range_check, MICROSECONDS_PER_SECOND),
)
DATE_RANGE_DICT = BaseTimeBuilder.DATE_RANGE_DICT
DATE_RANGE_DICT["YYYY"] = DATE_YYYY_LIMIT
TIME_RANGE_DICT = {"hh": TIME_HH_LIMIT, "mm": TIME_MM_LIMIT, "ss": TIME_SS_LIMIT}
DURATION_RANGE_DICT = {
"PnY": DURATION_PNY_LIMIT,
"PnM": DURATION_PNM_LIMIT,
"PnW": DURATION_PNW_LIMIT,
"PnD": DURATION_PND_LIMIT,
"TnH": DURATION_TNH_LIMIT,
"TnM": DURATION_TNM_LIMIT,
"TnS": DURATION_TNS_LIMIT,
}
@classmethod
def build_date(cls, YYYY=None, MM=None, DD=None, Www=None, D=None, DDD=None):
    """Build a datetime.date from calendar (YYYY-MM-DD), week (YYYY-Www-D),
    or ordinal (YYYY-DDD) components, after range checking them."""
    YYYY, MM, DD, Www, D, DDD = cls.range_check_date(YYYY, MM, DD, Www, D, DDD)
    # Omitted month/day default to the first
    if MM is None:
        MM = 1
    if DD is None:
        DD = 1
    # Ordinal date takes precedence, then ISO week date, then calendar date
    if DDD is not None:
        return PythonTimeBuilder._build_ordinal_date(YYYY, DDD)
    if Www is not None:
        return PythonTimeBuilder._build_week_date(YYYY, Www, isoday=D)
    return datetime.date(YYYY, MM, DD)
@classmethod
def build_time(cls, hh=None, mm=None, ss=None, tz=None):
    """Build a datetime.time from hour/minute/second components and an
    optional timezone tuple.

    Fractional components carry a microsecond remainder which is
    redistributed across the smaller units; hour 24 (midnight) is
    normalized to 0.
    """
    # Builds a time from the given parts, handling fractional arguments
    # where necessary
    hours = 0
    minutes = 0
    seconds = 0
    microseconds = 0
    hh, mm, ss, tz = cls.range_check_time(hh, mm, ss, tz)
    # Each component may come back as a FractionalComponent holding the
    # whole part plus a microsecond remainder
    if type(hh) is FractionalComponent:
        hours = hh.principal
        microseconds = hh.microsecondremainder
    elif hh is not None:
        hours = hh
    if type(mm) is FractionalComponent:
        minutes = mm.principal
        microseconds = mm.microsecondremainder
    elif mm is not None:
        minutes = mm
    if type(ss) is FractionalComponent:
        seconds = ss.principal
        microseconds = ss.microsecondremainder
    elif ss is not None:
        seconds = ss
    # Fold the microsecond remainder back into hours/minutes/seconds
    (
        hours,
        minutes,
        seconds,
        microseconds,
    ) = PythonTimeBuilder._distribute_microseconds(
        microseconds,
        (hours, minutes, seconds),
        (MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE, MICROSECONDS_PER_SECOND),
    )
    # Move midnight into range
    if hours == 24:
        hours = 0
    # Datetimes don't handle fractional components, so we use a timedelta
    if tz is not None:
        return (
            datetime.datetime(
                1, 1, 1, hour=hours, minute=minutes, tzinfo=cls._build_object(tz)
            )
            + datetime.timedelta(seconds=seconds, microseconds=microseconds)
        ).timetz()
    return (
        datetime.datetime(1, 1, 1, hour=hours, minute=minutes)
        + datetime.timedelta(seconds=seconds, microseconds=microseconds)
    ).time()
@classmethod
def build_datetime(cls, date, time):
    """Combine a built date and a built time into a datetime.datetime."""
    dateobject = cls._build_object(date)
    timeobject = cls._build_object(time)
    return datetime.datetime.combine(dateobject, timeobject)
@classmethod
def build_duration(
    cls, PnY=None, PnM=None, PnW=None, PnD=None, TnH=None, TnM=None, TnS=None
):
    """Build a datetime.timedelta from ISO 8601 duration components.

    Range checking folds years and months into days, and returns TnS as a
    FractionalComponent carrying whole seconds plus a microsecond remainder.
    """
    # PnY and PnM will be distributed to PnD, microsecond remainder to TnS
    PnY, PnM, PnW, PnD, TnH, TnM, TnS = cls.range_check_duration(
        PnY, PnM, PnW, PnD, TnH, TnM, TnS
    )
    # TnS is a FractionalComponent after range checking (see
    # range_check_duration's return value)
    seconds = TnS.principal
    microseconds = TnS.microsecondremainder
    return datetime.timedelta(
        days=PnD,
        seconds=seconds,
        microseconds=microseconds,
        minutes=TnM,
        hours=TnH,
        weeks=PnW,
    )
@classmethod
def build_interval(cls, start=None, end=None, duration=None):
    """Build a (start, end) pair of built objects for an ISO 8601 interval.

    Supports <start>/<end>, <duration>/<end>, and <start>/<duration> forms.
    For the duration forms the missing endpoint is computed; a date endpoint
    is promoted to a datetime when the duration has sub-day resolution.
    """
    start, end, duration = cls.range_check_interval(start, end, duration)
    if start is not None and end is not None:
        # <start>/<end>
        startobject = cls._build_object(start)
        endobject = cls._build_object(end)
        return (startobject, endobject)
    durationobject = cls._build_object(duration)
    # Determine if datetime promotion is required
    datetimerequired = (
        duration.TnH is not None
        or duration.TnM is not None
        or duration.TnS is not None
        or durationobject.seconds != 0
        or durationobject.microseconds != 0
    )
    if end is not None:
        # <duration>/<end>
        endobject = cls._build_object(end)
        # Range check
        if type(end) is DateTuple and datetimerequired is True:
            # <end> is a date, and <duration> requires datetime resolution
            return (
                endobject,
                cls.build_datetime(end, TupleBuilder.build_time()) - durationobject,
            )
        return (endobject, endobject - durationobject)
    # <start>/<duration>
    startobject = cls._build_object(start)
    # Range check
    if type(start) is DateTuple and datetimerequired is True:
        # <start> is a date, and <duration> requires datetime resolution
        return (
            startobject,
            cls.build_datetime(start, TupleBuilder.build_time()) + durationobject,
        )
    return (startobject, startobject + durationobject)
@classmethod
def build_repeating_interval(cls, R=None, Rnn=None, interval=None):
    """Build a generator yielding the start of each repetition.

    R is True for unbounded repetition, otherwise the interval repeats
    Rnn times. When the interval has no explicit duration it is derived
    from end - start; when only an end is given, iteration steps backwards
    from the end.
    """
    startobject = None
    endobject = None
    R, Rnn, interval = cls.range_check_repeating_interval(R, Rnn, interval)
    if interval.start is not None:
        startobject = cls._build_object(interval.start)
    if interval.end is not None:
        endobject = cls._build_object(interval.end)
    if interval.duration is not None:
        durationobject = cls._build_object(interval.duration)
    else:
        # No explicit duration: infer it from the interval's endpoints
        durationobject = endobject - startobject
    if R is True:
        # Unbounded repetition; negate the step when anchored on the end
        if startobject is not None:
            return cls._date_generator_unbounded(startobject, durationobject)
        return cls._date_generator_unbounded(endobject, -durationobject)
    iterations = int(Rnn)
    if startobject is not None:
        return cls._date_generator(startobject, durationobject, iterations)
    return cls._date_generator(endobject, -durationobject, iterations)
@classmethod
def build_timezone(cls, negative=None, Z=None, hh=None, mm=None, name=""):
    """Build a UTCOffset from a timezone tuple ('Z' or +/-hh[:mm])."""
    negative, Z, hh, mm, name = cls.range_check_timezone(negative, Z, hh, mm, name)
    if Z is True:
        # Z -> UTC
        return UTCOffset(name="UTC", minutes=0)
    tzhour = int(hh)
    tzminute = int(mm) if mm is not None else 0
    offsetminutes = tzhour * 60 + tzminute
    if negative is True:
        offsetminutes = -offsetminutes
    return UTCOffset(name=name, minutes=offsetminutes)
@classmethod
def range_check_duration(
    cls,
    PnY=None,
    PnM=None,
    PnW=None,
    PnD=None,
    TnH=None,
    TnM=None,
    TnS=None,
    rangedict=None,
):
    """Range check duration components and normalize them for timedelta.

    Years and months are approximated as days (DAYS_PER_YEAR /
    DAYS_PER_MONTH), fractional parts are carried as microseconds and
    redistributed largest-unit-first, and each component plus the total is
    checked against datetime.timedelta's maximum size.

    Returns (None, None, weeks, totaldays, hours, minutes,
    FractionalComponent(seconds, microseconds)) -- the year and month slots
    are None because they have been folded into days.

    Raises the per-component *OutOfBoundsError when a component or the
    total exceeds the maximum timedelta size.
    """
    # NOTE(review): the rangedict parameter is accepted but never used;
    # cls.DURATION_RANGE_DICT is always passed below -- confirm intent.
    years = 0
    months = 0
    days = 0
    weeks = 0
    hours = 0
    minutes = 0
    seconds = 0
    microseconds = 0
    PnY, PnM, PnW, PnD, TnH, TnM, TnS = BaseTimeBuilder.range_check_duration(
        PnY, PnM, PnW, PnD, TnH, TnM, TnS, rangedict=cls.DURATION_RANGE_DICT
    )
    # For each component: unpack FractionalComponent into the whole part
    # plus a microsecond remainder, then bound-check against the maximum
    # timedelta size. Note each fractional remainder overwrites
    # `microseconds` -- only one component is expected to be fractional.
    if PnY is not None:
        if type(PnY) is FractionalComponent:
            years = PnY.principal
            microseconds = PnY.microsecondremainder
        else:
            years = PnY
        if years * DAYS_PER_YEAR > TIMEDELTA_MAX_DAYS:
            raise YearOutOfBoundsError("Duration exceeds maximum timedelta size.")
    if PnM is not None:
        if type(PnM) is FractionalComponent:
            months = PnM.principal
            microseconds = PnM.microsecondremainder
        else:
            months = PnM
        if months * DAYS_PER_MONTH > TIMEDELTA_MAX_DAYS:
            raise MonthOutOfBoundsError("Duration exceeds maximum timedelta size.")
    if PnW is not None:
        if type(PnW) is FractionalComponent:
            weeks = PnW.principal
            microseconds = PnW.microsecondremainder
        else:
            weeks = PnW
        if weeks * DAYS_PER_WEEK > TIMEDELTA_MAX_DAYS:
            raise WeekOutOfBoundsError("Duration exceeds maximum timedelta size.")
    if PnD is not None:
        if type(PnD) is FractionalComponent:
            days = PnD.principal
            microseconds = PnD.microsecondremainder
        else:
            days = PnD
        if days > TIMEDELTA_MAX_DAYS:
            raise DayOutOfBoundsError("Duration exceeds maximum timedelta size.")
    if TnH is not None:
        if type(TnH) is FractionalComponent:
            hours = TnH.principal
            microseconds = TnH.microsecondremainder
        else:
            hours = TnH
        if hours // HOURS_PER_DAY > TIMEDELTA_MAX_DAYS:
            raise HoursOutOfBoundsError("Duration exceeds maximum timedelta size.")
    if TnM is not None:
        if type(TnM) is FractionalComponent:
            minutes = TnM.principal
            microseconds = TnM.microsecondremainder
        else:
            minutes = TnM
        if minutes // MINUTES_PER_DAY > TIMEDELTA_MAX_DAYS:
            raise MinutesOutOfBoundsError(
                "Duration exceeds maximum timedelta size."
            )
    if TnS is not None:
        if type(TnS) is FractionalComponent:
            seconds = TnS.principal
            microseconds = TnS.microsecondremainder
        else:
            seconds = TnS
        if seconds // SECONDS_PER_DAY > TIMEDELTA_MAX_DAYS:
            raise SecondsOutOfBoundsError(
                "Duration exceeds maximum timedelta size."
            )
    # Fold the microsecond remainder back into the whole components,
    # largest unit first
    (
        years,
        months,
        weeks,
        days,
        hours,
        minutes,
        seconds,
        microseconds,
    ) = PythonTimeBuilder._distribute_microseconds(
        microseconds,
        (years, months, weeks, days, hours, minutes, seconds),
        (
            MICROSECONDS_PER_YEAR,
            MICROSECONDS_PER_MONTH,
            MICROSECONDS_PER_WEEK,
            MICROSECONDS_PER_DAY,
            MICROSECONDS_PER_HOUR,
            MICROSECONDS_PER_MINUTE,
            MICROSECONDS_PER_SECOND,
        ),
    )
    # Note that weeks can be handled without conversion to days
    totaldays = years * DAYS_PER_YEAR + months * DAYS_PER_MONTH + days
    # Check against timedelta limits
    if (
        totaldays
        + weeks * DAYS_PER_WEEK
        + hours // HOURS_PER_DAY
        + minutes // MINUTES_PER_DAY
        + seconds // SECONDS_PER_DAY
        > TIMEDELTA_MAX_DAYS
    ):
        raise DayOutOfBoundsError("Duration exceeds maximum timedelta size.")
    return (
        None,
        None,
        weeks,
        totaldays,
        hours,
        minutes,
        FractionalComponent(seconds, microseconds),
    )
@classmethod
def range_check_interval(cls, start=None, end=None, duration=None):
    """Range check an interval's parts, expanding the concise end form.

    For <duration>/<end> and <start>/<duration> forms, verifies that the
    endpoint computed from the duration stays within datetime's
    representable range.

    Returns the (possibly updated) (start, end, duration) tuple.
    Raises YearOutOfBoundsError when the computed endpoint would fall
    outside datetime.datetime.min/max.
    """
    # Handles concise format, range checks any potential durations
    if start is not None and end is not None:
        # <start>/<end>
        # Handle concise format
        if cls._is_interval_end_concise(end) is True:
            end = cls._combine_concise_interval_tuples(start, end)
        return (start, end, duration)
    durationobject = cls._build_object(duration)
    if end is not None:
        # <duration>/<end>
        endobject = cls._build_object(end)
        # Range check
        if type(end) is DateTuple:
            enddatetime = cls.build_datetime(end, TupleBuilder.build_time())
            if enddatetime - datetime.datetime.min < durationobject:
                # Fixed typo in message: "minimium" -> "minimum"
                raise YearOutOfBoundsError("Interval end less than minimum date.")
        else:
            mindatetime = datetime.datetime.min
            if end.time.tz is not None:
                # Compare in the endpoint's own timezone
                mindatetime = mindatetime.replace(tzinfo=endobject.tzinfo)
            if endobject - mindatetime < durationobject:
                raise YearOutOfBoundsError("Interval end less than minimum date.")
    else:
        # <start>/<duration>
        startobject = cls._build_object(start)
        # Range check
        if type(start) is DateTuple:
            startdatetime = cls.build_datetime(start, TupleBuilder.build_time())
            if datetime.datetime.max - startdatetime < durationobject:
                raise YearOutOfBoundsError(
                    "Interval end greater than maximum date."
                )
        else:
            maxdatetime = datetime.datetime.max
            if start.time.tz is not None:
                maxdatetime = maxdatetime.replace(tzinfo=startobject.tzinfo)
            if maxdatetime - startobject < durationobject:
                raise YearOutOfBoundsError(
                    "Interval end greater than maximum date."
                )
    return (start, end, duration)
@staticmethod
def _build_week_date(isoyear, isoweek, isoday=None):
    """Map an ISO week date (year, week, optional day) to a datetime.date."""
    weekstart = PythonTimeBuilder._iso_year_start(isoyear)
    # Weeks and ISO days are 1-based, so offset both by one
    if isoday is None:
        return weekstart + datetime.timedelta(weeks=isoweek - 1)
    return weekstart + datetime.timedelta(weeks=isoweek - 1, days=isoday - 1)
@staticmethod
def _build_ordinal_date(isoyear, isoday):
# Day of year to a date
# https://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
builtdate = datetime.date(isoyear, 1, 1) + datetime.timedelta(days=isoday - 1)
return builtdate
@staticmethod
def _iso_year_start(isoyear):
# Given an ISO year, returns the equivalent of the start of the year
# on the Gregorian calendar (which is used by Python)
# Stolen from:
# http://stackoverflow.com/questions/304256/whats-the-best-way-to-find-the-inverse-of-datetime-isocalendar
# Determine the location of the 4th of January, the first week of
# the ISO year is the week containing the 4th of January
# http://en.wikipedia.org/wiki/ISO_week_date
fourth_jan = datetime.date(isoyear, 1, 4)
# Note the conversion from ISO day (1 - 7) and Python day (0 - 6)
delta = datetime.timedelta(days=fourth_jan.isoweekday() - 1)
# Return the start of the year
return fourth_jan - delta
@staticmethod
def _date_generator(startdate, timedelta, iterations):
currentdate = startdate
currentiteration = 0
while currentiteration < iterations:
yield currentdate
# Update the values
currentdate += timedelta
currentiteration += 1
@staticmethod
def _date_generator_unbounded(startdate, timedelta):
currentdate = startdate
while True:
yield currentdate
# Update the value
currentdate += timedelta
@staticmethod
def _distribute_microseconds(todistribute, recipients, reductions):
# Given a number of microseconds as int, a tuple of ints length n
# to distribute to, and a tuple of ints length n to divide todistribute
# by (from largest to smallest), returns a tuple of length n + 1, with
# todistribute divided across recipients using the reductions, with
# the final remainder returned as the final tuple member
results = []
remainder = todistribute
for index, reduction in enumerate(reductions):
additional, remainder = divmod(remainder, reduction)
results.append(recipients[index] + additional)
# Always return the remaining microseconds
results.append(remainder)
return tuple(results)
|
import logging
from kubedriver.kegd.model import ReadyResult
from kubedriver.keg import CompositionLoader
from kubedriver.sandbox import Sandbox, SandboxConfiguration, SandboxError, ExecuteError
from kubedriver.kegd.scripting import KegCollection, ReadyResultHolder
logger = logging.getLogger(__name__)
# Different from regular task handlers: runs a user-supplied ready-check script and reports readiness
class ReadyCheckHandler:
    """Executes a keg's ready-check script in a sandbox and converts the
    outcome into a ReadyResult (ready / not ready / failed)."""

    def handle(self, operation_name, keg_name, keg_status, location_context, ready_check_task, resource_context_properties):
        """Run the ready-check script and return a ReadyResult.

        Loads the keg composition, exposes it to the script alongside a
        result holder and the resource properties, then interprets what the
        script recorded in the holder.
        """
        ready_script_file_name = ready_check_task.script_file_name
        ready_script = ready_check_task.script
        sandbox = self.__build_sandbox()
        api_ctl = location_context.api_ctl
        helm_client = location_context.kube_location.helm_client
        composition = self.__load_composition(keg_status, api_ctl, helm_client)
        result_holder = ReadyResultHolder()
        inputs = self.__build_inputs(composition, result_holder, resource_context_properties)
        complete_script = self.__build_script(ready_script)
        try:
            execute_result = sandbox.run(complete_script, file_name=ready_script_file_name, inputs=inputs)
        except SandboxError as e:
            # Default to an empty string so the log line stays clean when no
            # execution log is available (previously this rendered as 'None'
            # because full_detail was initialized to None)
            full_detail = ''
            if isinstance(e, ExecuteError) and getattr(e, 'execution_log', None) is not None:
                full_detail = ': ' + e.execution_log.summarise()
            logger.exception(f'Error occurred during execution of ready check script {ready_script_file_name}{full_detail}')
            return ReadyResult.failed(f'Error occurred during execution of ready check script {ready_script_file_name}: {e}')
        log_msg = f'Ready script {ready_script_file_name} complete, result: {result_holder}'
        if execute_result.log is not None and execute_result.log.has_entries():
            log_msg += '\n'
            log_msg += execute_result.log.summarise()
        logger.debug(log_msg)
        if result_holder.is_ready():
            return ReadyResult.ready()
        failed, reason = result_holder.has_failed()
        if failed:
            return ReadyResult.failed(f'{ready_script_file_name}: {reason}')
        return ReadyResult.not_ready()

    def __load_composition(self, keg_status, api_ctl, helm_client):
        # Current view of the keg's deployed objects
        return CompositionLoader(api_ctl, helm_client).load_composition(keg_status)

    def __build_sandbox(self):
        # Sandbox configured to expose a capture log to the script as 'log'
        config = SandboxConfiguration()
        config.include_log = True
        config.log_member_name = 'log'
        return Sandbox(config)

    def __build_inputs(self, composition, result_holder, resource_context_properties):
        # Names made available to the user script
        inputs = {}
        inputs['keg'] = KegCollection(composition)
        inputs['resultBuilder'] = result_holder
        inputs['props'] = resource_context_properties
        return inputs

    def __build_script(self, ready_script):
        # Append the entry-point call every ready-check script must define
        script = ready_script
        script += '\ncheckReady(keg, props, resultBuilder, log)'
        return script
|
from syntax.transformers.tissue_mask.otsu import OtsuTissueMask
|
''' This example shows how the read_timeout() method works. This
example models an attendant who goes to drink water every 3
seconds and then returns to attend customers. The customers
request the call center's services at different times.
'''
from pade.acl.messages import ACLMessage
from pade.behaviours.types import CyclicBehaviour, WakeUpBehaviour, OneShotBehaviour
from pade.core.agent import Agent
from pade.misc.utility import display_message, start_loop
# Attendant Agent
class Attendant(Agent):
    """Call-center attendant agent: continuously checks the call queue."""
    def setup(self):
        # Register the cyclic behaviour that polls for incoming calls
        self.add_behaviour(CheckQueue(self))
class CheckQueue(CyclicBehaviour):
    """Cyclic behaviour: answer a waiting call, or go drink water when the
    3-second wait times out."""
    def action(self):
        # Waits for a call for 3 seconds (using the read_timeout() method)
        call = self.read_timeout(3)
        # If there is at least one call to reply...
        if call != None: # You must handle None objects when using read_timeout()
            reply = call.create_reply() # Creates a reply
            reply.set_content('Here is your help.')
            self.send(reply) # Sends the reply
            display_message(self.agent, 'Help sent to %s.' % call.sender.getName())
        else:
            # No call arrived within the timeout: goes to drink water
            display_message(self.agent, "I'm gonna drink water.")
            self.wait(10)
            display_message(self.agent, 'I returned from water. e.e')
# Customer Agent
class Customer(Agent):
    """Customer agent that calls the attendant after a configurable delay."""
    # We're using the __init__() method to handle the input
    # parameters for this agent
    def __init__(self, aid, time, attendant):
        # This super().__init__(aid) call is needed
        super().__init__(aid)
        self.time = time # Delay before this customer places the call
        self.attendant = attendant # The address (AID) of the attendant
    def setup(self):
        # Place the call after self.time, then wait for the reply
        self.add_behaviour(Call(self, self.time))
        self.add_behaviour(CloseCall(self))
class Call(WakeUpBehaviour):
    """Wake-up behaviour: sends a help request to the attendant when fired."""
    def on_wake(self):
        # Preparing a message
        call = ACLMessage(ACLMessage.REQUEST)
        call.set_content('I need help!')
        call.add_receiver(self.agent.attendant)
        self.send(call) # Sending a message
        display_message(self.agent, "I'm making a call.")
class CloseCall(OneShotBehaviour):
    """One-shot behaviour: blocks until the attendant's reply arrives, then
    closes the call."""
    def action(self):
        # The customer only ends the call when gets a response
        response = self.read()
        # You don't need to handle None objects, because the read()
        # method always returns an ACLMessage object. The behaviour
        # will remain blocked until a message arrives.
        display_message(self.agent, " received help and I'm closing the call. Thank you. =)")
        display_message(self.agent, 'Help content: %s' % response.content)
if __name__ == '__main__':
    # One attendant plus three customers calling at different times
    attendant = Attendant('attendant')
    agents = [attendant]
    # Passing the attendant address for each customer
    for customer_name, call_time in (('customer-1', 2), ('customer-2', 10), ('customer-3', 20)):
        agents.append(Customer(customer_name, call_time, attendant.aid))
    start_loop(agents)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Mar-29-21 18:37
# @Author : Kan HUANG (kan.huang@connect.ust.hk)
# @RefLink : https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import random
import os
import unicodedata
import string
import torch
def findFiles(path): return glob.glob(path)
# Alphabet: ASCII letters plus six punctuation characters; vocabulary size
# adds one slot for the EOS marker.
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1

def unicodeToAscii(s):
    """Turn a Unicode string into plain ASCII.

    Decomposes accented characters and drops the combining marks, keeping
    only characters in `all_letters`. Thanks to
    https://stackoverflow.com/a/518232/2809427
    For example, 'Ślusàrski' -> 'Slusarski'
    """
    decomposed = unicodedata.normalize('NFD', s)
    kept = [c for c in decomposed
            if unicodedata.category(c) != 'Mn' and c in all_letters]
    return ''.join(kept)
def readLines(filename):
    """Read a file and split into lines, normalized to ASCII.

    Each file of 'data/names/*.txt' is a text file with names belonging to
    a category; every line is one name string for that category.
    """
    # Use a context manager so the file handle is closed promptly
    # (the previous version left the handle open until GC)
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of names per language.
# Each data file is named <category>.txt and holds one name per line.
category_lines = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
    # The category is the file name without directory or extension
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines
n_categories = len(all_categories)
def randomChoice(l):
    """Return a uniformly random element of sequence *l*.

    Uses the stdlib random.choice instead of hand-rolled randint indexing.
    """
    return random.choice(l)
def randomTrainingPair():
    """randomTrainingPair
    Get a random category and random line from that category.
    Output:
        category: a random category, e.g., "English".
        line: a random line (name) belonging to the above category.
    """
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    return category, line
def categoryTensor(category):
    """categoryTensor
    Convert category to a one-hot vector.
    Output: tensor of shape (1, n_categories) with a 1 at the category's index.
    """
    li = all_categories.index(category)
    tensor = torch.zeros(1, n_categories)
    tensor[0][li] = 1
    return tensor
def inputTensor(line):
    """inputTensor
    Same function as `lineToTensor`.
    Output:
        One-hot matrix of shape (len(line), 1, n_letters) covering the first
        through last letters (EOS not included) for the input.
    """
    tensor = torch.zeros(len(line), 1, n_letters)
    for position, letter in enumerate(line):
        tensor[position][0][all_letters.find(letter)] = 1
    return tensor
def targetTensor(line):
    """targetTensor
    Output:
        LongTensor/torch.int64 of shape (len(line),): the indexes of the
        second letter through the last letter, followed by the EOS index
        (the target is the input shifted by one position).
    """
    letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
    letter_indexes.append(n_letters - 1) # EOS's index
    return torch.LongTensor(letter_indexes)
def randomTrainingExample():
    """
    Make category, input, and target tensors from a random
    (category, line) pair.
    """
    category, line = randomTrainingPair()
    category_tensor = categoryTensor(category)
    input_line_tensor = inputTensor(line)
    target_line_tensor = targetTensor(line)
    return category_tensor, input_line_tensor, target_line_tensor
def main():
    # Placeholder entry point: the dataset helpers above are meant to be
    # imported/exercised elsewhere
    pass
if __name__ == "__main__":
    main()
|
import numpy as np
def rank(data, axis=1):
    """Transform a matrix into a matrix of descending ranks along an axis.

    Parameters
    ----------
    data : array
        2-D array to rank.
    axis : int, default : 1
        Axis along which to rank (0 = down columns, 1 = across rows).

    Returns
    -------
    array, shape (n_rows, n_cols)
        Rank of each element along the axis; 0 is the largest value.

    Raises
    ------
    ValueError
        If axis is not 0 or 1.
    """
    # see: https://stackoverflow.com/a/51081190/3986320
    if axis not in [0, 1]:
        raise ValueError('use 0 or 1 for axis')
    # argsort of the negated data lists indexes from largest to smallest
    order = np.argsort(-data, axis=axis)
    n_rows, n_cols = data.shape
    ranks = np.empty((n_rows, n_cols), dtype=int)
    # Scatter rank r back to the position holding the r-th largest value
    if axis == 1:
        ranks[np.arange(n_rows)[:, None], order] = np.arange(n_cols)
    else:
        ranks[order, np.arange(n_cols)] = np.arange(n_rows)[:, None]
    return ranks
def top_k_ranks(data, k=5, axis=1):
    """Return indexes of the top-k ranked columns (axis=1) or rows (axis=0).

    Parameters
    ----------
    data : array
        2-D array to rank.
    k : int, default : 5
        Limit results to top k ranks (clamped to [1, data.shape[axis]]).
    axis : int, default : 1
        Rank the values along the axis.

    Returns
    -------
    array of int32, shape (n_rows, k) for axis=1 or (k, n_cols) for axis=0
        Indexes of the k largest values, in descending rank order.

    Raises
    ------
    ValueError
        If axis is not 0 or 1.
    """
    # Clamp k into a valid range
    k = max(1, min(k, data.shape[axis]))
    if axis not in [0, 1]:
        raise ValueError('use 0 or 1 for axis')
    # argsort of the negated data lists indexes from largest to smallest,
    # so its first k entries along `axis` are exactly the rank-0..rank-(k-1)
    # indexes. This replaces the previous O(k*m*n) per-rank argwhere scan
    # with a single sort, and (using the same argsort call as rank()) keeps
    # identical tie-breaking.
    order = np.argsort(-data, axis=axis)
    if axis == 1:
        return order[:, :k].astype(np.int32)
    return order[:k, :].astype(np.int32)
'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import json
import argparse
import unittest
import re
from os import linesep as LINESEP
# backward compatibility with 2.x
try:
    ModuleNotFoundError
except NameError:  # narrowed from a bare except: only the missing-name case
    # Python 2 has no ModuleNotFoundError; alias it to ImportError
    ModuleNotFoundError = ImportError
try:
    import rados
except ModuleNotFoundError as noModErr:
    print("Error: %s\nExiting the script..." % noModErr)
    sys.exit(1)
try:
    # for 2.7.x
    from StringIO import StringIO
except ModuleNotFoundError:
    # for 3.x
    from io import StringIO
class ExecutionFailureException(Exception):
    """Raised when a ceph command or a validation step fails."""
    pass
class RadosJSON:
    # Default user used for cluster health checks
    EXTERNAL_USER_NAME = "client.healthchecker"
    # Error detail used when a command returns success but no output
    EMPTY_OUTPUT_LIST = "Empty output list"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
    """Build the command-line parser and parse *args_to_parse*.

    Falls back to sys.argv[1:] when no list is given. Returns the parsed
    argparse.Namespace.
    """
    argP = argparse.ArgumentParser()
    argP.add_argument("--verbose", "-v",
                      action='store_true', default=False)
    argP.add_argument("--ceph-conf", "-c",
                      help="Provide a ceph conf file.", type=str)
    argP.add_argument("--run-as-user", "-u",
                      help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'",
                      default=cls.EXTERNAL_USER_NAME, type=str)
    argP.add_argument("--format", "-t", choices=["json", "bash"],
                      default='json', help="Provides the output format (json | bash)")
    argP.add_argument("--cluster-name", default="openshift-storage",
                      help="Ceph cluster name")
    argP.add_argument("--output", "-o", default="",
                      help="Output will be stored into the provided file")
    argP.add_argument("--cephfs-filesystem-name", default="",
                      help="Provides the name of the Ceph filesystem")
    argP.add_argument("--cephfs-data-pool-name", default="",
                      help="Provides the name of the cephfs data pool")
    argP.add_argument("--rbd-data-pool-name", default="", required=True,
                      help="Provides the name of the RBD datapool")
    argP.add_argument("--namespace", default="",
                      help="Namespace where CephCluster is running")
    argP.add_argument("--rgw-pool-prefix", default="default",
                      help="RGW Pool prefix")
    argP.add_argument("--rgw-endpoint", default="", required=True,
                      help="Rados GateWay endpoint (in <IP>:<PORT> format)")
    if args_to_parse:
        assert type(args_to_parse) == list, \
            "Argument to 'gen_arg_parser' should be a list"
    else:
        # No explicit list: parse the process's command line
        args_to_parse = sys.argv[1:]
    return argP.parse_args(args_to_parse)
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IP>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 1 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
def __init__(self, arg_list=None):
    """Parse arguments, then open and connect a rados cluster handle."""
    self.out_map = {}  # generated key/value output, populated later
    self._excluded_keys = set()  # keys to be excluded from output, populated later
    self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
    self.output_file = self._arg_parser.output
    self.ceph_conf = self._arg_parser.ceph_conf
    self.run_as_user = self._arg_parser.run_as_user
    if not self.run_as_user:
        # Fall back to the default healthchecker user
        self.run_as_user = self.EXTERNAL_USER_NAME
    if self.ceph_conf:
        self.cluster = rados.Rados(conffile=self.ceph_conf)
    else:
        # No conf file provided: use the default configuration search path
        self.cluster = rados.Rados()
        self.cluster.conf_read_file()
    self.cluster.connect()
def shutdown(self):
    """Close the cluster connection if it is currently connected."""
    if self.cluster.state == "connected":
        self.cluster.shutdown()
def get_fsid(self):
    """Return the cluster FSID as a string."""
    return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
    """Send *cmd_json* as a mon command and decode its JSON reply.

    Returns (return_value, parsed_json, error_message); parsed_json is an
    empty dict when the command failed.
    """
    cmd = json.dumps(cmd_json, sort_keys=True)
    ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
    if self._arg_parser.verbose:
        print("Command Input: {}".format(cmd))
        print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
            ret_val, cmd_out, err_msg))
    json_out = {}
    # Only parse output on success; failures leave json_out empty
    if ret_val == 0:
        json_out = json.loads(cmd_out)
    return ret_val, json_out, err_msg
def get_ceph_external_mon_data(self):
    """Return '<leader-name>=<ip:port>' for the quorum leader mon.

    Raises ExecutionFailureException when quorum_status fails or the
    leader cannot be found in the monmap.
    """
    cmd_json = {"prefix": "quorum_status", "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'quorum_status' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    q_leader_name = json_out['quorum_leader_name']
    q_leader_details = {}
    # Find the leader's entry in the monmap by name
    q_leader_matching_list = [l for l in json_out['monmap']['mons']
                              if l['name'] == q_leader_name]
    if len(q_leader_matching_list) == 0:
        raise ExecutionFailureException("No matching 'mon' details found")
    q_leader_details = q_leader_matching_list[0]
    # public_addr looks like 'ip:port/nonce'; keep only 'ip:port'
    ip_port = str(q_leader_details['public_addr'].split('/')[0])
    return "{}={}".format(str(q_leader_name), ip_port)
def create_cephCSIKeyring_cephFSProvisioner(self):
    '''
    Create (or fetch) the CSI cephfs provisioner keyring and return its key.

    command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
    '''
    cmd_json = {"prefix": "auth get-or-create",
                "entity": "client.csi-cephfs-provisioner",
                "caps": ["mon", "allow r", "mgr", "allow rw",
                         "osd", "allow rw tag cephfs metadata=*"],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_cephCSIKeyring_cephFSNode(self):
    """Create (or fetch) the CSI cephfs node keyring and return its key."""
    cmd_json = {"prefix": "auth get-or-create",
                "entity": "client.csi-cephfs-node",
                "caps": ["mon", "allow r",
                         "mgr", "allow rw",
                         "osd", "allow rw tag cephfs *=*",
                         "mds", "allow rw"],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-cephfs-node' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_cephCSIKeyring_RBDProvisioner(self):
    """Create (or fetch) the CSI RBD provisioner keyring and return its key."""
    cmd_json = {"prefix": "auth get-or-create",
                "entity": "client.csi-rbd-provisioner",
                "caps": ["mon", "profile rbd",
                         "mgr", "allow rw",
                         "osd", "profile rbd"],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-rbd-provisioner' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def get_cephfs_data_pool_details(self):
    """Resolve the cephfs filesystem name and its data pool name.

    Validates any user-provided '--cephfs-filesystem-name' /
    '--cephfs-data-pool-name' against the cluster's 'fs ls' output and
    fills in defaults on the arg parser when they were omitted.

    Raises ExecutionFailureException when 'fs ls' fails, when the provided
    names don't match, or when the selection is ambiguous.
    """
    cmd_json = {"prefix": "fs ls", "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'fs ls' command failed.\nError: {}".format(
                err_msg if ret_val != 0 else "No filesystem detected."))
    # if there are multiple filesystems present, and
    # no specific filesystem name and data-pool names are provided,
    # then raise an exception
    if len(json_out) > 1 and not self._arg_parser.cephfs_filesystem_name:
        # if the arguments are not provided, generate an error message
        raise ExecutionFailureException(
            "More than ONE filesystems detected.\n" +
            "{}\n\n".format(json_out) +
            "Please manually provide the details for " +
            "'--cephfs-filesystem-name'")
    matching_json_out = {}
    # if '--cephfs-filesystem-name' argument is provided,
    # check whether the provided filesystem-name exists or not
    if self._arg_parser.cephfs_filesystem_name:
        # get the matching list
        matching_json_out_list = [matched for matched in json_out
                                  if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
        # unable to find a matching fs-name, raise an error
        if len(matching_json_out_list) == 0:
            raise ExecutionFailureException(
                ("Filesystem provided, '{}', " +
                 "is not found in the fs-list: '{}'").format(
                    self._arg_parser.cephfs_filesystem_name,
                    [str(x['name']) for x in json_out]))
        matching_json_out = matching_json_out_list[0]
    else:
        # No name given: default to the only filesystem present
        self._arg_parser.cephfs_filesystem_name = str(json_out[0]['name'])
        matching_json_out = json_out[0]
    if type(matching_json_out['data_pools']) == list:
        if len(matching_json_out['data_pools']) == 0:
            raise ExecutionFailureException("No 'data_pools' found.")
        # if the user has already provided data-pool-name,
        # through --cephfs-data-pool-name
        if self._arg_parser.cephfs_data_pool_name:
            # if the provided name is not matching with the one in the list
            if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
                raise ExecutionFailureException(
                    ("Provided 'data-pool-name': '{}', " +
                     "doesn't match from the data-pools' list: {}").format(
                        self._arg_parser.cephfs_data_pool_name,
                        [str(x) for x in matching_json_out['data_pools']]))
        else:
            # Default to the first data pool, warning when there were several
            self._arg_parser.cephfs_data_pool_name = str(
                matching_json_out['data_pools'][0])
            if len(matching_json_out['data_pools']) > 1:
                print("WARNING: Multiple data pools detected.\n" +
                      "{}\n".format([str(x) for x in matching_json_out['data_pools']]) +
                      "Using the data-pool: {}\n".format(self._arg_parser.cephfs_data_pool_name))
def create_cephCSIKeyring_RBDNode(self):
    """Create (or fetch) the CSI RBD node keyring and return its key."""
    cmd_json = {"prefix": "auth get-or-create",
                "entity": "client.csi-rbd-node",
                "caps": ["mon", "profile rbd",
                         "osd", "profile rbd"],
                "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'auth get-or-create client.csi-rbd-node' command failed\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    return str(json_out[0]['key'])
def create_checkerKey(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": self.run_as_user,
"caps": ["mon", "allow r, allow command quorum_status",
"osd", ("allow rwx pool={0}.rgw.meta, " +
"allow r pool=.rgw.root, " +
"allow rw pool={0}.rgw.control, " +
"allow x pool={0}.rgw.buckets.index").format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create {}' command failed\n".format(self.run_as_user) +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
    def _gen_output_map(self):
        """Populate ``self.out_map`` with every value the output renderers need.

        Idempotent: returns immediately if the map was already built.
        Each keyring-creating call below talks to the cluster, so this is the
        single place all external state is gathered.
        """
        if self.out_map:
            return
        # Validates the rgw endpoint; raises ExecutionFailureException on a
        # malformed host:port (see the endpoint tests below).
        self._invalid_endpoint(self._arg_parser.rgw_endpoint)
        if not self.cluster.pool_exists(self._arg_parser.rbd_data_pool_name):
            raise ExecutionFailureException(
                "The provided 'rbd-data-pool-name': {}, don't exists".format(
                    self._arg_parser.rbd_data_pool_name))
        # CLUSTER_NAME is kept out of the shell output (see gen_shell_out).
        self._excluded_keys.add('CLUSTER_NAME')
        # Fills in cephfs filesystem / data-pool arg-parser fields if unset.
        self.get_cephfs_data_pool_details()
        self.out_map['NAMESPACE'] = self._arg_parser.namespace
        self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
        self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
        self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
        self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
        self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
        self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode()
        self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner()
        self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
        self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
        self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
        self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
        self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
        self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
        self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
def gen_shell_out(self):
self._gen_output_map()
shOutIO = StringIO()
for k, v in self.out_map.items():
if v and k not in self._excluded_keys:
shOutIO.write('export {}={}{}'.format(k, v, LINESEP))
shOut = shOutIO.getvalue()
shOutIO.close()
return shOut
    def gen_json_out(self):
        """Return the collected cluster data as a JSON array of Kubernetes-style
        resource descriptions (ConfigMap, Secrets, StorageClasses), terminated
        by LINESEP.
        """
        self._gen_output_map()
        json_out = [
            {
                "name": "rook-ceph-mon-endpoints",
                "kind": "ConfigMap",
                "data": {
                    "data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
                    "maxMonId": "0",
                    "mapping": {}
                }
            },
            {
                "name": "rook-ceph-mon",
                "kind": "Secret",
                "data": {
                    # admin/mon secrets are fixed placeholder strings here
                    "admin-secret": "admin-secret",
                    "cluster-name": self.out_map['CLUSTER_NAME'],
                    "fsid": self.out_map['ROOK_EXTERNAL_FSID'],
                    "mon-secret": "mon-secret"
                },
            },
            {
                "name": "rook-ceph-operator-creds",
                "kind": "Secret",
                "data": {
                    "userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
                    "userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
                }
            },
            {
                "name": "rook-csi-rbd-node",
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-node',
                    "userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
                }
            },
            {
                "name": "rook-csi-rbd-provisioner",
                "kind": "Secret",
                "data": {
                    "userID": 'csi-rbd-provisioner',
                    "userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
                },
            },
            {
                "name": "rook-csi-cephfs-node",
                "kind": "Secret",
                "data": {
                    "adminID": 'csi-cephfs-node',
                    "adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
                }
            },
            {
                "name": "rook-csi-cephfs-provisioner",
                "kind": "Secret",
                "data": {
                    "adminID": 'csi-cephfs-provisioner',
                    "adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
                },
            },
            {
                "name": "ceph-rbd",
                "kind": "StorageClass",
                "data": {
                    "pool": self.out_map['RBD_POOL_NAME']
                }
            },
            {
                "name": "cephfs",
                "kind": "StorageClass",
                "data": {
                    "fsName": self.out_map['CEPHFS_FS_NAME'],
                    "pool": self.out_map['CEPHFS_POOL_NAME']
                }
            },
            {
                "name": "ceph-rgw",
                "kind": "StorageClass",
                "data": {
                    "endpoint": self.out_map['RGW_ENDPOINT']
                }
            }
        ]
        return json.dumps(json_out)+LINESEP
def main(self):
generated_output = ''
if self._arg_parser.format == 'json':
generated_output = self.gen_json_out()
elif self._arg_parser.format == 'bash':
generated_output = self.gen_shell_out()
else:
raise ExecutionFailureException("Unsupported format: {}".format(
self._arg_parser.format))
print('{}'.format(generated_output))
if self.output_file and generated_output:
fOut = open(self.output_file, 'w')
fOut.write(generated_output)
fOut.close()
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        # fixed typo: 'Excecution' -> 'Execution'
        print("Execution Failed: {}".format(err))
    except KeyError as kErr:
        # bug fix: print() does not do %-interpolation like logging;
        # the old call printed the literal '%s' followed by the error
        print("KeyError: {}".format(kErr))
    except OSError as osErr:
        print("Error while trying to output the data: {}".format(osErr))
    finally:
        # always release the cluster connection
        rjObj.shutdown()
################################################
##################### TEST #####################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    """In-memory stand-in for the 'rados' cluster handle, used by the tests.

    Maps canonical (sort_keys) JSON command strings to canned JSON outputs.
    Set ``return_val`` / ``err_message`` to simulate command failures.
    """

    def __init__(self):
        self.return_val = 0
        self.err_message = ''
        self.state = 'connected'
        # canonical-JSON command string -> canned JSON output string
        self.cmd_output_map = {}
        # friendly name -> canonical-JSON command string
        self.cmd_names = {}
        self._init_cmd_output_map()

    def _init_cmd_output_map(self):
        """Register the canned outputs for every command the tests issue."""
        self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
        self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
        # all the commands and their output
        self.cmd_output_map[self.cmd_names['fs ls']] = \
            '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-data0"]}]'''
        self.cmd_output_map[self.cmd_names['quorum_status']] = \
            '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = \
            '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = \
            '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = \
            '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = \
            '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = \
            '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]'''

    def shutdown(self):
        pass

    def get_fsid(self):
        return 'af4e1673-0b72-402d-990a-22d2919d0f1c'

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def mon_command(self, cmd, out):
        """Mimic rados mon_command: canonicalize the JSON command (sorted keys)
        and look up the canned output. Raises KeyError for unknown commands.
        """
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, \
            cmd_output, \
            "{}".format(self.err_message).encode('utf-8')

    @classmethod
    def Rados(cls, conffile=None):
        # bug fix: a @classmethod's first parameter is the class; previously
        # the class object was silently bound to 'conffile'
        return cls()
# inorder to test the package,
# cd <script_directory>
# python -m unittest --verbose <script_name_without_dot_py>
class TestRadosJSON(unittest.TestCase):
    """Unit tests for RadosJSON, backed by the DummyRados stub cluster.

    Idiom fix: the try/self.fail()/except blocks that expected an exception
    are replaced by the assertRaises context manager.
    """

    def setUp(self):
        print("{}".format("I am in setup"))
        self.rjObj = RadosJSON(['--rbd-data-pool-name=abc',
                                '--rgw-endpoint=10.10.212.122:9000', '--format=json'])
        # for testing, we are using 'DummyRados' object
        self.rjObj.cluster = DummyRados.Rados()

    def tearDown(self):
        print("{}".format("I am tearing down the setup"))
        self.rjObj.shutdown()

    def test_method_main_output(self):
        """main() succeeds for 'json' and 'bash' and rejects unknown formats."""
        print("JSON Output")
        self.rjObj._arg_parser.format = "json"
        self.rjObj.main()
        print("\n\nShell Output")
        self.rjObj._arg_parser.format = "bash"
        self.rjObj.main()
        print("\n\nNon compatible output (--abcd)")
        self.rjObj._arg_parser.format = 'abcd'
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.main()

    def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
        """Keyring creation for the cephfs provisioner returns a key string."""
        csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
        print("{}".format(csiKeyring))

    def test_non_zero_return_and_error(self):
        """A non-zero cluster return code surfaces as ExecutionFailureException."""
        self.rjObj.cluster.return_val = 1
        self.rjObj.cluster.err_message = "Dummy Error"
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.create_checkerKey()

    def test_multi_filesystem_scenario(self):
        """Filesystem selection when more than one filesystem exists."""
        cmd_key = self.rjObj.cluster.cmd_names['fs ls']
        cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
        cmd_json_out = json.loads(cmd_out)
        second_fs_details = dict(cmd_json_out[0])
        second_fs_details['name'] += '-2'
        cmd_json_out.append(second_fs_details)
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        # multiple filesystem present,
        # but no specific '--cephfs-filesystem-name' argument provided
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.get_cephfs_data_pool_details()
        # pass an existing filesystem name
        try:
            self.rjObj._arg_parser.cephfs_filesystem_name = second_fs_details['name']
            self.rjObj.get_cephfs_data_pool_details()
        except ExecutionFailureException as err:
            self.fail("Should not have thrown error: {}".format(err))
        # pass a non-existing filesystem name
        self.rjObj._arg_parser.cephfs_filesystem_name += "-non-existing-fs-name"
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.get_cephfs_data_pool_details()
        # empty file-system array
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps([])
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.get_cephfs_data_pool_details()

    def test_multi_data_pool_scenario(self):
        """Data-pool selection with multiple, missing and empty data pools."""
        cmd_key = self.rjObj.cluster.cmd_names['fs ls']
        cmd_out = self.rjObj.cluster.cmd_output_map[cmd_key]
        cmd_json_out = json.loads(cmd_out)
        first_fs_details = cmd_json_out[0]
        new_data_pool_name = 'myfs-data1'
        first_fs_details['data_pools'].append(new_data_pool_name)
        print("Modified JSON Cmd Out: {}".format(cmd_json_out))
        self.rjObj._arg_parser.cephfs_data_pool_name = new_data_pool_name
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        self.rjObj.get_cephfs_data_pool_details()
        # use a non-existing data-pool-name
        self.rjObj._arg_parser.cephfs_data_pool_name = 'myfs-data3'
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.get_cephfs_data_pool_details()
        # empty data-pool scenario
        first_fs_details['data_pools'] = []
        self.rjObj.cluster.cmd_output_map[cmd_key] = json.dumps(cmd_json_out)
        with self.assertRaises(ExecutionFailureException):
            self.rjObj.get_cephfs_data_pool_details()

    def test_valid_rgw_endpoint(self):
        """Endpoint validation accepts well-formed host:port and rejects the rest."""
        self.rjObj._invalid_endpoint("10.10.212.133:8000")
        bad_endpoints = (
            "10.10.212.133:238000",    # invalid port
            "10.1033.212.133:8000",    # out of range IP
            "10.103..212.133:8000",    # malformed IP
            "10.103.212.133::8000",    # double colon
            "10.10.103.212.133:8000",  # too many octets
        )
        for endpoint in bad_endpoints:
            with self.assertRaises(ExecutionFailureException):
                self.rjObj._invalid_endpoint(endpoint)
|
from norminette.rules import Rule
# C scalar type keywords the rule set recognises.
# NOTE(review): this module-level name shadows the stdlib 'types' module.
types = ["INT", "FLOAT", "CHAR", "DOUBLE", "LONG", "SHORT"]
class CheckFuncDeclaration(Rule):
    """Norminette rule validating function declarations/prototypes:
    newline required before a declaration, max 4 arguments, spacing checks.
    """

    def __init__(self):
        super().__init__()
        # Runs only for scopes recognised as declarations or prototypes.
        self.depends_on = ["IsFuncDeclaration", "IsFuncPrototype"]

    def run(self, context):
        """
        Maximum 4 arguments in a function
        Function declaration must be preceded by a newline
        """
        i = 0
        tmp = 0
        start = 0
        arg = 1  # argument counter; commas at nesting depth 1 increment it
        # Scan the declaration line; an opening brace before the line ends
        # means the brace is not on its own line.
        while context.check_token(tmp, ["SEMI_COLON", "NEWLINE"]) is False:
            if context.check_token(tmp, "LBRACE") is True:
                context.new_error("BRACE_NEWLINE", context.peek_token(tmp))
            tmp += 1
        # if tmp < context.tkn_scope - 2:
        # context.new_error("NEWLINE_IN_DECL", context.peek_token(tmp))
        # this is a func declaration
        if context.history[-1] == 'IsFuncDeclaration':
            # if context.check_token(tmp, "SEMI_COLON") is False:
            # Walk history backwards past entries that don't count as
            # separators; the entry before must be an empty line.
            i = 2
            length = len(context.history)
            while length - i >= 0 and (context.history[-i] == "IsPreprocessorStatement"
                    or context.history[-i] == "IsComment"
                    or context.history[-i] == "IsFuncDeclaration"
                    ):
                i += 1
            if length - i > 0 and context.history[-i] != "IsEmptyLine":
                context.new_error("NEWLINE_PRECEDES_FUNC", context.peek_token(start))
        # Move just past the function name; expect the parameter list next.
        i = context.fname_pos + 1
        while (context.check_token(i, ["RPARENTHESIS"])) is True: #, "SPACE", "TAB"])) is True:
            i += 1
        if context.check_token(i, "LPARENTHESIS") is False:
            context.new_error("EXP_PARENTHESIS", context.peek_token(i))
        i += 1
        # Count top-level commas inside the parameter list; nested
        # parentheses (e.g. function-pointer params) are skipped wholesale.
        deep = 1
        while deep > 0:
            if context.check_token(i, "LPARENTHESIS"):
                i = context.skip_nest(i)
            if context.check_token(i, "RPARENTHESIS"):
                deep -= 1
            if context.check_token(i, "COMMA"):
                arg += 1
            i += 1
        # Whitespace right before the closing parenthesis is only allowed
        # when it follows a newline (multi-line parameter lists).
        if context.check_token(i - 1, ["SPACE", "TAB"]) is True:
            tmp = i - 1
            while context.check_token(tmp, ["SPACE", "TAB"]) is True:
                tmp -= 1
            if context.check_token(tmp, "NEWLINE") is False:
                context.new_error("NO_SPC_BFR_PAR", context.peek_token(i))
        if arg > 4:
            context.new_error("TOO_MANY_ARGS", context.peek_token(i))
            # NOTE(review): 'arg' is rebound to a list here; it is unused
            # afterwards, so this has no effect — looks like leftover code.
            arg = []
        # Advance to end of line, flagging trailing whitespace.
        while context.check_token(i, ["NEWLINE", "SEMI_COLON"]) is False:
            i += 1
        if context.check_token(i - 1, ["TAB", "SPACE"]):
            context.new_error("SPC_BEFORE_NL", context.peek_token(i))
        return False, 0
|
import re
# Taken from inflection library
def underscore(word):
    """Convert *word* from CamelCase (and dash-separated) to snake_case."""
    # Split acronym/word boundaries first: "HTTPResponse" -> "HTTP_Response".
    result = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
    # Then split lower-or-digit to upper boundaries: "fooBar" -> "foo_Bar".
    result = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', result)
    # Dashes become underscores, and everything is lowercased.
    return result.replace('-', '_').lower()
|
import astropy.units as u
import numpy as np
import pytest
from ndcube import utils
# Shared fixtures: missing-axes masks and extra-coords inputs/outputs used
# by the parametrized tests below.
missing_axes_none = [False] * 3
missing_axes_0_2 = [True, False, True]
missing_axes_1 = [False, True, False]
axes_length = 3
# Extra coords keyed by data axis ("axis") ...
extra_coords_dict = {"time": {"axis": 0, "value": u.Quantity(range(axes_length), unit=u.pix)},
                     "hello": {"axis": 1, "value": u.Quantity(range(axes_length), unit=u.pix)}}
# ... the equivalent list-of-tuples input format ...
extra_coords_input = [('time', 0, u.Quantity(range(axes_length), unit=u.pix)),
                      ('hello', 1, u.Quantity(range(axes_length), unit=u.pix))]
# ... and the same coords keyed by WCS axis instead of data axis.
extra_coords_dict_wcs = {"time": {"wcs axis": 0,
                                  "value": u.Quantity(range(axes_length), unit=u.pix)},
                         "hello": {"wcs axis": 1,
                                   "value": u.Quantity(range(axes_length), unit=u.pix)}}
@pytest.mark.parametrize("test_input", [
    ([('name', 0)], np.array([0, 1]), 2, (1, 2)),       # tuple too short
    ([(0, 0, 0)], np.array([0, 1]), 2, (1, 2)),         # name is not a str
    ([('name', '0', 0)], np.array([0, 1]), 2, (1, 2))   # axis is not an int
])
def test_format_input_extra_coords_to_extra_coords_wcs_axis_value(test_input):
    """Malformed extra-coords tuples must raise ValueError."""
    with pytest.raises(ValueError):
        utils.cube._format_input_extra_coords_to_extra_coords_wcs_axis(*test_input)
@pytest.mark.parametrize("test_input,expected", [
    ((extra_coords_dict, np.array([0, 1, 2]), 3), extra_coords_input),
    ((extra_coords_dict_wcs, np.array([0, 1, 2]), 3),
     [('time', 2, u.Quantity(range(axes_length), unit=u.pix)),
      ('hello', 1, u.Quantity(range(axes_length), unit=u.pix))]),
    ((extra_coords_dict_wcs, np.array([0, 2]), 3),
     [('time', 1, u.Quantity(range(axes_length), unit=u.pix)),
      ('hello', None, u.Quantity(range(axes_length), unit=u.pix))])
])
def test_convert_extra_coords_dict_to_input_format(test_input, expected):
    """Compare converted output to the expected tuples, element by element,
    matching entries by name (output order is not guaranteed)."""
    output = utils.cube.convert_extra_coords_dict_to_input_format(*test_input)
    if len(output) != len(expected):
        raise AssertionError(f"{output} != {expected}")
    for output_tuple in output:
        # Linear scan for the expected tuple with the same name; j is also a
        # sentinel: len(expected)+1 means "found", len(expected) means "not found".
        j = 0
        while j < len(expected):
            if output_tuple[0] == expected[j][0]:
                assert len(output_tuple) == len(expected[j])
                for k, el in enumerate(output_tuple):
                    try:
                        assert el == expected[j][k]
                    except ValueError as err:
                        # numpy arrays raise on bare truth testing; fall back
                        # to an element-wise comparison in that case only.
                        if err.args[0] == "The truth value of an array with more than" + \
                                " one element is ambiguous. Use a.any() or a.all()":
                            assert (el == expected[j][k]).all()
                        else:
                            raise err
                j = len(expected) + 1
            else:
                j += 1
        if j == len(expected):
            raise AssertionError(f"{output} != {expected}")
def test_convert_extra_coords_dict_to_input_format_error():
    """A coord entry lacking both 'axis' and 'wcs axis' keys raises KeyError."""
    with pytest.raises(KeyError):
        utils.cube.convert_extra_coords_dict_to_input_format(
            {"time": {"not axis": 0, "value": []}}, [0, 1, 2], 3)
|
def to_dictionary(text_path='', code='utf-8'):
    """Build an index -> character dictionary from a file's distinct characters.

    Args:
        text_path: path of the text file to read.
        code: encoding used to decode the file's bytes (decode errors ignored).

    Returns:
        dict mapping consecutive integer ids (from 0) to each distinct
        character found in the file, in sorted character order.
    """
    with open(text_path, 'rb') as file:
        info_list = [part.decode(code, 'ignore').strip() for part in file.readlines()]
    unique_chars = set(''.join(info_list))
    # Bug fix: iterate the characters in sorted order so the id assignment is
    # deterministic; raw set order varies across runs (str hash randomization),
    # making the previous mapping irreproducible.
    return dict(enumerate(sorted(unique_chars)))
if __name__ == '__main__':
    # to_dictionary('')
    # Small demo of set operations on two lists.
    first = [3, 4, 5, 8]
    second = [3, 4, 5, 6, 7]
    # Elements common to both lists.
    common = list(set(first) & set(second))
    print(common)
    # Elements present only in the first list.
    first_set = set(first)
    second_set = set(second)
    first_set.difference_update(second_set)
    print(first_set)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
import datetime as dt
from pyuploadcare.dj.models import ImageField
from django.db.models.signals import post_save
from django.utils import timezone
from django.core.urlresolvers import reverse
# Create your models here.
class Profile(models.Model):
    """Per-user profile (avatar, phone, bio) linked one-to-one to auth.User."""
    user = models.OneToOneField(User,related_name='profile')
    picture = ImageField()  # served via pyuploadcare
    phone_number = models.CharField(max_length=13)
    bio = models.TextField()
    def __str__(self):
        # Display the owning user's username.
        return self.user.username
    def get_absolute_url(self):
        return reverse('dump', kwargs={'pk':self.pk})
class Event(models.Model):
    """A user's scheduled event; clean() rejects overlaps on the same day."""
    user = models.ForeignKey(User,related_name='event')
    day = models.DateField(u'Day of the event', help_text=u'Day of the event')
    start_time = models.TimeField(u'Starting time', help_text=u'Starting time')
    end_time = models.TimeField(u'Final time', help_text=u'Final time')
    notes = models.TextField(u'Textual Notes', help_text=u'Textual Notes', blank=True, null=True)
    class Meta:
        verbose_name = u'Scheduling'
        verbose_name_plural = u'Scheduling'
    def check_overlap(self, fixed_start, fixed_end, new_start, new_end):
        """Return True when [new_start, new_end] overlaps [fixed_start, fixed_end].

        Back-to-back events (touching endpoints) do not count as overlapping.
        """
        overlap = False
        if new_start == fixed_end or new_end == fixed_start:    # edge case
            overlap = False
        elif (new_start >= fixed_start and new_start <= fixed_end) or (new_end >= fixed_start and new_end <= fixed_end):    # inner limits
            overlap = True
        elif new_start <= fixed_start and new_end >= fixed_end:    # outer limits
            overlap = True
        return overlap
    def get_absolute_url(self):
        # Admin change-page link rendered as an HTML anchor.
        url = reverse('admin:%s_%s_change' % (self._meta.app_label, self._meta.model_name), args=[self.id])
        return u'<a href="%s">%s</a>' % (url, str(self.start_time))
    def clean(self):
        """Validate time ordering and reject overlaps with other same-day events."""
        if self.end_time <= self.start_time:
            raise ValidationError('Ending hour must be after the starting hour')
        # Bug fix: exclude this instance so saving an existing event does not
        # report an overlap with itself (pk is None for unsaved instances,
        # in which case nothing is excluded).
        events = Event.objects.filter(day=self.day).exclude(pk=self.pk)
        if events.exists():
            for event in events:
                if self.check_overlap(event.start_time, event.end_time, self.start_time, self.end_time):
                    raise ValidationError(
                        'There is an overlap with another event: ' + str(event.day) + ', ' + str(
                            event.start_time) + '-' + str(event.end_time))
    def __str__(self):
        # Bug fix: notes is nullable (null=True); __str__ must return a str,
        # so fall back to the empty string instead of returning None.
        return self.notes or ''
|
# -*- coding: utf-8 -*-
import pytest
from marshmallow import Schema, fields, class_registry
from marshmallow.exceptions import RegistryError
def test_serializer_has_class_registry():
    """Defining Schema subclasses registers them by name in class_registry."""
    class MySchema(Schema):
        pass
    class MySubSchema(Schema):
        pass
    # Registration happens at class-definition time; no instantiation needed.
    assert 'MySchema' in class_registry._registry
    assert 'MySubSchema' in class_registry._registry
class A:
    """Simple record holding an id and an optional reference to a B."""
    def __init__(self, _id, b=None):
        # Store the identifier and the (possibly absent) partner object.
        self.id, self.b = _id, b
class B:
    """Simple record holding an id and an optional back-reference to an A."""
    def __init__(self, _id, a=None):
        # Store the identifier and the (possibly absent) partner object.
        self.id, self.a = _id, a
class C:
    """Simple record holding an id and a list of B objects."""
    def __init__(self, _id, bs=None):
        self.id = _id
        # Keep the caller's list when truthy; otherwise start a fresh one.
        self.bs = [] if not bs else bs
class ASchema(Schema):
    """Schema for A; nests BSchema by registry name, excluding 'a' to break
    the two-way nesting cycle."""
    id = fields.Integer()
    b = fields.Nested('BSchema', exclude=('a', ))
class BSchema(Schema):
    """Schema for B; nests ASchema by registry name (resolved lazily)."""
    id = fields.Integer()
    a = fields.Nested('ASchema')
class CSchema(Schema):
    """Schema for C; nests a list of BSchema objects by registry name."""
    id = fields.Integer()
    bs = fields.Nested('BSchema', many=True)
def test_two_way_nesting():
    """Mutually-nested schemas (A <-> B) serialize each other's ids."""
    a_obj = A(1)
    b_obj = B(2, a=a_obj)
    a_obj.b = b_obj
    a_serialized = ASchema().dump(a_obj)
    b_serialized = BSchema().dump(b_obj)
    assert a_serialized['b']['id'] == b_obj.id
    assert b_serialized['a']['id'] == a_obj.id
def test_nesting_with_class_name_many():
    """Nested(..., many=True) by class name serializes a list of objects."""
    c_obj = C(1, bs=[B(2), B(3), B(4)])
    c_serialized = CSchema().dump(c_obj)
    assert len(c_serialized['bs']) == len(c_obj.bs)
    assert c_serialized['bs'][0]['id'] == c_obj.bs[0].id
def test_invalid_class_name_in_nested_field_raises_error():
    """Nested('...') with an unregistered name raises RegistryError at dump time.

    Fix: dropped the unused 'user' fixture parameter, which created a spurious
    dependency on an unrelated pytest fixture.
    """
    class MySchema(Schema):
        nf = fields.Nested('notfound')
    sch = MySchema()
    with pytest.raises(RegistryError) as excinfo:
        sch.dump({'nf': None})
    assert 'Class with name {0!r} was not found'.format('notfound') in str(excinfo)
class FooSerializer(Schema):
    """Schema deliberately sharing its name with tests.foo_serializer.FooSerializer
    to exercise duplicate-name registry behavior in the tests below."""
    _id = fields.Integer()
def test_multiple_classes_with_same_name_raises_error():
    """Resolving an ambiguous (duplicated) schema name raises RegistryError."""
    # Import a class with the same name
    from .foo_serializer import FooSerializer as FooSerializer1  # noqa
    class MySchema(Schema):
        foo = fields.Nested('FooSerializer')
    # Using a nested field with the class name fails because there are
    # two defined classes with the same name
    sch = MySchema()
    with pytest.raises(RegistryError) as excinfo:
        sch.dump({'foo': {'_id': 1}})
    msg = 'Multiple classes with name {0!r} were found.'\
        .format('FooSerializer')
    assert msg in str(excinfo)
def test_multiple_classes_with_all():
    """get_class(..., all=True) returns every registrant of a duplicated name."""
    # Import a class with the same name
    from .foo_serializer import FooSerializer as FooSerializer1  # noqa
    classes = class_registry.get_class('FooSerializer', all=True)
    assert len(classes) == 2
def test_can_use_full_module_path_to_class():
    """A fully-qualified module path disambiguates duplicated schema names."""
    from .foo_serializer import FooSerializer as FooSerializer1  # noqa
    # Using full paths is ok
    class Schema1(Schema):
        foo = fields.Nested('tests.foo_serializer.FooSerializer')
    sch = Schema1()
    # Note: The arguments here don't matter. What matters is that no
    # error is raised
    assert sch.dump({'foo': {'_id': 42}})
    class Schema2(Schema):
        foo = fields.Nested('tests.test_registry.FooSerializer')
    sch = Schema2()
    assert sch.dump({'foo': {'_id': 42}})
|
"""A library allowing you to create an auto method-binding dict.
Mainly used for event handlers, a binding dict consists of a mapping between
any events or keys, to their appropriate handler functions within a class.
Upon key lookup, the dict will bind the appropriate function to the instance
of the class.
For an example:
class Server:
def __init__(self, name):
self.name = name
def on_connect(self, remote_host):
print(self.name, remote_host)
    def on_disconnect(self, remote_host):
        print(self.name, remote_host)
handlers = BDict({NewConnectionEvent: on_connect,
DisconnectonEvent: on_disconnect})
>>> s = Server("myserver")
>>> s.handlers[NewConnectionEvent]("1.2.3.4")
myserver 1.2.3.4
As you can see, after accessing the handlers dict, and upon key lookup,
the dict bound the handler functions to the instance.
"""
__author__ = "Bar Harel"
__version__ = "0.1.0"
__license__ = "MIT"
__all__ = ["BDict"]
from collections import ChainMap as _ChainMap
from itertools import chain as _chain
from typing import (
Any as _Any, Callable as _Callable, cast as _cast, Dict as _Dict,
Iterable as _Iterable, Mapping as _Mapping,
MutableMapping as _MutableMapping, Optional as _Optional,
overload as _overload, Tuple as _Tuple, Type as _Type, TypeVar as _TypeVar,
Union as _Union)
from weakref import ref as _ref, WeakKeyDictionary as _WeakKeyDictionary
_T = _TypeVar("_T")
_KT = _TypeVar("_KT")
_VT = _TypeVar("_VT", bound=_Callable)
BDICT_INPUT_TYPE = _Union[_Iterable[_Tuple[_KT, _VT]], _Mapping[_KT, _VT]]
class _custom:
"""Marker for custom value that shouldn't be auto-bound"""
__slots__ = ("value",)
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
class BDict(_Dict[_KT, _VT]):
    """An auto method-binding dict"""
    # NOTE(review): a bare string is valid in __slots__ (treated as a single
    # slot name), though a 1-tuple would be more conventional.
    __slots__ = ("_instance_data")
    # Holds a mapping between an instance and it's unique custom dictionary
    _instance_data: _WeakKeyDictionary
    class BoundDict(_MutableMapping):
        """A dict bound to an instance
        Accessing the dict results in methods being automatically bound.
        Adding values to the dict adds them to a custom instance dict which
        holds external inserts. Adding external values results in them being
        stored internally inside BDict, allowing you to retain external values
        throughout the instance lifetime.
        Attributes:
            inst: Functions will be autobound to this instance.
        """
        __slots__ = ("inst", "_mapping")
        # Sentinel marking keys deleted at the instance layer while still
        # present in the shared BDict layer underneath.
        _deleted = object()
        def __init__(self, inst, bdict, instance_data, _ChainMap=_ChainMap):
            # instance_data (per-instance writes) shadows bdict (shared).
            self.inst = inst
            self._mapping = _ChainMap(instance_data, bdict)
        def __repr__(self):
            repr_items = []
            for key, value in self._mapping.items():
                if value.__class__ is _custom:
                    repr_items.append(f"{key!r}: {value!r}")
                else:
                    repr_items.append(f"{key!r}: (autobinding) {value!r}")
            return (f"{self.__class__.__name__}({', '.join(repr_items)})"
                    f" bound to {self.inst!r}")
        def autobind(self, key, value):
            """Add a function that will be autobound"""
            self._mapping[key] = value
        def __getitem__(self, key, _custom=_custom, _deleted=_deleted):
            try:
                value = self._mapping[key]
            except KeyError:
                raise KeyError(key) from None
            # _custom-wrapped values are returned unwrapped, not bound.
            if value.__class__ is _custom:
                return value.value
            # A _deleted tombstone at the instance layer hides the shared key.
            if value is _deleted:
                raise KeyError(key)
            # Bind via the descriptor protocol (as attribute access would).
            inst = self.inst
            return value.__get__(inst, inst.__class__)
        def __setitem__(self, key, value):
            # External inserts are wrapped so lookup won't try to bind them.
            self._mapping[key] = _custom(value)
        def __delitem__(self, key, _deleted=_deleted):
            mapping = self._mapping
            try:
                value = self._mapping[key]
            except KeyError:
                raise KeyError(key) from None
            if value is _deleted:
                raise KeyError(key)
            # Key only exists in the instance layer: remove it outright.
            if key not in mapping.parents:
                del mapping[key]
                return
            # Key exists in the shared layer: shadow it with a tombstone.
            mapping[key] = _deleted
        def __iter__(self, _deleted=_deleted):
            # Skip tombstoned keys.
            return (key for key, value in self._mapping.items()
                    if value is not _deleted)
        def __len__(self):
            return sum(1 for key in self)
        def pop(self, key, default=_deleted, _deleted=_deleted,
                _custom=_custom):
            # _deleted doubles as the "no default supplied" sentinel here.
            mapping = self._mapping
            value = mapping.get(key, default)
            if value is _deleted:
                if default is _deleted:
                    raise KeyError(key)
                return default
            # Same delete semantics as __delitem__: tombstone shared keys,
            # really delete instance-only keys.
            if key in mapping.parents:
                mapping[key] = _deleted
            else:
                del mapping[key]
            if value.__class__ is _custom:
                return value.value
            inst = self.inst
            return value.__get__(inst, inst.__class__)
        def clear(self):
            # Detach from both layers by swapping in an empty chain.
            self._mapping = _ChainMap()
    class ClassBoundDict(_MutableMapping):
        """Temporary proxy bound to the original class
        Accessing this dict results in binding of methods to the class.
        It is useful mainly for classmethods.
        Attributes:
            bdict: Original BDict to proxy all __getitem__ and __setitem__ to.
            owner: Original class BDict was created in. Methods will be bound
                to this one.
        """
        __slots__ = ("owner", "bdict")
        def __init__(self, owner, bdict):
            self.bdict = bdict
            self.owner = owner
        def autobind(self, key, value):
            """Add a function that will be autobound"""
            self.bdict[key] = value
        def __repr__(self):
            return f"<classbound proxy to {self.bdict!r}>"
        def __getitem__(self, key, _custom=_custom):
            value = self.bdict[key]
            if value.__class__ is _custom:
                return value.value
            # Bind with inst=None, i.e. class-level access.
            return self.bdict[key].__get__(None, self.owner)
        def __setitem__(self, key, value):
            self.bdict[key] = _custom(value)
        def __delitem__(self, key):
            del self.bdict[key]
        def __iter__(self):
            return iter(self.bdict)
        def __len__(self):
            return len(self.bdict)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-instance overlay dicts, keyed weakly so instances can be GC'd.
        self._instance_data = _WeakKeyDictionary()
    def __repr__(self):
        repr_items = []
        for key, value in self.items():
            if value.__class__ is _custom:
                repr_items.append(f"{key!r}: {value!r}")
            else:
                repr_items.append(f"{key!r}: (autobinding) {value!r}")
        return (f"{self.__class__.__name__}({', '.join(repr_items)})")
    @_overload
    def __get__(self, inst: None, owner: _Type) -> ClassBoundDict:
        ...
    @_overload
    def __get__(self, inst: _T, owner: _Type[_T]) -> BoundDict:
        ...
    def __get__(self, inst, owner, BoundDict=BoundDict,
                ClassBoundDict=ClassBoundDict):
        # Descriptor protocol: class access yields a class-bound proxy,
        # instance access yields an instance-bound view with its own overlay.
        if inst is None:
            return ClassBoundDict(owner, self)
        bdict = BoundDict(inst, self,
                          self._instance_data.setdefault(inst, {}))
        return bdict
|
import json
import os
from helper import FileHelper, AwsHelper, S3Helper
from metadata import DocumentLineageClient, PipelineOperationsClient
# Stage name reported to the pipeline metadata topic for this Lambda.
PIPELINE_STAGE = "EXTENSION_DETECTOR"
# Target buckets: images go to the sync pipeline, PDFs to the async one.
syncBucketName = os.environ.get('TARGET_SYNC_BUCKET', None)
asyncBucketName = os.environ.get('TARGET_ASYNC_BUCKET', None)
metadataTopic = os.environ.get('METADATA_SNS_TOPIC_ARN', None)
# Fail fast at import time if the deployment is misconfigured.
if not syncBucketName or not asyncBucketName or not metadataTopic:
    raise Exception("Missing lambda environment variables")
pipeline_client = PipelineOperationsClient(metadataTopic)
lineage_client = DocumentLineageClient(metadataTopic)
def processRequest(documentId, bucketName, objectName, callerId):
    """Copy an uploaded document to the sync or async processing bucket.

    Images (jpg/jpeg/png) go to the synchronous bucket, PDFs to the
    asynchronous bucket.  Progress is reported through the pipeline
    metadata client and a lineage record is written for the copy.

    Args:
        documentId: Pipeline id of the document.
        bucketName: Source S3 bucket.
        objectName: Source S3 object key.
        callerId: ARN of the invoking lambda (recorded in lineage).

    Raises:
        Exception: when the object has an unsupported file extension.
    """
    pipeline_client.body = {
        "documentId": documentId,
        "bucketName": bucketName,
        "objectName": objectName,
        "stage": PIPELINE_STAGE
    }
    pipeline_client.stageInProgress()
    print("Input Object: {}/{}".format(bucketName, objectName))
    ext = FileHelper.getFileExtension(objectName.lower())
    print("Extension: {}".format(ext))
    if ext in ["jpg", "jpeg", "png"]:
        targetBucketName = syncBucketName
    elif ext == "pdf":
        targetBucketName = asyncBucketName
    else:
        raise Exception("Incorrect file extension")
    targetFileName = "{}/{}".format(documentId, objectName)
    print("Doing S3 Object Copy for documentId: {}, object: {}/{}".format(documentId, targetBucketName, targetFileName))
    try:
        S3Helper().copyToS3(bucketName, objectName, targetBucketName, targetFileName)
    except Exception as e:
        # BUG FIX: previously, after marking the stage failed, execution fell
        # through and still recorded lineage and called stageSucceeded().
        print(e)
        pipeline_client.stageFailed()
        return
    output = "Completed S3 Object Copy for documentId: {}, object: {}/{}".format(documentId, targetBucketName, targetFileName)
    lineage_client.recordLineageOfCopy({
        "documentId": documentId,
        "callerId": callerId,
        "sourceBucketName": bucketName,
        "targetBucketName": targetBucketName,
        "sourceFileName": objectName,
        "targetFileName": targetFileName,
    })
    pipeline_client.stageSucceeded()
    print(output)
def processRecord(record, syncBucketName, asyncBucketName, callerId):
    """Extract document fields from a DynamoDB stream record and, when all
    three are present, hand them to processRequest.

    syncBucketName / asyncBucketName are accepted for signature
    compatibility; routing happens inside processRequest via module config.
    """
    image = record["dynamodb"]["NewImage"]

    def _string_attr(name):
        # DynamoDB attributes arrive as {"S": "<value>"}; None when absent.
        attr = image.get(name)
        if attr and "S" in attr:
            return attr["S"]
        return None

    documentId = _string_attr("documentId")
    bucketName = _string_attr("bucketName")
    objectName = _string_attr("objectName")
    print("DocumentId: {}, BucketName: {}, ObjectName: {}".format(documentId, bucketName, objectName))
    if documentId and bucketName and objectName:
        processRequest(documentId, bucketName, objectName, callerId)
def lambda_handler(event, context):
    """Entry point for the DynamoDB-stream-triggered lambda.

    Iterates INSERT records that carry a NewImage and forwards each to
    processRecord.  Failures are logged per record so one bad record does
    not abort the batch.
    """
    callerId = context.invoked_function_arn
    print(callerId)
    try:
        print("event: {}".format(event))
        if "Records" not in event or not event["Records"]:
            return
        for record in event["Records"]:
            try:
                print("Processing record: {}".format(record))
                # Guard clauses: only INSERT events with a NewImage matter.
                if "eventName" not in record or record["eventName"] != "INSERT":
                    continue
                if "dynamodb" not in record or not record["dynamodb"]:
                    continue
                if "NewImage" not in record["dynamodb"]:
                    continue
                processRecord(record, syncBucketName, asyncBucketName, callerId)
            except Exception as e:
                print("Failed to process record. Exception: {}".format(e))
    except Exception as e:
        print("Failed to process records. Exception: {}".format(e))
import instaloader
import pandas as pd
import sys
import os
import re
class InstaloaderException(Exception):
    """Base exception copied from the instaloader source so its errors can be handled locally."""
    pass
class QueryReturnedNotFoundException(InstaloaderException):
    """Raised when a queried post is gone (404); copied from the instaloader source."""
    pass
def scraper(username = None, maxPostLimit = None, maxCommentLimit = None):
    """Scrape an Instagram profile's posts into data/<username>.csv.

    Args:
        username: Profile to scrape (mandatory).
        maxPostLimit: Stop after this many posts have been recorded.
        maxCommentLimit: Maximum number of comments collected per post.
    """
    L = instaloader.Instaloader(sleep = False, max_connection_attempts = 10)
    profile = instaloader.Profile.from_username(L.context, username)
    colName = ['ID', 'shortcode', 'text', 'hashtags', 'comments', 'likes']
    df = pd.DataFrame(columns = colName)
    seq = 1
    for post in profile.get_posts():
        caption, likes, hashtagList = '', 0, ''
        listOfComments = ''
        try:
            allComments = post.get_comments()
            caption = post.caption
            i = 0
            for itr in allComments:
                # itr[2] is assumed to be the comment text (instaloader
                # PostComment tuple layout) -- confirm on library upgrade.
                listOfComments = '|'.join([listOfComments.replace(',', ''), itr[2]])
                # BUG FIX: the counter was never incremented, so
                # maxCommentLimit previously had no effect and every comment
                # of every post was fetched.
                i += 1
                if i > maxCommentLimit:
                    break
            likes = post.likes
            hashtagList = ' '.join(post.caption_hashtags)
            # Only keep posts that have both comments and hashtags.
            if listOfComments and hashtagList:
                df.loc[seq] = ['', '', caption.replace(',', ''), hashtagList, listOfComments, likes]
                if seq % 10 == 0:
                    print(seq)  # progress indicator
                seq += 1
                if seq == maxPostLimit:
                    break
        except QueryReturnedNotFoundException:
            # Post no longer available -- skip it.
            continue
    if not os.path.isdir('./data'):
        os.mkdir('./data')
    df.to_csv(f'data/{username}.csv', sep = ',', encoding = 'utf-8')
if __name__ == '__main__':
    # CLI: scraper.py <username> [maxPostLimit] [maxCommentLimit]
    # (removed the unused `seedHashtag` variable and the no-op
    # `except: raise` clauses -- re-raising unmatched exceptions is the
    # default behavior, so a ValueError from int() still propagates.)
    user, maxPostLimit, maxCommentLimit = '', 0, 0
    try:
        user = sys.argv[1]  # Mandatory argument
    except IndexError:
        print('Error: Scraper Needs a User Name')
        sys.exit(1)
    try:
        maxPostLimit = int(sys.argv[2])  # Optional argument
    except IndexError:
        maxPostLimit = 200
    try:
        maxCommentLimit = int(sys.argv[3])  # Optional argument
    except IndexError:
        maxCommentLimit = 10
    scraper(user, maxPostLimit, maxCommentLimit)
|
#!/usr/bin/env python3
"""
Creates an file with a list of sha1 hashes in the same directory as the raw data
under /data/spectro. This script run like
boss_sha1sum.py MJD [MJD2] [MJD3]...
Created by Stephen Bailey (LBNL) Fall 2011
2020-06-01 dgatlin Completely rewrote it for Python 3 and modern
libraries. It now includes tests and runs more flexibly.
"""
import hashlib
from argparse import ArgumentParser
from pathlib import Path
from bin import sjd
from sdssobstools import sdss_paths
__version__ = '3.0.0'
def create_hash_line(file):
    """Return a sha1sum-style line for *file*: the hex digest and the
    file's base name, newline-terminated."""
    path = Path(file)
    digest = hashlib.sha1(path.read_bytes()).hexdigest()
    return '{} {}\n'.format(digest, path.name)
def write_hashes(path, output_file):
    """Write one sha1 line (see create_hash_line) for every *.fit.gz file
    found directly under *path* into *output_file* (a pathlib.Path)."""
    data_dir = Path(path)
    with output_file.open('w') as sink:
        for fits_file in data_dir.glob('*.fit.gz'):
            sink.write(create_hash_line(fits_file))
def parseargs():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with ``mjds`` (always a list of MJD strings/ints,
        defaulting to today's SJD) and optional ``file`` output override.
    """
    parser = ArgumentParser(description='Creates a file with a list of sha1'
                            ' hashes in the same directory as the'
                            ' data, which is stored at the provided'
                            ' mjd. If no mjd is provided, then it'
                            ' is run for today.')
    # BUG FIX: nargs was '?', which turns a single CLI MJD into a bare
    # string -- main() would then iterate over its characters.  nargs='*'
    # yields a list for any number of MJDs and still uses the default
    # (today's SJD) when none are given.
    parser.add_argument('mjds', nargs='*', default=[sjd.sjd()],
                        help='The mjd (or mjds) which you want to create a sum'
                        ' for')
    parser.add_argument('-f', '--file',
                        help='The location of the sha1sum file for output,'
                        ' default is /data/spectro/<mjd>/<mjd>.sha1sum.'
                        ' Only works if one or fewer mjds is provided.')
    args = parser.parse_args()
    return args
def main():
    """Write a sha1sum file for each requested MJD's BOSS data directory."""
    args = parseargs()
    for mj in args.mjds:
        data_dir = sdss_paths.boss / f"{mj}"
        if args.file:
            # Explicit output location (sensible only with a single MJD).
            output_file = Path(args.file)
        else:
            output_file = data_dir / '{}.sha1sum'.format(mj)
        # (removed a stray debug `print(args.file)` that printed None on
        # every iteration in the default case)
        write_hashes(data_dir, output_file)
if __name__ == "__main__":
main()
|
import numpy as np
import pymc as pm
from IPython.core.pylabtools import figsize
from ch1 import count_data
import matplotlib.pyplot as plt
plt.style.use('ggplot')
colors = ["#348ABD", "#A60628", "#7A68A6", "#467821"]
n_count_data = len(count_data)
# Hyperparameter for the exponential priors: 1 / mean of the observed counts.
alpha = 1. / count_data.mean()
# Two candidate Poisson rates: before and after the switchpoint.
lambda_1 = pm.Exponential("lambda_1", alpha)
lambda_2 = pm.Exponential("lambda_2", alpha)
# Switchpoint day, uniform over the observation window (PyMC2 API).
tau = pm.DiscreteUniform("tau", lower = 0, upper = n_count_data)
@pm.deterministic
def lambda_(tau = tau, lambda_1 = lambda_1, lambda_2 = lambda_2):
    # Piecewise rate vector: lambda_1 before day tau, lambda_2 from tau on.
    out = np.zeros(n_count_data)
    out[:tau] = lambda_1
    out[tau:] = lambda_2
    return out
observation = pm.Poisson("obs", lambda_, value = count_data, observed = True)
model = pm.Model([observation, lambda_1, lambda_2, tau])
# MCMC: 40k samples, first 10k discarded as burn-in (PyMC2-style call).
mcmc = pm.MCMC(model)
mcmc.sample(40000, 10000)
lambda_1_samples = mcmc.trace('lambda_1')[:]
lambda_2_samples = mcmc.trace('lambda_2')[:]
tau_samples = mcmc.trace('tau')[:]
# --- plotting: three stacked panels for lambda_1, lambda_2 and tau --------
figsize(14.5, 10)
ax = plt.subplot(311)
ax.set_autoscaley_on(False)
plt.hist(lambda_1_samples, histtype='stepfilled', bins=31, alpha=0.85,
         label="posterior of $\lambda1$", color=colors[1], normed=True)
plt.legend(loc="upper left")
plt.title(r"""Posterior distributions of the parameters\
$\lambda_1,\;\lambda_2,\;\tau$""")
plt.xlim([15, 30])
plt.xlabel("$\lambda_1$ value")
plt.ylabel("Density")
ax = plt.subplot(312)
ax.set_autoscaley_on(False)
plt.hist(lambda_2_samples, histtype='stepfilled', bins=31, alpha=0.85,
         label="posterior of $\lambda2$", color=colors[2], normed=True)
plt.legend(loc="upper left")
plt.xlim([15, 30])
plt.xlabel("$\lambda_2$ value")
plt.ylabel("Density")
plt.subplot(313)
# Weight each sample by 1/N so the histogram heights are probabilities.
w = 1.0 / tau_samples.shape[0] * np.ones_like(tau_samples)
plt.hist(tau_samples, bins=n_count_data, alpha=1,
         label="posterior of $\tau$", color=colors[3],
         weights=w, rwidth=2)
plt.xticks(np.arange(n_count_data))
plt.legend(loc="upper left")
plt.ylim([0, .75])
plt.xlim([35, len(count_data)-20])
plt.xlabel(r"$\tau$ (in days)")
plt.ylabel("Probability")
plt.show()
|
# Part of Speech (POS) Based Entity Extraction
# Aimed at Extracting Relationships
# Use regex matching to identify relational phrases
# VP?
# V(W*P)?
#
# V1: 'RB'
# V2: 'MD', 'VB', 'VBD', 'VBP', 'VBZ', 'VBG', 'VBN'
# V3: 'RP', 'RB'
# (V1)? + (V2) + (V3)?
#
# P1: 'RB'
# P2: 'IN', 'TO', 'RP'
# P3: 'RB'
# (P1)? + (P2) + (P3)?
#
# W: 'PRP$', 'CD', 'DT', 'JJ', 'JJS', 'JJR', 'NN', 'NNS', 'NNP', 'NNPS', 'POS', 'RB', 'RBR', 'RBS', 'VBN', 'VBG'
# (W1)
#
import nltk
from entity_extract.extractor.parsers.base_parser import BaseTreeParser
class RelationGrammerParser(BaseTreeParser):
    """Chunk parser that extracts relational (verb/preposition) phrases
    from POS-tagged sentences with an nltk RegexpParser.

    The default grammar follows the pattern sketched in the header
    comment: V = optional adverb + verb + optional particle/adverb,
    P = optional adverb + preposition/TO/particle + optional adverb,
    and RelPhrase combines V/P sequences, optionally with intervening
    noun-phrase words (the long W alternation in RelP2).
    """
    RelPhraseGrammer = r"""
        V: {<RB>?<MD|VB|VBD|VBP|VBG|VBN><RP|RB>?}
        P: {<RB>?<IN|TO|RP><RB>?}
        RelP1: {(<V><P>?)*}
        RelP2: {(<V>((<PRP$|CD|DT|JJ|JJS|JJR|NN|NNS|NNP|NNPS|POS|RB|RBR|RBS|VBN|VBG>)*<P>)?)*}
        RelPhrase: {(<RelP1>*|<RelP2>*)?}
        """
    def __init__(self, grammer = None):
        # 'grammer' (sic) is kept as the parameter name for backward
        # compatibility with existing callers.
        self.grammer = grammer or self.RelPhraseGrammer
        self.parser = nltk.RegexpParser(self.grammer)
    def parse(self, tokenized_sent):
        """Parse a POS-tagged token list into an nltk chunk tree."""
        return self.parser.parse(tokenized_sent)
|
"""
Abstract Summarization
===============================================================================
>>> from techminer2 import *
>>> directory = "data/"
>>> abstract_summarization(
... texts=["fintech", "blockchain"],
... n_phrases=5,
... directory=directory,
... )
The research on data science and ai in FINTECH involves many latest progress made in smart
FINTECH for bankingtech, tradetech, lendtech, insurtech, wealthtech, paytech, risktech,
cryptocurrencies, and BLOCKCHAIN, and the dsai techniques including complex system
methods, quantitative methods, intelligent interactions, recognition and responses, data
analytics, deep learning, federated learning, privacy-preserving processing, augmentation,
optimization, and system intelligence enhancement... Fourth, the traditional assets, gold
and oil, as well as modern assets, green bonds, are useful as good hedgers compared with
other assets because shock transmissions from them to FINTECH, kftx are below 0.1% and,
more importantly, the total volatility spill-over of all assets in the sample is
moderately average, accounting for 44.39%... From the theoretical point of view, our
research indicates, that besides key growth driving factors, outlined in existing
literature, such as strategy, prerequisites for rapid growth, business model choice,
international business networks, entrepreneur's characteristics, product development or
theoretical frameworks for development, especially within the international market, the
quality of digital logistics performance of FINTECH companies seem to matter... The most
important factors that influence the level of satisfaction when using FINTECH services
were considered: comfort and ease of use, legal regulations, ease of account opening,
mobile payments features, crowdfunding options, international money transfers features,
reduced costs associated with transactions, peer-to-peer lending, insurances options,
online brokerage, cryptocoins options and exchange options... Internet banking, mobile
banking, atm,cash deposit machines, instant payment services, online trading in stock
markets, online funds transfers, e-wallets,wealth management, peer to peer lending,
BLOCKCHAIN technology are various FINTECH products and services.
"""
import os
import textwrap
import nltk
import pandas as pd
from nltk.stem import PorterStemmer
from ._read_records import read_filtered_records
from .load_abstracts import load_abstracts
def abstract_summarization(
    texts=None,
    n_phrases=10,
    sufix="",
    directory="./",
):
    """Build an extractive summary of abstract phrases matching *texts*.

    Selects the ``n_phrases`` highest-scoring phrases (word-frequency
    scoring over stemmed, non-stopword tokens), writes a per-document
    report to ``<directory>/reports/abstract_summarization<sufix>.txt``
    and prints the wrapped summary.

    Parameters
    ----------
    texts : str or list of str
        Term(s) a phrase must contain (whole-word match) to be considered.
    n_phrases : int
        Number of top-scoring phrases to keep.
    sufix : str
        Suffix for the report file name (sic; kept for compatibility).
    directory : str
        Project root containing the data and ``reports`` folders.
    """
    if isinstance(texts, str):
        texts = [texts]
    abstracts = load_abstracts(directory)
    documents = read_filtered_records(directory)
    # Keep only phrases containing any search term as a whole word.
    regex = r"\b(" + "|".join(texts) + r")\b"
    abstracts = abstracts[abstracts.phrase.str.contains(regex, regex=True)]
    abstracts = abstracts[["record_no", "phrase"]]
    # --- text normalization for scoring ------------------------------------
    porter_stemmer = PorterStemmer()
    abstracts["formatted_text"] = abstracts.phrase.copy()
    # BUG FIX: the original patterns were broken:
    #   r"[[0-9]]*"  did not match "[12]"-style citation markers,
    #   r"s+"        deleted runs of the letter "s" instead of whitespace,
    #   r"[a-zA-Z]"  removed every LETTER, leaving only punctuation/digits.
    abstracts["formatted_text"] = abstracts["formatted_text"].str.replace(
        r"\[[0-9]*\]", " ", regex=True
    )
    abstracts["formatted_text"] = abstracts["formatted_text"].str.replace(
        r"[^a-zA-Z]", " ", regex=True
    )
    abstracts["formatted_text"] = abstracts["formatted_text"].str.replace(
        r"\s+", " ", regex=True
    )
    # Relative word frequencies over stemmed, non-stopword tokens.
    stopwords = nltk.corpus.stopwords.words("english")
    word_frequencies = {}
    for phrase in abstracts["formatted_text"].values:
        for word in nltk.word_tokenize(phrase):
            word = porter_stemmer.stem(word)
            if word not in stopwords:
                word_frequencies[word] = word_frequencies.get(word, 0) + 1
    maximum_frequncy = max(word_frequencies.values())
    for word in word_frequencies:
        word_frequencies[word] = word_frequencies[word] / maximum_frequncy
    # Score each phrase as the sum of its words' relative frequencies.
    abstracts["sentence_scores"] = 0
    for index, row in abstracts.iterrows():
        for word in nltk.word_tokenize(row["formatted_text"]):
            word = porter_stemmer.stem(word)
            if word in word_frequencies:
                abstracts.at[index, "sentence_scores"] += word_frequencies[word]
    abstracts = abstracts.sort_values(by=["sentence_scores"], ascending=False)
    abstracts = abstracts.head(n_phrases)
    # Upper-case the search terms in the output for emphasis.
    abstracts["phrase"] = abstracts.phrase.str.capitalize()
    for text in texts:
        abstracts["phrase"] = abstracts["phrase"].str.replace(
            text, text.upper(), regex=False
        )
        abstracts["phrase"] = abstracts["phrase"].str.replace(
            text.capitalize(), text.upper(), regex=False
        )
    with open(
        os.path.join(directory, "reports", f"abstract_summarization{sufix}.txt"), "w"
    ) as out_file:
        for index, row in abstracts.iterrows():
            paragraph = textwrap.fill(
                row["phrase"],
                width=90,
            )
            document_id = documents[documents.record_no == row["record_no"]].document_id
            document_id = document_id.iloc[0]
            print("*** " + document_id, file=out_file)
            print(paragraph, file=out_file)
            print("\n", file=out_file)
    summary = ".. ".join(abstracts.phrase.values)
    print(textwrap.fill(summary, width=90))
|
#! /usr/bin/env python
"""Read infofiles.
"""
import glob
import os, os.path
import sys
import threading
import time
import skytools
import cc.util
from cc import json
from cc.daemon import CCDaemon
from cc.message import is_msg_req_valid
from cc.reqs import InfofileMessage
class InfoStamp:
    """Tracks one info file's stat data and whether it still needs sending."""
    def __init__(self, fn, st):
        self.filename = fn
        self.filestat = st
        # Start flagged as modified so a freshly discovered file is sent once.
        self.modified = 1
    def check_send(self, st):
        """Decide whether the file should be sent now.

        Returns 1 when the file is stable and flagged modified, 0 when it
        just changed (wait for it to settle) or nothing is pending.
        """
        changed = (st.st_mtime != self.filestat.st_mtime
                   or st.st_size != self.filestat.st_size)
        if changed:
            # New modification observed: remember it, but hold off sending
            # until the stat stops changing.
            self.modified = 1
            self.filestat = st
            return 0
        return 1 if self.modified else 0
class InfofileCollector(CCDaemon):
    """Daemon that watches a directory of info files and publishes changed
    files to CC as (optionally compressed) InfofileMessages.

    NOTE: this is Python 2 code (uses ``except E, e`` syntax).
    """
    log = skytools.getLogger('d:InfofileCollector')
    def reload(self):
        """Re-read configuration values."""
        super(InfofileCollector, self).reload()
        self.infodir = self.cf.getfile('infodir')   # directory to watch
        self.infomask = self.cf.get('infomask')     # glob pattern of files
        self.compression = self.cf.get ('compression', 'none')
        if self.compression not in (None, '', 'none', 'gzip', 'bzip2'):
            self.log.error ("unknown compression: %s", self.compression)
        self.compression_level = self.cf.getint ('compression-level', '')
        self.maint_period = self.cf.getint ('maint-period', 60 * 60)
        self.stats_period = self.cf.getint ('stats-period', 30)
        self.msg_suffix = self.cf.get ('msg-suffix', '')
        if self.msg_suffix and not is_msg_req_valid (self.msg_suffix):
            # Invalid suffix is dropped rather than aborting the daemon.
            self.log.error ("invalid msg-suffix: %s", self.msg_suffix)
            self.msg_suffix = None
        self.use_blob = self.cf.getbool ('use-blob', True)
    def startup(self):
        super(InfofileCollector, self).startup()
        # fn -> stamp (InfoStamp per known file)
        self.infomap = {}
        # activate periodic maintenance
        self.do_maint()
    def process_file(self, fs):
        """Send the file tracked by *fs* if its contents are stable."""
        f = open(fs.filename, 'rb')
        # fstat on the open fd so the stat and the read refer to one inode.
        st = os.fstat(f.fileno())
        if fs.check_send(st):
            body = f.read()
            if len(body) != st.st_size:
                # File changed while reading; retry on a later pass.
                # NOTE(review): this early return skips f.close() -- fd leak.
                return
            fs.modified = 0
            self.log.debug('Sending: %s', fs.filename)
            self.send_file(fs, body)
            self.stat_inc('count')
        f.close()
    def send_file(self, fs, body):
        """Compress *body* and publish it as an InfofileMessage."""
        cfb = cc.util.compress (body, self.compression, {'level': self.compression_level})
        self.log.debug ("file compressed from %i to %i", len(body), len(cfb))
        if self.use_blob:
            # Payload travels as a separate blob; message data stays empty.
            data = ''
            blob = cfb
        else:
            data = cfb.encode('base64')
            blob = None
        msg = InfofileMessage(
                filename = fs.filename.replace('\\', '/'),
                mtime = fs.filestat.st_mtime,
                comp = self.compression,
                data = data)
        if self.msg_suffix:
            msg.req += '.' + self.msg_suffix
        self.ccpublish (msg, blob)
        self.stat_inc ('infosender.bytes.read', len(body))
        self.stat_inc ('infosender.bytes.sent', len(cfb))
    def find_new(self):
        """Scan infodir; return the list of known files that changed."""
        fnlist = glob.glob (os.path.join (self.infodir, self.infomask))
        newlist = []
        for fn in fnlist:
            try:
                st = os.stat(fn)
            except OSError, e:
                self.log.info('%s: %s', fn, e)
                continue
            if fn not in self.infomap:
                # Newly seen file: InfoStamp starts flagged modified, so it
                # will be picked up via process_file on a later pass.
                fstamp = InfoStamp(fn, st)
                self.infomap[fn] = fstamp
            else:
                old = self.infomap[fn]
                if old.check_send(st):
                    newlist.append(old)
        self.log.debug ("files found - all: %i, new: %i", len(fnlist), len(newlist))
        return newlist
    def _work (self):
        # One scan-and-send pass.
        self.connect_cc()
        newlist = self.find_new()
        for fs in newlist:
            try:
                self.process_file(fs)
            except (OSError, IOError), e:
                self.log.info('%s: %s', fs.filename, e)
        self.stat_inc('changes', len(newlist))
    def work (self):
        # Loop scan passes for one stats period, then return to the CCDaemon
        # main loop (return 1 = keep running).
        t = time.time()
        while self.looping and self.stats_period > time.time() - t:
            self._work()
            self.sleep(1)
        return 1
    def stop (self):
        """ Called from signal handler """
        super(InfofileCollector, self).stop()
        self.log.info ("stopping")
        self.maint_timer.cancel()
    def do_maint (self):
        """ Drop removed files from our cache """
        self.log.info ("cleanup")
        current = glob.glob (os.path.join (self.infodir, self.infomask))
        removed = set(self.infomap) - set(current)
        for fn in removed:
            self.log.debug ("forgetting file %s", fn)
            del self.infomap[fn]
        self.log.info ("current: %i, removed: %i", len(current), len(removed))
        # Re-arm a one-shot timer; stop() cancels it on shutdown.
        self.maint_timer = threading.Timer (self.maint_period, self.do_maint)
        self.maint_timer.start()
if __name__ == '__main__':
    # Daemon entry point; config file and options come from the command line.
    s = InfofileCollector('infofile_collector', sys.argv[1:])
    s.start()
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Pivot main library test."""
import warnings
from collections import namedtuple
from datetime import datetime, timedelta
from typing import Optional
import pytest
import pytest_check as check
from msticpy.common.timespan import TimeSpan
from msticpy.data import QueryProvider
from msticpy.data.query_container import QueryContainer
from msticpy.datamodel import entities
from msticpy.datamodel.pivot import Pivot
from msticpy.sectools import GeoLiteLookup, TILookup
__author__ = "Ian Hellen"
pytestmark = pytest.mark.filterwarnings("ignore::UserWarning")
# pylint: disable=redefined-outer-name
_KQL_IMP_OK = False
try:
# pylint: disable=unused-import
from msticpy.data.drivers import kql_driver
del kql_driver
_KQL_IMP_OK = True
except ImportError:
pass
_SPLUNK_IMP_OK = False
try:
from msticpy.data.drivers import splunk_driver
del splunk_driver
_SPLUNK_IMP_OK = True
except ImportError:
pass
_IPSTACK_IMP_OK = False
ip_stack_cls: Optional[type]
try:
from msticpy.sectools import IPStackLookup as ip_stack_cls
_IPSTACK_IMP_OK = True
except ImportError:
ip_stack_cls = None
pytestmark = pytest.mark.skipif(not _KQL_IMP_OK, reason="Partial msticpy install")
@pytest.fixture(scope="session")
def data_providers():
    """Return dict of providers."""
    prov_dict = {}
    with warnings.catch_warnings():
        # Provider construction emits config warnings in a bare test env.
        warnings.simplefilter("ignore", category=UserWarning)
        if _KQL_IMP_OK:
            prov_dict["az_sent_prov"] = QueryProvider("AzureSentinel")
            prov_dict["mdatp_prov"] = QueryProvider("MDE")
        if _SPLUNK_IMP_OK:
            prov_dict["splunk_prov"] = QueryProvider("Splunk")
        prov_dict["ti_lookup"] = TILookup()
        prov_dict["geolite"] = GeoLiteLookup()
        if _IPSTACK_IMP_OK:
            prov_dict["ip_stack"] = ip_stack_cls()
    return prov_dict
def _reset_entities():
    """Remove every QueryContainer attribute pivot added to the test entities."""
    entity_classes = [
        getattr(entities, name) for name in ("Host", "IpAddress", "Account", "Url")
    ]
    for entity_cls in entity_classes:
        container_attrs = [
            attr_name
            for attr_name in dir(entity_cls)
            if isinstance(getattr(entity_cls, attr_name), QueryContainer)
        ]
        for attr_name in container_attrs:
            delattr(entity_cls, attr_name)
PivotTestCase = namedtuple("PivotTestCase", "entity, container, funcs")
_ENTITY_FUNCS = [
pytest.param(PivotTestCase("Host", "AzureSentinel", 25), id="Host-AzureSentinel"),
pytest.param(PivotTestCase("Host", "MDE", 2), id="Host-MDE"),
pytest.param(PivotTestCase("Host", "util", 3), id="Host-util"),
pytest.param(
PivotTestCase("IpAddress", "AzureSentinel", 16), id="IpAddress-AzureSentinel"
),
pytest.param(PivotTestCase("IpAddress", "MDE", 2), id="IpAddress-MDE"),
pytest.param(PivotTestCase("IpAddress", "ti", 8), id="IpAddress-ti"),
pytest.param(PivotTestCase("IpAddress", "util", 4), id="IpAddress-util"),
pytest.param(
PivotTestCase("Account", "AzureSentinel", 19), id="Account-AzureSentinel"
),
pytest.param(PivotTestCase("Account", "MDE", 4), id="Account-MDE"),
pytest.param(PivotTestCase("Url", "AzureSentinel", 7), id="Url-AzureSentinel"),
pytest.param(PivotTestCase("Url", "MDE", 2), id="Url-MDE"),
pytest.param(PivotTestCase("Url", "ti", 4), id="Url-ti"),
pytest.param(PivotTestCase("Url", "util", 5), id="Url-util"),
]
@pytest.fixture(scope="session")
def _create_pivot_list(data_providers):
    # Fresh entity state, then a Pivot built from an explicit provider list.
    _reset_entities()
    providers = data_providers.values()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        return Pivot(providers=providers)
@pytest.mark.parametrize("test_case", _ENTITY_FUNCS)
def test_pivot_providers(_create_pivot_list, test_case):
    """
    Test pivot initialized from a provider list.

    Notes
    -----
    Verify that at least the expected number of pivot functions were
    attached to each entity's query container.

    """
    entity_cls = getattr(entities, test_case.entity)
    container = getattr(entity_cls, test_case.container)
    check.is_not_none(container)
    func_lines = repr(container).split("\n")
    check.greater_equal(len(func_lines), test_case.funcs)
# # Generate test cases for pivot functions
# def test_gen_tests(_create_pivot):
# """Function_docstring."""
# for entity_name in ("Host", "IpAddress", "Account", "Url"):
# entity = getattr(entities, entity_name)
# for container in ("AzureSentinel", "Splunk", "MDE", "ti", "util"):
# query_contr = getattr(entity, container, None)
# if not query_contr:
# continue
# query_attrs = repr(query_contr).split("\n")
# piv_case = f'PivotTestCase("{entity_name}", "{container}", {len(query_attrs)})'
# print(f' pytest.param({piv_case}, id=f"{entity_name}-{container}"),')
# assert False
@pytest.fixture(scope="session")
def _create_pivot_ns(data_providers):
    _reset_entities()
    # Inject the providers into this frame's namespace so Pivot can discover
    # them through the namespace= argument.  NOTE(review): mutating locals()
    # inside a function is CPython-specific behavior; it works here because
    # the same dict object is handed to Pivot immediately -- confirm if the
    # interpreter ever changes.
    locals().update(data_providers)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        return Pivot(namespace=locals())
@pytest.mark.parametrize("test_case", _ENTITY_FUNCS)
def test_pivot_providers_namespace(_create_pivot_ns, test_case):
    """
    Test pivot initialized from globals/namespace.

    Notes
    -----
    Verify that at least the expected number of pivot functions were
    attached to each entity's query container.

    """
    entity_cls = getattr(entities, test_case.entity)
    container = getattr(entity_cls, test_case.container)
    check.is_not_none(container)
    func_lines = repr(container).split("\n")
    check.greater_equal(len(func_lines), test_case.funcs)
def _fake_provider_connected(provider):
    """Mark *provider* as loaded/connected so queries run without a backend."""
    # Lie to the query provider so that it will allow the call
    # pylint: disable=protected-access
    provider._query_provider._loaded = True
    provider._query_provider._connected = True
    # pylint: enable=protected-access
class _TimeObj:
def __init__(self, start, end):
self.start = start
self.end = end
def test_pivot_time(data_providers):
    """Check that Pivot start/end track the timespan however it is set."""
    providers = data_providers.values()
    end = datetime.utcnow()
    start = end - timedelta(1)
    timespan = TimeSpan(start=start, end=end)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        pivot = Pivot(providers=providers, timespan=timespan)
    check.equal(pivot.start, start)
    check.equal(pivot.end, end)
    # Shift the window back a day, then set it through each supported channel.
    end = end - timedelta(1)
    start = start - timedelta(1)
    # Test different ways of setting the time
    timespan = TimeSpan(start=start, end=end)
    pivot.timespan = timespan
    check.equal(pivot.start, start)
    check.equal(pivot.end, end)
    # Any object with .start/.end attributes is accepted as a timespan.
    pivot.timespan = _TimeObj(start=timespan.start, end=timespan.end)
    check.equal(pivot.start, start)
    check.equal(pivot.end, end)
    pivot.set_timespan(timespan)
    check.equal(pivot.start, start)
    check.equal(pivot.end, end)
    pivot.set_timespan(start=timespan.start, end=timespan.end)
    check.equal(pivot.start, start)
    check.equal(pivot.end, end)
    # Make sure the values provided to queries match.
    _fake_provider_connected(data_providers["az_sent_prov"])
    query = entities.Host.AzureSentinel.SecurityEvent_list_host_processes(
        host_name="test", print=True
    )
    check.is_in(start.isoformat(), query)
    check.is_in(end.isoformat(), query)
EntityQuery = namedtuple("EntityQuery", "entity, args, provider, pivot_func, expected")
_ENTITY_QUERIES = [
pytest.param(
EntityQuery(
"Host",
dict(HostName="testhost", DnsDomain="contoso.com"),
"AzureSentinel",
"SecurityEvent_list_host_processes",
'Computer has "testhost.contoso.com"',
),
id="Host",
),
pytest.param(
EntityQuery(
"Account",
dict(Name="testacct"),
"AzureSentinel",
"SecurityEvent_list_logons_by_account",
'where Account has "testacct"',
),
id="Account",
),
pytest.param(
EntityQuery(
"IpAddress",
dict(Address="192.168.1.2"),
"AzureSentinel",
"Heartbeat_get_info_by_ipaddress",
'| where ComputerIP == "192.168.1.2"',
),
id="IpAddress",
),
]
@pytest.mark.parametrize("test_case", _ENTITY_QUERIES)
def test_entity_attr_funcs(_create_pivot_ns, test_case):
    """Test calling function with entity attributes."""
    # Build an entity instance from the test-case attribute dict.
    entity_cls = getattr(entities, test_case.entity)
    entity_inst = entity_cls(test_case.args)
    _fake_provider_connected(_create_pivot_ns.get_provider("AzureSentinel"))
    container = getattr(entity_inst, test_case.provider)
    pivot_func = getattr(container, test_case.pivot_func)
    query = pivot_func(entity_inst, print_query=True)
    check.is_in(test_case.expected, query)
def test_misc_functions(_create_pivot_ns):
    """Test some additional methods of pivot.py."""
    # More than the two baseline (TI + geoip) providers should be registered.
    check.greater(len(_create_pivot_ns.providers), 2)
    t_span = TimeSpan(end=datetime.utcnow(), period="1D")
    _create_pivot_ns.edit_query_time(timespan=t_span)
    check.equal(_create_pivot_ns.start, t_span.start)
    check.equal(_create_pivot_ns.end, t_span.end)
    check.equal(_create_pivot_ns.timespan, t_span)
_ENTITY_PIVOTS = [
pytest.param(entities.Host, 25, id="Host"),
pytest.param(entities.IpAddress, 25, id="IpAddress"),
pytest.param(entities.Account, 20, id="Account"),
]
@pytest.mark.parametrize("entity, expected_funcs", _ENTITY_PIVOTS)
def test_entity_list_piv_functions(_create_pivot_list, entity, expected_funcs):
"""Test the pivot_funcs property."""
check.greater(len(entity.get_pivot_list()), expected_funcs)
|
#
# PySNMP MIB module A3COM-HUAWEI-LswMSTP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-LswMSTP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:05:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
lswCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "lswCommon")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
dot1dStpPortEntry, dot1dStpPort = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dStpPortEntry", "dot1dStpPort")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, MibIdentifier, Bits, IpAddress, NotificationType, ObjectIdentity, Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Integer32, Counter32, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "MibIdentifier", "Bits", "IpAddress", "NotificationType", "ObjectIdentity", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Integer32", "Counter32", "iso", "Counter64")
DisplayString, TruthValue, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "TextualConvention")
hwdot1sMstp = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14))
hwdot1sMstp.setRevisions(('2001-06-29 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hwdot1sMstp.setRevisionsDescriptions(('',))
if mibBuilder.loadTexts: hwdot1sMstp.setLastUpdated('200106290000Z')
if mibBuilder.loadTexts: hwdot1sMstp.setOrganization('')
if mibBuilder.loadTexts: hwdot1sMstp.setContactInfo('')
if mibBuilder.loadTexts: hwdot1sMstp.setDescription('')
class EnabledStatus(TextualConvention, Integer32):
    """INTEGER textual convention: enabled(1) / disabled(2)."""
    description = 'A simple status value for the object.'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("enabled", 1), ("disabled", 2))
class BridgeId(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class Hwdot1sFormatStatus(TextualConvention, Integer32):
    # BPDU format selector: legacy(1) / dot1s(2) / auto(3).
    status = 'current'
    description = 'Legacy means that the BPDU format is legacy. Dot1s means that the BPDU format is IEEE 802.1s. Auto means that the format of BPDU sending on the port is determined by the BPDU format of its connective port.'
    namedValues = NamedValues(("legacy", 1), ("dot1s", 2), ("auto", 3))
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
# --- Bridge-wide MSTP scalars (…14.1 through …14.17) ---
# Each MibScalar is registered under the hwdot1sMstp subtree; .clone(...) sets
# the default value, .setMaxAccess controls SNMP read/write permission.
# Default 2 = disabled per the EnabledStatus named values.
hwdot1sStpStatus = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 1), EnabledStatus().clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sStpStatus.setStatus('current')
if mibBuilder.loadTexts: hwdot1sStpStatus.setDescription('Whether the Bridge MSTP is enabled.')
hwdot1sStpForceVersion = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 3))).clone(namedValues=NamedValues(("stp", 0), ("rstp", 2), ("mstp", 3))).clone('mstp')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sStpForceVersion.setStatus('current')
if mibBuilder.loadTexts: hwdot1sStpForceVersion.setDescription(' The mode of this Bridge spanning-tree protocol.')
hwdot1sStpDiameter = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(2, 7)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sStpDiameter.setStatus('current')
if mibBuilder.loadTexts: hwdot1sStpDiameter.setDescription('The diameter of Bridge.')
hwdot1sMstBridgeMaxHops = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 40)).clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstBridgeMaxHops.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstBridgeMaxHops.setDescription('The maximum value of the Bridge hops.')
hwdot1sMstMasterBridgeID = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 5), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstMasterBridgeID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstMasterBridgeID.setDescription('The Bridge Identifier of the current Master Bridge.')
hwdot1sMstMasterPathCost = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstMasterPathCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstMasterPathCost.setDescription('The CIST path cost from the transmitting Bridge to the Master Bridge.')
hwdot1sMstBpduGuard = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 7), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstBpduGuard.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstBpduGuard.setDescription('Whether the Bridge BPDU Guard function is enabled. If the function is enabled, the port will shutdown when received BPDU and the port is configured as portfast.')
# Region configuration identifiers: administrative values (readwrite) are set
# by the operator; operative values (readonly) below reflect the running state.
hwdot1sMstAdminFormatSelector = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstAdminFormatSelector.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstAdminFormatSelector.setDescription('The administrative Configuration Identifier Format Selector in use by the Bridge. This has a value of 0 indicate the format specified in the Standard of IEEE 802.1s.')
hwdot1sMstAdminRegionName = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstAdminRegionName.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstAdminRegionName.setDescription('This MSTP administrative region name.')
hwdot1sMstAdminRevisionLevel = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstAdminRevisionLevel.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstAdminRevisionLevel.setDescription('This MSTP administrative revision level.')
hwdot1sMstOperFormatSelector = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstOperFormatSelector.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstOperFormatSelector.setDescription('The operative Configuration Identifier Format Selector in use by the Bridge. This has a value of 0 indicate the format specified in the Standard of IEEE 802.1s.')
hwdot1sMstOperRegionName = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstOperRegionName.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstOperRegionName.setDescription('This MSTP operative region name.')
hwdot1sMstOperRevisionLevel = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstOperRevisionLevel.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstOperRevisionLevel.setDescription('This MSTP operative revision level.')
hwdot1sMstOperConfigDigest = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstOperConfigDigest.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstOperConfigDigest.setDescription("This MSTP Region's Configuration Digest Signature Key.")
# Write-to-trigger scalars: writing enable(1) performs the action; 65535 is
# the generated "unused" placeholder value.
hwdot1sMstRegionConfActive = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstRegionConfActive.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstRegionConfActive.setDescription('Active the region configuration.')
hwdot1sMstDefaultVlanAllo = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("enable", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstDefaultVlanAllo.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstDefaultVlanAllo.setDescription('Set default configuration about VLAN allocation and all VLANs are mapped to CIST.')
hwdot1sMstDefaultRegionName = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("enable", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstDefaultRegionName.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstDefaultRegionName.setDescription('Set default region name.')
# --- VLAN-to-instance allocation table (…14.18), indexed by hwdot1sMstVID ---
hwdot1sVIDAllocationTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 18), )
if mibBuilder.loadTexts: hwdot1sVIDAllocationTable.setStatus('current')
if mibBuilder.loadTexts: hwdot1sVIDAllocationTable.setDescription('')
hwdot1sVIDAllocationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 18, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstVID"))
if mibBuilder.loadTexts: hwdot1sVIDAllocationEntry.setStatus('current')
if mibBuilder.loadTexts: hwdot1sVIDAllocationEntry.setDescription('')
# Index column: VLAN ID 1..4094.
hwdot1sMstVID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 18, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstVID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstVID.setDescription('VLAN Identifier')
# Administrative (writable) vs. operative (readonly) instance mapping.
hwdot1sAdminMstID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 18, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sAdminMstID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sAdminMstID.setDescription('Administrative Multiple spanning-tree instance Identifier.')
hwdot1sOperMstID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 18, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sOperMstID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sOperMstID.setDescription('Operative Multiple spanning-tree instance Identifier.')
# --- Per-instance table (…14.19), indexed by hwdot1sInstanceID ---
hwdot1sInstanceTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19), )
if mibBuilder.loadTexts: hwdot1sInstanceTable.setStatus('current')
if mibBuilder.loadTexts: hwdot1sInstanceTable.setDescription('')
hwdot1sInstanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"))
if mibBuilder.loadTexts: hwdot1sInstanceEntry.setStatus('current')
if mibBuilder.loadTexts: hwdot1sInstanceEntry.setDescription('')
# Index column: MST instance ID 0..4094 (also the index of the port table below).
hwdot1sInstanceID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sInstanceID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sInstanceID.setDescription('Multiple spanning-tree instance Identifier')
hwdot1sMstiBridgeID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 2), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiBridgeID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiBridgeID.setDescription('The Bridge Identifier for the spanning tree instance identified by MSTID')
# Priority range 0..61440 with default 32768; description says step of 4096.
hwdot1sMstiBridgePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 61440)).clone(32768)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiBridgePriority.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiBridgePriority.setDescription('The Bridge Priority for the spanning tree instance identified by MSTID. Step of 4096')
hwdot1sMstiDesignedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 4), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiDesignedRoot.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiDesignedRoot.setDescription('The Bridge Identifier of the Root Bridge for the spanning tree instance identified by MSTID')
hwdot1sMstiRootPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiRootPathCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiRootPathCost.setDescription('The path cost from the transmitting Bridge to the Root Bridge for the spanning tree instance identified by MSTID')
hwdot1sMstiRootPort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiRootPort.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiRootPort.setDescription('The Root Port for the spanning tree instance identified by the MSTID')
hwdot1sMstiRootType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("secondary", 1), ("primary", 2))).clone('normal')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiRootType.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiRootType.setDescription('Config this Bridge as a primary root or secondary root and or cancel the root for this spanning tree instance identified by MSTID')
hwdot1sMstiRemainingHops = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiRemainingHops.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiRemainingHops.setDescription('The remaining hops of the spanning tree instance identified by MSTID')
# The mapped-VLAN bitmaps are each split into a low/high pair of octet strings.
hwdot1sMstiAdminMappedVlanListLow = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiAdminMappedVlanListLow.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiAdminMappedVlanListLow.setDescription(' The lower part of administrative Vlan list mapped to the spanning tree instance identified by MSTID')
hwdot1sMstiAdminMappedVlanListHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiAdminMappedVlanListHigh.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiAdminMappedVlanListHigh.setDescription(' The higher part of administrative Vlan list mapped to the spanning tree instance identified by MSTID')
hwdot1sMstiOperMappedVlanListLow = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiOperMappedVlanListLow.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiOperMappedVlanListLow.setDescription(' The lower part of operative Vlan list mapped to the spanning tree instance identified by MSTID')
hwdot1sMstiOperMappedVlanListHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 19, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiOperMappedVlanListHigh.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiOperMappedVlanListHigh.setDescription(' The higher part of operative Vlan list mapped to the spanning tree instance identified by MSTID')
# --- Per-(instance, port) table (…14.20), indexed by
#     hwdot1sInstanceID + hwdot1sMstiPortIndex ---
hwdot1sPortTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20), )
if mibBuilder.loadTexts: hwdot1sPortTable.setStatus('current')
if mibBuilder.loadTexts: hwdot1sPortTable.setDescription('')
hwdot1sPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1), ).setIndexNames((0, "A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"), (0, "A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstiPortIndex"))
if mibBuilder.loadTexts: hwdot1sPortEntry.setStatus('current')
if mibBuilder.loadTexts: hwdot1sPortEntry.setDescription('')
hwdot1sMstiPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiPortIndex.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiPortIndex.setDescription('The index of the Bridge Port')
hwdot1sMstiState = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5))).clone(namedValues=NamedValues(("disabled", 1), ("discarding", 2), ("learning", 4), ("forwarding", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiState.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiState.setDescription('The current state of the Port (i.e., Disabled, Discarding , Learning, Forwarding)')
hwdot1sMstiPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 240)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiPortPriority.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiPortPriority.setDescription('The value of the priority field which is contained in the first (in network byte order) four bits of the (2 octet long) Port ID. The other octet of the Port ID is given by the value of mstiPortIndex. And step of 16')
hwdot1sMstiPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 200000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiPathCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiPathCost.setDescription('The contribution of this port to the path cost of paths towards the spanning tree root which include this port. The range of path cost is 1..65535 for 802.1d standard, is 1..200000000 for 802.1t standard, and is 1..200000 for the legacy standard. ')
hwdot1sMstiDesignatedRoot = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 5), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiDesignatedRoot.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiDesignatedRoot.setDescription('The Bridge Identifier of the Root Bridge for the port of the Spanning Tree instance identified by the MSTID')
hwdot1sMstiDesignatedCost = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiDesignatedCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiDesignatedCost.setDescription('The path cost of the Designated Port of the segment connected to this port. This value is compared to the Root Path Cost field in received bridge PDUs.')
hwdot1sMstiDesignatedBridge = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 7), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiDesignatedBridge.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiDesignatedBridge.setDescription("The Bridge Identifier of the bridge which this port considers to be the Designated Bridge for this port's segment.")
hwdot1sMstiDesignatedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiDesignatedPort.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiDesignatedPort.setDescription("The Port Identifier of the port on the Designated Bridge for this port's segment.")
hwdot1sMstiMasterBridgeID = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 9), BridgeId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiMasterBridgeID.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiMasterBridgeID.setDescription('The Bridge Idnetifier of the current Master Bridge. Effective in CIST.')
hwdot1sMstiMasterPortCost = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiMasterPortCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiMasterPortCost.setDescription('The CIST path cost from the transmitting Bridge to the Master Bridge. Effective in CIST.')
hwdot1sMstiStpPortEdgeport = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 11), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpPortEdgeport.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortEdgeport.setDescription(' Whether the port fast is enabled. Effective in CIST.')
hwdot1sMstiStpPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("forceTrue", 1), ("forceFalse", 2), ("auto", 3))).clone('auto')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpPortPointToPoint.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortPointToPoint.setDescription(' Whether the port connects the point to point link. Effective in CIST.')
# Write-to-trigger column: enable(1) acts, 65535 is the "unused" placeholder.
hwdot1sMstiStpMcheck = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("enable", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpMcheck.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpMcheck.setDescription(' Forcing the state machine to send MST BPDUs in this manner can be used to test whether all legacy Bridges on a given LAN have been removed. Effective in CIST.')
hwdot1sMstiStpTransLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpTransLimit.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpTransLimit.setDescription('The value used by the Port Transmit state machine to limit the maximum transmission rate. Effective in CIST.')
# BPDU RX/TX counters (Config/TCN/RST/MST), all readonly Counter32.
hwdot1sMstiStpRXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpRXStpBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpRXStpBPDU.setDescription('The number of received Config BPDU. Effective in CIST.')
hwdot1sMstiStpTXStpBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpTXStpBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpTXStpBPDU.setDescription('The number of transmitted Config BPDU. Effective in CIST.')
hwdot1sMstiStpRXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpRXTCNBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpRXTCNBPDU.setDescription('The number of received TCN BPDU. Effective in CIST.')
hwdot1sMstiStpTXTCNBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpTXTCNBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpTXTCNBPDU.setDescription('The number of transimitted TCN BPDU. Effective in CIST.')
hwdot1sMstiStpRXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpRXRSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpRXRSTPBPDU.setDescription('The number of received RST BPDU. Effective in CIST.')
hwdot1sMstiStpTXRSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpTXRSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpTXRSTPBPDU.setDescription('The number of transimitted RST BPDU. Effective in CIST.')
hwdot1sMstiStpRXMSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpRXMSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpRXMSTPBPDU.setDescription('The number of received MST BPDU. Effective in CIST.')
hwdot1sMstiStpTXMSTPBPDU = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpTXMSTPBPDU.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpTXMSTPBPDU.setDescription('The number of transimitted MST BPDU. Effective in CIST.')
hwdot1sMstiStpClearStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("clear", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpClearStatistics.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpClearStatistics.setDescription('Clear the spanning tree statistic. Effective in CIST.')
hwdot1sMstiStpDefaultPortCost = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 65535))).clone(namedValues=NamedValues(("enable", 1), ("unused", 65535)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpDefaultPortCost.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpDefaultPortCost.setDescription('Set default Port path cost. ')
hwdot1sMstiStpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 25), EnabledStatus().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpStatus.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpStatus.setDescription('Whether the spanning tree protocol is enabled on this port. Effective in CIST.')
hwdot1sMstiPortRootGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 26), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiPortRootGuard.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiPortRootGuard.setDescription('Whether the root guard is enabled. Effective in CIST.')
hwdot1sMstiPortLoopGuard = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 27), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiPortLoopGuard.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiPortLoopGuard.setDescription('Whether the loop protection is enabled. Effective in CIST.')
hwdot1sMstiStpPortSendingBPDUType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("stp", 1), ("rstp", 2), ("mstp", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpPortSendingBPDUType.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortSendingBPDUType.setDescription('Type of BPDU which the port is sending.')
hwdot1sMstiStpOperPortPointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpOperPortPointToPoint.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpOperPortPointToPoint.setDescription('This object indicates whether the port has connected to a point-to-point link or not. The value of the node is an operative value. The administrative value can be read from the node hwdot1sMstiStpPortPointToPoint. If the value of hwdot1sMstiStpPortPointToPoint is auto, the value of this node should be calculated by the network topology of this port. If the value of hwdot1sMstiStpPortPointToPoint is forceFalse, the value of this node is false. If the value of hwdot1sMstiStpPortPointToPoint is forceTrue, the value of this node is true.')
hwdot1sMstiStpPortAdminBPDUFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 30), Hwdot1sFormatStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpPortAdminBPDUFmt.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortAdminBPDUFmt.setDescription('The value of the node is an administrative value. Value legacy means that the MST BPDU format is forced to legacy. Value dot1s means that the MST BPDU format is forced to IEEE 802.1s. Value auto means that the format of MST BPDU sending on the port is determined by the MST BPDU that the port has received. Effective in CIST.')
hwdot1sMstiStpPortOperBPDUFmt = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 31), Hwdot1sFormatStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpPortOperBPDUFmt.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortOperBPDUFmt.setDescription('The format of MST BPDU which the port is sending. Value legacy means that the format of MST BPDU sending on the port is legacy. Value dot1s means that the format of MST BPDU sending on the port is IEEE 802.1s. Effective in CIST.')
hwdot1sMstiStpPortRoleRestriction = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 32), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpPortRoleRestriction.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortRoleRestriction.setDescription('Whether the role-restriction is enabled. Effective in CIST. The role-restriction causes the port not to be selected as root port for the CIST or any MSTI.')
hwdot1sMstiStpPortTcRestriction = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 33), EnabledStatus().clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sMstiStpPortTcRestriction.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortTcRestriction.setDescription('Whether the tc-restriction is enabled. Effective in CIST. The tc-restriction causes the port not to propagate topology changes to other ports.')
hwdot1sMstiStpPortDisputed = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 20, 1, 34), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1sMstiStpPortDisputed.setStatus('current')
if mibBuilder.loadTexts: hwdot1sMstiStpPortDisputed.setDescription('Whether the port is disputed for the CIST or MSTI.')
# Bridge-wide path-cost standard selector: legacy(0) / dot1d-1998(1) / dot1t(2).
hwdot1sStpPathCostStandard = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("legacy", 0), ("dot1d-1998", 1), ("dot1t", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1sStpPathCostStandard.setStatus('current')
if mibBuilder.loadTexts: hwdot1sStpPathCostStandard.setDescription('Path cost standard of the bridge. Value dot1d-1998 is IEEE 802.1d standard in 1998, value dot1t is IEEE 802.1t standard, and value legacy is a private legacy standard.')
# --- Notification (trap) definitions under …14.0 ---
# .setObjects lists the varbinds carried in each notification.
hwMstpEventsV2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0))
if mibBuilder.loadTexts: hwMstpEventsV2.setStatus('current')
if mibBuilder.loadTexts: hwMstpEventsV2.setDescription('Definition point for Mstp notifications.')
hwPortMstiStateForwarding = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 1)).setObjects(("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"), ("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstiPortIndex"))
if mibBuilder.loadTexts: hwPortMstiStateForwarding.setStatus('current')
if mibBuilder.loadTexts: hwPortMstiStateForwarding.setDescription('The SNMP trap that is generated when a port turns into forwarding state form other state.')
hwPortMstiStateDiscarding = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 2)).setObjects(("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"), ("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstiPortIndex"))
if mibBuilder.loadTexts: hwPortMstiStateDiscarding.setStatus('current')
if mibBuilder.loadTexts: hwPortMstiStateDiscarding.setDescription('The SNMP trap that is generated when a port turns into discarding state form forwarding state.')
hwBridgeLostRootPrimary = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 3)).setObjects(("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"))
if mibBuilder.loadTexts: hwBridgeLostRootPrimary.setStatus('current')
if mibBuilder.loadTexts: hwBridgeLostRootPrimary.setDescription('The SNMP trap that is generated when the bridge is no longer the root bridge of the instance. Another switch with higher priority has already been the root bridge of the instance.')
hwPortMstiRootGuarded = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 4)).setObjects(("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"), ("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstiPortIndex"))
if mibBuilder.loadTexts: hwPortMstiRootGuarded.setStatus('current')
if mibBuilder.loadTexts: hwPortMstiRootGuarded.setDescription('The SNMP trap that is generated when a root-guard port receives a superior message on the relevant instance.')
# This trap carries dot1dStpPort imported from the standard BRIDGE-MIB.
hwPortMstiBpduGuarded = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 5)).setObjects(("BRIDGE-MIB", "dot1dStpPort"))
if mibBuilder.loadTexts: hwPortMstiBpduGuarded.setStatus('current')
if mibBuilder.loadTexts: hwPortMstiBpduGuarded.setDescription('The SNMP trap that is generated when an edged port of the BPDU-guard switch receives BPDU packets.')
hwPortMstiLoopGuarded = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 2, 23, 1, 14, 0, 6)).setObjects(("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sInstanceID"), ("A3COM-HUAWEI-LswMSTP-MIB", "hwdot1sMstiPortIndex"))
if mibBuilder.loadTexts: hwPortMstiLoopGuarded.setStatus('current')
if mibBuilder.loadTexts: hwPortMstiLoopGuarded.setDescription('The SNMP trap that is generated when an Alternate-Port or Root-Port is aged out.')
mibBuilder.exportSymbols("A3COM-HUAWEI-LswMSTP-MIB", hwdot1sStpForceVersion=hwdot1sStpForceVersion, hwdot1sMstiStpRXRSTPBPDU=hwdot1sMstiStpRXRSTPBPDU, hwPortMstiBpduGuarded=hwPortMstiBpduGuarded, hwdot1sMstiPortRootGuard=hwdot1sMstiPortRootGuard, hwdot1sMstiRootPathCost=hwdot1sMstiRootPathCost, hwdot1sStpStatus=hwdot1sStpStatus, hwdot1sMstDefaultRegionName=hwdot1sMstDefaultRegionName, hwdot1sMstiStpMcheck=hwdot1sMstiStpMcheck, hwdot1sStpPathCostStandard=hwdot1sStpPathCostStandard, hwdot1sMstiPortLoopGuard=hwdot1sMstiPortLoopGuard, hwdot1sMstOperRegionName=hwdot1sMstOperRegionName, hwdot1sMstiDesignatedPort=hwdot1sMstiDesignatedPort, hwdot1sOperMstID=hwdot1sOperMstID, hwdot1sMstiStpTXStpBPDU=hwdot1sMstiStpTXStpBPDU, hwdot1sAdminMstID=hwdot1sAdminMstID, hwPortMstiStateForwarding=hwPortMstiStateForwarding, hwdot1sMstiRootPort=hwdot1sMstiRootPort, hwdot1sStpDiameter=hwdot1sStpDiameter, hwdot1sMstiState=hwdot1sMstiState, hwdot1sInstanceTable=hwdot1sInstanceTable, hwdot1sMstAdminFormatSelector=hwdot1sMstAdminFormatSelector, hwdot1sMstBpduGuard=hwdot1sMstBpduGuard, hwdot1sMstiStpPortSendingBPDUType=hwdot1sMstiStpPortSendingBPDUType, hwdot1sMstiStpPortRoleRestriction=hwdot1sMstiStpPortRoleRestriction, hwdot1sMstiDesignatedRoot=hwdot1sMstiDesignatedRoot, hwdot1sMstiDesignatedCost=hwdot1sMstiDesignatedCost, hwdot1sMstBridgeMaxHops=hwdot1sMstBridgeMaxHops, hwdot1sMstOperFormatSelector=hwdot1sMstOperFormatSelector, BridgeId=BridgeId, hwdot1sMstiBridgeID=hwdot1sMstiBridgeID, hwdot1sMstiStpPortOperBPDUFmt=hwdot1sMstiStpPortOperBPDUFmt, hwdot1sMstAdminRevisionLevel=hwdot1sMstAdminRevisionLevel, hwdot1sMstRegionConfActive=hwdot1sMstRegionConfActive, hwdot1sMstiDesignedRoot=hwdot1sMstiDesignedRoot, hwPortMstiStateDiscarding=hwPortMstiStateDiscarding, hwdot1sMstp=hwdot1sMstp, hwdot1sMstiPortPriority=hwdot1sMstiPortPriority, hwdot1sMstiBridgePriority=hwdot1sMstiBridgePriority, hwdot1sMstiMasterPortCost=hwdot1sMstiMasterPortCost, Hwdot1sFormatStatus=Hwdot1sFormatStatus, 
hwdot1sMstOperConfigDigest=hwdot1sMstOperConfigDigest, hwdot1sMstMasterBridgeID=hwdot1sMstMasterBridgeID, hwdot1sMstiStpTXMSTPBPDU=hwdot1sMstiStpTXMSTPBPDU, hwdot1sMstiStpPortAdminBPDUFmt=hwdot1sMstiStpPortAdminBPDUFmt, PYSNMP_MODULE_ID=hwdot1sMstp, hwdot1sMstiStpTransLimit=hwdot1sMstiStpTransLimit, hwdot1sMstiStpClearStatistics=hwdot1sMstiStpClearStatistics, hwdot1sMstMasterPathCost=hwdot1sMstMasterPathCost, hwdot1sMstiRootType=hwdot1sMstiRootType, hwdot1sPortEntry=hwdot1sPortEntry, hwdot1sMstiStpPortPointToPoint=hwdot1sMstiStpPortPointToPoint, hwdot1sVIDAllocationTable=hwdot1sVIDAllocationTable, hwBridgeLostRootPrimary=hwBridgeLostRootPrimary, hwdot1sMstiStpDefaultPortCost=hwdot1sMstiStpDefaultPortCost, hwdot1sMstiStpPortTcRestriction=hwdot1sMstiStpPortTcRestriction, hwdot1sMstiOperMappedVlanListLow=hwdot1sMstiOperMappedVlanListLow, hwdot1sVIDAllocationEntry=hwdot1sVIDAllocationEntry, hwdot1sMstDefaultVlanAllo=hwdot1sMstDefaultVlanAllo, hwdot1sMstiPathCost=hwdot1sMstiPathCost, hwMstpEventsV2=hwMstpEventsV2, hwdot1sMstiStpTXTCNBPDU=hwdot1sMstiStpTXTCNBPDU, hwdot1sMstiDesignatedBridge=hwdot1sMstiDesignatedBridge, hwPortMstiRootGuarded=hwPortMstiRootGuarded, hwdot1sMstiOperMappedVlanListHigh=hwdot1sMstiOperMappedVlanListHigh, hwdot1sMstOperRevisionLevel=hwdot1sMstOperRevisionLevel, hwdot1sMstiAdminMappedVlanListLow=hwdot1sMstiAdminMappedVlanListLow, hwdot1sMstiStpPortEdgeport=hwdot1sMstiStpPortEdgeport, hwPortMstiLoopGuarded=hwPortMstiLoopGuarded, hwdot1sMstVID=hwdot1sMstVID, hwdot1sMstiStpOperPortPointToPoint=hwdot1sMstiStpOperPortPointToPoint, hwdot1sMstiAdminMappedVlanListHigh=hwdot1sMstiAdminMappedVlanListHigh, hwdot1sMstiStpRXStpBPDU=hwdot1sMstiStpRXStpBPDU, hwdot1sMstiStpRXTCNBPDU=hwdot1sMstiStpRXTCNBPDU, hwdot1sInstanceEntry=hwdot1sInstanceEntry, hwdot1sMstiStpRXMSTPBPDU=hwdot1sMstiStpRXMSTPBPDU, EnabledStatus=EnabledStatus, hwdot1sMstiRemainingHops=hwdot1sMstiRemainingHops, hwdot1sInstanceID=hwdot1sInstanceID, hwdot1sPortTable=hwdot1sPortTable, 
hwdot1sMstiStpTXRSTPBPDU=hwdot1sMstiStpTXRSTPBPDU, hwdot1sMstiStpPortDisputed=hwdot1sMstiStpPortDisputed, hwdot1sMstiMasterBridgeID=hwdot1sMstiMasterBridgeID, hwdot1sMstiPortIndex=hwdot1sMstiPortIndex, hwdot1sMstiStpStatus=hwdot1sMstiStpStatus, hwdot1sMstAdminRegionName=hwdot1sMstAdminRegionName)
|
# -*- coding=UTF-8 -*-
# pyright: strict
from typing import Text
from .. import action, config, templates, template
def daily_race(race_name: Text):
    """Loop the given daily race until race tickets run out.

    Repeatedly waits for whichever known screen/button template appears
    next and reacts to it; the only exit is the "ticket not enough" screen.
    """
    while True:
        tmpl, pos = action.wait_image(
            templates.DAILY_RACE_TICKET_NOT_ENOUGH,
            templates.CONNECTING,
            templates.RETRY_BUTTON,
            templates.DAILY_RACE,
            templates.DAILY_RACE_REWARD_CLEAR,
            templates.RACE_START_BUTTON,
            templates.RACE_CONFIRM_BUTTON,
            templates.GREEN_NEXT_BUTTON,
            templates.RACE_RESULT_BUTTON,
            templates.RACE_AGAIN_BUTTON,
            templates.RACE_RESULT_NO1,
            templates.RACE_RESULT_NO2,
            templates.RACE_RESULT_NO3,
            templates.RACE_RESULT_NO4,
            templates.RACE_RESULT_NO5,
            templates.RACE_RESULT_NO6,
            templates.RACE_RESULT_NO8,
            templates.RACE_RESULT_NO10,
            race_name,
            templates.RACE_BUTTON,
            templates.LIMITED_SALE_OPEN,
        )
        name = tmpl.name
        if name == templates.CONNECTING:
            # Loading spinner: just wait for the next screen.
            pass
        elif name == templates.DAILY_RACE_TICKET_NOT_ENOUGH:
            # Out of tickets: nothing more to run today.
            break
        elif name == templates.LIMITED_SALE_OPEN:
            config.on_limited_sale()
        elif name == templates.DAILY_RACE_REWARD_CLEAR:
            # Several copies of this template can match; re-match on a fresh
            # screenshot and tap the first by match position (x[1] is the
            # position — presumably top-most; TODO confirm ordering).
            _, pos = sorted(
                template.match(template.screenshot(), tmpl), key=lambda x: x[1]
            )[0]
            action.tap(pos)
        else:
            # Any other recognized template is simply tapped to advance.
            action.tap(pos)
|
# Greet the user according to the city they enter.
c = input("enter city name:").strip()
if c == 'Gujarat':
    print('namaste')
elif c == 'Hydrabad':
    # NOTE(review): the expected input is spelled 'Hydrabad' here; changing it
    # to 'Hyderabad' would change matching behavior, so it is left as-is.
    print('vannakam')
else:
    # Fixed typo in the fallback message ("availabel" -> "available").
    print('city not available')
"""Added teams table and team-user relation
Revision ID: 5197815c452d
Revises: 6336478c122c
Create Date: 2020-09-17 13:22:58.294645
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5197815c452d'
down_revision = '6336478c122c'
branch_labels = None
depends_on = None
def upgrade():
    """Create the teams table and link users to it via users.team_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'teams',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
    )
    op.add_column('users', sa.Column('team_id', sa.Integer(), nullable=True))
    # Name the foreign key explicitly: the previous create_foreign_key(None, ...)
    # left the constraint unnamed, and downgrade's op.drop_constraint(None, ...)
    # cannot drop an unnamed constraint on most backends.
    op.create_foreign_key(
        'fk_users_team_id_teams', 'users', 'teams', ['team_id'], ['id']
    )
    # ### end Alembic commands ###


def downgrade():
    """Undo upgrade(): drop the FK, the users.team_id column and teams."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('fk_users_team_id_teams', 'users', type_='foreignkey')
    op.drop_column('users', 'team_id')
    op.drop_table('teams')
    # ### end Alembic commands ###
|
import sys
from screen import *
from place import Place
from PyQt5 import QtWidgets, QtCore
import matplotlib
import matplotlib.dates as mdates
import logging
from manager import Manager
import constants
# Shortcut for Qt's translation lookup, used when setting widget text.
_translate = QtCore.QCoreApplication.translate
matplotlib.use('QT5Agg')  # Use QT5Agg as backend (set before any figure is created)
class DialogApplication(QtWidgets.QDialog):
    """
    Main class for the interface: a QDialog hosting a matplotlib canvas with
    two y-axes (cases on the left, deaths on the right) plus the user
    controls (country/region combo boxes, data/option radio buttons, and a
    moving-average slider).
    """
    def __init__(self):
        super().__init__()
        # Set up the dialog
        self.dialog = Ui_Dialog()
        self.dialog.setupUi(self)
        # Create manager instance (data provider)
        self.manager = Manager()
        # Plot
        self.ax1: matplotlib.axes._subplots.AxesSubplot = self.dialog.mplwidget.canvas.axes # Left axis
        self.ax2: matplotlib.axes._subplots.AxesSubplot = self.ax1.twinx() # Right axis
        print(self.ax1.__class__)  # NOTE(review): leftover debug print
        # Variables the user can change
        self.current_country_txt = 'Global'
        self.current_country: Place = self.manager.countries[0]
        self.current_state_txt = 'All'
        self.current_moving_average = 1
        self.current_data_to_plot = constants.CASES
        self.current_option = constants.OPTION_CUMULATIVE
        # Populate widgets
        self.populate_widgets()
        # Set UI listeners
        self.dialog.comboBoxCountries.currentTextChanged.connect(self.handle_country_change)
        self.dialog.sliderAverage.valueChanged.connect(self.handle_slider_change)
        self.dialog.comboBoxRegions.currentTextChanged.connect(self.update_plot)
        self.dialog.radioButtonCases.clicked.connect(self.handle_change_data_to_plot)
        self.dialog.radioButtonDeaths.clicked.connect(self.handle_change_data_to_plot)
        self.dialog.radioButtonBoth.clicked.connect(self.handle_change_data_to_plot)
        self.dialog.radioButtonCumulative.clicked.connect(self.handle_change_option)
        self.dialog.radioButtonDaily.clicked.connect(self.handle_change_option)
        # Update the plot
        self.update_plot()
        # Present the dialog
        self.show()
    def populate_widgets(self):
        """
        Set default values for the combo boxes (all countries + 'All' region).
        :return: None
        """
        for country in self.manager.countries:
            self.dialog.comboBoxCountries.addItem(country.name)
            pass  # NOTE(review): redundant no-op
        self.dialog.comboBoxRegions.addItem('All')
    def handle_slider_change(self):
        """
        Update the attribute value.
        Update the label.
        Update the graph after.
        :return: None
        """
        # Get slider value
        self.current_moving_average = int(self.dialog.sliderAverage.value())
        # Update the label (singular/plural wording)
        if self.current_moving_average != 1:
            text = "Average of %d days" % self.current_moving_average
        else:
            text = "Average of %d day" % self.current_moving_average
        self.dialog.labelAverageOfDays.setText(_translate("Dialog", text))
        # Update the plot
        self.update_plot()
    def handle_country_change(self):
        """
        Update the attribute value.
        Fill the combo box of states according to the selected country
        Update the graph after.
        :return: None
        """
        self.current_country_txt: str = self.dialog.comboBoxCountries.currentText()
        # Get the country selected
        self.current_country = self.manager.get_country_by_name(self.current_country_txt)
        # Clean the combo box for states
        self.dialog.comboBoxRegions.clear()
        self.dialog.comboBoxRegions.addItem('All')
        # Populate the state combo box according to the country
        for state in self.current_country.states:
            self.dialog.comboBoxRegions.addItem(state.name)
        self.update_plot()
    def handle_change_data_to_plot(self):
        """
        Update the attribute value (cases / deaths / both).
        Update the graph after.
        :return: None
        """
        if self.dialog.radioButtonCases.isChecked():
            self.current_data_to_plot = constants.CASES
        elif self.dialog.radioButtonDeaths.isChecked():
            self.current_data_to_plot = constants.DEATHS
        else:
            self.current_data_to_plot = constants.CASES_AND_DEATHS
        self.update_plot()
    def handle_change_option(self):
        """
        Update the attribute value (cumulative / daily).
        Update the graph after.
        :return: None
        """
        if self.dialog.radioButtonCumulative.isChecked():
            self.current_option = constants.OPTION_CUMULATIVE
        else:
            self.current_option = constants.OPTION_DAILY
        self.update_plot()
    def update_plot(self):
        """
        Ask for the data to the manager and then call the method @paint_graph to paint the data
        :return: None
        """
        logging.info("Updating graph")
        self.current_state_txt = self.dialog.comboBoxRegions.currentText()
        cases_to_plot, deaths_to_plot = self.manager.get_data(
            country=self.current_country,
            state=self.current_state_txt,
            data_to_plot=self.current_data_to_plot,
            option=self.current_option
        )
        self.clean_graph()
        # Update the title with the data
        self.update_title()
        # Paint the data (cases on the left axis, deaths on the right)
        self.paint_graph(axis=self.ax1, data=cases_to_plot, color='tab:blue', label='Cases')
        self.paint_graph(axis=self.ax2, data=deaths_to_plot, color='tab:red', label='Deaths')
        self.dialog.mplwidget.canvas.draw()
    def paint_graph(self, axis, data, color, label):
        """
        :param axis: axis for paint the data in it
        :param data: target data to present (None means nothing to paint)
        :param color: color to paint
        :param label: label on the y axis
        :return: None
        """
        if data is not None:
            # Tick once per month, formatted as month/day.
            months = mdates.MonthLocator()
            axis.xaxis.set_major_locator(months)
            months_fmt = mdates.DateFormatter('%m/%d')
            axis.xaxis.set_major_formatter(months_fmt)
            # Smooth with the user-selected moving-average window.
            axis.plot(data.rolling(window=self.current_moving_average).mean(), color=color)
            axis.tick_params(axis='y', labelcolor=color)
            axis.set_ylabel(label, color=color)
    def clean_graph(self):
        """
        Clean the graph from previous presented data
        :return: None
        """
        # NOTE(review): canvas.axes is the same object as self.ax1 (see
        # __init__), so the first clear() appears redundant — confirm.
        self.dialog.mplwidget.canvas.axes.clear()
        self.ax1.clear()
        self.ax2.clear()
    def update_title(self):
        """
        Update the label title according to the data presented
        :return: None
        """
        title = ''
        if self.current_option == constants.OPTION_CUMULATIVE:
            title += 'Cumulative Number of '
        else:
            title += 'Daily Number of '
        if self.current_data_to_plot == constants.CASES:
            title += 'Cases in '
        elif self.current_data_to_plot == constants.DEATHS:
            title += 'Deaths in '
        else:
            title += 'Cases and Deaths in '
        title += self.current_country_txt
        if self.current_state_txt != 'All':
            title += f"-{self.current_state_txt}"
        title += f" ({self.current_moving_average}-day mean)"
        self.dialog.labelTitle.setText(title)
# Script entry point: build the Qt application and run its event loop.
if __name__ == '__main__':
    """
    Present the dialog application
    """
    logging.basicConfig(level=logging.INFO)
    logging.debug('Starting test for manager.py')
    app = QtWidgets.QApplication(sys.argv)
    dialog = DialogApplication()
    dialog.show()  # NOTE(review): __init__ already calls show(); harmless but redundant
    sys.exit(app.exec_())
|
from django.contrib import admin
from .models import Person, Property, ManageGroup, InstrGroup
# Register your models here.
# Register each model with the default admin site so it is editable there.
for _model in (Person, Property, ManageGroup, InstrGroup):
    admin.site.register(_model)
# -*- coding: utf-8 -*-
import pandas as pd
# Demo of building DataFrames and looking rows up by label / column value.
# Modernized: DataFrame.append was removed in pandas 2.0 (use pd.concat) and
# get_value was removed in pandas 1.0 (use DataFrame.at).
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
     'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
# Toy credential store: one row per site, indexed by a short site label.
data = {'domain': pd.Series(['google.com'], index=['google']),
        'user': pd.Series(['f2a'], index=['google']),
        'password': pd.Series(['1234qwerty'], index=['google'])}
dataf = pd.DataFrame(data)
data2 = {'domain': 'gmail.com',
         'user': 'f2a',
         'password': '1234'}
dataf2 = pd.DataFrame(data2, index=['gmail'])
print(dataf2)
# Stack the gmail row on top of the google rows (replaces removed .append()).
dataf3 = pd.concat([dataf2, dataf])
print(dataf3.loc['google'])
# Boolean-mask lookups by column value.
domain_column_find = dataf3.loc[dataf3['domain'] == 'google.com']
user_column_find = dataf3.loc[dataf3['user'] == 'f2a']
recover_for_domain = dataf3.loc[dataf3['domain'] == 'gmail.com']
# Single-cell scalar access (replaces removed .get_value()).
recover_pass = dataf3.at['gmail', 'password']
# SECURITY NOTE(review): this writes passwords to disk in clear text.
dataf3.to_csv('../outputs/pass.csv', sep=';')
# -*- coding: utf-8 -*-
# Copyright (C) Cardiff University (2018-2021)
# SPDX-License-Identifier: MIT
"""Tests for :mod:`gwosc.utils`
"""
import pytest
from .. import utils
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
def test_url_segment():
    # 'X-TEST-123-456.ext' encodes start 123 and duration 456,
    # so the segment is (123, 123 + 456).
    seg = utils.url_segment('X-TEST-123-456.ext')
    assert seg == (123, 579)
# The file name encodes (start=10, duration=1); overlap is checked against
# that span, with (0, 10) and (11, 15) just missing it on either side.
@pytest.mark.parametrize('url, segment, result', [
    ('A-B-10-1.ext', (0, 10), False),
    ('A-B-10-1.ext', (5, 11), True),
    ('A-B-10-1.ext', (10, 15), True),
    ('A-B-10-1.ext', (11, 15), False),
])
def test_url_overlaps_segment(url, segment, result):
    assert utils.url_overlaps_segment(url, segment) is result
# full_coverage is True only when the mock URLs fully cover the segment
# (the mock_urls fixture is provided by the test suite's conftest).
# The original list repeated ((-1, 64), False); the duplicate case is removed.
@pytest.mark.remote
@pytest.mark.parametrize('segment, result', [
    ((0, 64), True),
    ((1, 63), True),
    ((-1, 63), False),
    ((-1, 64), False),
    ((0, 65), False),
    ((1, 65), False),
])
def test_full_coverage(mock_urls, segment, result):
    assert utils.full_coverage(mock_urls, segment) is result
def test_full_coverage_empty():
    # An empty URL list can never cover a non-empty segment.
    assert utils.full_coverage([], (0, 1)) is False
# Pure segment-intersection check; touching endpoints do not count as overlap.
@pytest.mark.parametrize('seg1, seg2, result', [
    ((10, 11), (0, 10), False),
    ((10, 11), (5, 11), True),
    ((10, 11), (10, 15), True),
    ((10, 11), (11, 15), False),
])
def test_segments_overlap(seg1, seg2, result):
    assert utils.segments_overlap(seg1, seg2) is result
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import os
import sys
import tempfile
from absl.testing import absltest
import numpy as np
from six.moves import cPickle
from simulation_research.traffic import file_util
class UtilTest(absltest.TestCase):
  """Tests for file_util's append/save/load helpers."""

  def setUp(self):
    super(UtilTest, self).setUp()
    # Fresh scratch directory for each test run.
    self._output_dir = tempfile.mkdtemp(dir=absltest.get_default_test_tmpdir())

  def test_append_line_to_file(self):
    r"""Tests the output file.

    The output file contains the following.
    hello world
    (hello) "world"
    (hello) !!!!!!!!!!! @~#$%^&*()_+"world"
    aaaaaaaa
    bbbbbbbbbb
    backslash\ backslash
    backslash\ backslash
    backslash\\ backslash
    backslash\\\ backslash
    backslash\\ backslash
    """
    input_lines = ['hello world',
                   '(hello) "world"',
                   '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
                   'aaaaaaaa\nbbbbbbbbbb',
                   r'backslash\ backslash',
                   'backslash\\ backslash',
                   r'backslash\\ backslash',
                   r'backslash\\\ backslash',
                   'backslash\\\\ backslash']
    file_path = os.path.join(self._output_dir, 'test_append_line_to_file.txt')
    for line in input_lines:
      file_util.append_line_to_file(file_path, line)
    self.assertTrue(file_util.f_exists(file_path))
    # Note that the linebreak in the input_lines[3] splits it into two lines.
    target_lines = ['hello world',
                    '(hello) "world"',
                    '(hello) !!!!!!!!!!! @~#$%^&*()_+"world"',
                    'aaaaaaaa',
                    'bbbbbbbbbb',
                    r'backslash\ backslash',
                    'backslash\\ backslash',
                    r'backslash\\ backslash',
                    r'backslash\\\ backslash',
                    'backslash\\\\ backslash']
    with file_util.f_open(file_path, 'r') as actual_file:
      line_counter = 0
      read_lines = actual_file.readlines()
      for line in read_lines:
        # Linebreak is appended to the target string.
        self.assertEqual(line, target_lines[line_counter] + '\n')
        line_counter += 1
      target_line_number = len(target_lines)
      self.assertEqual(target_line_number, line_counter)

  def test_save_load_variable(self):
    """Round-trips several value types through save_variable/load_variable."""
    file_path = os.path.join(self._output_dir, 'test_output_data.pkl')
    # Case 1: Nested dictionary.
    data = {'zz': 1, 'b': 234, 123: 'asdfa', 'dict': {'a': 123, 't': 123}}
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 2: 2-level nested dictionary.
    data = collections.defaultdict(
        lambda: collections.defaultdict(list))
    data['first']['A'] = [1, 2, 3]
    data['first']['B'] = [1, 2, 3]
    data['second']['B'] = [1, 2, 3]
    data['second']['C'] = [1, 2, 3]
    data['third']['C'] = [1, 2, 3]
    data['third']['D'] = [1, 2, 3]
    data['path'] = 'asdfas/asdf/asdfasdf/'
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, dict)
    # Case 3: Large array. If the size is too large, the test will timeout.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10000
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    self.assertListEqual(data, actual_variable)
    self.assertIsInstance(actual_variable, list)
    # Case 4: numpy array.
    data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] * 10
    data = np.array(data)
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    np.testing.assert_array_equal(data, actual_variable)
    self.assertIsInstance(actual_variable, np.ndarray)
    # Case 5: A list of tuples.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    # Saving zip variable does not affect the iterative variable.
    file_util.save_variable(file_path, data)
    actual_variable = file_util.load_variable(file_path)
    # python2 treats `actual_variable` as a list, however, python3 treats it as
    # an iterative object.
    self.assertListEqual(list(actual_variable), list(data))
    # Case 6: In python2, the itertools.tee cannot be saved by cPickle. However,
    # in python3, it can be saved.
    x = [1, 2, 3]
    y = ['a', 'b', 'c']
    data = zip(x, y)
    data_tee, _ = itertools.tee(data)
    python_version = sys.version_info[0]
    try:
      file_util.save_variable(file_path, data_tee)
      pickle_save_correctly = True
    except cPickle.PicklingError:
      pickle_save_correctly = False
    self.assertTrue((pickle_save_correctly and python_version == 3) or
                    (not pickle_save_correctly and python_version == 2))
# Run the absltest runner when executed directly.
if __name__ == '__main__':
  absltest.main()
|
# Read integers until the sentinel -1 is entered, then print their sum.
n=0
suma = 0
while n!=-1:
    n = input('Inserta un numero')
    # NOTE(review): int() raises ValueError on non-numeric input.
    n = int(n)
    if n!=-1:
        suma = suma +n
print('La suma vale', suma)
|
# Licensed to Tomaz Muraus under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Tomaz muraus licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HybridCryptoMixin(object):
    """
    Helpers shared by hybrid (public-key + AES) encryption schemes:
    PKCS#7 padding and fixed-size message header handling.
    """
    block_size = 16  # AES block size (in bytes)
    header_size = 4  # Encrypted message header size (in bytes)
    # When message can't be encrypted using only PKC and hybrid mode
    # is used. Floor division: plain "/" yields a float on Python 3.
    long_message_threshold = (2048 // 8) - 41
    aes_key_size = 256 // 8  # Size of the generated AES key (in bytes)
    delimiter = ':'

    def _pad_data(self, data):
        """
        Pad provided data using PKCS#7.

        Data is always padded with at least 1 byte; each pad byte's value
        equals the pad length.
        """
        pad_len = self.block_size - (len(data) % self.block_size)
        pad_byte = chr(pad_len)
        pad_value = pad_len * pad_byte
        padded_data = data + pad_value
        return padded_data

    def _unpad_data(self, data):
        """
        Unpad provided data using PKCS#7.

        Raises ValueError on empty data (previously an IndexError) or when
        the pad length exceeds the data length.
        """
        if not data:
            raise ValueError('Corrupted data')
        pad_len = ord(data[-1])
        if pad_len > len(data):
            raise ValueError('Corrupted data')
        unpadded_data = data[:-pad_len]
        return unpadded_data

    def _get_header(self, data):
        """
        Return the fixed-size header from the provided data.
        """
        if len(data) < self.header_size:
            raise ValueError('Corrupted data - missing or invalid header')
        header = data[:self.header_size]
        return header

    def _remove_header(self, data):
        """
        Remove the fixed-size header from the provided data.
        """
        if len(data) < self.header_size:
            raise ValueError('Corrupted data - missing or invalid header')
        data = data[self.header_size:]
        return data
|
from django.contrib import messages
from django.shortcuts import render, redirect
from django.db import transaction
from airmozilla.base.utils import paginate
from airmozilla.main.models import Channel
from airmozilla.manage import forms
from airmozilla.staticpages.models import StaticPage
from .decorators import (
staff_required,
permission_required,
cancel_redirect
)
@staff_required
@permission_required('staticpages.change_staticpage')
def staticpages(request):
    """List all static pages, paginated 10 per page."""
    staticpages_paged = paginate(
        StaticPage.objects.all(),
        request.GET.get('page'),
        10
    )
    context = {
        'paginate': staticpages_paged,
    }
    return render(request, 'manage/staticpages.html', context)
@staff_required
@permission_required('staticpages.change_staticpage')
@cancel_redirect('manage:staticpages')
@transaction.atomic
def staticpage_new(request):
    """Create a new static page.

    Pages whose URL looks like 'sidebar_<location>_<channel-slug>' get a
    title auto-generated from the channel name.
    """
    if request.method == 'POST':
        form = forms.StaticPageEditForm(request.POST, instance=StaticPage())
        if form.is_valid():
            # form.save() already persists the instance; the previous extra
            # instance.save() issued a redundant second UPDATE.
            instance = form.save()
            if instance.url.startswith('sidebar_'):
                __, location, channel_slug = instance.url.split('_', 2)
                channel = Channel.objects.get(
                    slug=channel_slug
                )
                instance.title = 'Sidebar (%s) %s' % (location, channel.name)
                instance.save()
            messages.success(request, 'Page created.')
            return redirect('manage:staticpages')
    else:
        form = forms.StaticPageEditForm()
    form.fields['url'].help_text = (
        "for example '/my-page' or 'sidebar_top_main' (see below)"
    )
    return render(
        request,
        'manage/staticpage_new.html',
        {'form': form,
         'channels': Channel.objects.all().order_by('slug')}
    )
@staff_required
@permission_required('staticpages.change_staticpage')
@cancel_redirect('manage:staticpages')
@transaction.atomic
def staticpage_edit(request, id):
    """Edit an existing static page; sidebar_* URLs get an auto title."""
    staticpage = StaticPage.objects.get(id=id)
    if request.method == 'POST':
        form = forms.StaticPageEditForm(request.POST, instance=staticpage)
        if form.is_valid():
            instance = form.save(commit=False)
            # Need to manually save the 'headers' field because
            # otherwise, if it's empty, it won't save.
            instance.headers = form.cleaned_data['headers']
            instance.save()
            if instance.url.startswith('sidebar_'):
                __, location, channel_slug = instance.url.split('_', 2)
                channel = Channel.objects.get(
                    slug=channel_slug
                )
                instance.title = 'Sidebar (%s) %s' % (location, channel.name)
                instance.save()
            messages.info(request, 'Page %s saved.' % staticpage.url)
            return redirect('manage:staticpages')
    else:
        form = forms.StaticPageEditForm(instance=staticpage)
    return render(request, 'manage/staticpage_edit.html',
                  {'form': form, 'staticpage': staticpage})
@staff_required
@permission_required('staticpages.delete_staticpage')
@transaction.atomic
def staticpage_remove(request, id):
    """Delete a static page (POST only) and return to the listing."""
    if request.method == 'POST':
        staticpage = StaticPage.objects.get(id=id)
        staticpage.delete()
        # The in-memory object still carries its fields after delete().
        messages.info(request, 'Page "%s" removed.' % staticpage.title)
    return redirect('manage:staticpages')
|
# -*- coding: utf-8 -*-
# This is a simple echo bot using the decorator mechanism.
# It echoes any incoming text messages.
# Author: Sara Borroni <sara.borroni@pangeaformazione.it>
# Contributor: Alessio Cimarelli <jenkin@dataninja.it>
import requests, pickle, time, telebot
from dateutil.parser import parse
from threading import Thread
# Bot's uid
# SECURITY NOTE(review): hard-coded Telegram bot token committed to source;
# anyone with this string controls the bot -- revoke it and load from an
# environment variable instead.
API_TOKEN = '238417787:AAHSJSezf5JLxsT01NcarHx6xIgyuK5Jr_A'
bot = telebot.TeleBot(API_TOKEN)
# Source parameters
search = ""
without = ""
location = ""
# Databases (simple dicts), persisted as pickle files next to the script.
# The bare excepts keep startup working when a pickle is missing/corrupt,
# but NOTE(review): they also hide real errors.
try:
    with open("history.pickle") as f:
        history = pickle.load(f)
except:
    history = {}
try:
    with open("subscriptions.pickle") as f:
        subscriptions = pickle.load(f)
except:
    subscriptions = {}
limit = 5  # Max documents sent per answer
interval = 3600 # Check every hour
def now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    stamp_format = "%Y-%m-%d %H:%M:%S"
    return time.strftime(stamp_format)
def save(obj, filename):
    """Pickle *obj* to <filename>.pickle.

    Opened in binary mode: pickle streams are bytes, so text mode breaks on
    Python 3 and risks newline mangling on Windows under Python 2.
    """
    with open(filename + ".pickle", "wb") as f:
        pickle.dump(obj, f)
# Handle '/start' and '/help'
@bot.message_handler(commands=['help', 'start'])
def welcome(message):
    """Reply to /start and /help with the (Italian) usage instructions."""
    chat_id = message.chat.id
    print "[%s] Help requested by %d" % ( now(), chat_id )
    bot.send_message(
        chat_id,
        "\n".join([
            "Ciao! A che argomento sei interessato oggi?",
            "Invia la tua stringa di ricerca dopo il comando /search. Esempio: /search matrimoni",
            "Invia /subscribe per salvare una stringa di ricerca e ricevere le notifiche ogni ora. Esempio: /subscribe matrimoni",
            "Invia /subscription per controllare la ricerca attiva.",
            "Invia /unsubscribe per annullarla.",
            "Tip: se invii solo /subscribe, ti salvo l'ultima ricerca che mi hai mandato!",
            "Warning: /subscribe backend not implemented, yet!"
        ])
    )
# Handle all other messages with content_type 'text' (content_types defaults to ['text'])
#@bot.message_handler(func=lambda message: True)
@bot.message_handler(commands=['search'])
def search(message):
    """Handle /search <query>: run a one-off feed search for the sender.

    NOTE(review): this def shadows the module-level `search = ""` string.
    """
    chat_id = message.chat.id
    text = message.text.replace("/search","").strip()
    print "[%s] Answer to %s request from %d" % ( now(), text, chat_id )
    ask(chat_id, text)
def ask(chat_id, text, verbose = True):
    """Search the remote feed for *text* and send new documents to *chat_id*.

    With verbose=False (used by the background checker) the progress and
    "nothing found / nothing new" messages are suppressed. On success the
    last search and its newest timestamp are stored in `history`.
    """
    # Remote feed url
    feed = "http://dev.dataninja.it/AlboPOP-enhanced/feed/"
    if verbose:
        bot.send_message(chat_id, "Ok, fammi dare un'occhiata...")
    # If something goes wrong during fetching...
    try:
        results = fetch(chat_id, feed, { "format": "json", "search": text })
        if not results:
            if verbose:
                bot.send_message(chat_id, "Mi spiace, non ho trovato niente al riguardo ;(")
        else:
            # Send only new documents from the last same search (if exists and texts match)
            # Parse datetime string to datetime class: http://dateutil.readthedocs.io/en/latest/parser.html
            if chat_id in history and text == history[chat_id]['text']:
                docs = [
                    res for res in results
                    if parse(res['@timestamp']) > history[chat_id]['update']
                ]
            else:
                docs = results
            if not docs:
                if verbose:
                    bot.send_message(chat_id, "Mi spiace, non c'è nulla di nuovo :|")
            else:
                send(chat_id, docs[0:limit])
                if verbose:
                    bot.send_message(chat_id, "Ci sono %d documenti nuovi riguardo \"%s\" (te ne ho mandati solo %d) :D" % ( len(docs), text, limit ))
                # Remember only the last search
                history[chat_id] = {
                    "id": chat_id,
                    "text": text,
                    "update": parse(docs[0]['@timestamp'])
                }
                save(history,"history")
    # ... alert the error!
    except:
        error(chat_id)
@bot.message_handler(commands=['subscribe'])
def subscribe(message):
    """Handle /subscribe [<query>]: persist a saved search for this chat.

    With a query the search is run first (populating `history`); without
    one, the user's last search from `history` is reused if present.
    """
    chat_id = message.chat.id
    text = message.text.replace("/subscribe","").strip()
    print "[%s] Subscribe %d to %s" % ( now(), chat_id, text )
    # With search string, ask before subscription
    if text:
        ask(chat_id, text)
    if chat_id in history:
        subscriptions[chat_id] = history[chat_id]
        bot.send_message(chat_id, u"Ottimo, ho salvato la ricerca \"%s\", ti avverto quando c'è qualcosa di nuovo :P" % subscriptions[chat_id]['text'])
        save(subscriptions,"subscriptions")
    else:
        error(chat_id)
@bot.message_handler(commands=['subscription'])
def subscription(message):
    """Handle /subscription: tell the user which saved search is active."""
    chat_id = message.chat.id
    print "[%s] Send active subscription to %d" % ( now(), chat_id )
    if chat_id in subscriptions:
        bot.send_message(chat_id, u"In questo momento hai la ricerca \"%s\" attiva ;)" % subscriptions[chat_id]['text'])
    else:
        bot.send_message(chat_id, u"Nessuna ricerca attiva :(")
@bot.message_handler(commands=['unsubscribe'])
def unsubscribe(message):
    """Handle /unsubscribe: delete the saved search, then confirm.

    The "no active search" confirmation is sent unconditionally, whether or
    not a subscription actually existed.
    """
    chat_id = message.chat.id
    print "[%s] Unsubscribe %d" % ( now(), chat_id )
    if chat_id in subscriptions:
        del subscriptions[chat_id]
        save(subscriptions,"subscriptions")
    bot.send_message(chat_id, u"Nessuna ricerca attiva :(")
# Fetch data from remote source, using requests: http://docs.python-requests.org/en/master/
def fetch(chat_id, url, params):
    """GET *url* with *params* and return the decoded JSON payload."""
    r = requests.get(url, params = params)
    print "[%s] Fetch %s" % ( now(), r.url )
    return r.json()
def send(chat_id, docs):
    """Send one message per document: date, title, tags and link."""
    print "[%s] Send %d documents to %d" % ( now(), len(docs), chat_id )
    for doc in docs:
        bot.send_message(
            chat_id,
            "\n".join([ doc['updated'], doc['title'], ", ".join(doc['source']['tags']), doc['link'] ])
        )
def check():
    """Background loop: re-run every saved search each `interval` seconds.

    Runs `ask` quietly (verbose=False) and refreshes each subscription from
    `history` so only newer documents are sent on the next sweep.
    """
    global subscriptions
    while True:
        print "[%s] Automatic check for %d subscriptions" % ( now(), len(subscriptions) )
        for chat_id in subscriptions:
            ask(chat_id, subscriptions[chat_id]['text'], False)
            subscriptions[chat_id] = history[chat_id]
            save(subscriptions,"subscriptions")
        time.sleep(interval)
def error(chat_id):
    """Log the failure and tell *chat_id* that something went wrong."""
    print "[%s] Errore per %d" % ( now(), chat_id )
    bot.send_message(chat_id, "Mi spiace, ho avuto un problema e ora sono confuso :O")
def go():
    """Keep the bot polling forever, restarting after any error.

    The previous version restarted by calling go() recursively from the
    except handler, which deepens the call stack on every failure and can
    eventually raise RecursionError; a loop restarts without recursing.
    Catching Exception (instead of a bare except) also lets
    KeyboardInterrupt/SystemExit stop the bot as expected.
    """
    while True:
        try:
            bot.polling(none_stop=False, interval=0)
            return  # polling finished normally
        except Exception:
            continue  # restart polling after an error
# Start the hourly subscription checker in the background, then poll forever.
thread = Thread(target=check)
thread.start()
go()
|
from tests.utils import W3CTestCase
class TestFlexbox_MarginLeftEx(W3CTestCase):
    # Dynamically attach one test method per matching W3C reference test:
    # find_tests scans the suite for files matching 'flexbox_margin-left-ex'
    # and returns a dict of test callables injected into this class body.
    vars().update(W3CTestCase.find_tests(__file__, 'flexbox_margin-left-ex'))
|
import pytest
import Bhaskara
class TestBhaskara:
    """Tests for Bhaskara.Bhaskara.calcula_raiz (quadratic-root solver).

    NOTE(review): the expected tuples mix roots and counts in a
    non-obvious way; confirm the return convention against Bhaskara.py.
    """

    @pytest.fixture
    def b(self):
        # Fresh solver instance for each test.
        return Bhaskara.Bhaskara()

    def testa_uma_raiz(self, b):
        # x^2 = 0: zero discriminant, a single (double) root.
        assert b.calcula_raiz(1, 0, 0) == (1, 0)

    def testa_duas_raizes(self, b):
        # x^2 - 5x + 6: roots 2 and 3.
        assert b.calcula_raiz(1, -5, 6) == (2, 3, 2)

    def testa_zero_raizes(self, b):
        # 10x^2 + 10x + 10: negative discriminant, no real roots.
        assert b.calcula_raiz(10, 10, 10) == (0)

    def testa_raiz_negativa(self, b):
        # 10x^2 + 20x + 10: double root at -1.
        assert b.calcula_raiz(10, 20, 10) == (1, -1)
# coding: utf-8
###################################################################
# Copyright (c) 2016-2020 European Synchrotron Radiation Facility #
# #
# Author: Marius Retegan #
# #
# This work is licensed under the terms of the MIT license. #
# For further information, see https://github.com/mretegan/crispy #
###################################################################
"""Quanty preferences dialog."""
import logging
import os
from PyQt5.QtCore import QSize, QPoint
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QFileDialog
from PyQt5.uic import loadUi
from crispy import resourceAbsolutePath
from crispy.config import Config
logger = logging.getLogger(__name__)
# Shared QSettings-like object (beginGroup/value/setValue/sync are used by
# the dialog below) backing the application's persisted preferences.
settings = Config().read()
class PreferencesDialog(QDialog):
    """Dialog for editing Quanty-related preferences.

    The widgets are loaded from a Qt Designer .ui file; values are read
    from and written to the "Quanty" group of the module-level settings.
    """

    def __init__(self, parent):
        super().__init__(parent)
        uiPath = os.path.join("quanty", "uis", "preferences.ui")
        # Populate this dialog instance with the widgets defined in the
        # .ui file (loadUi with baseinstance=self attaches them to self).
        loadUi(resourceAbsolutePath(uiPath), baseinstance=self, package="crispy")
        self.pathBrowsePushButton.clicked.connect(self.setExecutablePath)
        ok = self.buttonBox.button(QDialogButtonBox.Ok)
        ok.clicked.connect(self.acceptSettings)
        cancel = self.buttonBox.button(QDialogButtonBox.Cancel)
        cancel.clicked.connect(self.rejectSettings)

    def showEvent(self, event):
        # Refresh the widgets from the stored settings every time the
        # dialog is shown.
        self.loadSettings()
        super().showEvent(event)

    def closeEvent(self, event):
        # Persist whatever is currently in the widgets whenever the dialog
        # closes (covers the window-manager close button too).
        self.saveSettings()
        super().closeEvent(event)

    def loadSettings(self):
        """Populate the widgets from the "Quanty" settings group."""
        settings.beginGroup("Quanty")
        size = settings.value("Size")
        if size is not None:
            self.resize(QSize(size))
        pos = settings.value("Position")
        if pos is not None:
            self.move(QPoint(pos))
        path = settings.value("Path")
        self.pathLineEdit.setText(path)
        self.pathLineEdit.setCursorPosition(0)
        verbosity = settings.value("Verbosity")
        self.verbosityLineEdit.setText(verbosity)
        denseBorder = settings.value("DenseBorder")
        self.denseBorderLineEdit.setText(denseBorder)
        shiftSpectra = settings.value("ShiftSpectra", type=bool)
        self.shiftSpectraCheckBox.setChecked(shiftSpectra)
        removeFiles = settings.value("RemoveFiles", type=bool)
        self.removeFilesCheckBox.setChecked(removeFiles)
        settings.endGroup()

    def saveSettings(self):
        """Write the widget values back to the "Quanty" settings group."""
        settings.beginGroup("Quanty")
        settings.setValue("Path", self.pathLineEdit.text())
        settings.setValue("Verbosity", self.verbosityLineEdit.text())
        settings.setValue("DenseBorder", self.denseBorderLineEdit.text())
        settings.setValue("ShiftSpectra", self.shiftSpectraCheckBox.isChecked())
        settings.setValue("RemoveFiles", self.removeFilesCheckBox.isChecked())
        settings.setValue("Size", self.size())
        settings.setValue("Position", self.pos())
        settings.endGroup()
        settings.sync()

    def acceptSettings(self):
        # OK button: persist and close.
        self.saveSettings()
        self.close()

    def rejectSettings(self):
        # Cancel button: discard edits by reloading the stored values
        # before closing (closeEvent then re-saves the reloaded values).
        self.loadSettings()
        self.close()

    def setExecutablePath(self):
        """Let the user pick the Quanty executable via a file dialog."""
        home = os.path.expanduser("~")
        path, _ = QFileDialog.getOpenFileName(self, "Select File", home)
        if path:
            self.pathLineEdit.setText(path)
            self.pathLineEdit.setCursorPosition(0)
|
#!/usr/bin/env python
#Created by Spencer Hance and Trevor Gale on January 18th 2015
#Northeastern University Computer Architecture Research Group
#Licensed under MIT License
import sys
import matplotlib.pyplot as plt
import numpy as np
from pylab import cm
import re
import random
from scipy.misc import comb
import argparse
import warnings
def parseBBV(input_filename):
    """Parses a Basic Block Vector and converts data
    into a Numpy array

    Each non-comment line holds ":block_id:count" tokens; the result is a
    (num_intervals x num_bb) array of counts, one row per interval.
    """
    # NOTE(review): len() is applied to filter()'s result below, so this
    # is Python 2 code (filter returns a list) — consistent with the
    # `print` statements used throughout this file.
    with open(input_filename, 'r') as f:
        input_list = []
        # Opens file into a list
        for line in f.readlines():
            # Ignores BBV comments, which are any line that starts with a "#"
            if not line.strip().startswith('#'):
                input_list.append(line.split())
        # Removes empty list elements
        input_list = filter(None, input_list)
        num_intervals = len(input_list)
        # Determines the total number of basic blocks (max block id seen).
        # NOTE(review): the regexes should be raw strings r":(\d+):(\d+)"
        # to avoid invalid-escape warnings — left unchanged here.
        max_list = []
        for line in input_list:
            for j in range(0, len(line)):
                m = re.search(":(\d+):(\d+)", line[j])
                max_list.append(int(m.groups()[0]))
        num_bb = max(max_list)
        # Initializes array and adds basic block data; block ids are
        # 1-based in the file, hence the -1 column index.
        bbv_array = np.zeros((num_intervals, num_bb))
        for i in range(0, num_intervals):
            for j in range(0, len(input_list[i])):
                m = re.search(":(\d+):(\d+)", input_list[i][j])
                bbv_array[i, int(m.groups()[0])-1] = int(m.groups()[1])
        # Update user on current progress
        print 'Parsing Completed\n'
        return bbv_array
def reduceArray(bbv_array):
    """Reduce the BBV dimensionality to 15 via random projection.

    Multiplies the (intervals x num_bb) input by a (num_bb x 15) matrix of
    uniform random values in [-1, 1) and returns the (intervals x 15)
    projected array.
    """
    # One vectorized draw replaces the original per-element Python loops
    # (same distribution, filled in a single C-level call).
    random_array = np.random.uniform(-1, 1, size=(bbv_array.shape[1], 15))
    # Dot product projects the basic-block vectors down to 15 dimensions.
    return np.dot(bbv_array, random_array)
def mDistCompute(a, b):
    """Return the Manhattan (L1) distance between two equal-length
    1D sequences. Inner helper of mDist().
    """
    # sum() over a generator replaces the original index loop; zip pairs
    # the coordinates of the two (equal-length) vectors.
    return sum(abs(x - y) for x, y in zip(a, b))
def mDist(bbv_array):
    """Takes in bbv array and calls mDistCompute to compute
    manhattan distance between the vectors. Returns an
    array with differences.

    Only the upper triangle (j > i) is filled; the lower triangle and
    diagonal stay zero.
    """
    # Determines the size of the array
    mDist_length = bbv_array.shape[0]
    # Initializes a new array to store distance values
    mDist_array = np.zeros((mDist_length, mDist_length))
    # Determines total number of steps for progress bar (n choose 2 pairs)
    total_steps = float(comb(mDist_length, 2, exact=True))
    # Initializes step counter for progress bar
    step = 0
    # Compute distances by using mDistCompute() for each comparison
    print 'Computing Manhattan Distances'
    for i in range(0, mDist_length):
        for j in range(1+i, mDist_length):
            sum_dist = mDistCompute(bbv_array[i], bbv_array[j])
            mDist_array[i, j] = sum_dist
        # Calculations for progress counter (whole row done at once)
        step += len(range(1+i, mDist_length))
        sys.stdout.write('\r')
        sys.stdout.write('Completion: ' + \
            str(int(round((step/total_steps)*100))) + '%')
        sys.stdout.flush()
    print '\n'
    return mDist_array
def normMatrix(mDist_values):
    """Takes in array of manhattan distance values and
    returns the array normalized to the maximum value

    NOTE(review): the rebinding below does not copy — the caller's array
    is normalized in place and also returned.
    """
    #Renames input to norm_array
    norm_array = mDist_values
    #Determines the largest distance to normalize to
    max_val = max(max(l) for l in norm_array)
    # Update user on current progress
    print 'Normalizing Matrix\n'
    #Replaces every value with the new normalized value
    for i in range(0, norm_array.shape[0]):
        for j in range(0, norm_array.shape[1]):
            norm_array[i, j] /= max_val
    return norm_array
def plotNormData(norm_values, show=True):
    """Takes in normalized values and plots
    the data as a gray-scale similarity-matrix scatter plot.

    Only the upper triangle (j >= i) is plotted, matching where mDist()
    stored its values.
    """
    # Initialize lists for plt.scatter
    x, y, colors = [], [], []
    # Determines the height of the array for the graph's Y-Value
    yval = norm_values.shape[0]
    # The size of each point
    # Dividing by 4.5 usually provides enough granularity, however this should
    # be adjusted if a different resolution requirement is needed
    SIZE = yval/4.5
    # Update user on current progress
    print 'Plotting Norm Data\n'
    #Adds data to x, y, and colors lists
    for i in range(0, yval):
        for j in range(i, yval):
            x.append(j)
            y.append(i)
            colors.append(norm_values[i,j])
    #Plots data with gray colormap and aligns both axes to 0
    plt.scatter(x, y, c = colors, cmap=cm.gray, s = SIZE)
    plt.xlim(0)
    plt.ylim(0)
    #Inverts y axis to show similarity accurately
    plt.gca().invert_yaxis()
    if show == True:
        plt.show()
def commandParser():
    """Uses argparse module to parse command line options
    and runs the requested similarity-matrix pipeline.
    """
    parser = argparse.ArgumentParser(description='Similarity Matrix Generator \
        for Basic Block Vectors')
    parser.add_argument('-i',dest='filename', required=True, help='input BBV file',
        metavar='file')
    parser.add_argument('-s','--simmatrix', help='Create and display a similarity matrix' ,
        action='store_true')
    parser.add_argument('-dr','--do-not-reduce',
        help='Do not reduce input matrix for similarity matrix', action='store_true')
    args = parser.parse_args()
    # NOTE(review): argparse already aborts on a missing required -i, so
    # this check is effectively dead code.
    if not args.filename:
        print 'Error: Not enough input arguments'
    if args.do_not_reduce:
        # Full-dimension pipeline: parse -> distances -> normalize -> plot.
        print 'Starting Similarity Matrix Process (with unreduced array)\n'
        plotNormData(normMatrix(mDist(parseBBV(args.filename))))
    else:
        # Default pipeline additionally random-projects down to 15 dims.
        print 'Starting Similarity Matrix Process\n'
        plotNormData(normMatrix(mDist(reduceArray(parseBBV(args.filename)))))
def main():
    """Main Function: parse CLI options and run the pipeline."""
    commandParser()

if __name__ == '__main__':
    main()
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
str_or_none,
try_get
)
from ..compat import (
compat_str
)
import re
class HanimetvBaseIE(InfoExtractor):
    # Extractor for a single hanime.tv video page. All metadata and stream
    # URLs come from the inline `window.__NUXT__` JSON blob in the page.
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?
                            hanime\.tv/videos/hentai/
                        )
                        (?P<id>[a-zA-Z0-9-]+)
                    '''
    _TESTS = [{
        'url': 'https://hanime.tv/videos/hentai/enjo-kouhai-1',
        'md5': 'a3a08ac2180ed75ee731aff92d16f447',
        'info_dict': {
            'id': 'enjo-kouhai-1',
            'ext': 'mp4',
            'title': 'Enjo Kouhai 1',
            'alt_title': 'Assisted Mating',
            'age_limit': 18,
            'upload_date': '20200130',
            'description': 'md5:81b00795abd5ffa50a2e463ea321886e',
            'timestamp': 1580398865,
            'dislike_count': int,
            'like_count': int,
            'view_count': int,
            'tags': 'count:14',
            'creator': 'Majin Petit',
            'release_date': '20200130',
        }
    }, {
        'url': 'https://hanime.tv/videos/hentai/enjo-kouhai-2',
        'md5': '5fad67745e1ba911c041031d9e1ce2a7',
        'info_dict': {
            'id': 'enjo-kouhai-2',
            'ext': 'mp4',
            'title': 'Enjo Kouhai 2',
            'alt_title': 'Assisted Mating',
            'age_limit': 18,
            'upload_date': '20200228',
            'description': 'md5:5277f19882544683e698b91f9e2634e3',
            'timestamp': 1582850492,
            'dislike_count': int,
            'like_count': int,
            'view_count': int,
            'tags': 'count:12',
            'creator': 'Majin Petit',
            'release_date': '20200228',
        }
    }, {
        'url': 'https://hanime.tv/videos/hentai/enjo-kouhai-3',
        'md5': 'a3a08ac2180ed75ee731aff92d16f447',
        'info_dict': {
            'id': 'enjo-kouhai-3',
            'ext': 'mp4',
            'title': 'Enjo Kouhai 3',
            'alt_title': 'Assisted Mating',
            'age_limit': 18,
            'upload_date': '20200326',
            'timestamp': 1585237316,
            'description': 'md5:0d67e22b89a5f7e1ca079d974019d08d',
            'dislike_count': int,
            'like_count': int,
            'view_count': int,
            'tags': 'count:15',
            'creator': 'Majin Petit',
            'release_date': '20200326',
        }
    }, {
        'url': 'https://hanime.tv/videos/hentai/chizuru-chan-kaihatsu-nikki-1',
        'md5': 'b54b00535369c8cc0ad344cbef3429f5',
        'info_dict': {
            'id': 'chizuru-chan-kaihatsu-nikki-1',
            'ext': 'mp4',
            'title': 'Chizuru-chan Kaihatsu Nikki 1',
            'alt_title': '千鶴ちゃん開発日記',
            'age_limit': 18,
            'upload_date': '20210930',
            'timestamp': 1633016879,
            'description': 'A serious honor student "Chizuru Shiina" was shunned by her classmates due to her being a teacher\'s pet, but none of that mattered whenever she ran into her favorite teacher that she so deeply admired...',
            'dislike_count': int,
            'like_count': int,
            'view_count': int,
            'tags': 'count:17',
            'creator': 'Bunnywalker',
            'release_date': '20210930',
        }
    }, {
        'url': 'https://hanime.tv/videos/hentai/chizuru-chan-kaihatsu-nikki-2',
        'md5': 'b54b00535369c8cc0ad344cbef3429f5',
        'info_dict': {
            'id': 'chizuru-chan-kaihatsu-nikki-2',
            'ext': 'mp4',
            'title': 'Chizuru-chan Kaihatsu Nikki 2',
            'alt_title': '千鶴ちゃん開発日記',
            'age_limit': 18,
            'upload_date': '20210930',
            'timestamp': 1633016880,
            'description': 'A serious honor student "Chizuru Shiina" was shunned by her classmates due to her being a teacher\'s pet, but none of that mattered whenever she ran into her favorite teacher that she so deeply admired...',
            'dislike_count': int,
            'like_count': int,
            'view_count': int,
            'tags': 'count:17',
            'creator': 'Bunnywalker',
            'release_date': '20210930',
        }
    }
    ]
    # Desktop Chrome user-agent spoofed for site requests.
    HTTP_HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 Safari/537.36'}
    DEFAULT_HOST = 'hanime.tv'

    def _real_extract(self, url):
        """Extract metadata and formats from the page's inline JSON."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page state lives in an inline `window.__NUXT__ = {...};` blob.
        json_data = self._html_search_regex(r"window.__NUXT__=(.+?);<\/script>", webpage, 'Hanime.tv Inline JSON', fatal=True)
        json_data = self._parse_json(json_data, video_id)['state']['data']['video']
        server_data_dict = json_data['videos_manifest']['servers']
        url_list = list()
        # Collect url/width/height for each stream.
        # NOTE(review): the early break caps url_list at the current
        # server's stream count, so effectively only the first server's
        # streams are collected — confirm this is the intent.
        for server in range(len(server_data_dict)):
            for stream in range(len(server_data_dict[server]['streams'])):
                stream_data_dict = server_data_dict[server]['streams']
                if len(url_list) == len(stream_data_dict):
                    break
                else:
                    tmp_list = {
                        'url': stream_data_dict[stream]['url'],
                        'width': int_or_none(stream_data_dict[stream]['width']),
                        'height': int_or_none(stream_data_dict[stream]['height'])
                    }
                    url_list.append(tmp_list)
        # Sort renditions by pixel area, lowest resolution first.
        url_list = sorted(url_list, key=lambda val: val['width'] * val['height'])
        title = json_data['hentai_video']['name'] or video_id
        alt_title = try_get(json_data, lambda val: val['hentai_video']['titles'][0]['title'])
        description = clean_html(try_get(json_data, lambda val: val['hentai_video']['description']))
        publisher = try_get(json_data, lambda val: val['hentai_video']['brand'])
        tags = list()
        tag_dict = try_get(json_data, lambda val: val['hentai_video']['hentai_tags'])
        if tag_dict:
            for i in range(len(tag_dict)):
                tags.append(try_get(tag_dict, lambda val: val[i]['text'], compat_str))
        formats = list()
        for i in range(len(url_list)):
            # Skip placeholder entries that have no stream URL.
            if url_list[i]['url'] == '':
                continue
            formats.append(
                {
                    'url': url_list[i]['url'],
                    'width': url_list[i].get('width'),
                    'height': url_list[i].get('height'),
                    'resolution': str_or_none(url_list[i]['width']) + "x" + str_or_none(url_list[i]['height']),
                    'container': 'mp4',
                    'ext': 'mp4',
                    'protocol': 'm3u8',
                    # Prefer the 720p rendition when present.
                    'preference': 1 if url_list[i]['height'] == 720 else None,
                })
        self._remove_duplicate_formats(formats)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'formats': formats,
            'description': description,
            'creator': publisher,
            'title': title,
            'alt_title': alt_title,
            'tags': tags,
            # released_at appears to begin with YYYY-MM-DD; dashes are
            # stripped to get the YYYYMMDD release_date.
            'release_date': try_get(json_data, lambda val: val['hentai_video']['released_at'][:10].replace('-', '')),
            'timestamp': try_get(json_data, lambda val: val['hentai_video']['released_at_unix']),
            'view_count': try_get(json_data, lambda val: val['hentai_video']['views']),
            'like_count': try_get(json_data, lambda val: val['hentai_video']['likes']),
            'dislike_count': try_get(json_data, lambda val: val['hentai_video']['dislikes']),
            'age_limit': 18,
        }
class HanimetvPlaylistIE(HanimetvBaseIE):
    # Extracts hanime.tv playlists: entries come from the playlist page
    # plus each video page's "next video" pointer.
    #
    # BUG FIX: the host group originally read `(:[^/]+\.)*` — a capturing
    # group requiring a literal colon — so subdomain URLs such as
    # https://www.hanime.tv/playlists/... could never match. `(?:...)` is
    # the intended non-capturing "optional subdomains" group; bare
    # hanime.tv URLs match exactly as before.
    _VALID_URL = r'''(?x)
                    https?://
                        (?P<host>(?:[^/]+\.)*hanime\.tv)
                        (?:/playlists/)
                        (?P<id>[a-zA-Z0-9-]+)
                    '''
    _TESTS = [{
        'url': 'https://hanime.tv/playlists/kjllqq5qxrocq6j0wcp9',
        'only_matching': True,
    }, {
        'url': 'https://hanime.tv/playlists/b1ase43cby9s97h0w1kr',
        'only_matching': True,
    }, {
        'url': 'https://hanime.tv/playlists/y8km5n4rmanckpx06vxs',
        'only_matching': True,
    }, {
        'url': 'https://hanime.tv/playlists/qngysuiwk4ukmh8ykp3k',
        'only_matching': True,
    }
    ]

    def _get_next_vid_id(self, webpage, playlist_id):
        """Return the slug of the next playlist video, or None at the end."""
        json_data = self._html_search_regex(r"window.__NUXT__=(.+?);<\/script>", webpage, f"Next video in {playlist_id}", fatal=True)
        result = try_get(self._parse_json(json_data, playlist_id), lambda val: val['state']['data']['video']['next_hentai_video']['slug'])
        if result:
            return result
        else:
            return None

    def _extract_entries(self, url, item_id, title):
        # Wrap one video URL as a url_result delegated to the base extractor.
        return [
            self.url_result(
                url,
                HanimetvBaseIE.ie_key(), video_id=item_id,
                video_title=title)
        ]

    def _entries(self, url, host, playlist_id):
        """Yield a url_result entry for every video in the playlist."""
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        base_video_url = f'https://{host}/videos/hentai/%s?playlist_id={playlist_id}'
        webpage = self._download_webpage(url, playlist_id, note=f"Downloading webpage: {url}")
        json_data = self._html_search_regex(r"window.__NUXT__=(.+?);<\/script>", webpage, 'Hanime.tv Inline JSON')
        json_data = self._parse_json(json_data, playlist_id)['state']['data']['playlist']['playlist_videos']
        known_vid_ids = list()
        for video in json_data['hentai_videos']:
            known_vid_ids.append(video['slug'])
        expected_vids = int_or_none(json_data['num_records']) or len(known_vid_ids)
        curr_vid_url = base_video_url % known_vid_ids[0]
        first_video = curr_vid_url
        last_known_vid_url = base_video_url % known_vid_ids[-1]
        processed_vids = 0
        # First, yield the videos listed directly on the playlist page.
        for vid_id in known_vid_ids:
            processed_vids += 1
            for e in self._extract_entries(base_video_url % vid_id, processed_vids, vid_id):
                yield e
        # Only a limited number of videos appear on the playlist page
        # itself; fetch the rest by following each video page's "next
        # video" link until it wraps to the first video or the expected
        # record count is reached.
        seek_next_vid = True
        while (seek_next_vid):
            webpage = self._download_webpage(last_known_vid_url, playlist_id)
            curr_vid_id = self._get_next_vid_id(webpage, playlist_id)
            if curr_vid_id is None:
                seek_next_vid = False
                break
            curr_vid_url = base_video_url % curr_vid_id
            if curr_vid_url != first_video and processed_vids <= expected_vids:
                processed_vids += 1
                last_known_vid_url = curr_vid_url
                for e in self._extract_entries(curr_vid_url, processed_vids, curr_vid_id):
                    yield e
            else:
                seek_next_vid = False

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        host = mobj.group('host')
        playlist_id = mobj.group('id')
        return self.playlist_result(self._entries(url, host, playlist_id), playlist_id)
|
from bokeh.core.json_encoder import pd
from . import get_logger, freeze, subdict
from sklearn import preprocessing
_LOG = get_logger('Recsys')
def process_user_info():
    """Derive recommender hyper-parameters and per-painting preferences
    from the visitor's questionnaire spreadsheets.

    Returns:
        (Beta, Epsilon, LAMBDA, Crowd_tolerance, preference_dict) where
        the first four are min-max-normalized scalars and preference_dict
        maps painting column ids to normalized rating weights.

    NOTE(review): both input paths are hard-coded, user-specific absolute
    paths — this only runs on the original author's machine.
    """
    HPs = pd.read_excel('/Users/bekyilma/Documents/Projects/vr/Multi-Stakeholder_Recommendation/Data/Alberto.xlsx', index_col=0)
    # Keep only the four hyper-parameter questions.
    HPs = HPs[['How likely are you interested in visiting popular paintings?',
               'How likely are you interested in visiting diverse content?',
               'How tolerant are you to crowd in exhibition areas?',
               'How tolerant are you towards walking in a museum?']]
    HPs = HPs.transpose()
    HPs.columns = ['Values']
    # Normalize Hyperparameters
    x = HPs[['Values']].values.astype(float)
    min_max_scaler = preprocessing.MinMaxScaler()
    x_scaled = min_max_scaler.fit_transform(x)
    df_normalized = pd.DataFrame(x_scaled)
    HPs['norm_Values'] = df_normalized.values
    # Translate Hyperparameters (pick each normalized answer by question)
    Beta = HPs.at['How likely are you interested in visiting popular paintings?', 'norm_Values']
    Epsilon = HPs.at['How likely are you interested in visiting diverse content?', 'norm_Values']
    LAMBDA = HPs.at['How tolerant are you towards walking in a museum?', 'norm_Values']
    Crowd_tolerance = HPs.at['How tolerant are you to crowd in exhibition areas?', 'norm_Values']
    # -------------- Import preference information (weights) -------------- #
    df = pd.read_excel('/Users/bekyilma/Documents/Projects/vr/Rec/Alberto/Alberto.xlsx', index_col=0)
    # Drop the survey bookkeeping columns so only per-painting ratings remain.
    df1 = df.drop(['Date submitted', 'Last page', 'Start language', 'Seed',
                   'Please select your choice below. Clicking on the "agree" button below indicates that: You have read the above information and you voluntarily agree to participate. If you do not wish to participate in the research study, please decline participation by clicking on the "disagree" button.',
                   'Which of the following best describes your museum visiting style?',
                   'How likely are you interested in visiting popular paintings?',
                   'How likely are you interested in visiting diverse content?',
                   'How tolerant are you to crowd in exhibition areas?',
                   'How tolerant are you towards walking in a museum?',
                   'Do you want to get contacted for a further interview to validate our recommender system and explain your choices?',
                   'If you answer YES, please provide your prefered means of communication? (email.)'], axis=1)
    # Clean a column name carrying a trailing non-breaking space.
    df1.rename(columns={'000-0419-0000 \xa0': '000-0419-0000'}, inplace=True)
    selected_images_keys_list = list(df1.columns)
    # df1.dropna(axis=1, how='all')
    df1 = df1.transpose()
    df1.columns = ['a']
    df1.dropna(subset=['a'], inplace=True)
    df1['weights'] = df1['a'].astype('int')
    # Create x, where x the 'scores' column's values as floats
    x = df1[['weights']].values.astype(float)
    # Create a minimum and maximum processor object
    min_max_scaler = preprocessing.MinMaxScaler()
    # Create an object to transform the data to fit minmax processor
    x_scaled = min_max_scaler.fit_transform(x)
    # Run the normalizer on the dataframe
    df_normalized = pd.DataFrame(x_scaled)
    df1['norm_weights'] = df_normalized.values
    weights = df1['norm_weights'].tolist()
    # wrap preference info as a dictionary
    preference_dict = dict(zip(selected_images_keys_list, weights))
    _LOG.debug('Parameters {}k {} {} {} {}'.format(Beta, Epsilon, LAMBDA, Crowd_tolerance, preference_dict))
    return Beta, Epsilon, LAMBDA, Crowd_tolerance, preference_dict
|
# Author: Michael Lissner
# Date created: 2013-06-06
import re
import requests
from datetime import date
from datetime import datetime
from juriscraper.opinions.united_states.state import nd
from juriscraper.DeferringList import DeferringList
class Site(nd.Site):
    # North Dakota monthly-opinions scraper; specializes the base nd.Site
    # parsing. The site's markup changed in October 1998, so most getters
    # branch on self.crawl_date around that boundary.

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        today = date.today()
        # Month index page, e.g. .../opinions/month/Jun2013.htm
        self.url = "http://www.ndcourts.gov/opinions/month/%s.htm" % (
            today.strftime("%b%Y")
        )

    def _get_download_urls(self):
        """We use a fetcher and a DeferringList object and a HEAD request
        to test whether the wpd exists for a case"""

        def fetcher(html_link):
            if self.test_mode_enabled():
                return html_link  # Can't fetch remote during tests
            # Prefer the WordPerfect original when the server has one;
            # a cheap HEAD request probes for it without downloading.
            case_number = re.search(r"(\d+)", html_link).group(0)
            wpd_link = "http://www.ndcourts.gov/wp/%s.wpd" % case_number
            r = requests.head(
                wpd_link,
                allow_redirects=False,
                headers={"User-Agent": "Juriscraper"},
            )
            if r.status_code == 200:
                return wpd_link
            else:
                return html_link

        if self.crawl_date >= date(1998, 10, 1):
            path = '//a/@href[contains(., "/court/opinions/")]'
            seed = list(self.html.xpath(path))
        else:
            path = "//ul//a[text()]/@href"
            seed = list(self.html.xpath(path))
        return DeferringList(seed=seed, fetcher=fetcher)

    def _get_case_names(self):
        if self.crawl_date >= date(1998, 10, 1):
            path = '//a[contains(@href, "/court/opinions/")]/text()'
            return list(self.html.xpath(path))
        else:
            path = "//ul//a/text()"
            names = self.html.xpath(path)
            case_names = []
            if self.crawl_date < date(1996, 11, 1):
                # A bad time: names carry a "- ..." suffix to strip.
                for name in names:
                    name = name.rsplit("-")[0]
                    case_names.append(name)
                return case_names
            else:
                return list(names)

    def _get_case_dates(self):
        # A tricky one. We get the case dates, but each can have different number of cases below it, so we have to
        # count them.
        # The date element (font/h4) precedes its cases (a/li), so `dt` is
        # set when a date is seen and appended once per following case.
        case_dates = []
        if self.crawl_date >= date(1998, 10, 1):
            test_path = "//body/a"
            if len(self.html.xpath(test_path)) == 0:
                # It's a month with no cases (like Jan, 2009)
                return []
            path = "//body/a|//body/font"
            for e in self.html.xpath(path):
                if e.tag == "font":
                    date_str = e.text
                    dt = datetime.strptime(date_str, "%B %d, %Y").date()
                elif e.tag == "a":
                    try:
                        case_dates.append(dt)
                    except NameError:
                        # When we don't yet have the date
                        continue
        else:
            path = "//h4|//li"
            for e in self.html.xpath(path):
                if e.tag == "h4":
                    # We make dates on h4's because there's one h4 per date.
                    date_str = e.text.strip()
                    dt = datetime.strptime(date_str, "%B %d, %Y").date()
                elif e.tag == "li":
                    try:
                        # We append on li's, because there's one li per case.
                        case_dates.append(dt)
                    except NameError:
                        # When we don't yet have the date
                        continue
        return case_dates

    def _get_precedential_statuses(self):
        # Every scraped opinion on these pages is treated as published.
        return ["Published"] * len(self.case_names)

    def _get_docket_numbers(self):
        # Docket number is the first run of digits in each case link.
        if self.crawl_date >= date(1998, 10, 1):
            path = '//a/@href[contains(., "/court/opinions/")]'
        else:
            path = "//ul//a[text()]/@href"
        docket_numbers = []
        for html_link in self.html.xpath(path):
            try:
                docket_numbers.append(re.search(r"(\d+)", html_link).group(0))
            except AttributeError:
                continue
        return docket_numbers

    def _get_neutral_citations(self):
        if self.crawl_date < date(1997, 2, 1):
            # Old format, but no neutral cites, thus short circuit the function.
            return None
        elif self.crawl_date < date(1998, 10, 1):
            # Old format with: 1997 ND 30 - Civil No. 960157 or 1997 ND 30
            path = "//li/text()"
        elif self.crawl_date >= date(1998, 10, 1):
            # New format with: 1997 ND 30
            path = "//body/text()"
        neutral_cites = []
        for t in self.html.xpath(path):
            try:
                neutral_cites.append(
                    re.search(
                        r"^.{0,5}(\d{4} ND (?:App )?\d{1,4})", t, re.MULTILINE
                    ).group(1)
                )
            except AttributeError:
                continue
        return neutral_cites

    def _post_parse(self):
        # Remove any information that applies to non-appellate cases.
        # NOTE(review): the code below deletes rows whose neutral citation
        # contains "App" (Court of Appeals), i.e. it keeps the others —
        # confirm the comment above matches the intended filtering.
        if self.neutral_citations:
            delete_items = []
            for i in range(0, len(self.neutral_citations)):
                if "App" in self.neutral_citations[i]:
                    delete_items.append(i)
            # Delete highest indexes first so earlier ones stay valid.
            for i in sorted(delete_items, reverse=True):
                del self.download_urls[i]
                del self.case_names[i]
                del self.case_dates[i]
                del self.precedential_statuses[i]
                del self.docket_numbers[i]
                del self.neutral_citations[i]
        else:
            # When there aren't any neutral cites that means they're all supreme court cases.
            pass

    def _download_backwards(self, d):
        # Point the scraper at an arbitrary historical month and re-download.
        self.crawl_date = d
        self.url = "http://www.ndcourts.gov/opinions/month/%s.htm" % (
            d.strftime("%b%Y")
        )
        self.html = self._download()
|
"""
...
"""
# ...
# Module-level example constant.
ANSWER = 42
def test_simple():
    """Sanity check that the test module is collected and executed."""
    assert True
|
from sympy.printing.pycode import PythonCodePrinter
from sympy.printing.printer import Printer
from sympy import Add, Mul
class SymEnginePrinter(Printer):
    """Render SymPy expressions as nested symengine-style function calls
    (pow/add/mul/integer), folding n-ary sums and products into chains of
    binary calls.
    """

    def _print_Pow(self, expr):
        # pow(base, exponent)
        base_src = self._print(expr.base)
        exp_src = self._print(expr.exp)
        return "pow({0}, {1})".format(base_src, exp_src)

    def _print_Add(self, expr):
        # Binary add(); sums with more than two terms recurse on the tail.
        head = self._print(expr.args[0])
        if len(expr.args) == 2:
            tail = self._print(expr.args[1])
        else:
            tail = self._print(Add.fromiter(expr.args[1:]))
        return "add({0}, {1})".format(head, tail)

    def _print_Mul(self, expr):
        # Binary mul(); a single-factor product prints as the factor itself.
        if len(expr.args) < 2:
            return self._print(expr.args[0])
        head = self._print(expr.args[0])
        tail = self._print(Mul.fromiter(expr.args[1:]))
        return "mul({0}, {1})".format(head, tail)

    def _print_Integer(self, expr):
        return "integer({0})".format(expr)

    def _print_int(self, expr):
        # Plain Python ints print the same way as SymPy Integers.
        return self._print_Integer(expr)
def symengine_print(expr):
    """Return *expr* rendered through a SymEnginePrinter instance."""
    return SymEnginePrinter().doprint(expr)
|
from django.shortcuts import render
import requests
from requests.api import get
import covid
from covid import Covid
import datetime
from datetime import date
import json
import os
from dotenv import load_dotenv
load_dotenv()
# Module-level accumulators shared by the helpers below.
list_cont = []
chart_cases = [['date','active','recovered','total']]
# NOTE(review): this rebinds the name `covid`, shadowing the module
# imported above; only the Covid() client is used from here on.
covid = Covid()
#covid functions
def country_list():
    # Append every country name to the module-level list_cont.
    # NOTE(review): shadowed by the second country_list() defined further
    # down in this module, so this version is never the one called.
    countries = covid.list_countries()
    for i in range(len(countries)):
        x = countries[i]
        list_cont.append(x['name'])
    return list_cont
def active_cases(country):
    """Return [confirmed, active, deaths] counts for *country*."""
    status = covid.get_status_by_country_name(country)
    return [status['confirmed'], status['active'], status['deaths']]
def get_data_by_date(country, date, d_short):
    """Fetch historical COVID figures for one country and day.

    Args:
        country: country name passed to the RapidAPI history endpoint.
        date: query day as "YYYY-MM-DD".
        d_short: short display label (e.g. "05 Jan") used as the row header.

    Returns:
        [d_short, active, recovered, total] for that day.
    """
    url = "https://covid-193.p.rapidapi.com/history"
    querystring = {"country": country, "day": date}
    headers = {
        # API key is read from the environment, never hard-coded.
        'x-rapidapi-key': os.getenv("COVID_API"),
        'x-rapidapi-host': "covid-193.p.rapidapi.com"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    # BUG FIX: the original read `response.tex`, which raises
    # AttributeError — the Response body attribute is `.text`.
    res = json.loads(response.text)
    # First entry of 'response' holds the day's case counts.
    data = res['response'][0]['cases']
    return [d_short, data['active'], data['recovered'], data['total']]
def graph(country):
    # Build a chart table of [date, active, recovered, total] rows sampled
    # every 10 days over the last 150 days, oldest first, with a header row.
    # NOTE(review): `today` is unused.
    today = date.today()
    d1 = datetime.datetime.now()
    arr = [['date','active','recovered','total']]
    for i in range (0,150,10):
        d2 = datetime.timedelta(days = i)
        d2 = d1 - d2
        d_form = d2.strftime("%Y-%m-%d")
        d_short = d2.strftime("%d %b")
        x = get_data_by_date(country,d_form,d_short)
        # NOTE(review): chart_cases is module-global, so rows accumulate
        # across calls — a second call duplicates earlier data in arr.
        chart_cases.append(x)
    n = len(chart_cases)
    # Copy the accumulated rows in reverse (newest sample was fetched
    # first), skipping the header already present in arr.
    for i in range (n-1):
        arr.append(chart_cases[n-i-1])
    return arr
def country_list():
    # Append the first five country names to the module-level list_cont.
    # NOTE(review): this redefinition shadows the earlier country_list();
    # because it appends to a global, repeated calls duplicate entries.
    countries = covid.list_countries()
    for i in range(len(countries)):
        if i<=4:
            x = countries[i]
            list_cont.append(x['name'])
    return list_cont
# a_list=[]
# def countryactive_list():
# countries = covid.list_countries()
# for i in range(len(countries)):
# if i<=4:
# x = countries[i]
# a_list.append(active_cases(str(x['name'])))
# return a_list
# Computed once at import time: worldwide totals and the per-country
# figures for the top five countries. NOTE(review): these hit the network
# during Django startup and never refresh until the process restarts.
w_active= covid.get_total_active_cases()
w_deaths= covid.get_total_deaths()
w_confirmed= covid.get_total_confirmed_cases()
top_countries=country_list()
top1=active_cases(top_countries[0])
top2=active_cases(top_countries[1])
top3=active_cases(top_countries[2])
top4=active_cases(top_countries[3])
top5=active_cases(top_countries[4])
# Create your views here.
def index(request):
    # Render the stats page from the module-level figures computed at
    # import time (they are not refreshed per request).
    context={
        'w_active':w_active,
        'w_confirmed':w_confirmed,
        'w_deaths':w_deaths,
        'top_countries':top_countries,
        'top1':top1,
        'top2':top2,
        'top3':top3,
        'top4':top4,
        'top5':top5,
        # 'top_active_countries':top_active_countries,
    }
    return render(request,r'c_stats\stats.html',context)
class Grafo:
    """Directed graph over vertices 0..V-1 stored as adjacency lists,
    with iterative-DFS cycle detection.
    """

    def __init__(self, V):
        self.V = V
        # lista[v] holds the vertices directly reachable from v.
        self.lista = [[] for i in range(V)]

    def add_aresta(self, orig, dest):
        """Add the directed edge orig -> dest."""
        self.lista[orig].append(dest)

    def dfs(self, v):
        """Iterative DFS from v; return True iff a cycle is reachable.

        `pilha` is the explicit DFS stack; `pilha_rec[u]` marks whether u
        is currently on that stack (i.e. on the active recursion path).
        Cleanup vs the original: the unused `aux_adj` local and the
        redundant initial `pilha_rec = []` assignment were removed —
        control flow is otherwise unchanged.
        """
        pilha = []
        visitados = [False for i in range(self.V)]
        pilha_rec = [False for i in range(self.V)]
        while True:
            achou_vizinho = False
            if not visitados[v]:
                pilha.append(v)
                visitados[v] = pilha_rec[v] = True
            for adj in self.lista[v]:
                # An edge back to a vertex on the active path is a cycle.
                if pilha_rec[adj]:
                    return True
                elif not visitados[adj]:
                    # Unvisited neighbour found: descend into it.
                    achou_vizinho = True
                    break
            if not achou_vizinho:
                # Fully explored: leave the active path and backtrack.
                pilha_rec[pilha[-1]] = False
                pilha.pop()
                if len(pilha) == 0:
                    break
                v = pilha[-1]
            else:
                # `adj` still holds the neighbour that triggered the break.
                v = adj
        return False

    def tem_ciclo(self):
        """Return True iff the graph contains a directed cycle."""
        for i in range(self.V):
            if self.dfs(i):
                return True
        return False
# Smoke test: 0 -> 1 -> 2 -> 0 forms a cycle, so this prints True.
g = Grafo(3)
g.add_aresta(0, 1)
g.add_aresta(1, 2)
g.add_aresta(2, 0)
print(g.tem_ciclo())
def lif_neuron_inh(n_steps=1000, alpha=0.5, beta=0.1, exc_rate=10, inh_rate=10):
    """Simulate a simplified leaky integrate-and-fire neuron driven by
    excitatory and inhibitory Poisson spike trains.

    Args:
        n_steps (int): number of time steps to simulate.
        alpha (float): scaling factor applied to the net input.
        beta (float): leak factor applied to the membrane potential.
        exc_rate (int): mean rate of the incoming excitatory spikes.
        inh_rate (int): mean rate of the incoming inhibitory spikes.

    Returns:
        tuple: (membrane-potential trace, list of spike-time indices).
    """
    # Draw all Poisson input counts up front for speed (excitatory first,
    # then inhibitory, matching the RNG consumption order).
    exc = stats.poisson(exc_rate).rvs(n_steps)
    inh = stats.poisson(inh_rate).rvs(n_steps)

    vm = np.zeros(n_steps)
    spike_times = []
    for t in range(1, n_steps):
        # Leaky integration of the net (excitatory - inhibitory) drive.
        dv = -beta * vm[t - 1] + alpha * (exc[t] - inh[t])
        vm[t] = vm[t - 1] + dv
        # Threshold crossing: record the spike and reset the potential.
        if vm[t] > 1:
            spike_times.append(t)
            vm[t] = 0
    return vm, spike_times
# Run one simulation with default parameters and plot it in xkcd style;
# plot_neuron_stats is defined elsewhere (notebook helper).
v, spike_times = lif_neuron_inh()
with plt.xkcd():
    plot_neuron_stats(v, spike_times)
"""Example Process class."""
class Process(object):
    """Example process holding a single number."""

    def __init__(self, num=0):
        """Initialize a new Process.

        :num: Number to store on the instance.
        """
        self.num = num

    def get_double(self):
        """Return a dict mapping "double" to twice the stored number."""
        doubled = self.num * 2
        return {"double": doubled}
|
#!/usr/bin/env python
# uuid: a2a8bd34-9d35-4648-9500-d6c62a2557c9
# MIT License+uuid License
# For details of these license see
# https://github.com/a-bentofreire/uuid-licenses/blob/master/MIT-uuid-license.md
# Copyright (c) 2018 Alexandre Bento Freire. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice, the uuid, and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from gimpfu import *
def bounding_box(timg):
    # Replace the current (possibly irregular) selection with its
    # rectangular bounding box, reporting the box size via gimp_message.
    has_selection, x1, y1, x2, y2 = pdb.gimp_selection_bounds(timg)
    if has_selection:
        # Show "width,height" of the box before selecting it.
        pdb.gimp_message(str(x2 - x1) + ',' + str(y2 - y1))
        # Operation 0 is presumably CHANNEL-OP-REPLACE — confirm against
        # the GIMP PDB docs for gimp_image_select_rectangle.
        pdb.gimp_image_select_rectangle(timg, 0, x1, y1, x2 - x1, y2 - y1)
# Register the plug-in with GIMP (appears as Select > Bounding box),
# then hand control to the gimpfu main loop.
register(
    "python_fu_bounding_box",
    "Bounding box",
    "Selects the Bounding box",
    "Alexandre Bento Freire",
    "Alexandre Bento Freire",
    "2018",
    "Bounding box",
    "",
    [
        (PF_IMAGE, "image", "Input image", None)
    ],
    [],
    bounding_box, menu="<Image>/Select")

main()
|
# Auto-generated decompilation metadata: for each address, clear any
# existing items, then register the mangled C++ function signature.
del_items(0x800A0CD8)
SetType(0x800A0CD8, "void VID_OpenModule__Fv()")
del_items(0x800A0D98)
SetType(0x800A0D98, "void InitScreens__Fv()")
del_items(0x800A0E88)
SetType(0x800A0E88, "void MEM_SetupMem__Fv()")
del_items(0x800A0EB4)
SetType(0x800A0EB4, "void SetupWorkRam__Fv()")
del_items(0x800A0F44)
SetType(0x800A0F44, "void SYSI_Init__Fv()")
del_items(0x800A1050)
SetType(0x800A1050, "void GM_Open__Fv()")
del_items(0x800A1074)
SetType(0x800A1074, "void PA_Open__Fv()")
del_items(0x800A10AC)
SetType(0x800A10AC, "void PAD_Open__Fv()")
del_items(0x800A10F0)
SetType(0x800A10F0, "void OVR_Open__Fv()")
del_items(0x800A1110)
SetType(0x800A1110, "void SCR_Open__Fv()")
del_items(0x800A1140)
SetType(0x800A1140, "void DEC_Open__Fv()")
del_items(0x800A13B4)
SetType(0x800A13B4, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1488)
SetType(0x800A1488, "char *GetWord__FPc(char *VStr)")
|
import json
from instaloader import Instaloader, Profile
# Dump the usernames of all followers of `target_profile` to followers.txt.
# Credentials/config come from auth.json: {"login": ..., "target_profile": ...}.
with open('auth.json') as f:
    auth = json.load(f)
login_name = auth['login']
target_profile = auth['target_profile']
loader = Instaloader()
# login: reuse a previously saved session when possible, otherwise prompt
# interactively and persist the new session for next time.
try:
    loader.load_session_from_file(login_name)
except FileNotFoundError:
    loader.context.log("Session file does not exist yet - Logging in.")
if not loader.context.is_logged_in:
    loader.interactive_login(login_name)
    loader.save_session_to_file()
profile = Profile.from_username(loader.context, target_profile)
followers = profile.get_followers()
loader.context.log()
loader.context.log('Profile {} has {} followers. Writing to file'.format(profile.username, profile.followers))
loader.context.log()
# Use a context manager so the file is closed even if iterating the follower
# stream raises (the original leaked the handle and never closed it on error).
# Note: 'a' appends, so repeated runs accumulate entries.
with open('followers.txt', 'a') as followers_file:
    for follower in followers:
        followers_file.write(follower.username + '\n')
loader.context.log('Finished.')
#!/usr/bin/env python
"""my-new-package setup script."""
from setuptools import find_packages, setup


# https://github.com/PyCQA/pylint/issues/3826
def _read_text(filename):
    """Return the contents of *filename* decoded as UTF-8."""
    with open(filename, encoding="utf8") as fh:
        return fh.read()


readme = _read_text("README.rst")
history = _read_text("HISTORY.rst")

requirements = ["Click>=7.0"]
test_requirements = ["pytest>=6.0"]

__version__ = '0.1.0'

# Package metadata; long_description is README followed by the changelog.
setup(
    name='my_new_package',
    version=__version__,
    description="An example package for cookiecutter-py3-package.",
    long_description=readme + '\n\n' + history,
    author="Mark Sevelj",
    author_email='mark@example.com',
    url='https://github.com/imAsparky/my-new-package',
    license="MIT license",
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    keywords='my_new_package',
    packages=find_packages(include=['my_new_package', 'my_new_package.*']),
    entry_points={
        'console_scripts': [
            'my_new_package=my_new_package.cli:main',
        ],
    },
    install_requires=requirements,
    tests_require=test_requirements,
    test_suite='tests',
    include_package_data=True,
    zip_safe=False,
)
|
from absl import app
from absl import flags
from typing import Text
import os
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen, StatisticsGen, SchemaGen, Trainer, Transform, Evaluator, Pusher, ResolverNode, BulkInferrer
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.proto import bulk_inferrer_pb2
from tfx.utils.dsl_utils import external_input
from tfx.orchestration import pipeline
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from hello_component import component
from tfx.orchestration.kubeflow import kubeflow_dag_runner
FLAGS = flags.FLAGS
def generate_pipeline(pipeline_name, pipeline_root, train_data, test_data, train_steps, eval_steps, pusher_target, runner):
    """Build the TFX pipeline for the Santander Kaggle task.

    The same definition is shared by the local Beam runner and the Kubeflow
    runner; on Kubeflow the data/output locations become RuntimeParameters so
    they can be set per run and the hard-coded GCS defaults below apply.

    :param pipeline_name: identifier used to distinguish pipelines.
    :param pipeline_root: artifact root directory (not used on Kubeflow).
    :param train_data: folder containing the Kaggle train CSV.
    :param test_data: folder containing the Kaggle test CSV.
    :param train_steps: number of Trainer training steps.
    :param eval_steps: number of Trainer eval steps.
    :param pusher_target: directory the Pusher exports the blessed model to.
    :param runner: 'local' or 'kubeflow'; selects parameterization style.
    :return: a ``tfx.orchestration.pipeline.Pipeline``.
    """
    module_file = 'util.py' # util.py is a file in the same folder
    # RuntimeParameter is only supported on KubeflowDagRunner currently
    if runner == 'kubeflow':
        pipeline_root_param = os.path.join('gs://{{kfp-default-bucket}}', pipeline_name, '{{workflow.uid}}')
        train_data_param = data_types.RuntimeParameter(name='train-data', default='gs://renming-mlpipeline-kubeflowpipelines-default/kaggle/santander/train', ptype=Text)
        test_data_param = data_types.RuntimeParameter(name='test-data', default='gs://renming-mlpipeline-kubeflowpipelines-default/kaggle/santander/test', ptype=Text)
        pusher_target_param = data_types.RuntimeParameter(name='pusher-destination', default='gs://renming-mlpipeline-kubeflowpipelines-default/kaggle/santander/serving', ptype=Text)
    else:
        pipeline_root_param = pipeline_root
        train_data_param = train_data
        test_data_param = test_data
        pusher_target_param = pusher_target
    # Two CsvExampleGens: one over the training data, and one producing a
    # single 'test' split that feeds the BulkInferrer further down.
    examples = external_input(train_data_param)
    example_gen = CsvExampleGen(input=examples, instance_name="train")
    test_examples = external_input(test_data_param)
    test_example_gen = CsvExampleGen(input=test_examples, output_config={'split_config': {'splits': [{'name':'test', 'hash_buckets':1}]}}, instance_name="test")
    # Statistics -> schema inference feed both Transform and the Trainer.
    statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
    schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'],
                           infer_feature_shape=True) # infer_feature_shape controls sparse or dense
    # Transform is too slow in my side.
    transform = Transform(
        examples=example_gen.outputs['examples'],
        schema=schema_gen.outputs['schema'],
        module_file=module_file)
    # GenericExecutor lets the Trainer run a Keras run_fn from module_file.
    trainer = Trainer(
        custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
        examples=transform.outputs['transformed_examples'],
        transform_graph=transform.outputs['transform_graph'],
        schema=schema_gen.outputs['schema'],
        module_file=module_file,
        train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
        eval_args=trainer_pb2.EvalArgs(num_steps=eval_steps),
        instance_name="train",
        enable_cache=False)
    # Get the latest blessed model for model validation.
    model_resolver = ResolverNode(
        instance_name='latest_blessed_model_resolver',
        resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
        model=Channel(type=Model),
        model_blessing=Channel(type=ModelBlessing))
    # Uses TFMA to compute a evaluation statistics over features of a model and
    # perform quality validation of a candidate model (compared to a baseline).
    eval_config = tfma.EvalConfig(
        model_specs=[tfma.ModelSpec(label_key='target')],
        # tfma.SlicingSpec(feature_keys=['var_0', 'var_1']) when add more, Evaluator can't ouptput BLESSED status. It should be a bug in TFMA.
        slicing_specs=[tfma.SlicingSpec()],
        metrics_specs=[
            tfma.MetricsSpec(
                thresholds={
                    'binary_accuracy':
                        tfma.config.MetricThreshold(
                            value_threshold=tfma.GenericValueThreshold(
                                lower_bound={'value': 0.4}),
                            change_threshold=tfma.GenericChangeThreshold(
                                direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                                absolute={'value': -1e-10}))
                })
        ])
    evaluator = Evaluator(
        examples=example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        # baseline_model=model_resolver.outputs['model'],
        # Change threshold will be ignored if there is no baseline (first run).
        eval_config=eval_config,
        instance_name="eval5")
    # Checks whether the model passed the validation steps and pushes the model
    # to a file destination if check passed.
    pusher = Pusher(
        model=trainer.outputs['model'],
        model_blessing=evaluator.outputs['blessing'],
        push_destination={'filesystem': {
            'base_directory': pusher_target_param}})
    # Run batch inference over the test split with the freshly trained model.
    bulk_inferrer = BulkInferrer(
        examples=test_example_gen.outputs['examples'],
        model=trainer.outputs['model'],
        # model_blessing=evaluator.outputs['blessing'],
        data_spec=bulk_inferrer_pb2.DataSpec(),
        model_spec=bulk_inferrer_pb2.ModelSpec(),
        instance_name="bulkInferrer"
    )
    # Custom component that turns the inference result into a CSV.
    hello = component.HelloComponent(
        input_data=bulk_inferrer.outputs['inference_result'], instance_name='csvGen')
    return pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=pipeline_root_param,
        components=[
            example_gen, statistics_gen, schema_gen, transform, trainer,
            model_resolver, evaluator, pusher, hello, test_example_gen, bulk_inferrer
        ],
        enable_cache=True,
        # NOTE(review): the metadata path uses pipeline_root, not
        # pipeline_root_param — presumably fine because Kubeflow supplies its
        # own metadata config via the runner; confirm.
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            os.path.join(pipeline_root, 'metadata.sqlite')),
        beam_pipeline_args=['--direct_num_workers=0'])
def main(_):
    """Build the pipeline and dispatch it to the runner chosen via --runner."""
    tfx_pipeline = generate_pipeline(
        flags.FLAGS.pipeline_name,
        flags.FLAGS.pipeline_root,
        flags.FLAGS.train_data,
        flags.FLAGS.test_data,
        flags.FLAGS.train_steps,
        flags.FLAGS.eval_steps,
        flags.FLAGS.pusher_target,
        flags.FLAGS.runner)
    runner = flags.FLAGS.runner
    if runner == 'local':
        BeamDagRunner().run(tfx_pipeline)
    # elif runner == 'flink':
    #     TFX would need small changes to support other Beam runners:
    #     BeamDagRunner(pipelineOptions).run(tfx_pipeline)
    elif runner == 'kubeflow':
        metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
        # Allow overriding the TFX container image via the environment.
        tfx_image = os.environ.get('KUBEFLOW_TFX_IMAGE', None)
        runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
            kubeflow_metadata_config=metadata_config,
            tfx_image=tfx_image)
        kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
            tfx_pipeline)
    else:
        # Unknown runner name: fail fast.
        exit(1)
if __name__ == '__main__':
    # Flags are defined inside the __main__ guard so importing this module for
    # its pipeline definition does not register CLI flags.
    flags.DEFINE_string(
        name="pipeline_name", default="santander",
        help="pipeline name used to identify different pipelines")
    flags.DEFINE_string(
        name="pipeline_root", default="/var/tmp/santander/keras-tft/",
        help="pipeline root for storing artifacts, it's not used in KFP runner")
    flags.DEFINE_string(
        name="train_data", default="/var/tmp/santander/data/train",
        help="Folder for Kaggle train.csv. No test.csv in the folder, it's not used in KFP runner")
    flags.DEFINE_string(
        name="test_data", default="/var/tmp/santander/data/test",
        help="Folder for Kaggle test.csv. No train.csv in the folder, it's not used in KFP runner")
    flags.DEFINE_integer(
        name="train_steps", default=10000,
        help="Steps to train a model")
    flags.DEFINE_integer(
        name="eval_steps", default=1000,
        # Fixed copy-pasted help text (previously said "train").
        help="Steps to evaluate a model")
    flags.DEFINE_string(
        name="pusher_target", default="/var/tmp/santander/pusher",
        help="Pusher can't create this folder for you, it's not used in KFP runner")
    flags.DEFINE_enum(
        name="runner", default="kubeflow",
        enum_values=['local', 'kubeflow'],
        # Fixed copy-pasted help text (previously described pusher_target).
        help="Runner that executes the pipeline: 'local' (Beam) or 'kubeflow'")
    app.run(main)
|
#!/usr/bin/env python
#
# maths.py
#
# Copyright © 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/rufuspollock/markdown2latex
# BSD Licensed
# Authored by Rufus Pollock: <http://www.rufuspollock.org/>
# Reworked by Julian Wulfheide (ju.wulfheide@gmail.com) and
# Pedro Gaudencio (pmgaudencio@gmail.com)
#
# stdlib
import re
# 3rd party
import markdown.postprocessors
# this package
from py2latex.markdown_parser.utils import unescape_latex_entities
__all__ = ["MathTextPostProcessor"]
class MathTextPostProcessor(markdown.postprocessors.Postprocessor):

    def run(self, instr):
        """
        Convert all math sections in {text} whether LaTeX, asciimathml or latexmathml formatted to LaTeX.

        This assumes you are using $ for inline math and ``$$`` for blocks as your mathematics delimiter
        (*not* the standard asciimathml or latexmathml delimiter).

        :param instr: the rendered document text to post-process.
        :return: the text with math delimiters rewritten for LaTeX.
        :rtype: str
        """

        def repl_1(matchobj) -> str:
            """Rewrap a ``$$...$$`` block-math match as ``\\[...\\]``."""
            text = unescape_latex_entities(matchobj.group(1))
            return f"\\[{text}\\]"

        def repl_2(matchobj) -> str:
            """Rewrap a ``$...$`` inline-math match as ``\\(...\\)``."""
            text = unescape_latex_entities(matchobj.group(1))
            return f"\\({text}\\)"

        # This $$x=3$$ is block math (handle before inline so `$$` is not
        # swallowed by the single-`$` pattern).
        pat = re.compile(r"\$\$([^$]*)\$\$")
        out = pat.sub(repl_1, instr)
        # This $x=3$ is inline math
        pat2 = re.compile(r"\$([^$]*)\$")
        out = pat2.sub(repl_2, out)
        # some extras due to asciimathml
        out = out.replace("\\lt", '<')
        out = out.replace(" * ", " \\cdot ")
        # Bug fix: use a word boundary so a standalone ``\del`` becomes
        # ``\partial`` without corrupting longer commands — the previous
        # str.replace turned ``\delta`` into ``\partialta``.
        out = re.sub(r"\\del\b", r"\\partial", out)
        return out
|
'''
Exercise 2 :
Write a function concat_dico(dico1, dico2) that takes two dictionaries as parameters
and returns a single dictionary containing the pairs from both dictionaries. An important
requirement is that both dictionaries are NOT modified by the function.
For example:
>>> concat_dico ({“one”:1, “two”:2, “three”:3},
{“four”:4, “five”:5})
{“one”:1, “two”:2, “three”:3, “four”:4, “five”:5}
The Advanced bit:
An issue may arise when both dictionaries share at least one common key. Rewrite the function
so that the method stores the values in a list if dico1 and dico2 share a common key. In the
example below both dictionaries share the keys “two” and “five”.
>>> concatDico ({“one”:1, “two”:2, “five”:5},
{“two”: ”10”, “five”:”101”})
{“one”:1, “two”:[2, ”10”], “five”:[5,”101”]}
'''
def concatDico(dico1, dico2):
    """Return a new dict holding the pairs of both inputs.

    Neither input is modified; when a key occurs in both, the value
    from ``dico2`` wins (it is copied in last).
    """
    return {**dico1, **dico2}
print('For Starters ',concatDico({'“one”':1, '“two”':2, '“three”':3}, {'“four”':4, '“five”':5}))
def concatDico2(dico1, dico2):
    """Merge two dicts; a key present in both maps to ``[v1, v2]``.

    Bug fix: the original deleted shared keys from ``dico2`` while
    merging (``del dico2[k]``), mutating the caller's dictionary and
    violating the exercise's "both dictionaries are NOT modified"
    requirement.  This version leaves both inputs untouched.
    """
    merged = {}
    for k, v in dico1.items():
        if k in dico2:
            # Shared key: keep both values, dico1's first.
            merged[k] = [v, dico2[k]]
        else:
            merged[k] = v
    for k, v in dico2.items():
        if k not in dico1:
            merged[k] = v
    return merged
print('For Advance Bit ',concatDico2({'“one”':1, '“two”':2, '“five”':5},{'“two”': '”10”', '“five”':'”101”'}))
|
# import unittest
# from Tiptabs.TiptabsDB import *
# class testTiptabsDB(unittest.TestCase):
# #TODO: Add setup for DB for further testing of storage / input.
# def testCheckInputs(self):
# input = ['users', 'Gary', 'Gary', 'JPYtoUSD']
# expected = [True, "Valid input entered."]
# result = TiptabsDB.check_inputs(self, input)
# return self.assertEqual(expected, result)
# def testCheckInputs_InvalidInputSize(self):
# invalid_size_input = ['users', 'Alex', 'Alex']
# expected = [False, "Invalid number of entry items entered."]
# result = TiptabsDB.check_inputs(self, invalid_size_input)
# return self.assertEqual(expected, result)
# def testCheckInputs_NoneInput(self):
# none_input = None
# expected = [False, "ERROR: None entries are not allowed to be stored into the Tiptabs database."]
# result = TiptabsDB.check_inputs(self, none_input)
# return self.assertEqual(expected, result)
# def testCheckInputs_NoneArrayEntries(self):
# null_array_entries = [None, None, None, None]
# expected = [False, 'Invalid item was attempted to be added.']
# result = TiptabsDB.check_inputs(self, null_array_entries)
# return self.assertEqual(expected, result)
# def testCheckInputs_NoneEntry(self):
# none_entry = ['users', 'Barry', 'Barry', None]
# expected = [False, "Invalid item was attempted to be added."]
# result = TiptabsDB.check_inputs(self, none_entry)
# return self.assertEqual(expected, result)
# def testCheckInputs_EmptyStrings(self):
# empty_string_input = ["", "", "", ""]
# expected = [False, "Invalid item was attempted to be added."]
# result = TiptabsDB.check_inputs(self, empty_string_input)
# return self.assertEqual(expected, result)
# if __name__ == '__main__':
# unittest.main()
|
#!/usr/bin/env python3
"""Example evaluation script to evaluate a policy.
This is an example evaluation script for evaluating a "RandomPolicy". Use this
as a base for your own script to evaluate your policy. All you need to do is
to replace the `RandomPolicy` and potentially the Gym environment with your own
ones (see the TODOs in the code below).
This script will be executed in an automated procedure. For this to work, make
sure you do not change the overall structure of the script!
This script expects the following arguments in the given order:
- Difficulty level (needed for reward computation)
- initial pose of the cube (as JSON string)
- goal pose of the cube (as JSON string)
- file to which the action log is written
It is then expected to initialize the environment with the given initial pose
and execute exactly one episode with the policy that is to be evaluated.
When finished, the action log, which is created by the TriFingerPlatform class,
is written to the specified file. This log file is crucial as it is used to
evaluate the actual performance of the policy.
"""
import sys
import gym
from rrc_simulation.gym_wrapper.envs import cube_env
from rrc_simulation.tasks import move_cube
class RandomPolicy:
    """Baseline policy that samples a uniformly random action every step."""

    def __init__(self, action_space):
        # Keep a handle on the space so predict() can sample from it.
        self.action_space = action_space

    def predict(self, observation):
        """Return a random action from the action space; *observation* is ignored."""
        return self.action_space.sample()
def main():
    """Run one evaluation episode with RandomPolicy and save the action log.

    Expects argv: difficulty level, initial cube pose (JSON), goal cube
    pose (JSON), and the path for the action log.
    """
    if len(sys.argv) < 5:
        print("Incorrect number of arguments.")
        print(
            "Usage:\n"
            "\tevaluate_policy.py <difficulty_level> <initial_pose>"
            " <goal_pose> <output_file>"
        )
        sys.exit(1)
    difficulty = int(sys.argv[1])
    initial_pose_json = sys.argv[2]
    goal_pose_json = sys.argv[3]
    output_file = sys.argv[4]
    # The poses arrive as JSON strings and must be deserialized first.
    start_pose = move_cube.Pose.from_json(initial_pose_json)
    target_pose = move_cube.Pose.from_json(goal_pose_json)
    # Pin the episode's initial and goal poses via a FixedInitializer.
    initializer = cube_env.FixedInitializer(difficulty, start_pose, target_pose)
    # TODO: Replace with your environment if you used a custom one.
    env = gym.make(
        "rrc_simulation.gym_wrapper:real_robot_challenge_phase_1-v1",
        initializer=initializer,
        action_type=cube_env.ActionType.POSITION,
        visualization=False,
    )
    # TODO: Replace this with your model
    # Note: You may also use a different policy for each difficulty level (difficulty)
    policy = RandomPolicy(env.action_space)
    # Run until the env reports the episode done; with the default Gym env
    # this matches the task's episode length.  Adjust for custom envs that
    # behave differently.
    observation = env.reset()
    accumulated_reward = 0
    is_done = False
    while not is_done:
        observation, reward, is_done, info = env.step(policy.predict(observation))
        accumulated_reward += reward
    print("Accumulated reward: {}".format(accumulated_reward))
    # Persist the TriFingerPlatform action log used for official evaluation.
    env.platform.store_action_log(output_file)
if __name__ == "__main__":
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.