import json
from django.shortcuts import render
from django.views.generic import TemplateView, View, FormView
from django.http import JsonResponse
from django.db.models import Q
from django.http import Http404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie
from django.utils.translation import gettext_lazy as _
from geodata.models import NPA
from business.models import Business, Request
from business.forms import AddForm
@method_decorator(ensure_csrf_cookie, name='dispatch')
class HomeView(TemplateView):
template_name = "home.html"
class HomeLocationView(View):
def get(self, request, *args, **kwargs):
        query = self.request.GET.get('q', '')  # default to '' so a missing parameter cannot crash int() below
npas = None
# Convert
try:
# Search NPA number
npas = NPA.objects.filter(npa__startswith=int(query)).order_by('npa')
except ValueError:
# Search city name
npas = NPA.objects.rewrite(False).filter(name__icontains=query).order_by('name')
# Response
if npas is not None and npas.count() > 0:
out = []
for npa in npas:
out.append("%d %s" % (npa.npa, npa.name))
return JsonResponse(out, safe=False)
# Default empty response
return JsonResponse([], safe=False)
class ContentView(TemplateView):
template_name = "content.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
npa = NPA.objects.rewrite(False).get(npa__exact=kwargs['npa'], name__exact=kwargs['name'])
        except NPA.DoesNotExist:
            raise Http404(_("NPA does not exist"))
context['npa'] = npa
context['businesses'] = Business.objects.filter(
Q(location=npa)
|
Q(delivers_to__in=[npa])
).distinct().order_by('name')
return context
class AboutView(TemplateView):
template_name = "about.html"
class AddView(FormView):
template_name = "add.html"
form_class = AddForm
success_url = '/add/success/'
def form_valid(self, form):
form.save_request()
return super().form_valid(form)
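# --- Hedged usage sketch (not from the original project) ----------------------
# One way the views above might be wired in a urls.py; the route patterns and
# names below are assumptions for illustration only (ContentView expects 'npa'
# and 'name' keyword arguments, HomeLocationView answers the 'q' lookup).
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.HomeView.as_view(), name='home'),
#     path('location/', views.HomeLocationView.as_view(), name='home_location'),
#     path('<int:npa>/<str:name>/', views.ContentView.as_view(), name='content'),
#     path('about/', views.AboutView.as_view(), name='about'),
#     path('add/', views.AddView.as_view(), name='add'),
# ]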
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
//******************************************************************************************
// SGDI, Práctica 1, Apartado 4: INDEX
// Dan Cristian Rotaru y Gorka Suárez García
//******************************************************************************************
Utilizando los archivos de texto plano “Adventures_of_Huckleberry_Finn.txt”, “Hamlet.txt”
y “Moby_Dick.txt”, generar un índice inverso de palabras y archivos en los que aparecen.
No se debe hacer distinción entre mayúsculas y minúsculas, y las palabras no deben
contener signos de puntuación. El índice no debe contener todas las palabras que aparecen
en los libros, únicamente aquellas que sean populares, es decir, que aparecen más de 20
veces en alguno de los libros. A cada palabra popular del índice le debe acompañar una
lista de parejas (libro, número de apariciones) ordenado por número de apariciones, y este
listado contendrá todos los libros donde aparece la palabra, incluidos aquellos donde
aparezca 20 veces o menos.
El resultado será un listado como el siguiente:
...
"wind" "(Moby Dick.txt, 66), (Adventures of Huckleberry Finn.txt, 13)"
"windlass" "(Moby Dick.txt, 21)"
...
Pista: será necesario obtener el nombre del fichero del que se ha obtenido la tupla, y
este valor no aparece en las claves ni los valores que se leen de los ficheros. Habrá que
acceder a ellos utilizando el entorno (Mrjob) o el argumento context (Hadoop).
"""
import os
from operator import itemgetter
from mrjob.job import MRJob
# Get the words from a line:
def splitter(victim):
    for chunk in victim.split():
        word = ""
        for item in chunk:
            if item.isalpha():
                word += item.lower()
            elif len(word) > 0:
                yield word
                word = ""
        # Emit the trailing word when the chunk ends in alphabetic characters.
        if len(word) > 0:
            yield word
class MRIndex(MRJob):
# MAP -> key:_, line:string
def mapper(self, key, line):
fileName = os.environ['map_input_file']
for word in splitter(line):
yield word, fileName
    # REDUCE -> key:string, values:[string]
def reducer(self, key, values):
files = [item for item in values]
if len(files) > 0:
            # Count the occurrences per file:
counter = {}
for item in files:
if item in counter:
counter[item] += 1
else:
counter[item] = 1
            # Reformat and check whether the word is popular:
            words = []; makeYield = False
            for k, v in counter.items():
words.append((v, "(" + k + ", " + str(v) + ")"))
if v > 20:
makeYield = True
            # Merge the list of elements and emit it:
if makeYield and len(words) > 0:
words = sorted(words, key=itemgetter(0), reverse=True)
msg = itemgetter(1)(words[0])
for item in words[1:]:
msg += (", " + itemgetter(1)(item))
yield key, msg
if __name__ == '__main__':
MRIndex.run()
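# Hedged usage note (not from the original assignment): with mrjob installed the
# job can be run locally or on Hadoop, for example:
#   python MRIndex.py Adventures_of_Huckleberry_Finn.txt Hamlet.txt Moby_Dick.txt
#   python MRIndex.py -r hadoop hdfs:///books/*.txt
# The script name 'MRIndex.py' is an assumption. The mapper reads the input file
# name from 'map_input_file', which older Hadoop exposes in the task environment;
# newer versions expose it as 'mapreduce_map_input_file' instead.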
|
from aiogram import Bot, Dispatcher
from settings import API_TOKEN
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot=bot)
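# Hedged usage note (assuming aiogram 2.x, which this Dispatcher(bot=bot) style
# matches): polling is typically started from the entry-point module, e.g.
#   from aiogram import executor
#   executor.start_polling(dp, skip_updates=True)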
|
import numpy as np
import scipy.stats as stats
from hic3defdr.util.scaled_nb import logpmf, fit_mu_hat
def lrt(raw, f, disp, design, refit_mu=True):
"""
Performs a likelihood ratio test on raw data ``raw`` given scaling factors
``f`` and dispersion ``disp``.
Parameters
----------
raw, f, disp : np.ndarray
Matrices of raw values, combined scaling factors, and dispersions,
respectively. Rows correspond to pixels, columns correspond to
replicates.
design : np.ndarray
Describes the grouping of replicates into conditions. Rows correspond to
replicates, columns correspond to conditions, and values should be True
where a replicate belongs to a condition and False otherwise.
Returns
-------
pvalues : np.ndarray
The LRT p-values per pixel.
llr : np.ndarray
The log likelihood ratio per pixel.
mu_hat_null, mu_hat_alt : np.ndarray
The fitted mean parameters under the null and alt models, respectively,
per pixel.
"""
if refit_mu:
mu_hat_null = fit_mu_hat(raw, f, disp)
mu_hat_alt = np.array(
[fit_mu_hat(raw[:, design[:, c]],
f[:, design[:, c]],
disp[:, design[:, c]])
for c in range(design.shape[1])]).T
else:
mu_hat_null = np.mean(raw / f, axis=1)
mu_hat_alt = np.array(
[np.mean(raw[:, design[:, c]] / f[:, design[:, c]], axis=1)
for c in range(design.shape[1])]).T
mu_hat_alt_wide = np.dot(mu_hat_alt, design.T)
null_ll = np.sum(logpmf(raw, mu_hat_null[:, None] * f, disp), axis=1)
alt_ll = np.sum(logpmf(raw, mu_hat_alt_wide * f, disp), axis=1)
llr = null_ll - alt_ll
pvalues = stats.chi2(design.shape[1] - 1).sf(-2 * llr)
return pvalues, llr, mu_hat_null, mu_hat_alt
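# Hedged usage sketch (not part of hic3defdr): a toy call to lrt() with 5 pixels
# and 4 replicates split into two conditions. All values are made up;
# refit_mu=False keeps the demo independent of the dispersion-specific mean fit.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    raw_demo = rng.poisson(10, size=(5, 4)).astype(float)   # pixels x replicates
    f_demo = np.ones((5, 4))                                 # unit scaling factors
    disp_demo = np.full((5, 4), 0.1)                         # constant dispersion
    design_demo = np.array([[True, False],                   # replicates 0-1 -> condition A
                            [True, False],
                            [False, True],                   # replicates 2-3 -> condition B
                            [False, True]])
    pvalues, llr, mu_null, mu_alt = lrt(raw_demo, f_demo, disp_demo, design_demo,
                                        refit_mu=False)
    print(pvalues)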
|
import instatools.base.payload_formatter as payload_formatter_base
import instatools.auth.password_credentials as password_credentials
class InstagramWebPayloadFormatter(payload_formatter_base.PayloadFormatter):
@staticmethod
def generate(instance):
payload = {}
if isinstance(instance, password_credentials.PasswordCredentials):
payload.update({
'username': instance.username,
'password': instance.password
})
else:
raise TypeError('{} is not supported by InstagramWebPayloadFormatter'.format(instance.__class__.__name__))
return payload
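# Hedged usage sketch (not part of the package): how the formatter might be fed a
# credentials object; the PasswordCredentials constructor arguments below are an
# assumption about that class's interface.
#   creds = password_credentials.PasswordCredentials(username='alice', password='s3cret')
#   payload = InstagramWebPayloadFormatter.generate(creds)
#   # -> {'username': 'alice', 'password': 's3cret'}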
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from pyqir.generator import BasicQisBuilder, SimpleModule
mod = SimpleModule("if", num_qubits=2, num_results=2)
qis = BasicQisBuilder(mod.builder)
# Manually reset a qubit by measuring it and applying the X gate if the result
# is one.
qis.h(mod.qubits[0])
qis.m(mod.qubits[0], mod.results[0])
qis.if_result(mod.results[0], lambda: qis.x(mod.qubits[0]))
# Branches can be nested, for example, to execute an instruction only if both
# results are one.
for i in range(2):
qis.h(mod.qubits[i])
qis.m(mod.qubits[i], mod.results[i])
def x_both():
qis.x(mod.qubits[0])
qis.x(mod.qubits[1])
qis.if_result(mod.results[0], lambda: qis.if_result(mod.results[1], x_both))
# You can also add instructions that will execute only when the result is zero.
qis.if_result(mod.results[0], zero=lambda: qis.x(mod.qubits[0]))
# In general, you can provide both the one and zero branches at the same time.
qis.if_result(
mod.results[0],
one=lambda: qis.z(mod.qubits[0]),
zero=lambda: qis.y(mod.qubits[0]),
)
if __name__ == "__main__":
print(mod.ir())
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
from django.template.loader_tags import ExtendsNode
register = template.Library()
class IncludeExtendNode(template.Node):
"""
The node that implements include_extend.
Note that we can't inherit from ExtendsNode as only one ExtendsNode is
allowed in a template, although the implementation is exactly the same.
"""
def __init__(self, nodelist, parent_name, parent_name_expr, template_dirs=None):
self.impl = ExtendsNode(nodelist, parent_name, parent_name_expr,
template_dirs)
def render(self, context):
return self.impl.render(context)
def do_include_extend(parser, token):
"""
A tag to include a template with the ability to replace blocks like with
'extends'. Use like:
{% include_extend 'foo' %}
{% block bar %}...{% endblock %}
{% endinclude_extend %}
This is supposed to be used when you want to override bits of a template but
don't plan to reuse the version on any other page, so that the overhead of
doing that is kept to a minimum, encouraging reuse.
Parsing copied from django's do_extend.
"""
bits = token.contents.split()
if len(bits) != 2:
        raise template.TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name, parent_name_expr = None, None
if bits[1][0] in ('"', "'") and bits[1][-1] == bits[1][0]:
parent_name = bits[1][1:-1]
else:
parent_name_expr = parser.compile_filter(bits[1])
nodelist = parser.parse(('end' + bits[0],))
parser.delete_first_token()
return IncludeExtendNode(nodelist, parent_name, parent_name_expr)
register.tag('include_extend', do_include_extend)
class IfPredNode(template.Node):
def __init__(self, var1, var2, predicate, nodelist_true, nodelist_false):
self.var1, self.var2 = template.Variable(var1), template.Variable(var2)
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.predicate = predicate
def render(self, context):
try:
val1 = self.var1.resolve(context)
        except template.VariableDoesNotExist:
val1 = None
try:
val2 = self.var2.resolve(context)
        except template.VariableDoesNotExist:
val2 = None
predicate = self.predicate
if (predicate(val1, val2)):
return self.nodelist_true.render(context)
return self.nodelist_false.render(context)
def if_pred(parser, token, predicate):
bits = list(token.split_contents())
if len(bits) != 3:
        raise template.TemplateSyntaxError("%r takes two arguments" % bits[0])
end_tag = 'end' + bits[0]
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse((end_tag,))
parser.delete_first_token()
else:
nodelist_false = template.NodeList()
return IfPredNode(bits[1], bits[2], predicate, nodelist_true, nodelist_false)
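# Hedged illustration (not in the original file): if_pred is a factory for
# comparison tags; a hypothetical 'ifless' tag could be registered like this,
# mirroring how include_extend is registered above.
def do_ifless(parser, token):
    return if_pred(parser, token, lambda a, b: a is not None and b is not None and a < b)
register.tag('ifless', do_ifless)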
|
from operator import itemgetter
from random import randint
# from runlocal import evaluate as run_evaluate
import random
from aletheia.settings import BASE_DIR
import json
import os
import shutil
import math
class Gene:
def __init__(self, **data):
self.__dict__.update(data)
self.size = len(data['data'])
class GA:
def __init__(self, **parameter) -> None:
        '''
        bound: {(start, end): (low, high)} maps an index range to the (low, high)
        bounds used when sampling that gene.
        Expected keyword arguments: popsize, bound, evaluate, result_path, NGEN,
        CXPB, MUTPB. Example:
        ga = GA(popsize=10, bound={(1, 11): (1, 2)}, evaluate=evaluate_func,
                result_path=result_path, NGEN=NGEN, CXPB=CXPB, MUTPB=MUTPB)
        '''
self.popsize = parameter['popsize']
self.bound = parameter['bound']
tmp = {}
for key, value in self.bound.items():
if isinstance(key, tuple):
for i in range(key[0], key[1]):
tmp[i] = value
elif isinstance(key, int):
tmp[key] = value
self.bound = tmp
self.evaluate = parameter['evaluate']
self.result_path = parameter['result_path']
self.NGEN = parameter['NGEN']
self.CXPB = parameter['CXPB']
self.MUTPB = parameter['MUTPB']
self.init_the_group()
def init_the_group(self):
pop = []
for i in range(self.popsize):
geneinfo = [
randint(self.bound[i][0], self.bound[i][1]) for i in range(len(self.bound))
]
fitness, measure = self.evaluate(geneinfo)
pop.append({'Gene': Gene(data=geneinfo),
'fitness': fitness, 'measure': measure})
self.pop = pop
self.bestindividual = self.selectBest(self.pop)
if os.path.exists(self.result_path):
if os.path.isfile(self.result_path):
os.remove(self.result_path)
            elif os.path.isdir(self.result_path):
shutil.rmtree(self.result_path)
def selectBest(self, pop):
s_inds = sorted(pop, key=itemgetter('fitness'), reverse=True)
return s_inds[0]
def selection(self, individuals, k):
s_inds = sorted(individuals, key=itemgetter('fitness'), reverse=True)
sum_fits = sum(abs(ind['fitness']) for ind in individuals)
chosen = []
for i in range(k):
u = random.random() * sum_fits
sum_ = 0
for ind in s_inds:
sum_ += abs(ind['fitness'])
if sum_ >= u:
chosen.append(ind)
break
chosen = sorted(chosen, key=itemgetter('fitness'), reverse=True)
return chosen
def crossoperate(self, offspring):
dim = len(offspring[0]['Gene'].data)
# Gene's data of first offspring chosen from the selected pop
geninfo1 = offspring[0]['Gene'].data
# Gene's data of second offspring chosen from the selected pop
geninfo2 = offspring[1]['Gene'].data
if dim == 1:
pos1 = 1
pos2 = 1
else:
# select a position in the range from 0 to dim-1,
pos1 = random.randrange(1, dim)
pos2 = random.randrange(1, dim)
newoff1 = Gene(data=[]) # offspring1 produced by cross operation
newoff2 = Gene(data=[]) # offspring2 produced by cross operation
temp1 = []
temp2 = []
for i in range(dim):
if min(pos1, pos2) <= i < max(pos1, pos2):
temp2.append(geninfo2[i])
temp1.append(geninfo1[i])
else:
temp2.append(geninfo1[i])
temp1.append(geninfo2[i])
newoff1.data = temp1
newoff2.data = temp2
return newoff1, newoff2
def mutation(self, crossoff, bound):
dim = len(crossoff.data)
if dim == 1:
pos = 0
else:
pos = random.randrange(0, dim)
crossoff.data[pos] = random.randint(bound[pos][0], bound[pos][1])
return crossoff
def save_gen(self, gen):
with open(self.result_path, 'a', encoding='utf-8') as f:
datas = {
'gen': gen,
# 'pop': [data.]
'pop': [
{'Gene': x['Gene'].data, 'fitness': x['fitness'], 'measure':x['measure']} for x in self.pop
],
'best': {'Gene': self.bestindividual['Gene'].data, 'fitness': self.bestindividual['fitness'], 'measure': self.bestindividual['measure']}
}
datas = json.dumps(datas, ensure_ascii=False)
f.write(datas + "\n")
def GA_main(self):
popsize = self.popsize
print('Start of evolution')
NGEN = self.NGEN
CXPB = self.CXPB
MUTPB = self.MUTPB
for g in range(NGEN):
print('############ Generation {} ############'.format(g))
self.save_gen(g)
selectpop = self.selection(self.pop, popsize)
nextoff = []
while len(nextoff) != popsize:
if len(selectpop) < 2:
print('debug')
offspring = [selectpop.pop() for _ in range(2)]
if random.random() < CXPB:
crossoff1, crossoff2 = self.crossoperate(offspring)
if random.random() < MUTPB: # mutate an individual with probability MUTPB
muteoff1 = self.mutation(crossoff1, self.bound)
muteoff2 = self.mutation(crossoff2, self.bound)
# Evaluate the individuals
fit_muteoff1, measure = self.evaluate(
muteoff1.data)
# Evaluate the individuals
fit_muteoff2, measure = self.evaluate(
muteoff2.data)
nextoff.append(
{'Gene': muteoff1, 'fitness': fit_muteoff1, 'measure': measure})
nextoff.append(
{'Gene': muteoff2, 'fitness': fit_muteoff2, 'measure': measure})
else:
fit_crossoff1, measure = self.evaluate(
crossoff1.data) # Evaluate the individuals
fit_crossoff2, measure = self.evaluate(
crossoff2.data)
nextoff.append(
{'Gene': crossoff1, 'fitness': fit_crossoff1, 'measure': measure})
nextoff.append(
{'Gene': crossoff2, 'fitness': fit_crossoff2, 'measure': measure})
else:
nextoff.extend(offspring)
self.pop = nextoff
fits = [ind['fitness'] for ind in self.pop]
best_ind = self.selectBest(self.pop)
if best_ind['fitness'] > self.bestindividual['fitness']:
self.bestindividual = best_ind
print("Best individual found is {}, {}".format(self.bestindividual['Gene'].data,
self.bestindividual['fitness']))
print(" Max fitness of current pop: {}".format(max(fits)))
if __name__ == '__main__':
CXPB, MUTPB, NGEN, popsize = 0.8, 0.4, 1000, 100 # popsize must be even number
parameter = [CXPB, MUTPB, NGEN, popsize]
run = GA(agent_number=116, popsize=1000)
# run.GA_main()
run.GA_draw(skip=False, sort=True)
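    # Hedged sketch (not from the original script): GA.__init__ above expects the
    # keyword arguments popsize, bound, evaluate, result_path, NGEN, CXPB and MUTPB,
    # and evaluate(geneinfo) must return a (fitness, measure) pair. A minimal setup
    # with a placeholder evaluate function could look like:
    #   def dummy_evaluate(geneinfo):
    #       return -sum(geneinfo), {'sum': sum(geneinfo)}
    #   ga = GA(popsize=popsize, bound={(0, 10): (1, 5)}, evaluate=dummy_evaluate,
    #           result_path='ga_result.jsonl', NGEN=NGEN, CXPB=CXPB, MUTPB=MUTPB)
    #   ga.GA_main()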
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from nimbusml import Pipeline
from nimbusml.preprocessing.normalization import GlobalContrastRowScaler
from nimbusml.preprocessing.schema import ColumnConcatenator
from sklearn.utils.testing import assert_greater, assert_less
class TestGlobalContrastRowScaler(unittest.TestCase):
def test_globalcontrastrowscaler(self):
in_df = pd.DataFrame(
data=dict(
Sepal_Length=[
2.5, 1, 2.1, 1.0], Sepal_Width=[
.75, .9, .8, .76], Petal_Length=[
0, 2.5, 2.6, 2.4], Species=[
"setosa", "viginica", "setosa", 'versicolor']))
in_df.iloc[:, 0:3] = in_df.iloc[:, 0:3].astype(np.float32)
# generate two new Columns - Petal_Normed and Sepal_Normed
concat = ColumnConcatenator() << {
'concated_columns': [
'Petal_Length',
'Sepal_Width',
'Sepal_Length']}
# Performs a global contrast normalization on input values:
# Y = (s * X - M) / D, where s is a scale, M is mean and D is either
# L2 norm or standard deviation
normed = GlobalContrastRowScaler() << {
'normed_columns': 'concated_columns'}
pipeline = Pipeline([concat, normed])
out_df = pipeline.fit_transform(in_df)
cols = [
'concated_columns.' +
s for s in [
'Sepal_Length',
'Sepal_Width',
'Petal_Length']]
cols.extend(['normed_columns.' +
s for s in
['Sepal_Length', 'Sepal_Width', 'Petal_Length']])
sum = out_df[cols].sum().sum()
assert_greater(sum, 17.309, "sum should be greater than %s" % 17.309)
        assert_less(sum, 17.3102, "sum should be less than %s" % 17.3102)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 3.0.5 on 2020-05-05 21:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='safariaccount',
options={'verbose_name': 'Safari Account', 'verbose_name_plural': 'Safari Accounts'},
),
migrations.AddField(
model_name='safariaccount',
name='expires_at',
field=models.DateTimeField(blank=True, null=True),
),
]
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np, tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import sys  # needed for sys.exit() in preprocess_data()
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
from sklearn.decomposition import PCA
import copy
import pyflux as pf
import datetime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyOccmatrices/"
betti0_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv"
betti1_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv"
DAILY_FILTERED_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/filteredDailyOccMatrices/"
ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
# Baseline
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import matplotlib.pyplot as plt
def exclude_days(train, test):
row, column = train.shape
train_days = np.asarray(train[:, -1]).reshape(-1, 1)
x_train = train[:, 0:column - 1]
test_days = np.asarray(test[:, -1]).reshape(-1, 1)
x_test = test[:, 0:column - 1]
return x_train, x_test, train_days, test_days
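# Note on the layout assumed above (added for clarity): each row of `train`/`test`
# ends with [..., current price, current day]; exclude_days() strips the day column
# here, and initialize_setting() later peels the price off as the regression target.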
def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
if(aggregation_of_previous_days_allowed):
if(occurrence_data.size==0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
else:
if(occurrence_data.size == 0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=0)
#print("merge_data shape: {} occurrence_data: {} ".format(occurrence_data.shape, occurrence_data))
return occurrence_data
def get_normalized_matrix_from_file(day, year, totaltx):
daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
def fl_get_normalized_matrix_from_file(day, year, totaltx, n_components):
daily_occurence_matrix = np.asarray([],dtype=np.float32)
for filter_number in range(0, 50, 10):
daily_occurrence_matrix_path_name = DAILY_FILTERED_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + "_" + str(filter_number) +'.csv'
daily_occurence_matrix_read = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
if(daily_occurence_matrix.size == 0):
daily_occurence_matrix = daily_occurence_matrix_read
else:
daily_occurence_matrix = np.concatenate((daily_occurence_matrix, daily_occurence_matrix_read), axis = 1)
pca = PCA(n_components = 20)
pca.fit(daily_occurence_matrix)
daily_occurence_matrix = pca.transform(daily_occurence_matrix)
#print("daily_occurence_matrix: ", daily_occurence_matrix, daily_occurence_matrix.shape)
#return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)
def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
#print("occurrence_data: ", occurrence_data)
if(is_price_of_previous_days_allowed):
#print("previous_price_data: ", np.asarray(previous_price_data).reshape(1, -1), np.asarray(previous_price_data).reshape(1, -1).shape)
occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
#print("current_row: ", current_row, current_row.shape)
#print(" price occurrence_input: ", np.asarray(current_row['price']).reshape(1,1), (np.asarray(current_row['price']).reshape(1,1)).shape)
#print("concatenate with price occurrence_input: ", occurrence_input, occurrence_input.shape)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print(" price occurrence_input: ", np.asarray(current_row['day']).reshape(1,1), (np.asarray(current_row['day']).reshape(1,1)).shape)
#print("concatenate with day occurrence_input: ", occurrence_input, occurrence_input.shape)
return occurrence_input
def betti_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.array([], dtype=np.float32)
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.row_stack((occurrence_data,previous_price_data))
#print(occurrence_data, occurrence_data.shape)
#print(previous_price_data, previous_price_data.shape)
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
#betti0_50 = read_betti(betti0_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_data, np.asarray(betti0_50).reshape(1,-1)), axis=1)
#betti1_50 = read_betti(betti1_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_input, np.asarray(betti1_50).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def betti_der_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
betti0_50_diff1 = betti0_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data.reshape(1,-1), np.asarray(betti0_50_diff1).reshape(1,-1)), axis=1)
betti1_50_diff1 = betti1_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data, np.asarray(betti1_50_diff1).reshape(1,-1)), axis=1)
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.concatenate((occurrence_data, previous_price_data.reshape(1,-1)), axis=1)
#print(occurrence_data, occurrence_data.shape)
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def fl_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
daily_occurrence_normalized_matrix = fl_get_normalized_matrix_from_file(row['day'], row['year'], row['totaltx'], 20)
occurrence_data = merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed)
#print("occurrence_data: ",occurrence_data, occurrence_data.shape)
if(is_price_of_previous_days_allowed):
occurrence_data = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(previous_price_data).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print("occurrence_input: ",occurrence_input, occurrence_input.shape)
return occurrence_input
def read_betti(file_path, day):
day = day - 1
betti = pd.read_csv(file_path, index_col=0)
try:
betti_50 = betti.iloc[day, 0:50]
    except IndexError:
        # Surface the offending index instead of falling through to an unbound name.
        print("read_betti: day index out of range:", day)
        raise
return betti_50
def rf_base_rmse_mode(train_input, train_target, test_input, test_target):
rf_regression = RandomForestRegressor(max_depth=2, random_state=0)
rf_regression.fit(train_input, train_target.ravel() )
predicted = rf_regression.predict(test_input)
rf_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
return rf_base_rmse
def gp_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'kernel': RationalQuadratic(alpha=0.01, length_scale=1),
'n_restarts_optimizer': 2
}
adj_params = {'kernel': [RationalQuadratic(alpha=0.01,length_scale=1)],
'n_restarts_optimizer': [2]}
gpr = GaussianProcessRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(gpr, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input,train_target)
#print("cv_results_:",cscv.cv_results_)
print("best_params_: ",cscv.best_params_)
gpr = GaussianProcessRegressor(**cscv.best_params_)
gpr.fit(train_input, train_target)
mu, cov = gpr.predict(test_input, return_cov=True)
test_y = mu.ravel()
#uncertainty = 1.96 * np.sqrt(np.diag(cov))
gp_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, test_y))
print(gp_base_rmse)
return gp_base_rmse
def enet_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'alpha': 10,
'l1_ratio': 1,
}
elastic = linear_model.ElasticNet(**param)
adj_params = {'alpha': [10],
'l1_ratio': [ 1]}
#'max_iter': [100000]}
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(elastic, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ",cscv.best_params_)
elastic= linear_model.ElasticNet(**cscv.best_params_)
elastic.fit(train_input,train_target.ravel())
predicted = elastic.predict(test_input)
enet_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("enet_base_rmse: ", enet_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return enet_base_rmse
def xgbt_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'n_estimators':1000,
'learning_rate': 0.01,
}
adj_params = {
'n_estimators':[1000],
'learning_rate': [0.01]
}
xgbt = XGBRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(xgbt, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ", cscv.best_params_)
xgbt= XGBRegressor(**cscv.best_params_)
xgbt.fit(train_input,train_target.ravel())
predicted = xgbt.predict(test_input)
xgbt_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("xgbt_base_rmse: ", xgbt_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return xgbt_base_rmse
def arimax_initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100:100+prediction_horizon, :]
x_train, x_test, train_days, test_days = exclude_days(train, test)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
def arimax_base_rmse_mode(train_input, train_target, test_input, test_target):
train_input_diff_arr = np.array([])
train_columns_name = []
train_input_column = int(train_input.shape[1])
for i in range(train_input_column):
if(i%2==0):
train_columns_name.append('price_' + str(i))
else:
train_columns_name.append('totaltx_' + str(i))
train_input_diff = np.diff(train_input[:,i] )
if i == 0:
train_input_diff_arr = train_input_diff
else:
train_input_diff_arr = np.dstack((train_input_diff_arr, train_input_diff))
columns_name = copy.deepcopy(train_columns_name)
columns_name.append('current_price')
train_target_diff = np.diff(train_target )
train_input_diff_arr = np.dstack((train_input_diff_arr, train_target_diff))
train_input_diff_arr = pd.DataFrame(train_input_diff_arr[0], columns = columns_name)
model = pf.ARIMAX(data=train_input_diff_arr,formula="current_price~totaltx_5",ar=1,ma=2,integ=0)
model_1 = model.fit("MLE")
model_1.summary()
test_input_pd = pd.DataFrame(test_input, columns = train_columns_name)
test_target_pd = pd.DataFrame(test_target, columns = ['current_price'])
test_input_target = pd.concat([test_input_pd, test_target_pd], axis=1)
pred = model.predict(h=test_input_target.shape[0],
oos_data=test_input_target,
intervals=True, )
arimax_base_rmse = mean_squared_error([test_input_target.iloc[0, 6]],[(train_target[99])+pred.current_price[99]])
print("arimax_base_rmse:",arimax_base_rmse)
return arimax_base_rmse
def run_print_model(train_input, train_target, test_input, test_target, train_days, test_days):
rf_base_rmse = rf_base_rmse_mode(train_input, train_target, test_input, test_target)
xgbt_base_rmse = xgbt_base_rmse_mode(train_input, train_target, test_input, test_target)
gp_base_rmse = gp_base_rmse_mode(train_input, train_target, test_input, test_target)
enet_base_rmse = enet_base_rmse_mode(train_input, train_target, test_input, test_target)
return rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse
#print_results(predicted, test_target, original_log_return, predicted_log_return, cost, test_days, rmse)
#return rf_base_rmse
def preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
priced_bitcoin = pd.read_csv(PRICED_BITCOIN_FILE_PATH, sep=",")
if(ALL_YEAR_INPUT_ALLOWED):
pass
else:
priced_bitcoin = priced_bitcoin[priced_bitcoin['year']==YEAR].reset_index(drop=True)
    # get the normalized occurrence matrix in a flat format and merge it with totaltx
daily_occurrence_input = np.array([],dtype=np.float32)
temp = np.array([], dtype=np.float32)
for current_index, current_row in priced_bitcoin.iterrows():
if(current_index<(window_size+prediction_horizon-1)):
pass
else:
start_index = current_index - (window_size + prediction_horizon) + 1
end_index = current_index - prediction_horizon
if(dataset_model=="base"):
temp = get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti"):
temp = betti_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="fl"):
temp = fl_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti_der"):
temp = betti_der_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
else:
sys.exit("Dataset model support only baseline, betti, fl and betti_der!")
if(daily_occurrence_input.size == 0):
daily_occurrence_input = temp
else:
daily_occurrence_input = np.concatenate((daily_occurrence_input, temp), axis=0)
return daily_occurrence_input
def initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100, :].reshape(1, -1)
x_train, x_test, train_days, test_days = exclude_days(train, test)
#print("x_train:", x_train)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
#x_test = x_test.reshape(-1,1)
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
parameter_dict = {#0: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':True})}
1: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':False})}
for step in parameter_dict:
names = locals()
gc.collect()
evalParameter = parameter_dict.get(step)
is_price_of_previous_days_allowed = evalParameter.get('is_price_of_previous_days_allowed')
aggregation_of_previous_days_allowed = evalParameter.get('aggregation_of_previous_days_allowed')
print("IS_PRICE_OF_PREVIOUS_DAYS_ALLOWED: ", is_price_of_previous_days_allowed)
print("AGGREGATION_OF_PREVIOUS_DAYS_ALLOWED: ", aggregation_of_previous_days_allowed)
window_size_array = [3, 5, 7]
horizon_size_array = [1, 2, 5, 7, 10, 15, 20, 25, 30]
dataset_model_array = ["base", "betti", "fl","betti_der"]
for dataset_model in dataset_model_array:
print('dataset_model: ', dataset_model)
for window_size in window_size_array:
print('WINDOW_SIZE: ', window_size)
for prediction_horizon in horizon_size_array:
print("PREDICTION_HORIZON: ", prediction_horizon)
train_input, train_target, test_input, test_target, train_days, test_days = initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse = run_print_model(train_input, train_target, test_input, test_target, train_days, test_days)
rmse = pd.DataFrame({'rf_' + dataset_model + '_rmse_'+str(window_size): [rf_base_rmse], 'xgbt_' + dataset_model + '_rmse_'+str(window_size): [xgbt_base_rmse], 'gp_' + dataset_model + '_rmse_'+str(window_size): [gp_base_rmse], 'enet_' + dataset_model + '_rmse_'+str(window_size): [enet_base_rmse]})
if(prediction_horizon==1):
rmse_total = rmse
else:
rmse_total = [rmse_total, rmse]
rmse_total = pd.concat(rmse_total)
if(window_size==3):
names['rmse_' + dataset_model + '_total'] = rmse_total
else:
names['rmse_' + dataset_model + '_total'] = pd.concat([names.get('rmse_' + dataset_model + '_total') , rmse_total], axis=1)
names['rmse_' + dataset_model + '_total'].index = pd.Series(horizon_size_array)
print('rmse_{}_total = {}'.format(dataset_model, names.get('rmse_' + dataset_model + '_total')))
t = datetime.datetime.now()
dir_name = t.strftime('%m_%d___%H_%M')
if not os.path.exists(dir_name):
os.makedirs(dir_name)
betti_gain = 100 * (1 -rmse_betti_total.div(rmse_base_total.values))
fl_gain = 100 * (1 -rmse_fl_total.div(rmse_base_total.values))
betti_der_gain = 100 * (1 -rmse_betti_der_total.div(rmse_base_total.values))
for i in range(12):
path = dir_name + "/"
perf = pd.concat([betti_gain.iloc[:,i],betti_der_gain.iloc[:,i], fl_gain.iloc[:, i]], axis=1).plot.bar()
modelnames = ["rf","xgbt","gp","enet"]
windows = [3, 5, 7]
filename = path + modelnames[int(i%4)] +"_window_" + str(windows[int(i/4)])
perf.figure.savefig(filename)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 14:26:28 2020
@author: apramanik
"""
from torch.utils.data import DataLoader
import numpy as np
import os, torch
import matplotlib.pyplot as plt
from tst_dataset import cardiacdata
from Auto3D_D4 import Auto3D
def dice_comp(pred, gt):
return (2. * (np.sum(pred.astype(float) * gt.astype(float))) + 1.) / (np.sum(pred.astype(float)) \
+ np.sum(gt.astype(float)) + 1.)
#%%
nImg=1
dispind=0
vol_slice=5
chunk_size=nImg
#%% Choose training model directory
############################## 3DAE #########################
subDirectory='19Apr_1111pm_70I_10000E_1B'
print(subDirectory)
#%%
cwd=os.getcwd()
PATH= cwd+'/savedModels/'+subDirectory #complete path
#%%
tst_dataset = cardiacdata()
tst_loader = DataLoader(tst_dataset, batch_size=1, shuffle=False, num_workers=0)
# network
net = Auto3D(num_classes=4, in_channels=1, depth=4, start_filts=32, res=False).cuda()
net.load_state_dict(torch.load(os.path.join(PATH, "model_best.pth.tar"))['state_dict'])
normOrg=np.zeros((1,16,144,144),dtype=np.float32)
normGT=np.zeros((1,16,144,144),dtype=np.int16)
normSeg=np.zeros((1,16,144,144),dtype=np.int16)
dice = np.zeros((nImg, 3))
net.eval()
for step, (img, seg_gt) in enumerate(tst_loader, 0):
img, seg_gt = img.cuda(), seg_gt.cuda()
pred = net(img)
_, pred = torch.max(pred, 1)
pred = pred.squeeze().detach().cpu().numpy().astype(np.int8)
img = img.squeeze().detach().cpu().numpy()
gt = seg_gt.squeeze().detach().cpu().numpy().astype(np.int8)
for i in range(3):
dice[step, i] = dice_comp(pred==i+1, gt==i+1)
normOrg[step]=img
normGT[step]=gt
normSeg[step]=pred
print("DICE Right Ventricle: {0:.5f}".format(np.mean(dice[:,0])))
print("DICE Myocardium: {0:.5f}".format(np.mean(dice[:,1])))
print("DICE Left Ventricle: {0:.5f}".format(np.mean(dice[:,2])))
#%%%
normOrg=np.reshape(normOrg,[int(normOrg.shape[1]/8),8,144,144])
normGT=np.reshape(normGT,[int(normGT.shape[1]/8),8,144,144])
normSeg=np.reshape(normSeg,[int(normSeg.shape[1]/8),8,144,144])
normError=np.abs(normGT.astype(np.float32)-normSeg.astype(np.float32))
normOrg=normOrg-normOrg.min()
#%% Display the output images
plot= lambda x: plt.imshow(x,cmap=plt.cm.gray,interpolation='bilinear')
plot1= lambda x: plt.imshow(x,interpolation='bilinear')
plt.clf()
plt.subplot(141)
plot(np.abs(normOrg[dispind,vol_slice,:,:]))
plt.axis('off')
plt.title('Original')
plt.subplot(142)
plot1(np.abs(normGT[dispind,vol_slice,:,:]))
plt.axis('off')
plt.title('True labels')
plt.subplot(143)
plot1(np.abs(normSeg[dispind,vol_slice,:,:]))
plt.axis('off')
plt.title('Segmentation')
plt.subplot(144)
plot(np.abs(normError[dispind,vol_slice,:,:]))
plt.title('Error')
plt.axis('off')
plt.subplots_adjust(left=0, right=1, top=1, bottom=0,wspace=.01)
plt.show()
|
# -*- coding: utf-8 -*-
"""Naive Bayes python implementation.
Homework of IoT Information processing Lab 2. A simple implementation
of Naive Bayes algorithm.
Example:
$ python NaiveBayes.py
$ python NaiveBayes.py -k num_of_iterations
$ python NaiveBayes.py -k 25
Author: Yongjian Hu
License: MIT License
"""
from collections import defaultdict
import pandas as pd
import random
import math
import array
import argparse
def read_file(file_path):
"""Read data file from disk.
Args:
file_path (str): Path to file on disk.
Returns:
        df (pandas.DataFrame): The data frame containing the data set.
"""
col_names = ["x1", "x2", "x3", "x4", "class"]
df = pd.read_csv(file_path, names=col_names)
return df
def bootstrap(data, length):
"""Partition the data set to training set and testing set.
Args:
data (pandas.DataFrame): Data frame that contains the data set.
length (int): The length of data set.
Return:
training set and testing set.
"""
index = random.randint(0, length - 1)
training_set = pd.DataFrame()
testing_set = pd.DataFrame()
index_set = set()
# Select training set
for _ in range(length):
index_set.add(index)
row = data.iloc[index]
training_set = training_set.append(row)
index = random.randint(0, length - 1)
# Let the remaining to be testing set
for i in range(length):
if i not in index_set:
testing_set = testing_set.append((data.iloc[i]))
return training_set, testing_set
def feature_scaler(x_train, x_test):
"""Feature scaler. Standardize the features.
Args:
x_train (pandas.DataFrame): features of training set.
x_test (pandas.DataFrame): features of testing set.
Returns:
Training set and testing set after scaling.
"""
mean = x_train.mean()
std = x_train.std(ddof=0)
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std
return x_train, x_test
def calc_accuracy(y_test, prediction):
"""Accuracy of the prediction.
Args:
y_test (pandas.DataFrame): Actual classes of test set.
prediction (list): Predicted classes of test set.
Returns:
Accuracy of the prediction.
"""
count = 0
length = len(y_test)
for i in range(length):
if prediction[i] == y_test.iloc[i]:
count += 1
return count / length
class NaiveBayesClassifier:
"""Naive Bayes Classifier.
Attributes:
x_train (pandas.DataFrame): Training set.
y_train (pandas.DataFrame): Classes of training set.
feature_num (int): Feature number.
class_num (int): Class number.
prob_y (dict): prior probability of each class.
feature_mean (dict): Mean of each feature of each class.
feature_std (dict): Standard deviation of each feature of each class.
        length (int): Length of the training set.
"""
def __init__(self, x_train, y_train, feature_num, class_num):
"""Initialize naive Bayes classifier.
Args:
x_train (pandas.DataFrame): Training set.
y_train (pandas.DataFrame): Classes of training set.
feature_num (int): No. of features.
class_num (int): No. of classes.
"""
self.x_train = x_train
self.y_train = y_train
self.feature_num = feature_num
self.class_num = class_num
self.prob_y = defaultdict(float)
self.feature_mean = defaultdict(array.array)
self.feature_std = defaultdict(array.array)
self.length = x_train.shape[0]
def train(self):
"""Train the Gaussian Naive Bayes model.
Returns:
Prior probability of each class,
Mean value and standard deviation of each feature of different classes.
"""
# Get Probability(c), prior prob of each class c.
class_count = self.y_train.groupby(self.y_train).size()
for class_, count in class_count.items():
self.prob_y[class_] = round(count / self.length, 6)
self.prob_y = dict(self.prob_y)
# Get mean and std for each feature of each class.
feature_sum = defaultdict(array.array)
feature_mean = defaultdict(array.array)
feature_std = defaultdict(array.array)
# Initialize array in dict.
for class_ in self.prob_y.keys():
feature_sum[class_] = array.array('f', [.0 for _ in range(self.feature_num)])
feature_mean[class_] = array.array('f', [.0 for _ in range(self.feature_num)])
feature_std[class_] = array.array('f', [.0 for _ in range(self.feature_num)])
# Sum.
for i in range(self.length):
for j in range(self.feature_num):
feature_sum[self.y_train.iloc[i]][j] += self.x_train.iloc[i, j]
# Mean.
for class_, count in class_count.items():
for j in range(self.feature_num):
feature_mean[class_][j] = feature_sum[class_][j] / count
# Standard deviation.
for i in range(self.length):
class_ = self.y_train.iloc[i]
for j in range(self.feature_num):
feature_std[class_][j] += (self.x_train.iloc[i, j] - feature_mean[class_][j]) ** 2
for class_, count in class_count.items():
for j in range(self.feature_num):
feature_std[class_][j] = (feature_std[class_][j] / count) ** 0.5
self.feature_mean = dict(feature_mean)
self.feature_std = dict(feature_std)
return self.prob_y, self.feature_mean, self.feature_std
    def gaussian_pdf(self, x, mean, std):
        """Gaussian probability density function.
        N(x; mu, sigma) = 1 / (sqrt(2 * pi) * sigma) * exp(-(x - mu)^2 / (2 * sigma^2))
        Args:
            x (float): feature value.
            mean (float): mean value.
            std (float): standard deviation.
        Returns:
            Probability density of x under N(mean, std).
        """
        prob = math.e ** (-(x - mean) ** 2 / (2 * std ** 2)) / ((2 * math.pi) ** 0.5 * std)
        return prob
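    # Illustrative sanity check (not in the original file): for the standard normal,
    # the density at the mean is 1/sqrt(2*pi) ~= 0.39894, so
    # classifier.gaussian_pdf(0.0, 0.0, 1.0) should return roughly 0.39894.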
def joint_prob(self, test_data):
"""Calculate joint probability of likelihood and prior probability.
Args:
test_data (list): Test data set, contains features of the test data.
Returns:
Joint probability of each class.
"""
joint_probs = defaultdict(float)
for class_ in self.prob_y.keys():
likelihood = 1.0
# Calculate likelihood first.
for i in range(self.feature_num):
feature = test_data[i]
mean = self.feature_mean[class_][i]
std = self.feature_std[class_][i]
gaussian_prob = self.gaussian_pdf(feature, mean, std)
                likelihood *= gaussian_prob  # conditional independence: multiply the per-feature densities
# Calculate prior_prob * likelihood.
prior_prob = self.prob_y[class_]
joint_probs[class_] = prior_prob * likelihood
return dict(joint_probs)
def get_max(self, test_data):
"""Get maximum probability from all joint probabilities,
and hence predict the class.
Args:
test_data (list): Test data set, contains features of test data.
Returns:
Predicted class that has the max probability.
"""
joint_probs = self.joint_prob(test_data)
max_prob = max(joint_probs, key=joint_probs.get)
return max_prob
def predict(self, test_set):
"""Predict on the give test set.
Args:
test_set (pandas.DataFrame): Test data set.
Returns:
List of predictions.
"""
prediction = list()
for row in test_set.values:
max_prob = self.get_max(row)
prediction.append(max_prob)
return prediction
def bootstrap_accuracy(data_set, k=20):
"""Calculate model accuracy using .632 bootstrap.
Args:
data_set (pandas.DataFrame): Data set.
k (int): The number of iterations. Default is 20
Returns:
Accuracy of the model.
"""
acc_sum = 0
for i in range(k):
# Partition
training_set, testing_set = bootstrap(data_set, data_set.shape[0])
# Separates features and classes
x_train = training_set.iloc[:, 1:5]
y_train = training_set.iloc[:, 0]
x_test = testing_set.iloc[:, 1:5]
y_test = testing_set.iloc[:, 0]
# Feature scaling
x_train, x_test = feature_scaler(x_train, x_test)
# Train
classifier = NaiveBayesClassifier(x_train, y_train, 4, 3)
classifier.train()
# Predict
prediction_test = classifier.predict(x_test)
prediction_train = classifier.predict(x_train)
# Accuracy
acc_test = calc_accuracy(y_test, prediction_test)
acc_train = calc_accuracy(y_train, prediction_train)
print("Iteration " + str(i) + ": ", end="")
print("Acc_test = " + str(acc_test) + ", Acc_train = " + str(acc_train))
acc_sum += 0.632 * acc_test + 0.368 * acc_train
return acc_sum / k
if __name__ == '__main__':
# parse argument
parser = argparse.ArgumentParser()
parser.add_argument("-k", help="Number of iteration, default 20", \
type=int, default=20)
args = parser.parse_args()
# Check k value
if (args.k <= 0):
raise Exception("Invalid k. k should be > 0", args.k)
# Read file
df = read_file('Iris.csv')
# Using .632 bootstrap
accuracy = bootstrap_accuracy(df, args.k)
# Print model accuracy
print("Accuracy " + str(accuracy))
|
def doctree_resolved(app, doctree, docname):
for node in doctree.traverse():
if (node.astext() == 'inline' and node.parent.tagname == 'desc_signature_line'):
node.parent.remove(node)
def setup(app):
app.connect('doctree-resolved', doctree_resolved)
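# Hedged usage note (assumption, not from the source): as a small local Sphinx
# extension this module would be listed in conf.py, e.g.
#   extensions = ['strip_inline_from_signatures']   # hypothetical module name
# The handler removes nodes whose text is exactly 'inline' from multi-line
# signature lines once the doctree is resolved (e.g. the C/C++ inline specifier).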
|
"""
@author: Alex Kerr
"""
import numpy as np
def main():
a = 1.40
posList, zList = get_sites(a)
nList = [[1],[0,2,3],[1],[1,4],[3]]
return posList,nList,zList
def get_sites(a):
c0 = np.array([0.,0.,0.])
c1 = c0 + a*np.array([1.,0.,0.])
o2 = c1 + a*np.array([1.,1.,0.])/np.sqrt(2.)
o3 = o2 - a*np.array([0.,1.,0.])
h4 = o3 + 1.1*np.array([1.,0.,0.])
z = np.array([6,6,8,8,1], dtype=int)
return np.array([c0,c1,o2,o3,h4]), z
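# Illustrative usage (not in the original file): main() returns the 5 atomic
# positions, their neighbor list, and atomic numbers [6, 6, 8, 8, 1] (C, C, O, O, H).
#   positions, neighbors, z = main()
#   print(positions.shape)   # (5, 3)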
|
import functools
import jwt
import os
import uuid
from validator_collection.checkers import is_uuid
from flask import request, g
from .controllers import format_response
from .models import User, Task
def token_required(func):
""" Check if the client has the required token for access the api """
@functools.wraps(func)
def wrapper_token_required(*args, **kwargs):
# Retrieves token
auth_header = request.headers.get("Authorization")
if(not auth_header):
return format_response("missing auth header", 401)
        # Check if the auth header is correctly formatted as "<Scheme> <token>"
try:
scheme, token = auth_header.split(" ")
except:
return format_response("bad auth header", 400)
# Check for bearer scheme
if(scheme not in ['Bearer', 'bearer']):
return format_response("unsupported auth scheme", 400)
# Try to Decode token
try:
payload = jwt.decode(token, os.environ.get("SECRET_KEY"), algorithms=["HS256"])
except jwt.ExpiredSignatureError:
return format_response("token expired", 403)
except jwt.DecodeError:
return format_response("invalid token", 401)
g.token_payload = payload
value = func(*args, **kwargs)
return value
return wrapper_token_required
def admin_required(func):
""" Check if the client has the required admin token for access the endpoint """
@functools.wraps(func)
def wrapper_admin_required(*args, **kwargs):
try:
token_payload = g.token_payload
except:
raise Exception('admin_required requires token_required decorator as prerequisite')
if(not token_payload.get("adm")):
return format_response("non authorized", 403)
value = func(*args, **kwargs)
return value
return wrapper_admin_required
def authorization_required(func):
""" Check if the user has authorization for perform the requested action """
@functools.wraps(func)
def wrapper_authorization_required(*args, **kwargs):
try:
token_payload = g.token_payload
except:
raise Exception('authorization_required requires token_required decorator as prerequisite')
# Retrieves id and checks integrity
id = request.view_args.get("u_id")
        # If the URL contains "current" instead of an id, use the id of the user
        # associated with the auth token supplied to the API call.
        if(id == "current"):
            id = token_payload.get("uid")
        # Check that the supplied id is a valid UUID
        if(not is_uuid(id)):
            return format_response("invalid user id", 422)
        # If the token is not owned by an admin and the URL id does not match the
        # token owner's id, refuse: a user may only operate on their own data.
        if((id != token_payload.get("uid")) and (not token_payload.get("adm"))):
            return format_response("non authorized", 403)
value = func(user_id=id, *args, **kwargs)
return value
return wrapper_authorization_required
def task_required(func):
""" Check if the user has authorization for perform the requested action """
@functools.wraps(func)
def wrapper_task_required(*args, **kwargs):
# Retrieves id and checks integrity
id = request.view_args.get("t_id")
        # Check that the supplied id is a valid UUID
if(not is_uuid(id)):
return format_response("invalid task id", 422)
value = func(task_id=id, *args, **kwargs)
return value
return wrapper_task_required
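# Hedged usage sketch (not part of this module): one way the decorators might be
# stacked on a Flask view; the route and view name are hypothetical. token_required
# must be outermost so that g.token_payload is populated before the authorization
# check runs, and the view receives user_id/task_id injected by the wrappers.
#   @app.route("/users/<u_id>/tasks/<t_id>", methods=["GET"])
#   @token_required
#   @authorization_required
#   @task_required
#   def get_user_task(user_id, task_id, **view_args):
#       ...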
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum, auto
from codegen.passes import *
from codegen.mir import *
from codegen.spec import *
from codegen.live_intervals import cmp_interval_start_func, cmp_inst_func
from codegen.live_intervals import LiveIntervals
class Spiller:
def __init__(self):
pass
def spill(self, func, stack_slot, vreg, new_ranges=None):
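        # Rewrite every instruction that reads or writes `vreg` to use a fresh
        # virtual register tied to `stack_slot`: a reload is emitted before uses,
        # a store after defs, and a short live range for the new register is
        # appended to `new_ranges` so the allocator can process it on the next pass.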
from codegen.live_intervals import LiveRange, SlotSegment
insts = set(
[operand.inst for operand in func.reg_info.get_use_def_iter(vreg)])
target_inst_info = func.target_info.get_inst_info()
for inst in insts:
has_use = False
has_def = False
for operand in inst.operands:
if not operand.is_reg:
continue
if operand.reg != vreg:
continue
has_use |= (operand.is_use | (operand.subreg is not None))
has_def |= operand.is_def
assert(has_use | has_def)
new_vreg = func.reg_info.create_virtual_register(vreg.regclass)
if has_use:
target_inst_info.copy_reg_from_stack(
new_vreg, stack_slot, vreg.regclass, inst).comment = "Reload"
if has_def:
target_inst_info.copy_reg_to_stack(
new_vreg, stack_slot, vreg.regclass, inst.next_inst).comment = "Spill"
interval = LiveRange()
interval.reg = new_vreg
segment = SlotSegment()
interval.segments.append(segment)
start_inst = inst.prev_inst if has_use else inst
end_inst = inst.next_inst if has_def else inst
segment.start = start_inst
segment.end = end_inst
if new_ranges is not None:
new_ranges.append(interval)
for operand in inst.operands:
if not operand.is_reg:
continue
if operand.reg != vreg:
continue
operand.reg = new_vreg
from functools import cmp_to_key
class LinearScanRegisterAllocation(MachineFunctionPass):
"""
A global register allocation.
"""
def __init__(self):
super().__init__()
def expire_old_intervals(self, live_range):
for rnge in list(self.active_regs):
if cmp_inst_func(rnge.end, live_range.start) > 0:
continue
self.active_regs.remove(rnge)
self.inactive_regs.append(rnge)
phys_reg = self.get_phys_reg(rnge)
if not phys_reg:
continue
reg_units = set(iter_reg_units(phys_reg.spec))
for reg_unit in reg_units:
self.used_reg_units.remove(reg_unit)
def is_reg_used(self, phys_reg: MachineRegister):
assert(isinstance(phys_reg, MachineRegister))
reg_units = set(iter_reg_units(phys_reg.spec))
used_units = reg_units & self.used_reg_units
return len(used_units) > 0
def allocate_reg_or_stack(self, live_range):
hwmode = self.mfunc.target_info.hwmode
alloc_regs = self.register_info.get_ordered_regs(
live_range.reg.regclass)
if self.get_phys_reg(live_range):
return
for phys_reg_def in alloc_regs:
phys_reg = MachineRegister(phys_reg_def)
if self.is_reg_used(phys_reg):
continue
if set(iter_reg_units(phys_reg.spec)) & self.fixed:
continue
self.set_phys_reg(live_range, phys_reg)
reg_units = set(iter_reg_units(phys_reg.spec))
self.used_reg_units |= reg_units
self.active_regs.append(live_range)
return
spill_reg = None
phys_reg_to_alloc = None
for active_reg in self.active_regs:
if isinstance(active_reg.reg, MachineRegister):
continue
phys_reg = self.get_phys_reg(active_reg)
if not phys_reg:
continue
for reg in live_range.reg.regclass.regs:
if phys_reg.spec in iter_reg_aliases(reg):
spill_reg = active_reg
phys_reg_to_alloc = MachineRegister(reg)
break
if spill_reg:
break
assert(spill_reg)
self.set_phys_reg(live_range, phys_reg_to_alloc)
for active_reg in self.active_regs:
if isinstance(active_reg.reg, MachineRegister):
continue
phys_reg = self.get_phys_reg(active_reg)
if not phys_reg:
continue
if set(iter_reg_units(phys_reg.spec)) & set(iter_reg_units(phys_reg_to_alloc.spec)):
regclass = active_reg.reg.regclass
align = int(regclass.align / 8)
tys = regclass.get_types(hwmode)
size = tys[0].get_size_in_bits()
size = int(int((size + 7) / 8))
stack_slot = self.get_stack(active_reg)
if stack_slot == -1:
stack_slot = self.mfunc.create_stack_object(size, align)
self.set_stack(active_reg, stack_slot)
self.active_regs.remove(active_reg)
reg_units = set(iter_reg_units(phys_reg.spec))
self.used_reg_units -= reg_units
self.set_phys_reg(active_reg, None)
self.spills.append(active_reg)
self.active_regs.append(live_range)
self.used_reg_units |= set(iter_reg_units(phys_reg_to_alloc.spec))
def allocate(self):
alloc = True
while alloc:
alloc = False
unhandled = list(
sorted(self.mfunc.live_ranges.values(), key=cmp_to_key(cmp_interval_start_func)))
self.used_reg_units = set()
self.active_regs = []
self.inactive_regs = []
self.spills = []
for cur_range in list(unhandled):
if isinstance(cur_range.reg, MachineRegister):
continue
self.expire_old_intervals(cur_range)
phys_reg = self.get_phys_reg(cur_range)
if phys_reg:
reg_units = set(iter_reg_units(phys_reg.spec))
self.used_reg_units |= reg_units
assert(cur_range not in self.active_regs)
self.active_regs.append(cur_range)
continue
self.allocate_reg_or_stack(cur_range)
for spill in self.spills:
new_vregs = []
self.spiller.spill(
self.mfunc, self.get_stack(spill), spill.reg, new_vregs)
for new_vreg in new_vregs:
self.mfunc.live_ranges[new_vreg.reg] = new_vreg
self.mfunc.live_ranges.pop(spill.reg)
self.live_intervals.process_machine_function(self.mfunc)
alloc = True
def set_phys_reg(self, interval, reg):
self.phys_reg_for_vreg[interval.reg] = reg
def get_phys_reg(self, interval):
if interval.reg not in self.phys_reg_for_vreg:
return None
return self.phys_reg_for_vreg[interval.reg]
def set_stack(self, interval, reg):
self.stack_for_vreg[interval.reg] = reg
def get_stack(self, interval):
if interval.reg not in self.stack_for_vreg:
return -1
return self.stack_for_vreg[interval.reg]
def process_machine_function(self, mfunc: MachineFunction):
self.mfunc = mfunc
self.target_lowering = mfunc.target_info.get_lowering()
self.target_inst_info = mfunc.target_info.get_inst_info()
self.register_info = mfunc.target_info.get_register_info()
self.allocatable_regs = self.register_info.allocatable_regs
self.used_reg_units = set()
self.active_regs = []
self.fixed = set()
self.spiller = Spiller()
self.spills = []
self.phys_reg_for_vreg = {}
self.stack_for_vreg = {}
self.live_intervals = LiveIntervals()
for reg, live_range in self.mfunc.live_ranges.items():
self.set_phys_reg(live_range, None)
if isinstance(reg, MachineRegister):
self.set_phys_reg(live_range, reg)
if reg.spec in self.allocatable_regs:
regs = set(iter_reg_units(reg.spec))
self.fixed |= regs
self.allocate()
for mbb in self.mfunc.bbs:
for inst in mbb.insts:
for operand in inst.operands:
if not operand.is_reg or not operand.is_virtual:
continue
phys_reg = self.get_phys_reg(
self.mfunc.live_ranges[operand.reg])
if not phys_reg:
continue
operand.reg = phys_reg
operand.is_renamable = True
self.mfunc.live_ranges = {}
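# --- Illustrative, framework-independent sketch (added for clarity) ---
# The pass above depends on the codegen framework (MachineFunction, live
# ranges, register units).  The toy function below shows only the core
# linear-scan idea on plain (start, end, name) tuples and a fixed pool of
# register names; it is not used by the pass.
def _linear_scan_demo(intervals, registers):
    """Assign a register or a spill slot to each (start, end, name) interval."""
    assignment = {}
    active = []          # (end, name, reg) of intervals currently in a register
    free = list(registers)
    next_slot = 0
    for start, end, name in sorted(intervals):
        # Expire intervals that ended before this one starts.
        for item in list(active):
            if item[0] <= start:
                active.remove(item)
                free.append(item[2])
        if free:
            reg = free.pop()
            assignment[name] = reg
            active.append((end, name, reg))
        else:
            # No free register: spill whichever live interval ends last.
            active.sort()
            spill_end, spill_name, spill_reg = active[-1]
            if spill_end > end:
                assignment[name] = spill_reg
                assignment[spill_name] = 'stack%d' % next_slot
                active[-1] = (end, name, spill_reg)
            else:
                assignment[name] = 'stack%d' % next_slot
            next_slot += 1
    return assignment
# Example: three overlapping intervals, two registers.
# _linear_scan_demo([(0, 10, 'a'), (2, 6, 'b'), (4, 8, 'c')], ['r0', 'r1'])
# -> 'b' and 'c' end up in registers; 'a', which lives longest, is spilled.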
|
from typing import Optional, Union, Sequence
from slack_sdk.web import WebClient
from slack_sdk.models.blocks import Block
class Configure:
def __init__(self, *, callback_id: str, client: WebClient, body: dict):
self.callback_id = callback_id
self.client = client
self.body = body
def __call__(
self, *, blocks: Optional[Sequence[Union[dict, Block]]] = None, **kwargs
) -> None:
self.client.views_open(
trigger_id=self.body["trigger_id"],
view={
"type": "workflow_step",
"callback_id": self.callback_id,
"blocks": blocks,
**kwargs,
},
)
|
# -*- coding: utf-8 -*-
#List 05 - Queue (Fila) - Question 08
#Mayara Rysia
"""
8. In some applications of the queue ADT it is common to repeatedly perform a dequeue and then immediately
perform an enqueue with the same element. Modify the FilaArray implementation to include a rodar() method that
is semantically identical to f.enqueue(f.dequeue()). Make sure your implementation of the rodar method is more
efficient than the separate calls to f.enqueue(f.dequeue()).
"""
class FilaArray:
CAPACIDADE_PADRAO = 5
def __init__(self):
self._dados = [None] * FilaArray.CAPACIDADE_PADRAO
self._tamanho = 0
self._inicio = 0
def __len__(self):
return self._tamanho
def is_empty(self):
return self._tamanho == 0
def first(self):
if self.is_empty():
            raise Exception('The queue is empty')
return self._dados[self._inicio]
def dequeue(self):
if self.is_empty():
            raise Exception('The queue is empty')
result = self._dados[self._inicio]
self._dados[self._inicio] = None
self._inicio = (self._inicio + 1) % len(self._dados)
self._tamanho -= 1
return result
def enqueue(self, e):
if self._tamanho == len(self._dados):
self._aumenta_tamanho(2 * len(self._dados))
disponivel = (self._inicio + self._tamanho) % len(self._dados)
self._dados[disponivel] = e
self._tamanho += 1
def _aumenta_tamanho(self, novo_tamanho):
dados_antigos = self._dados
self._dados = [None] * novo_tamanho
posicao = self._inicio
for k in range(self._tamanho):
self._dados[k] = dados_antigos[posicao]
posicao = (1 + posicao) % len(dados_antigos)
self._inicio = 0
def rodar(self):
self.enqueue(self.dequeue())
def get(self):
if self.is_empty():
            raise Exception('The queue is empty')
aux = []
for e in self._dados:
            if e is not None: aux.append(e)
return aux
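# --- Hedged sketch: the more efficient rodar() the exercise asks for ---
# FilaArray.rodar() above simply chains dequeue()/enqueue(), which clears and
# rewrites slots and re-checks for resizing.  The subclass below (its name is
# made up for illustration) rotates in place by moving only the front slot.
class FilaArrayRapida(FilaArray):
    def rodar(self):
        if self.is_empty():
            raise Exception('The queue is empty')
        old_front = self._inicio
        new_back = (self._inicio + self._tamanho) % len(self._dados)
        self._dados[new_back] = self._dados[old_front]
        if new_back != old_front:  # queue not full: clear the vacated slot
            self._dados[old_front] = None
        self._inicio = (self._inicio + 1) % len(self._dados)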
if __name__ == '__main__':
f = FilaArray()
n=5
for i in range(3):
f.enqueue(n)
n+=1
print(f.get())
f.rodar()
print(f.get())
|
import pytest
import tenseal as ts
def test_context_creation():
context = ts.context(ts.SCHEME_TYPE.BFV, 8192, 1032193)
assert context.is_private() is True, "TenSEALContext should be private"
assert context.public_key() is not None, "TenSEALContext shouldn't be None"
def test_make_context_public():
context = ts.context(ts.SCHEME_TYPE.BFV, 8192, 1032193)
context.make_context_public(generate_galois_keys=False, generate_relin_keys=False)
assert context.is_public() is True, "TenSEALContext should be public"
def test_generate_galois_keys():
context = ts.context(ts.SCHEME_TYPE.BFV, 8192, 1032193)
secret_key = context.secret_key()
context.make_context_public(generate_galois_keys=False, generate_relin_keys=False)
context.generate_galois_keys(secret_key)
assert isinstance(context.galois_keys(), ts.GaloisKeys), "Galois keys should be set"
def test_generate_relin_keys():
context = ts.context(ts.SCHEME_TYPE.BFV, 8192, 1032193)
secret_key = context.secret_key()
context.make_context_public(generate_galois_keys=False, generate_relin_keys=False)
context.generate_relin_keys(secret_key)
assert isinstance(context.relin_keys(), ts.RelinKeys), "Relin keys should be set"
def test_global_scale():
context = ts.context(ts.SCHEME_TYPE.CKKS, 8192, 0, [60, 40, 40, 60])
# global scale shouldn't be set at first
with pytest.raises(ValueError) as e:
scale = context.global_scale
for scale in [0, 1, 2, 2 ** 40]:
context.global_scale = scale
assert context.global_scale == scale
def test_auto_flags():
context = ts.context(ts.SCHEME_TYPE.CKKS, 8192, 0, [60, 40, 40, 60])
# default values
assert context.auto_relin == True
assert context.auto_rescale == True
assert context.auto_mod_switch == True
# switch on/off and check
context.auto_relin = False
assert context.auto_relin == False
assert context.auto_rescale == True
assert context.auto_mod_switch == True
context.auto_relin = True
assert context.auto_relin == True
assert context.auto_rescale == True
assert context.auto_mod_switch == True
context.auto_rescale = False
assert context.auto_relin == True
assert context.auto_rescale == False
assert context.auto_mod_switch == True
context.auto_rescale = True
assert context.auto_relin == True
assert context.auto_rescale == True
assert context.auto_mod_switch == True
context.auto_mod_switch = False
assert context.auto_relin == True
assert context.auto_rescale == True
assert context.auto_mod_switch == False
context.auto_mod_switch = True
assert context.auto_relin == True
assert context.auto_rescale == True
assert context.auto_mod_switch == True
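# --- Hedged addition: a minimal encrypt/decrypt roundtrip check ---
# The tests above only exercise context and key management.  This sketch
# assumes the vector API shown in TenSEAL's examples (ts.bfv_vector and
# .decrypt()); adjust if the installed version exposes it differently.
def test_bfv_roundtrip():
    context = ts.context(ts.SCHEME_TYPE.BFV, 8192, 1032193)
    plain = [1, 2, 3, 4, 5]
    encrypted = ts.bfv_vector(context, plain)
    assert encrypted.decrypt() == plain, "Decryption should recover the original vector"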
|
# Generated by Django 3.1.3 on 2020-12-09 00:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bulletin', '0002_article_article_image'),
]
operations = [
migrations.AddField(
model_name='article',
name='is_featured',
field=models.BooleanField(default=False),
),
]
|
#Import Libraries
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pylab as pl
#Functions
def test_plot(str_values):
print('plot undeformed structure to verify original design')
fig = plt.figure() #setup the plot
fig.subplots_adjust(bottom=0,top=1) #fit 3Dplot in window better
sub = fig.add_subplot(1,1,1,projection="3d")
X = [] ; Y = [] ; Z = []
#retrieve node/element into from data
ele_values = str_values[0]
for ele_prop in ele_values:
x = [] ; y = [] ; z = []
        #retrieve x,y,z components of nodal coordinates
#x: horizontal, y: out of plane, z: vertical
n1x = ele_prop[0][0][0][0]
n1y = ele_prop[0][0][0][1]
n1z = ele_prop[0][0][0][2]
n2x = ele_prop[0][1][0][0]
n2y = ele_prop[0][1][0][1]
n2z = ele_prop[0][1][0][2]
x.append(n1x) ; X.append(n1x)
x.append(n2x) ; X.append(n2x)
y.append(n1y) ; Y.append(n1y)
y.append(n2y) ; Y.append(n2y)
z.append(n1z) ; Z.append(n1z)
z.append(n2z) ; Z.append(n2z)
sub.plot(x,y,z, color = 'blue', marker = 'o', markersize = 3)
# Create cubic bounding box to simulate equal aspect ratio
box_size = max( max(X)-min(X), max(Y)-min(Y), max(Z)-min(Z) )
a = -( box_size/2 - (max(X) + min(X))/2 )
b = box_size/2 + (max(X) + min(X))/2
c = -( box_size/2 - (max(Y) + min(Y))/2 )
d = box_size/2 + (max(Y) + min(Y))/2
e = -( box_size/2 - (max(Z) + min(Z))/2 )
f = box_size/2 + (max(Z) + min(Z))/2
x_b = [a,b]
y_b = [d,c]
z_b = [f,e]
sub.plot(x_b, y_b, z_b, linewidth=0, color='red')
#after all elements have been added to the plot, display the figure
plt.show()
def get_material_properties(material):
if material == 's':
#structural steel, ASTM A36
E = 200 * 10**9 #Pa
s_max = 250 * 10**6 #Pa yield stress
density = 7850 #Kg/cu.m
if material == 'a':
#standard 6061-T6 alloy
E = 102 * 10**9 #Pa
s_max = 276 * 10**6 #Pa yield stress
density = 2700 #Kg/cu.m
if material == 'w':
#wood, douglas-fir
E = 10.8 * 10**9 #Pa
s_max = 21.6 * 10**6 #Pa yield stress
density = 526 #Kg/cu.m
if material == 't':
#based on most widely used 6Al-4V alloy
E = 102 * 10**9 #Pa
s_max = 340 * 10**6 #Pa yield stress
density = 4510 #Kg/cu.m
return E, s_max, density
def calculate_element_values(str_values):
#print('calculate element values')
ele_values = str_values[0]
for ele_props in ele_values:
#calculate element length
n1x = ele_props[0][0][0][0]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2z = ele_props[0][1][0][2]
ele_len = ( (n2x-n1x)**2+(n2z-n1z)**2 )**(1/2)
#get element material properties
material = ele_props[2]
E, s_max, density = get_material_properties(material)
#convert xc_area value from sq.cm to sq.m
ele_xc_area = ele_props[1]
#calculate element mass
ele_mass = ele_len * ele_xc_area * density #Kg
#add values to element properties list
ele_props.append(ele_len)
ele_props.append(E)
ele_props.append(s_max)
ele_props.append(ele_mass)
ele_values = str_values[0]
# for ele_props in ele_values:
# print(ele_props)
def add_self_weight_loads(str_values):
#unless vertically fixed, each node gets 1/2 the weight of every
#element that is attached to it. Note wt loads are negative (-z dir)
#print("add self-weight loads")
grav = 9.807 #m/sec^2
ele_wt_total = 0
added_load = 0
lost_load = 0
ele_values = str_values[0]
for ele_props in ele_values:
ele_mass = ele_props[6]
ele_wt = ele_mass * grav #Newtons
ele_wt_total += ele_wt
#if the 1st element node is free (=1) in the vertical direction,
#increment the vertical load on that node by 1/2 element wt.
supt1_z = ele_props[0][0][1][2]
if supt1_z == 1:
ele_props[0][0][2][2] += -ele_wt/2
added_load += ele_wt/2
else:
lost_load += ele_wt/2
#if the 2nd element node is free (=1) in the vertical direction,
#increment the vertical load on that node by 1/2 element wt.
supt2_z = ele_props[0][1][1][2]
if supt2_z == 1:
ele_props[0][1][2][2] += -ele_wt/2
added_load += ele_wt/2
else:
lost_load += ele_wt/2
# print("\ntotal structure wt: ", ele_wt_total)
# print("added load: ", added_load)
# print("lost load: ", lost_load)
# print("added + lost load: ", added_load + lost_load)
# for ele_props in ele_values:
# print(ele_props)
def calculate_element_stiffness_matrices(str_values):
#print('calculate element stiffness matrices')
ele_values = str_values[0]
for ele_props in ele_values:
A = ele_props[1]
E = ele_props[4]
L = ele_props[3]
n1x = ele_props[0][0][0][0]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2z = ele_props[0][1][0][2]
c = (n2x-n1x) / L
s = (n2z-n1z) / L
T = np.array( [ [c**2, c*s, -c**2, -c*s],
[c*s, s**2, -c*s, -s**2],
[-c**2, -c*s, c**2, c*s],
[-c*s, -s**2, c*s, s**2] ] )
k = (E*A/L)
esm = k * T
ele_props.append(esm)
np.set_printoptions(precision = 2, linewidth=150)
ele_values = str_values[0]
# for ele_props in ele_values:
# print(ele_props[7])
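#--- Illustrative check (added for clarity, not called by the analysis) ---
#A standalone example of the element stiffness matrix math above, using
#made-up values: a 2 m horizontal steel bar with A = 1e-3 sq.m.  It only
#exercises the esm = (E*A/L) * T formula and two sanity checks.
def _demo_element_stiffness_matrix():
    E = 200 * 10**9  #Pa
    A = 1e-3         #sq.m
    L = 2.0          #m
    c, s = 1.0, 0.0  #element aligned with the x axis
    T = np.array( [ [c**2, c*s, -c**2, -c*s],
                    [c*s, s**2, -c*s, -s**2],
                    [-c**2, -c*s, c**2, c*s],
                    [-c*s, -s**2, c*s, s**2] ] )
    esm = (E*A/L) * T
    assert np.allclose(esm, esm.T)        #stiffness matrix is symmetric
    assert abs(esm[0][0] - E*A/L) < 1e-6  #axial term equals EA/L
    return esm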
def generate_global_stiffness_matrix(str_values):
#print('generate global stiffness matrix')
# print('make a list of all nodes')
node_list = []
ele_values = str_values[0]
for ele_props in ele_values:
n1x = ele_props[0][0][0][0]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2z = ele_props[0][1][0][2]
node1 = [n1x, n1z]
node2 = [n2x, n2z]
if node1 not in node_list:
node_list.append(node1)
if node2 not in node_list:
node_list.append(node2)
str_values.append(node_list)
# print(node_list)
# print('make a list of global DOFs')
dof_list = []
for i in range(0, len(node_list)*2, 2):
# print(i)
dof_pair = [i,i+1]
# print(dof_pair)
dof_list.append(dof_pair)
str_values.append(dof_list)
# print(dof_list)
# print('assign DOFs to elements')
global_dof_list = []
for ele_props in ele_values:
n1x = ele_props[0][0][0][0]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2z = ele_props[0][1][0][2]
node1_2D = [n1x, n1z]
node2_2D = [n2x, n2z]
for i in range(len(node_list)):
if node_list[i] == node1_2D:
dof1 = dof_list[i]
if node_list[i] == node2_2D:
dof2 = dof_list[i]
dof_pair = [dof1, dof2]
global_dof_list.append(dof_pair)
str_values.append(global_dof_list)
# print(global_dof_list)
# print('destination DOF')
destination_dof = []
for pair in global_dof_list:
# print(pair)
dof_group = []
for item in pair:
# print(item)
for number in item:
dof_group.append(number)
destination_dof.append(dof_group)
str_values.append(destination_dof)
# print(destination_dof)
#make a zeroes matrix to match the size of the gsm
#each node has 2 degrees of freedom...
size = ( 2*len(node_list), 2*len(node_list) )
gsm = np.zeros(size)
# print(gsm)
#map ESMs to the GSM !!!
for i in range(0, len(ele_values)):
esm = ele_values[i][7]
dd = destination_dof[i]
i = 0
for x in dd:
j = 0
for y in dd:
#i,j is the ESM value
#x,y is the GSM destination
gsm[x][y] += esm[i][j]
j += 1
i += 1
# print('apply boundary conditions')
#make a list of support conditions for each global DOF
supports = []
support_condition = []
for i in range(0, len(ele_values)):
s_n1x = ele_values[i][0][0][1][0]
s_n1z = ele_values[i][0][0][1][2]
s_n2x = ele_values[i][0][1][1][0]
s_n2z = ele_values[i][0][1][1][2]
support_condition.append([s_n1x, s_n1z, s_n2x, s_n2z])
# print('destination_dof list')
# print(destination_dof)
# print('list global support conditions (0=fixed, 1=free)')
# print(support_condition)
used_dof = []
for dof in range(0, len(node_list*2)):
for j in range(0, len(ele_values)):
for k in range(0,len(destination_dof[0])):
if destination_dof[j][k] == dof and dof not in used_dof:
supports.append(support_condition[j][k])
used_dof.append(dof)
# print(supports)
#modify GSM with the boundary conditions...
for i in range(0, len(supports)):
support = supports[i]
if support == 0: # if DOF is fixed...
gsm[i] = 0 # zero out row
gsm[:,i] = 0 # zero out column
gsm[i,i] = 1 # make the diagonal position equal 1
str_values.append(gsm)
# print('global stiffness matrix with boundary conditions applied')
# print(gsm)
# str_values.append(gsm)
# print(gsm)
def create_loads_vector(str_values):
ele_values = str_values[0]
node_list = str_values[1]
destination_code = str_values[4]
#create the loads vector
#print('create the loads vector')
loads = []
load_condition = []
for i in range(0, len(ele_values)):
L_n1x = ele_values[i][0][0][2][0]
L_n1z = ele_values[i][0][0][2][2]
L_n2x = ele_values[i][0][1][2][0]
L_n2z = ele_values[i][0][1][2][2]
load_condition.append([L_n1x, L_n1z, L_n2x, L_n2z])
# print(load_condition)
used_i = []
for i in range(0, len(node_list*2)):
for j in range(0, len(ele_values)):
for k in range(0,len(destination_code[0])):
if destination_code[j][k] == i and i not in used_i:
loads.append(load_condition[j][k])
used_i.append(i)
str_values.append(loads)
# print(loads)
def calculate_displacements(str_values):
#print('calculate displacements')
gsm = str_values[5]
loads = str_values[6]
nodal_disp = np.linalg.solve(gsm,loads)
str_values.append(nodal_disp)
# print(nodal_disp)
def calculate_deformed_element_length(str_values):
#print('calculate new nodal positions') #change to element length?
node_list = str_values[1]
nodal_disp = str_values[7]
displaced_nodes = []
for i in range(0, len(node_list)):
# print(i)
node_x = node_list[i][0]
node_z = node_list[i][1]
disp_x = nodal_disp[2*i]
disp_z = nodal_disp[2*i+1]
# print([node_x, node_z])
# print([disp_x, disp_z])
disp_node = [(node_x + disp_x), (node_z + disp_z)]
displaced_nodes.append(disp_node)
str_values.append(displaced_nodes)
# print(displaced_nodes)
ele_values = str_values[0]
ele_len_deformed_list = []
for ele_props in ele_values:
n1x = ele_props[0][0][0][0]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2z = ele_props[0][1][0][2]
node1_2D = [n1x, n1z]
node2_2D = [n2x, n2z]
for i in range(0, len(node_list)):
if node_list[i] == node1_2D:
n1d = displaced_nodes[i]
if node_list[i] == node2_2D:
n2d = displaced_nodes[i]
n1dx = n1d[0] ; n1dz = n1d[1]
n2dx = n2d[0] ; n2dz = n2d[1]
ele_len_deformed = ( (n2dx-n1dx)**2+(n2dz-n1dz)**2 )**(1/2)
ele_len_deformed_list.append(ele_len_deformed)
str_values.append(ele_len_deformed_list)
# print(ele_len_deformed_list)
# calculate magnitude change of nodal position
node_position_magnitude_change = []
disp_nodes = str_values[8]
for i in range(0, len(node_list)):
orig_pos = node_list[i]
disp_pos = disp_nodes[i]
n1dx = orig_pos[0] ; n1dz = orig_pos[1]
n2dx = disp_pos[0] ; n2dz = disp_pos[1]
move_dist = ( (n2dx-n1dx)**2+(n2dz-n1dz)**2 )**(1/2)
node_position_magnitude_change.append(move_dist)
str_values.append(node_position_magnitude_change)
# print(node_position_magnitude_change)
def calculate_strain_and_stress(str_values):
#print('calculate element strains & stresses')
ele_strain = []
ele_stress = []
ele_len_deformed_list = str_values[9]
ele_values = str_values[0]
for i in range(0, len(ele_values)):
ele_len_original = ele_values[i][3]
modulus = ele_values[i][4]
ele_len_deformed = ele_len_deformed_list[i]
strain = (ele_len_deformed - ele_len_original)/ele_len_original
stress = strain * modulus
ele_strain.append(strain)
ele_stress.append(stress)
str_values.append(ele_stress)
str_values.append(ele_strain)
# print(ele_strain)
# print(ele_stress)
def calculate_other_results(str_values):
#print('calculate other results')
#total structure mass
total_mass = 0
ele_values = str_values[0]
for ele_props in ele_values:
ele_mass = ele_props[6]
total_mass += ele_mass
str_values.append(total_mass)
# print(total_mass)
#element safety factor, tension failure or buckling failure
failure_mode = []
ele_factor_of_safety = []
ele_stress = str_values[11]
for i in range(len(ele_values)):
ele_calcd_stress = ele_stress[i]
ele_mod = ele_values[i][4] #modulus of elasticity
ele_max_allow_stress = ele_values[i][5] #tension
if ele_calcd_stress >= 0: # element in tension
fos_t = ele_max_allow_stress / ele_calcd_stress
ele_factor_of_safety.append(fos_t)
failure_mode.append('t')
if ele_calcd_stress < 0: #element in compression
K = 1 # column effective length factor
# = 1 for pinned-pinned, =0.5 for fixed-fixed
pi = 3.14159
ele_xc_area = ele_values[i][1]
ele_r = (ele_xc_area/pi)**.5
I_x_circle = (pi/4)*ele_r**4
l_e= ele_values[i][3]*K #effective element length
r_gyr = (I_x_circle/ele_xc_area)**0.5 #radius of gyration
#calculate element critical buckling stress
ele_cb_stress = -(pi**2*ele_mod)/((l_e/r_gyr)**2)
            #compressive yield stress (negative, by the sign convention used here)
            ele_cy_stress = -ele_max_allow_stress
            #use the smaller-magnitude (governing) of the two critical stresses
            #to calc factor of safety for compressively loaded elements
            if ele_cb_stress >= ele_cy_stress:
fos = ele_cb_stress / ele_calcd_stress
failure_mode.append('b')
else:
fos = ele_cy_stress / ele_calcd_stress
failure_mode.append('c')
ele_factor_of_safety.append(fos)
str_values.append(ele_factor_of_safety)
str_values.append(failure_mode)
# print(str_values[13])
# print(str_values[14])
# print(str_values[15])
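#--- Illustrative check of the buckling criterion above (not called) ---
#Euler critical buckling stress for a made-up pinned-pinned steel rod:
#cb_stress = -(pi^2 * E) / (l_e / r_gyr)^2, compared against the compressive
#yield stress; the smaller-magnitude value governs the factor of safety.
def _demo_buckling_stress():
    pi = 3.14159
    E = 200 * 10**9        #Pa, steel
    s_yield = 250 * 10**6  #Pa
    area = 0.0005          #sq.m, made-up circular cross section
    length = 2.0           #m, effective length with K = 1
    ele_r = (area/pi)**.5
    I_x_circle = (pi/4)*ele_r**4
    r_gyr = (I_x_circle/area)**0.5
    cb_stress = -(pi**2*E)/((length/r_gyr)**2)  #about -19.6 MPa here
    cy_stress = -s_yield                        #-250 MPa
    #buckling governs because |cb_stress| < |cy_stress|
    return cb_stress if cb_stress >= cy_stress else cy_stress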
def plot_results(str_values, fig, rows, cols, subplot_num, title):
#print('plot results of analysis')
#setup the figure
#fig = plt.figure(figsize=(9,9)) #setup the plot
sub = fig.add_subplot(rows,cols,subplot_num,projection="3d")
sub.view_init(elev=0, azim=270)
sub.dist = 8
sub.set_title(title)
sub.set_xlabel('X', fontweight='bold')
sub.set_ylabel('Y', fontweight='bold')
sub.set_zlabel('Z', fontweight='bold')
#retrieve node/element into from data
ele_values = str_values[0]
node_list = str_values[1]
nodal_disp = str_values[7]
displaced_nodes = str_values[8]
ele_stress = str_values[11]
total_mass = str_values[13]
factor_of_safety = str_values[14]
failure_mode = str_values[15]
# plot original structure
X = [] ; Y = [] ; Z = []
for ele_prop in ele_values:
x = [] ; y = [] ; z = []
# retrieve x,y,z components of nodal coordinates
# x: horizontal, y: out of plane, z: vertical
n1x = ele_prop[0][0][0][0]
n1y = ele_prop[0][0][0][1]
n1z = ele_prop[0][0][0][2]
n2y = ele_prop[0][1][0][1]
n2x = ele_prop[0][1][0][0]
n2z = ele_prop[0][1][0][2]
x.append(n1x) ; X.append(n1x)
x.append(n2x) ; X.append(n2x)
y.append(n1y) ; Y.append(n1y)
y.append(n2y) ; Y.append(n2y)
z.append(n1z) ; Z.append(n1z)
z.append(n2z) ; Z.append(n2z)
sub.plot(x,y,z, color = 'black', marker = 'o',
markersize = 1, linewidth=.5, linestyle='dashed')
# Create cubic bounding box to simulate equal aspect ratio
box_size = max( max(X)-min(X), max(Y)-min(Y), max(Z)-min(Z) )
a = -( box_size/2 - (max(X) + min(X))/2 )
b = box_size/2 + (max(X) + min(X))/2
c = -( box_size/2 - (max(Y) + min(Y))/2 )
d = box_size/2 + (max(Y) + min(Y))/2
e = -( box_size/2 - (max(Z) + min(Z))/2 )
f = box_size/2 + (max(Z) + min(Z))/2
x_b = [a,b]
y_b = [d,c]
z_b = [f,e]
sub.plot(x_b, y_b, z_b, linewidth=0)
#plot displaced structure
#set up the colormap!
n = 40 #number of color levels in the color linspace
c_factor = 1.15 #max/min multiplier so colors aren’t so dark
tension_colors = pl.cm.Reds(np.linspace(0,1,n))
compression_colors = pl.cm.Blues(np.linspace(0,1,n))
# create a list of displaced nodes- not scaled and scaled
max_disp = (max(str_values[10]) * 1000) #convert m to mm
disp_scale = 125/max_disp
displaced_nodes_scaled = []
for i in range(0, len(node_list)):
node_x = node_list[i][0]
node_z = node_list[i][1]
disp_x_s = nodal_disp[2*i] * disp_scale
disp_z_s = nodal_disp[2*i+1] * disp_scale
disp_node_s = [(node_x + disp_x_s), (node_z + disp_z_s)]
displaced_nodes_scaled.append(disp_node_s)
s_index = 0
s_max = max(ele_stress)
s_min = min(ele_stress)
for ele_props in ele_values:
stress = ele_stress[s_index]
fm = failure_mode[s_index]
fos = factor_of_safety[s_index]
s_index += 1
#calculate color_num
if stress > 0:
color_num = int(round(n*((stress)/(s_max*c_factor))))
if stress < 0:
color_num = int(round(n*((stress)/(s_min*c_factor))))
#condition color_num
if color_num >= n: # high limit
color_num = int(n-1)
if color_num < 0: # low limit
color_num = int(1)
#configure element color, tension in red, compression in blue.
if stress > 0:
select_color = tension_colors[color_num]
if stress < 0:
select_color = compression_colors[color_num]
n1x = ele_props[0][0][0][0]
n1y = ele_props[0][0][0][1]
n1z = ele_props[0][0][0][2]
n2x = ele_props[0][1][0][0]
n2y = ele_props[0][1][0][1]
n2z = ele_props[0][1][0][2]
node1_2D = [n1x, n1z]
node2_2D = [n2x, n2z]
for i in range(0, len(node_list)):
x_d = [] ; y_d = [] ; z_d = []
if node_list[i] == node1_2D:
n1_d = displaced_nodes_scaled[i]
if node_list[i] == node2_2D:
n2_d = displaced_nodes_scaled[i]
n1_dx = n1_d[0] ; n2_dx = n2_d[0]
n1_dy = 0 ; n2_dy = 0
n1_dz = n1_d[1] ; n2_dz = n2_d[1]
x_d.append(n1_dx)
x_d.append(n2_dx)
y_d.append(n1_dy)
y_d.append(n2_dy)
z_d.append(n1_dz)
z_d.append(n2_dz)
#calculate location for element label
x_ave = sum(x_d)/len(x_d)
y_ave = sum(y_d)/len(y_d)
z_ave = sum(z_d)/len(z_d)
sub.plot(x_d,y_d,z_d, color = select_color,
linewidth=3, linestyle='solid')
#place a label on each element
s_mpa = stress / 10**6
# sub.text(x_ave, y_ave, z_ave,
# str( "%.1f" % s_mpa +'MPa|FS:'+"%.2f" % fos+'('+fm+')'), fontsize=8.5,
# fontweight='bold', color='black')
#draw a dot at each node location
sub.scatter(x_d,y_d,z_d, s=20, c='black', marker='o')
#calculate & plot magnitude of change of nodal position
for i in range(0, len(node_list)):
orig_pos = node_list[i]
disp_pos = displaced_nodes[i]
n1dx = orig_pos[0] ; n1dz = orig_pos[1]
n2dx = disp_pos[0] ; n2dz = disp_pos[1]
move_dist = ( (n2dx-n1dx)**2+(n2dz-n1dz)**2 )**(1/2)
move_dist_mm = move_dist * 1000 # convert m to mm
# sub.text(n2dx, 0, n2dz, str( "%.2f" % move_dist_mm + " mm"),
# fontsize=8, fontweight='bold', color='darkblue')
#display total structure mass
sub.text(1,0,1.5,'Total Mass:'+str("% .3f" % total_mass)+' kg')
#plt.show() #disable for multiplot
#Main Program
def run_analysis(str_values):
#str_values = simple_truss1()
#test_plot(str_values)
calculate_element_values(str_values)
add_self_weight_loads(str_values)
calculate_element_stiffness_matrices(str_values)
generate_global_stiffness_matrix(str_values)
create_loads_vector(str_values)
calculate_displacements(str_values)
calculate_deformed_element_length(str_values)
calculate_strain_and_stress(str_values)
calculate_other_results(str_values)
# plot_results(str_values)
return(str_values)
|
# -*- coding: utf-8 -*-
from wemake_python_styleguide.types import final
from wemake_python_styleguide.violations.consistency import (
FormattedStringViolation,
)
from wemake_python_styleguide.visitors.base import BaseNodeVisitor
@final
class WrongStringVisitor(BaseNodeVisitor):
"""Restricts to use ``f`` strings."""
def visit_JoinedStr(self, node) -> None: # type is not defined in ast yet
"""
Restricts to use ``f`` strings.
Raises:
FormattedStringViolation
"""
self.add_violation(FormattedStringViolation(node))
self.generic_visit(node)
|
from aioify import aioify
from discord.ext import commands, tasks
import aiohttp
import aiosqlite
import asyncio
import discord
import json
import os
import shutil
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.os = aioify(os, name='os')
self.shutil = aioify(shutil, name='shutil')
self.utils = self.bot.get_cog('Utils')
self.auto_clean_db.start()
self.signing_party_detection.start()
self.auto_invalid_device_check.start()
@tasks.loop()
async def auto_clean_db(self) -> None:
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss') as cursor:
data = await cursor.fetchall()
for user_devices in data:
devices = json.loads(user_devices[0])
if devices == list():
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('DELETE FROM autotss WHERE devices = ?', (user_devices[0],))
await db.commit()
await asyncio.sleep(300)
@auto_clean_db.before_loop
async def before_auto_clean_db(self) -> None:
await self.bot.wait_until_ready()
await asyncio.sleep(3) # If first run, give on_ready() some time to create the database
@tasks.loop()
async def signing_party_detection(self) -> None:
async with aiohttp.ClientSession() as session:
async with session.get('https://api.ipsw.me/v4/devices') as resp:
devices = await resp.json()
devices = [d for d in devices if any(x in d['identifier'] for x in ('iPhone', 'AppleTV', 'iPod', 'iPad'))]
api = dict()
for device in [d['identifier'] for d in devices]:
api[device] = await self.utils.get_firms(session, device)
try:
self._api
except AttributeError:
self._api = api
return
for device in self._api.keys():
for firm in [x for x in self._api[device] if x['signed'] == False]:
if any(new_firm['signed'] == True for new_firm in api[device] if new_firm['buildid'] == firm['buildid']):
print(f"[SIGN] Detected resigned firmware for: {device}, iOS {firm['version']}")
await self.utils.update_auto_saver_frequency(60) # Set blob saver frequency to 1 minute
tss = self.bot.get_cog('TSS') # Get TSS class
tss.blobs_loop = False
tss.auto_blob_saver.cancel() # Restart auto blob saver
await asyncio.sleep(1)
await self.utils.update_device_count()
tss.auto_blob_saver.start()
await asyncio.sleep(600) # Wait 10 minutes
await self.utils.update_auto_saver_frequency() # Set blob saver frequency back to 3 hours
tss.auto_blob_saver.cancel() # Restart auto blob saver
await asyncio.sleep(1)
tss.auto_blob_saver.start()
return
else:
self._api[device] = api[device]
await asyncio.sleep(30)
@signing_party_detection.before_loop
async def before_signing_party_detection(self) -> None:
await self.bot.wait_until_ready()
await asyncio.sleep(3) # If first run, give on_ready() some time to create the database
@tasks.loop()
async def auto_invalid_device_check(self) -> None: # If any users are saving SHSH blobs for A12+ devices without using custom apnonces, attempt to DM them saying they need to re-add the device
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * FROM autotss') as cursor:
data = await cursor.fetchall()
if len(data) == 0:
return
invalid_devices = dict()
async with aiohttp.ClientSession() as session:
for userinfo in data:
userid = userinfo[0]
devices = json.loads(userinfo[1])
invalid_devices[userid] = list()
for device in devices:
cpid = await self.utils.get_cpid(session, device['identifier'], device['boardconfig'])
if (device['apnonce'] is not None) and (await self.utils.check_apnonce(cpid, device['apnonce']) == False):
invalid_devices[userid].append(device)
continue
if (device['generator'] is not None) and (await self.utils.check_generator(device['generator']) == False):
invalid_devices[userid].append(device)
continue
if (0x8020 <= cpid < 0x8900) and (device['apnonce'] is None):
invalid_devices[userid].append(device)
for userid in [x for x in invalid_devices.keys() if len(invalid_devices[x]) > 0]:
embed = discord.Embed(title='Hey!')
msg = (
'One or more of your devices were added incorrectly to AutoTSS, and are saving **invalid SHSH blobs**.',
                'Due to this, they have been removed from AutoTSS so they are no longer continuing to save invalid SHSH blobs.',
                'To fix this, please re-add the following devices to AutoTSS:'
)
embed.description = '\n'.join(msg)
for device in invalid_devices[userid]:
device_info = [
f"Device Identifier: `{device['identifier']}`",
f"ECID: `{device['ecid']}`",
f"Boardconfig: `{device['boardconfig']}`"
]
if device['generator'] is not None:
device_info.insert(-1, f"Custom generator: `{device['generator']}`")
if device['apnonce'] is not None:
device_info.insert(-1, f"Custom ApNonce: `{device['apnonce']}`")
embed.add_field(name=f"**{device['name']}**", value='\n'.join(device_info))
user = await self.bot.fetch_user(userid)
try:
await user.send(embed=embed)
except:
pass
async with aiosqlite.connect('Data/autotss.db') as db:
for device in invalid_devices[userid]:
await self.shutil.rmtree(f"Data/Blobs/{device['ecid']}")
async with db.execute('SELECT devices FROM autotss WHERE user = ?', (userid,)) as cursor:
devices = json.loads((await cursor.fetchone())[0])
devices.pop(next(devices.index(x) for x in devices if x['ecid'] == device['ecid']))
await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), userid))
await db.commit()
await asyncio.sleep(259200)
@auto_invalid_device_check.before_loop
async def before_invalid_device_check(self) -> None:
await self.bot.wait_until_ready()
await asyncio.sleep(3) # If first run, give on_ready() some time to create the database
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild) -> None:
await self.bot.wait_until_ready()
async with aiosqlite.connect('Data/autotss.db') as db:
async with db.execute('SELECT prefix from prefix WHERE guild = ?', (guild.id,)) as cursor:
if await cursor.fetchone() is not None:
await db.execute('DELETE from prefix where guild = ?', (guild.id,))
await db.commit()
await db.execute('INSERT INTO prefix(guild, prefix) VALUES(?,?)', (guild.id, 'b!'))
await db.commit()
embed = await self.utils.info_embed('b!', self.bot.user)
for channel in guild.text_channels:
try:
await channel.send(embed=embed)
break
except:
pass
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild) -> None:
await self.bot.wait_until_ready()
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('DELETE from prefix where guild = ?', (guild.id,))
await db.commit()
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member) -> None:
await self.bot.wait_until_ready()
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * from autotss WHERE user = ?', (member.id,)) as cursor:
data = await cursor.fetchone()
if data is None:
return
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('UPDATE autotss SET enabled = ? WHERE user = ?', (True, member.id))
await db.commit()
await self.utils.update_device_count()
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member) -> None:
await self.bot.wait_until_ready()
async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT * from autotss WHERE user = ?', (member.id,)) as cursor:
data = await cursor.fetchone()
if data is None:
return
if len(member.mutual_guilds) == 0:
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('UPDATE autotss SET enabled = ? WHERE user = ?', (False, member.id))
await db.commit()
await self.utils.update_device_count()
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
await self.bot.wait_until_ready()
if message.channel.type == discord.ChannelType.private:
return
if message.content.replace(' ', '').replace('!', '') == self.bot.user.mention:
whitelist = await self.utils.get_whitelist(message.guild.id)
if (whitelist is not None) and (whitelist.id != message.channel.id):
return
prefix = await self.utils.get_prefix(message.guild.id)
embed = discord.Embed(title='AutoTSS', description=f'My prefix is `{prefix}`. To see all of my commands, run `{prefix}help`.')
embed.set_footer(text=message.author.name, icon_url=message.author.avatar_url_as(static_format='png'))
try:
await message.reply(embed=embed)
except:
pass
@commands.Cog.listener()
async def on_ready(self) -> None:
await self.os.makedirs('Data', exist_ok=True)
async with aiosqlite.connect('Data/autotss.db') as db:
await db.execute('''
CREATE TABLE IF NOT EXISTS autotss(
user INTEGER,
devices JSON,
enabled BOOLEAN
)
''')
await db.commit()
await db.execute('''
CREATE TABLE IF NOT EXISTS prefix(
guild INTEGER,
prefix TEXT
)
''')
await db.commit()
await db.execute('''
CREATE TABLE IF NOT EXISTS whitelist(
guild INTEGER,
channel INTEGER,
enabled BOOLEAN
)
''')
await db.commit()
await db.execute('''
CREATE TABLE IF NOT EXISTS auto_frequency(
time INTEGER
)
''')
await db.commit()
await self.utils.update_device_count()
await self.utils.update_auto_saver_frequency()
print('AutoTSS is now online.')
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error) -> None:
await self.bot.wait_until_ready()
embed = discord.Embed(title='Error')
if ctx.message.channel.type == discord.ChannelType.private:
embed.description = 'AutoTSS cannot be used in DMs. Please use AutoTSS in a Discord server.'
await ctx.reply(embed=embed)
return
if await self.utils.whitelist_check(ctx) != True:
return
prefix = await self.utils.get_prefix(ctx.guild.id)
if isinstance(error, commands.CommandNotFound):
if ctx.prefix.replace('!', '').replace(' ', '') == self.bot.user.mention:
return
embed.description = f"That command doesn't exist! Use `{prefix}help` to see all the commands I can run."
await ctx.reply(embed=embed)
elif isinstance(error, commands.MaxConcurrencyReached):
embed.description = f"`{prefix + ctx.command.qualified_name}` cannot be ran more than once at the same time!"
await ctx.reply(embed=embed)
elif isinstance(error, commands.ChannelNotFound):
embed = discord.Embed(title='Error', description='That channel does not exist.')
await ctx.reply(embed=embed)
elif (isinstance(error, commands.errors.NotOwner)) or \
(isinstance(error, commands.MissingPermissions)):
return
else:
raise error
def setup(bot):
bot.add_cog(Events(bot))
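# --- Hedged sketch (illustration only, not wired into the cog above) ---
# signing_party_detection compares an old snapshot of the ipsw.me firmware
# data with a fresh one.  This pure-Python helper shows that comparison in
# isolation: given two {device: [firmware dict, ...]} mappings, it returns
# the (device, buildid) pairs that went from unsigned to signed.
def find_resigned_firmwares(old_api: dict, new_api: dict) -> list:
    resigned = []
    for device, old_firms in old_api.items():
        new_by_build = {firm['buildid']: firm for firm in new_api.get(device, [])}
        for firm in old_firms:
            if firm['signed'] is False and new_by_build.get(firm['buildid'], {}).get('signed') is True:
                resigned.append((device, firm['buildid']))
    return resigned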
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/22 17:29
from motor.motor_asyncio import AsyncIOMotorClient
class MotorClient:
client: AsyncIOMotorClient = None
mongodb_ = MotorClient()
async def get_async_motor() -> AsyncIOMotorClient:
return mongodb_.client
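# --- Hedged usage sketch ---
# A common way to manage the shared client above is to open it on application
# startup and close it on shutdown; the URI below is a placeholder, and these
# coroutines should be wired into whatever startup/shutdown hooks the
# surrounding application provides.
async def connect_to_mongo(uri: str = "mongodb://localhost:27017") -> None:
    mongodb_.client = AsyncIOMotorClient(uri)
async def close_mongo_connection() -> None:
    if mongodb_.client is not None:
        mongodb_.client.close()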
|
"""Remove resource and person from slots
Revision ID: 55cc7870b02a
Revises: 3b1769f1cfdb
Create Date: 2014-03-29 20:20:50.274118
"""
# revision identifiers, used by Alembic.
revision = '55cc7870b02a'
down_revision = '3b1769f1cfdb'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column("strotime_slots", "resource_id")
op.drop_column("strotime_slots", "person_id")
op.drop_column("repeating_slots", "resource_id")
op.drop_column("slots", "resource_id")
op.drop_column("slots", "person_id")
op.drop_column("slots", "organisation_id")
def downgrade():
raise NotImplementedError('This application does not support downgrades.')
|
import os
from datetime import datetime
import muons
from matplotlib import pyplot as plt
folder = "data"
name = "pi_data"
# Raspberry pi time lags behind by this amount. Need to adjust it to align with weather data and other muon data.
time_offset = 365760
# Only get data from files whose names start with "pi_data".
files = [i for i in os.listdir(folder) if i.startswith(name)]
def get_counts():
"""
Returns a list containing the time of every recorded count.
There is only one count corresponding to each time, so a 1D
list is sufficient.
"""
lines = []
for f in files:
with open("{}/{}".format(folder, f), 'r') as current_file:
for l in current_file:
lines.append(l)
return parse_data(lines)
def parse_data(lines):
"""
Gets the times of each count from the lines of data, applying the
time_offset to ensure times are in sync with "real" time
from other detectors and data.
"""
data_set = set( # Remove duplicate items by using a set.
map(lambda i: float(i.split(": ")[-1]) + time_offset, lines)) # Extract the time from each line, and adjust for the offset.
return sorted(list(data_set)) # Convert back to a list, and sort.
def get_counts_in_time(counts, seconds=60):
"""
Split up a list of times (each representing a single count)
to get a list containing the number of counts in each interval of
the parameter 'seconds'.
"""
start = counts[0] # The start value of each interval.
# Tuple containing list of times and associated counts.
counts_in_time = ([], [])
temp_counts = 0 # Number of counts in the current interval.
for c in counts:
if c - start > seconds: # If we exceed interval, start a new one.
counts_in_time[0].append(start)
counts_in_time[1].append(temp_counts / (c-start))
temp_counts = 0
start = c
temp_counts += 1
return counts_in_time
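# --- Worked example (comments only, added for clarity) ---
# With one count per second at t = 0..9 and seconds=5, the first bin starts
# at t = 0 and closes at the first count more than 5 s later (t = 6), giving
# 6 counts / 6 s = 1.0 counts per second; the trailing partial interval is
# dropped:
#   get_counts_in_time(list(range(10)), seconds=5)  ->  ([0], [1.0])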
if __name__ == "__main__":
data = get_counts()
counts_per_time = get_counts_in_time(data, 600)
times = counts_per_time[0]
counts = counts_per_time[1]
plt.plot(times, counts, ".")
plt.show()
|
#!/usr/bin/env python3
from ruamel.yaml import YAML, dump, RoundTripDumper
#
import os
import math
import argparse
import time
import numpy as np
from rpg_baselines.envs import tcn_mlp_ae_vec_env_wrapper as wrapper
import rpg_baselines.common.util as U
#
from flightgym import QuadrotorEnv_v1
def parser():
parser = argparse.ArgumentParser()
parser.add_argument('--train', type=int, default=1,
help="To train new model or simply test pre-trained model")
parser.add_argument('--render', type=int, default=1,
help="Enable Unity Render")
parser.add_argument('--save_dir', type=str, default=os.path.dirname(os.path.realpath(__file__)),
help="Directory where to save the checkpoints and training metrics")
parser.add_argument('--seed', type=int, default=0,
help="Random seed")
parser.add_argument('-w', '--weight', type=str, default='./saved/quadrotor_env.zip',
help='trained weight path')
return parser
def main():
args = parser().parse_args()
cfg = YAML().load(open(os.environ["FLIGHTMARE_PATH"] +
"/flightlib/configs/vec_env.yaml", 'r'))
if not args.train:
cfg["env"]["num_envs"] = 1
cfg["env"]["num_threads"] = 1
if args.render:
cfg["env"]["render"] = "yes"
else:
cfg["env"]["render"] = "no"
env = wrapper.FlightEnvVec(QuadrotorEnv_v1(
dump(cfg, Dumper=RoundTripDumper), False))
env.test = True
action = np.zeros([env.num_envs, env.num_acts], dtype=np.float32)
action += np.array([-0.01, -0.01, 0.00, 0.00])
connectedToUnity = False
connectedToUnity = env.connectUnity()
# while not connectedToUnity:
# connectedToUnity = env.connectUnity()
# if not connectedToUnity:
# print("Couldn't connect to unity, will try another time.")
# print("env.num_envs : ", env.num_envs)
max_ep_length = env.max_episode_steps
if env.num_envs == 1:
object_density_fractions = np.ones([env.num_envs], dtype=np.float32)
else:
object_density_fractions = np.linspace(0.0, 1.0, num=env.num_envs)
# object_density_fractions = np.random.rand(env.num_envs)
env.set_objects_densities(object_density_fractions=object_density_fractions)
time.sleep(5)
print(object_density_fractions)
env.reset()
# print("max_ep_length : ", max_ep_length)
done, ep_len = False, 0
    while ep_len < max_ep_length * 10:
index = 0
observations, reward, done, infos = env.step(action)
# print("RelVelCommand ", observations[index, :3])
# print("RotMat 1 col: ", observations[index,3:6])
# print("RotMat 2 col: ", observations[index, 6:9])
# print("RotMat 3 col: ", observations[index, 9:12])
# print("VelLin: ", observations[index, 12:15])
# print("AngRates: ", observations[index, 0, 15:18, -1])
# print("RelPosGoal: ", observations[1, 0, 18:21, -1])
# print(reward)
# print(ep_len)
# print("###############")
ep_len += 1
if __name__ == "__main__":
main()
|
import utils.util as util
from generation.structures.baseStructure import *
class GeneratedWell(BaseStructure):
def __init__(self) :
super(BaseStructure, self).__init__()
        self.uselessBlocks = [
            'minecraft:air', 'minecraft:cave_air', 'minecraft:water', 'minecraft:lava',
            'minecraft:oak_leaves', 'minecraft:leaves', 'minecraft:birch_leaves', 'minecraft:spruce_leaves',
            'minecraft:oak_log', 'minecraft:spruce_log', 'minecraft:birch_log', 'minecraft:jungle_log', 'minecraft:acacia_log', 'minecraft:dark_oak_log',
            'minecraft:grass', 'minecraft:snow', 'minecraft:poppy',
            'minecraft:dead_bush', "minecraft:cactus", "minecraft:sugar_cane"]
def setupInfoAndGetCorners(self):
self.setSize([6, 9, 6])
self.info["mainEntry"]["position"] = [int(self.size[0] / 2), self.size[1] - 5, 0]
return self.getCornersLocalPositionsAllFlipRotation(self.info["mainEntry"]["position"])
def getNextBuildingInformation(self, flip, rotation):
info = {}
info["size"] = self.size
self.info["mainEntry"]["facing"] = "north"
info["entry"] = {
"position" : self.info["mainEntry"]["position"],
"facing" : self.getFacingMainEntry(flip, rotation)
}
info["corner"] = self.getCornersLocalPositions(self.info["mainEntry"]["position"].copy(), flip, rotation)
return info
def build(self, worldModif, buildingCondition, chestGeneration):
self.setSize(buildingCondition["size"])
self.entry = buildingCondition["referencePoint"].copy()
self.computeOrientation(buildingCondition["rotation"], buildingCondition["flip"])
if buildingCondition["flip"] == 1 or buildingCondition["flip"] == 3:
buildingCondition["referencePoint"][0] = self.size[0] - 1 - buildingCondition["referencePoint"][0]
if buildingCondition["flip"] == 2 or buildingCondition["flip"] == 3:
buildingCondition["referencePoint"][2] = self.size[2] - 1 - buildingCondition["referencePoint"][2]
woodType = "*woodType*"
result = util.changeNameWithBalise(woodType, buildingCondition["replacements"])
if result[0] >= 0:
woodType = result[1]
else :
woodType = "oak"
self.plankType = "minecraft:" + woodType + "_planks"
self.addStoneBricks(worldModif, buildingCondition)
self.addStoneBrickStairs(worldModif, buildingCondition)
self.addWoodAroundTheWell(worldModif, buildingCondition)
# Add water
fromBlock = self.returnWorldPosition(
[2, int(self.size_y()), 2], buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
toBlock = self.returnWorldPosition(
[3, int(self.size_y()), 3], buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
worldModif.fillBlocks(fromBlock[0], fromBlock[1] - 3, fromBlock[2], toBlock[0], toBlock[1]- 7, toBlock[2],"minecraft:air")
self.addStoneBricks(worldModif, buildingCondition)
self.addStoneBrickStairs(worldModif, buildingCondition)
self.addWoodAroundTheWell(worldModif, buildingCondition)
worldModif.fillBlocks(fromBlock[0], fromBlock[1] - 6, fromBlock[2], toBlock[0], toBlock[1] - 5, toBlock[2], "minecraft:water")
worldModif.fillBlocks(fromBlock[0], fromBlock[1] - 8, fromBlock[2], toBlock[0], toBlock[1] - 9, toBlock[2], "minecraft:stone_bricks")
def addWoodAroundTheWell(self, worldModif, buildingCondition):
positions = [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [1, 5], [2, 5], [3, 5], [4, 5], [5, 5],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [5, 1], [5, 2], [5, 3], [5, 4]]
# Add wood plank
for i in range(len(positions)):
localPosition = [positions[i][0], self.size_y() - 5, positions[i][1]]
position = self.returnWorldPosition(
localPosition, buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
worldModif.setBlock(position[0], position[1], position[2], self.plankType)
def addStoneBrickStairs(self, worldModif, buildingCondition):
# Add stairs
positions = [[2, 4], [3, 4], [1, 2], [1, 3], [2, 1], [3, 1], [4, 2], [4, 3]]
orientations = ["north", "north", "east", "east", "south", "south", "west", "west"]
for i in range(len(positions)):
localPosition = positions[i][0], self.size_y() - 4, positions[i][1]
position = self.returnWorldPosition(
localPosition, buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
worldModif.setBlock(position[0], position[1], position[2], "minecraft:stone_brick_stairs[" + self.convertProperty('facing', orientations[i] ) + "]")
for k in range(1, 4):
worldModif.setBlock(position[0], position[1] - k, position[2], "minecraft:stone_bricks")
for j in range(1, 3):
worldModif.setBlock(position[0], position[1] + 3, position[2], "minecraft:stone_brick_slab")
def addStoneBricks(self, worldModif, buildingCondition):
# Add stones to the corner
positions = [[1, 1], [1, 4], [4, 1], [4, 4]]
for i in range(len(positions)):
localPosition = positions[i][0], self.size_y() - 4, positions[i][1]
position = self.returnWorldPosition(
localPosition, buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
worldModif.setBlock(position[0], position[1], position[2], "minecraft:infested_chiseled_stone_bricks")
worldModif.setBlock(position[0], position[1] - 1, position[2], "minecraft:stone_bricks")
for j in range(1, 3):
# Add cobblestone walls
worldModif.setBlock(position[0], position[1] + j, position[2], "minecraft:cobblestone_wall")
# Add stone brick slabs
worldModif.setBlock(position[0], position[1] + j + 1, position[2], "minecraft:stone_brick_slab")
# Add stones upside the well
positions = [[2, 2], [2, 3], [3, 2], [3, 3]]
for i in range(len(positions)):
localPosition = positions[i][0], self.size_y() - 1 , positions[i][1]
position = self.returnWorldPosition(
localPosition, buildingCondition["flip"],
buildingCondition["rotation"], buildingCondition["referencePoint"], buildingCondition["position"])
worldModif.setBlock(position[0], position[1], position[2], "minecraft:chiseled_stone_bricks")
|
from typing import Generic, List, TypeVar
T = TypeVar('T')
class Graph(Generic[T]):
def __init__(self) -> None:
self.__graph = {}
def addNode(self, node: T) -> None:
if node not in self.__graph:
self.__graph[node] = []
def addDirectedEdge(self, src: T, dest: T) -> None:
if src not in self.__graph:
self.__graph[src] = []
if dest not in self.__graph:
self.__graph[dest] = []
self.__graph[src].append(dest)
def addUndirectedEdge(self, src: T, dest: T) -> None:
if src not in self.__graph:
self.__graph[src] = []
if dest not in self.__graph:
self.__graph[dest] = []
self.__graph[src].append(dest)
self.__graph[dest].append(src)
    def bfs(self, startNode: T) -> List[T]:
output = []
visited = set()
queue = []
if startNode in self.__graph:
queue.append(startNode)
visited.add(startNode)
while queue:
currNode = queue.pop(0)
output.append(currNode)
for connectedNode in self.__graph[currNode]:
if connectedNode not in visited:
queue.append(connectedNode)
visited.add(connectedNode)
return output
def dfs(self, startNode: T):
visited = set()
output = []
self.__dfs(startNode, visited, output)
return output
def hasPath(self, start: T, dest: T) -> bool:
visited = set()
return self.__hasPath(start, dest, visited)
def numConnectedComponent(self) -> int:
return len(self.__getComponents())
def largetComponent(self) -> int:
return max(len(component) for component in self.__getComponents())
def __hasPath(self, currentNode: T, dest: T, visited: set) -> bool:
visited.add(currentNode)
if currentNode == dest:
return True
for neighbor in self.__graph[currentNode]:
if neighbor not in visited:
if self.__hasPath(neighbor, dest, visited):
return True
return False
    def __dfs(self, currNode: T, visited: set, output: List[T]) -> None:
visited.add(currNode)
output.append(currNode)
for neighbor in self.__graph[currNode]:
if neighbor not in visited:
self.__dfs(neighbor, visited, output)
    def __getComponents(self) -> List[List[T]]:
components = []
visited = set()
for node in self.__graph.keys():
output = []
if node not in visited:
self.__dfs(node, visited, output)
components.append(output)
return components
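# --- Hedged usage example (added for illustration; not part of the original class) ---
# Exercises the public methods above on a small graph with two components.
if __name__ == '__main__':
    g = Graph()
    g.addUndirectedEdge(1, 2)
    g.addUndirectedEdge(2, 3)
    g.addDirectedEdge(4, 5)
    print(g.bfs(1))                   # [1, 2, 3]
    print(g.dfs(1))                   # [1, 2, 3]
    print(g.hasPath(1, 3))            # True
    print(g.numConnectedComponent())  # 2
    print(g.largetComponent())        # 3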
|
try:
with open("input.txt", "r") as fileContent:
drawnNumbers = [drawnNumber.strip("\n")
for drawnNumber in fileContent.readline().split(",")]
bingoCards = [line.strip("\n").strip().split() for line in fileContent.readlines()
if line.strip("\n")]
bingoCards = [bingoCards[index:index + 5]
for index in range(0, len(bingoCards), 5)]
except FileNotFoundError:
print("[!] The input file was not found. The program will not continue.")
exit(-1)
def markDrawnNumbers(drawnNumber, bingoCard):
for row in bingoCard:
for index, number in enumerate(row):
row[index] = "X" if number == drawnNumber else row[index]
def checkIfWon(bingoCard):
    # A card wins when any full row or any full column is completely marked.
    for row in bingoCard:
        if row == ["X"] * 5:
            return True
    for col in range(5):
        if all(bingoCard[row][col] == "X" for row in range(5)):
            return True
    return False
wonCard = []
wonDrawnNumber = 0
won = False
for drawnNumber in drawnNumbers:
if won:
break
for bingoCard in bingoCards:
markDrawnNumbers(drawnNumber, bingoCard)
won = checkIfWon(bingoCard)
if won:
wonCard = bingoCard
wonDrawnNumber = drawnNumber
break
wonCardUnmarkedNumbers = [
int(number) for line in wonCard for number in line if number != "X"]
print(sum(wonCardUnmarkedNumbers) * int(wonDrawnNumber))
|
# This list of imports is likely incomplete --- add anything you need.
# TODO: Your code here.
import torch
import torch.nn as nn
from allennlp.nn.util import masked_softmax
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from allennlp.nn.util import sort_batch_by_length
from allennlp.nn.util import replace_masked_values, masked_log_softmax
# Name: Pranav Varanasi
# UTEID: ptv247
class AttentionRNN(nn.Module):
def __init__(self, embedding_matrix, hidden_size,
dropout):
"""
Parameters
----------
embedding_matrix: FloatTensor
FloatTensor matrix of shape (num_words, embedding_dim),
where each row of the matrix is a word vector for the
associated word index.
hidden_size: int
The size of the hidden state in the RNN.
dropout: float
The dropout rate.
"""
# Save the construction arguments, useful for serialization
self.init_arguments = locals()
self.init_arguments.pop("self")
self.init_arguments.pop("__class__")
super(AttentionRNN, self).__init__()
self.embedding_matrix = embedding_matrix
self.num_embedding_words = embedding_matrix.size(0)
self.embedding_dim = embedding_matrix.size(1)
# only change hidden size for gru, affine transformation should be for full hidden size
# dividing by 2 using integer division, in python thats //
half_hidden = hidden_size // 2
# Create Embedding object
# TODO: Your code here.
self.embedding = nn.Embedding(self.num_embedding_words,
self.embedding_dim, padding_idx=0)
# Load our embedding matrix weights into the Embedding object,
# and make them untrainable (requires_grad=False)
# TODO: Your code here.
# Initialize embedding weights
self.embedding.weight = nn.Parameter(self.embedding_matrix,
requires_grad=False)
# Make a RNN to encode the passage. Note that batch_first=True.
# TODO: Your code here.
# Use bidirectional GRU variant of RNN
self.gruPassage = nn.GRU(self.embedding_dim, half_hidden, batch_first = True, bidirectional = True, dropout = dropout)
# Make a RNN to encode the question. Note that batch_first=True.
# TODO: Your code here.
# Create GRU for question
self.gruQuestion = nn.GRU(self.embedding_dim, half_hidden, batch_first = True, bidirectional = True, dropout = dropout)
# Affine transform for attention.
# TODO: Your code here.
# Initialize attention layer with same dimensions as projections
self.attention_transform = nn.Linear(3 * hidden_size, 1)
# Affine transform for predicting start index.
# TODO: Your code here.
# Apply affine transform with 3 * original hidden size
self.start_output_projection = nn.Linear(3 * hidden_size, 1)
# Affine transform for predicting end index.
# TODO: Your code here.
# Apply end affine transform with 3 * original hidden size
self.end_output_projection = nn.Linear(3 * hidden_size, 1)
# Stores the number of gradient updates performed.
self.global_step = 0
def forward(self, passage, question):
"""
The forward pass of the RNN-based model with attention.
Parameters
----------
passage: Variable(LongTensor)
A Variable(LongTensor) of shape (batch_size, passage_length)
representing the words in the passage for each batch.
question: Variable(LongTensor)
A Variable(LongTensor) of shape (batch_size, question_length)
representing the words in the question for each batch.
Returns
-------
An output dictionary consisting of:
        start_logits: Variable(FloatTensor)
            A Variable(FloatTensor) of shape (batch_size, max_passage_size).
            Each value is the score assigned to a given token. Masked indices
            are assigned very small scores (-1e7).
        end_logits: Variable(FloatTensor)
            A Variable(FloatTensor) of shape (batch_size, max_passage_size).
            Each value is the score assigned to a given token. Masked indices
            are assigned very small scores (-1e7).
        softmax_start_logits: Variable(FloatTensor)
            The same as start_logits, but with a masked log-softmax applied.
            Shape (batch_size, max_passage_size). Represents a log-probability
            distribution over the passage, indicating the probability that any
            given token is where the answer begins. Masked indices have a
            log-probability of -inf.
        softmax_end_logits: Variable(FloatTensor)
            The same as end_logits, but with a masked log-softmax applied.
            Shape (batch_size, max_passage_size). Represents a log-probability
            distribution over the passage, indicating the probability that any
            given token is where the answer ends. Masked indices have a
            log-probability of -inf.
"""
# Mask: FloatTensor with 0 in positions that are
# padding (word index 0) and 1 in positions with actual words.
# Make a mask for the passage. Shape: ?
# TODO: Your code here.
passage_mask = (passage != 0).type(
torch.cuda.FloatTensor if passage.is_cuda else
torch.FloatTensor)
# Make a mask for the question. Shape: ?
# TODO: Your code here.
question_mask = (question != 0).type(
torch.cuda.FloatTensor if question.is_cuda else
torch.FloatTensor)
# Make a LongTensor with the length (number non-padding words
# in) each passage.
# Shape: ?
# TODO: Your code here.
# Sum along dim 1 to get length of non-padding words for passage
passageLengths = passage_mask.sum(dim=1)
# Make a LongTensor with the length (number non-padding words
# in) each question.
# Shape: ?
# TODO: Your code here.
# Sum along dim 1 to get length of non-padding words for questions
questionLengths = question_mask.sum(dim=1)
# Part 1: Embed the passages and the questions.
# 1.1 Embed the passage.
# TODO: Your code here.
# Shape: ?
# Get passage embedding
embedded_passage = self.embedding(passage)
# 1.2. Embed the question.
# TODO: Your code here.
# Shape: ?
# Get question embedding
embedded_question = self.embedding(question)
# Part 2. Encode the embedded passages with the RNN.
# 2.1. Sort embedded passages by decreasing order of passage_lengths.
# Hint: allennlp.nn.util.sort_batch_by_length might be helpful.
# TODO: Your code here.
# Sort training batch with allennlp function with passage and lengths as parameters
sorted_passage, sorted_passage_lengths, passage_restoration, _ = sort_batch_by_length(embedded_passage, passageLengths)
# 2.2. Pack the passages with torch.nn.utils.rnn.pack_padded_sequence.
# Hint: Make sure you have the proper value for batch_first.
# TODO: Your code here.
# pack sequence based on non-padding words
packed_passage = pack_padded_sequence(sorted_passage, sorted_passage_lengths, batch_first = True)
# 2.3. Encode the packed passages with the RNN.
# TODO: Your code here.
# get passage representation from GRU
passageEncoding, passageHidden = self.gruPassage(packed_passage)
# 2.4. Unpack (pad) the passages with
# torch.nn.utils.rnn.pad_packed_sequence.
# Hint: Make sure you have the proper value for batch_first.
# Shape: ?
# TODO: Your code here.
# Unpack sequence based on padding words
passage_unpacked, lens_unpacked = pad_packed_sequence(passageEncoding, batch_first=True)
# 2.5. Unsort the unpacked, encoded passage to restore the
# initial ordering.
# Hint: Look into torch.index_select or NumPy/PyTorch fancy indexing.
# Shape: ?
# TODO: Your code here.
# get unsorted passage using restoration indices
unsorted_passage = passage_unpacked.index_select(0, passage_restoration)
# Part 3. Encode the embedded questions with the RNN.
# 3.1. Sort the embedded questions by decreasing order
# of question_lengths.
# Hint: allennlp.nn.util.sort_batch_by_length might be helpful.
# TODO: Your code here.
# Sort question batches
sorted_question, sorted_question_lengths, question_restoration, _ = sort_batch_by_length(embedded_question, questionLengths)
# 3.2. Pack the questions with pack_padded_sequence.
# Hint: Make sure you have the proper value for batch_first.
# TODO: Your code here.
# pack questions based on padding words
packed_question = pack_padded_sequence(sorted_question, sorted_question_lengths, batch_first = True)
# 3.3. Encode the questions with the RNN.
# TODO: Your code here.
# Get question representation from GRU
questionEncoding, questionHidden = self.gruQuestion(packed_question)
# 3.4. Unpack (pad) the questions with pad_packed_sequence.
# Hint: Make sure you have the proper value for batch_first.
# Shape: ?
# TODO: Your code here.
# Unpack question representation based on padding words
question_unpacked, lens_unpacked = pad_packed_sequence(questionEncoding, batch_first = True)
# 3.5. Unsort the unpacked, encoded question to restore the
# initial ordering.
# Hint: Look into torch.index_select or NumPy/PyTorch fancy indexing.
# Shape: ?
# TODO: Your code here.
# restore original question ordering using restoration indices
unsorted_question = question_unpacked.index_select(0, question_restoration)
# Part 4. Calculate attention weights and attend to question.
        # Plan: a trainable affine transform produces attention logits from the
        # concatenated passage/question features; a masked softmax turns those
        # logits into probabilities over the question; those probabilities then
        # weight the encoded question to form a weighted average per passage word.
# 4.1. Expand the encoded question to shape suitable for attention.
# Hint: Think carefully about what the shape of the attention
# input vector should be. torch.unsqueeze and torch.expand
# might be useful.
        #       Shape: (batch_size, passage_len, question_len, hidden_dim),
        #       e.g. (64, 124, 15, 512) for passage_len=124, question_len=15
        # need to get question and passage shape the same
        # TODO: Your code here.
        # unsorted_question has shape (batch_size, question_len, hidden_dim), e.g. (64, 15, 512)
        # passage_mask.size(1) gets the max passage size for the mask
maxPassageSize = passage_mask.size(1)
# Use -1 index within expand to keep original dimensions same and only add passage dimension
expanded_question = unsorted_question.unsqueeze(dim=1).expand(-1, maxPassageSize, -1, -1)
# 4.2. Expand the encoded passage to shape suitable for attention.
# Hint: Think carefully about what the shape of the attention
# input vector should be. torch.unsqueeze and torch.expand
# might be useful.
# Shape: ?
# TODO: Your code here.
# Get max question size
maxQuestionSize = question_mask.size(1)
        # unsorted_passage has shape (batch_size, passage_len, hidden_dim), e.g. (64, 124, 512)
# Add question dimension using expand and max question size
# Keep all other dimensions the same with -1 index
# Unsqueeze adds singleton dimension at place to insert new values
expanded_passage = unsorted_passage.unsqueeze(dim=2).expand(-1, -1, maxQuestionSize,-1)
# 4.3. Build attention_input. This is the tensor passed through
# the affine transform.
# Hint: Think carefully what the shape of this tensor should be.
# torch.cat might be useful.
# Shape: ?
# attention_input is the concatenating of 4.1 and 4.2
# pass in attention input to affine transformation
# use affine_transform function and pass in the concatenated matrix
# TODO: Your code here.
# concatenate along the last dimension -1 with expanded passages and questions
attention_input = torch.cat([expanded_passage, expanded_question,
expanded_passage * expanded_question], dim=-1)
# 4.4. Apply affine transform to attention input to get
# attention logits. You will need to slightly reshape it
# into a tensor of the shape you expect.
# Shape:
# TODO: Your code here.
        # apply the attention transform, then squeeze the last (singleton) dimension
        # so the logits line up with the question mask
attention_logits = self.attention_transform(attention_input).squeeze(-1)
# 4.5. Masked-softmax the attention logits over the last dimension
# to normalize and make the attention logits a proper
# probability distribution.
# Hint: allennlp.nn.util.last_dim_softmax might be helpful.
# Shape: ?
# TODO: Your code here.
        # Get a probability distribution from the logits using a masked softmax
        # over the last dimension (the question positions)
prob_dist = masked_softmax(attention_logits, question_mask)
# 4.6. Use the attention weights to get a weighted average
# of the RNN output from encoding the question for each
# passage word.
# Hint: torch.bmm might be helpful.
# Shape: ?
# use torch.bmm with question to add weights
# TODO: Your code here.
# compute weighted average using matrix product with bmm
        # Weighted average = sum over question tokens of P(token) * encoded token
attentionWeights = torch.bmm(prob_dist, unsorted_question)
# Part 5: Combine the passage and question representations by
# concatenating the passage and question representations with
# their product.
# 5.1. Concatenate to make the combined representation.
# Hint: Use torch.cat
# Shape: ?
# TODO: Your code here.
# Get combined representation using formula to add passage representation, weighted average, and element-wise product
# Concatenate over last dimension
combinedRepresent = torch.cat([unsorted_passage, attentionWeights,
unsorted_passage * attentionWeights], dim=-1)
# Part 6: Compute logits for answer start index.
# 6.1. Apply the affine transformation, and edit the shape.
# Shape after affine transformation: ?
# Shape after editing shape: ?
# TODO: Your code here.
# Get start projection using combined representation
start_logits = self.start_output_projection(combinedRepresent).squeeze(-1)
# 6.2. Replace the masked values so they have a very low score (-1e7).
# This tensor is your start_logits.
# Hint: allennlp.nn.util.replace_masked_values might be helpful.
# Shape: ?
# TODO: Your code here.
start_logits = replace_masked_values(start_logits, passage_mask, -1e7)
# 6.3. Apply a padding-aware log-softmax to normalize.
# This tensor is your softmax_start_logits.
# Hint: allennlp.nn.util.masked_log_softmax might be helpful.
# Shape: ?
# TODO: Your code here.
softmax_start_logits = masked_log_softmax(start_logits, passage_mask)
# Part 7: Compute logits for answer end index.
# 7.1. Apply the affine transformation, and edit the shape.
# Shape after affine transformation: ?
# Shape after editing shape: ?
# TODO: Your code here.
end_logits = self.end_output_projection(combinedRepresent).squeeze(-1)
# 7.2. Replace the masked values so they have a very low score (-1e7).
# This tensor is your end_logits.
# Hint: allennlp.nn.util.replace_masked_values might be helpful.
# Shape: ?
# TODO: Your code here.
end_logits = replace_masked_values(end_logits, passage_mask, -1e7)
# 7.3. Apply a padding-aware log-softmax to normalize.
# This tensor is your softmax_end_logits.
# Hint: allennlp.nn.util.masked_log_softmax might be helpful.
# Shape: ?
# TODO: Your code here.
softmax_end_logits = masked_log_softmax(end_logits, passage_mask)
# Part 8: Output a dictionary with the start_logits, end_logits,
# softmax_start_logits, softmax_end_logits.
# TODO: Your code here. Remove the NotImplementedError below.
return {
"start_logits": start_logits,
"end_logits": end_logits,
"softmax_start_logits": softmax_start_logits,
"softmax_end_logits": softmax_end_logits
}
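# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original assignment code): a minimal
# smoke test for AttentionRNN, assuming the torch / allennlp-0.x utilities
# imported at the top of this file are available. All sizes below (vocabulary,
# embedding dim, hidden size, sequence lengths) are arbitrary illustrations.
if __name__ == '__main__':
    _embedding_matrix = torch.randn(50, 16)      # 50 "words", 16-dim vectors
    _model = AttentionRNN(_embedding_matrix, hidden_size=8, dropout=0.0)
    _passage = torch.randint(1, 50, (2, 6))      # batch of 2, passage length 6
    _question = torch.randint(1, 50, (2, 4))     # batch of 2, question length 4
    _passage[1, 4:] = 0                          # index 0 is padding
    _question[1, 3:] = 0
    _out = _model(_passage, _question)
    print({k: tuple(v.shape) for k, v in _out.items()})  # each tensor: (2, 6)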
|
import random
from simulators.constants_and_packages import *
def init_message_boxes(agents, iterations):
for agent in agents:
agent.message_box = {
itr: {
nei.name: {} for nei in agent.neighbours
}
for itr in range(iterations)
}
def load_file(file_name):
with open(file_name, 'rb') as fileObject:
return pickle.load(fileObject)
def distance(pos1, pos2):
return math.sqrt(math.pow(pos1[0] - pos2[0], 2) + math.pow(pos1[1] - pos2[1], 2))
def calculate_coverage(robots, targets):
convergence = 0
for target in targets:
curr_conv = target.req
for robot in robots:
if distance(target.pos_node.pos, robot.pos_node.pos) <= robot.sr:
curr_conv = max(0, curr_conv - robot.cred)
convergence += curr_conv
return convergence
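# Note: despite its name, calculate_coverage returns the total *uncovered*
# requirement left after crediting every robot within sensing range of each
# target, so lower values mean better coverage (0 = fully covered).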
def calculate_collisions(robots, big_iteration):
collisions = 0
for robot1, robot2 in itertools.product(robots, robots):
if robot1.name != robot2.name:
if robot1.pos_node.num == robot2.pos_node.num:
if big_iteration > 0 and robot1.prev_pos_node and robot1.prev_pos_node == robot1.pos_node:
if robot2.prev_pos_node and robot2.prev_pos_node == robot2.pos_node:
continue
collisions += 1
# print(f'robot one - {robot1.name}, robot two - {robot2.name}')
return collisions
def calculate_chosen_positions(robots):
return_dict = {robot.name: '' for robot in robots}
for robot in robots:
return_dict[robot.name] = robot.pos_node.name
return return_dict
def count_collisions(robots):
collisions = 0
for robot1, robot2 in itertools.product(robots, robots):
if robot1.name != robot2.name:
if robot1.pos_node.num == robot2.pos_node.num:
collisions += 1
return collisions / 2
def count_future_collisions(robots):
collisions = 0
for robot1, robot2 in itertools.product(robots, robots):
if robot1.name != robot2.name:
if robot1.next_pos_node and robot2.next_pos_node:
if robot1.next_pos_node.num == robot2.next_pos_node.num:
collisions += 1
return collisions / 2
def print_minutes(start, end):
print()
print(f'\nThe program finished in {time.strftime("%H:%M:%S", time.gmtime(end - start))}.')
def flatten_message(message):
if FLATTEN_MESSAGE:
min_value = min(message.values())
return {pos_i: value - min_value for pos_i, value in message.items()}
return message
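# Example: with FLATTEN_MESSAGE enabled, {'p1': 5.0, 'p2': 7.5} becomes
# {'p1': 0.0, 'p2': 2.5} -- values are shifted so the minimum is zero.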
def create_dict_of_weights(robots):
# return_value = {robot.name: random.uniform(1e-3, 1e-1) for robot in robots}
return_value = {robot.name: random.uniform(1e-10, 1e-5) for robot in robots}
# return_value = {robot.name: 0 for robot in robots}
return return_value
def print_and_return_choices(all_agents, s_iteration, need_to_print=False):
# return_value: {'robot_name': ['pos_i', ...], 'robot_name_2': ['pos_i', ...], ...}
return_value = {}
str_for_print = ''
for a in all_agents:
# s_iteration = len(list(a.message_box.keys())) - 1
if 'robot' in a.name:
counter_dict = {}
for d in a.domain:
counter_dict[d] = 0
for b in all_agents:
if b.name in a.message_box[s_iteration]:
for k, v in a.message_box[s_iteration][b.name].items():
counter_dict[k] += v
max_value = max(counter_dict.values())
cells_with_highest_value = [k for k, v in counter_dict.items() if v == max_value]
choose_str = 'chooses one of' if len(cells_with_highest_value) > 1 else 'chooses'
str_for_print += f'\n{colored(a.name, "green")} {choose_str}: ' \
f'{cells_with_highest_value} with the highest value: {max_value:.2f}'
return_value[a.name] = cells_with_highest_value
if need_to_print:
print(str_for_print)
# print_all_pos_sum_weights(all_agents, iteration)
return return_value
def cover_target(target, robots_set):
cumulative_cov = sum([robot.cred for robot in robots_set])
return cumulative_cov > target.req
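# select_FMR_nei: pick a (near-)minimal subset of the target's neighbouring
# robots that can still cover it. Starting from every robot within sr + mr,
# it repeatedly drops the most dispensable robot (preferring robots outside
# the sensing radius, then using how many targets each robot is near) as long
# as coverage is preserved, and finally keeps the highest-credibility robots
# greedily until the requirement is met.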
def select_FMR_nei(target):
total_set = []
SR_set = []
rest_set = []
for robot in target.neighbours:
dist = distance(robot.pos_node.pos, target.pos_node.pos)
if dist <= robot.sr + robot.mr:
total_set.append(robot)
if dist <= robot.sr:
SR_set.append(robot)
else:
rest_set.append(robot)
while cover_target(target, total_set):
def get_degree(node):
targets_nearby = list(filter(lambda x: 'target' in x.name, node.neighbours))
return len(targets_nearby)
max_degree = max([get_degree(x) for x in rest_set], default=0)
min_degree = min([get_degree(x) for x in SR_set], default=0)
if len(rest_set) > 0:
selected_to_remove = list(filter(lambda x: get_degree(x) == max_degree, rest_set))[0]
rest_set.remove(selected_to_remove)
else:
selected_to_remove = list(filter(lambda x: get_degree(x) == min_degree, SR_set))[0]
SR_set.remove(selected_to_remove)
temp_total_set = total_set[:]
temp_total_set.remove(selected_to_remove)
if not cover_target(target, temp_total_set):
break
total_set.remove(selected_to_remove)
# return total_set
total_set.sort(key=lambda x: x.cred, reverse=True)
return_set = []
for robot in total_set:
if not cover_target(target, return_set):
return_set.append(robot)
if len(total_set) > len(return_set):
pass
return return_set
def set_diff_cred(robots, min_v, max_v):
def set_cred(x):
x.cred = random.randint(min_v, max_v)
_ = [set_cred(x) for x in robots]
def select_pos(robot, targets, graph, robot_pos_name_set=None):
if robot_pos_name_set is None:
robot_pos_name_set = [pos_name for pos_name in robot.domain]
pos_dict_name_pos = {pos_node.name: pos_node.pos for pos_node in graph}
pos_dict_name_pos_node = {pos_node.name: pos_node for pos_node in graph}
next_pos_name = select_pos_internal(robot, robot_pos_name_set, [t for t in targets], pos_dict_name_pos)
return pos_dict_name_pos_node[next_pos_name]
def select_pos_internal(robot, robot_pos_name_set, funcs, pos_dict_name_pos):
max_func_value = max([target.req for target in funcs])
if len(robot_pos_name_set) == 1 or max_func_value < 1:
return random.sample(robot_pos_name_set, 1)[0]
target_set = []
for target in funcs:
if target.req == max_func_value:
if any([distance(target.pos_node.pos, pos_dict_name_pos[p_n]) < robot.sr for p_n in robot_pos_name_set]):
target_set.append(target)
if len(target_set) == 0:
return random.sample(robot_pos_name_set, 1)[0]
within_sr_range_list, target_set = within_sr_from_most(robot, robot_pos_name_set, target_set, pos_dict_name_pos)
for target in target_set:
funcs.remove(target)
return select_pos_internal(robot, within_sr_range_list, funcs, pos_dict_name_pos)
def within_sr_from_most(robot, robot_pos_name_set, target_set, pos_dict_name_pos):
within_sr_range_dict = {}
max_list = []
for robot_name in robot_pos_name_set:
count = sum([distance(target.pos_node.pos, pos_dict_name_pos[robot_name]) < robot.sr for target in target_set])
max_list.append(count)
within_sr_range_dict[robot_name] = count
max_value = max(max_list)
within_sr_range_list, target_set_to_send = [], []
for robot_name, count in within_sr_range_dict.items():
if count == max_value:
within_sr_range_list.append(robot_name)
target_set_to_send.extend(list(filter(
lambda x: distance(x.pos_node.pos, pos_dict_name_pos[robot_name]) < robot.sr,
target_set
)))
target_set_to_send = list(set(target_set_to_send))
return within_sr_range_list, target_set_to_send
def breakdowns_correction(robots, params):
if 'breakdowns' in params:
for robot in robots[:]:
if not robot.breakdowns:
for nei_robot in robots[:]:
if robot.name != nei_robot.name and robot.pos_node is nei_robot.pos_node:
robot.breakdowns = True
robot.breakdown_pose = robot.pos_node
# print(f'\n{robot.name} and {nei_robot.name} in breakdown')
# break
for robot in robots[:]:
if robot.breakdowns:
# robots.remove(robot)
robot.prev_pos_node = robot.breakdown_pose
robot.pos_node = robot.breakdown_pose
def print_table_of_messages(all_agents, iteration):
headers = ["to \ from", ]
for a in all_agents:
headers.append(a.name)
table = PrettyTable(headers)
for a in all_agents:
raw = [a.name]
for b in all_agents:
if b.name in a.message_box[iteration]:
cell_to_print = ''
for k, v in a.message_box[iteration][b.name].items():
cell_to_print = cell_to_print + str(k) + '->' + str(round(v, 2)) + '\n'
raw.append(cell_to_print)
else:
raw.append('')
table.add_row(raw)
print('---')
print(colored('### ITERATION: %s ###', 'yellow', 'on_grey') % (iteration + 1))
print(table)
def print_t_test(file_name):
results_dict = load_file(file_name)
max_length_of_name = max([len(x) for x, y in ALGORITHMS_TO_CHECK])
for alg_name1, _ in ALGORITHMS_TO_CHECK:
matrix1 = results_dict[alg_name1]['coverage']
for alg_name2, _ in ALGORITHMS_TO_CHECK:
if alg_name1 != alg_name2:
matrix2 = results_dict[alg_name2]['coverage']
print(f'{alg_name1} <-> {alg_name2} '
f'\tP_value: {ttest_ind(matrix1[-1], matrix2[-1])[1]: 10.2f}')
|
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import chainladder as cl
from rpy2.robjects.packages import importr
from rpy2.robjects import r
CL = importr('ChainLadder')
@pytest.fixture
def atol():
return 1e-5
def mack_r(data, alpha, est_sigma):
return r('mack<-MackChainLadder({},alpha={}, est.sigma="{}", tail=TRUE)'.format(data, alpha, est_sigma))
def mack_p(data, average, est_sigma):
return cl.TailCurve(curve='exponential').fit_transform(cl.Development(average=average, sigma_interpolation=est_sigma).fit_transform(cl.load_dataset(data)))
def mack_p_no_tail(data, average, est_sigma):
return cl.Development(average=average, sigma_interpolation=est_sigma).fit_transform(cl.load_dataset(data))
data = ['RAA', 'ABC', 'GenIns', 'MW2008', 'MW2014']
# M3IR5 in R fails silently on exponential tail. Python actually computes it.
averages = [('simple', 0), ('volume', 1), ('regression', 2)]
est_sigma = [('mack', 'Mack'), ('log-linear', 'log-linear')]
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages)
@pytest.mark.parametrize('est_sigma', est_sigma)
def test_mack_tail_ldf(data, averages, est_sigma, atol):
r = np.array(mack_r(data, averages[1], est_sigma[1]).rx('f'))
p = mack_p(data, averages[0], est_sigma[0]).ldf_.values[0, 0, :, :]
p = np.concatenate((p[:,:-2], np.prod(p[:, -2:],-1, keepdims=True)), -1)
p = np.unique(p, axis=-2)
assert_allclose(r, p, atol=atol)
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages)
@pytest.mark.parametrize('est_sigma', est_sigma)
def test_mack_tail_sigma(data, averages, est_sigma, atol):
r = np.array(mack_r(data, averages[1], est_sigma[1]).rx('sigma'))
p = mack_p(data, averages[0], est_sigma[0]).sigma_.values[0, 0, :, :]
p = np.unique(p, axis=-2)
assert_allclose(r, p, atol=atol)
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages)
@pytest.mark.parametrize('est_sigma', est_sigma)
def test_mack_tail_std_err(data, averages, est_sigma, atol):
r = np.array(mack_r(data, averages[1], est_sigma[1]).rx('f.se'))
p = mack_p(data, averages[0], est_sigma[0]).std_err_.values[0, 0, :, :]
p = np.unique(p, axis=-2)
assert_allclose(r, p, atol=atol)
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages[0:1])
@pytest.mark.parametrize('est_sigma', est_sigma[0:1])
def test_tail_doesnt_mutate_std_err(data, averages, est_sigma):
p = mack_p(data, averages[0], est_sigma[0]).std_err_.values[:, :, :, :-1]
p_no_tail = mack_p_no_tail(data, averages[0], est_sigma[0]).std_err_.values
assert_equal(p_no_tail, p)
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages[0:1])
@pytest.mark.parametrize('est_sigma', est_sigma[0:1])
def test_tail_doesnt_mutate_ldf_(data, averages, est_sigma):
p = mack_p(data, averages[0], est_sigma[0]).ldf_.values[..., :len(cl.load_dataset(data).ddims)-1]
p_no_tail = mack_p_no_tail(data, averages[0], est_sigma[0]).ldf_.values
assert_equal(p_no_tail, p)
@pytest.mark.parametrize('data', data)
@pytest.mark.parametrize('averages', averages[0:1])
@pytest.mark.parametrize('est_sigma', est_sigma[0:1])
def test_tail_doesnt_mutate_sigma_(data, averages, est_sigma):
p = mack_p(data, averages[0], est_sigma[0]).sigma_.values[:, :, :, :-1]
p_no_tail = mack_p_no_tail(data, averages[0], est_sigma[0]).sigma_.values
assert_equal(p_no_tail, p)
|
#
# Copyright 2021. Clumio, Inc.
#
from typing import Any, Dict, Mapping, Optional, Sequence, Type, TypeVar
T = TypeVar('T', bound='ComplianceStatsDeprecated')
class ComplianceStatsDeprecated:
"""Implementation of the 'ComplianceStatsDeprecated' model.
ComplianceStatsDeprecated denotes compliance metrics for all entities associated
with a given type
Attributes:
COMPLIANT:
Compliant count.
DEACTIVATED:
Deactivated count.
NON_COMPLIANT:
Non-Compliant count.
SEEDING:
Seeding count.
WAIT_FOR_SEEDING:
Wait-for-seeding count.
"""
# Create a mapping from Model property names to API property names
_names = {
'COMPLIANT': 'COMPLIANT',
'DEACTIVATED': 'DEACTIVATED',
'NON_COMPLIANT': 'NON_COMPLIANT',
'SEEDING': 'SEEDING',
'WAIT_FOR_SEEDING': 'WAIT_FOR_SEEDING',
}
def __init__(
self,
COMPLIANT: int = None,
DEACTIVATED: int = None,
NON_COMPLIANT: int = None,
SEEDING: int = None,
WAIT_FOR_SEEDING: int = None,
) -> None:
"""Constructor for the ComplianceStatsDeprecated class."""
# Initialize members of the class
self.COMPLIANT: int = COMPLIANT
self.DEACTIVATED: int = DEACTIVATED
self.NON_COMPLIANT: int = NON_COMPLIANT
self.SEEDING: int = SEEDING
self.WAIT_FOR_SEEDING: int = WAIT_FOR_SEEDING
@classmethod
def from_dictionary(cls: Type, dictionary: Mapping[str, Any]) -> Optional[T]:
"""Creates an instance of this model from a dictionary
Args:
dictionary: A dictionary representation of the object as obtained
from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if not dictionary:
return None
# Extract variables from the dictionary
COMPLIANT = dictionary.get('COMPLIANT')
DEACTIVATED = dictionary.get('DEACTIVATED')
NON_COMPLIANT = dictionary.get('NON_COMPLIANT')
SEEDING = dictionary.get('SEEDING')
WAIT_FOR_SEEDING = dictionary.get('WAIT_FOR_SEEDING')
# Return an object of this model
return cls(COMPLIANT, DEACTIVATED, NON_COMPLIANT, SEEDING, WAIT_FOR_SEEDING)
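# Illustrative usage (hypothetical values):
#   stats = ComplianceStatsDeprecated.from_dictionary({'COMPLIANT': 10, 'NON_COMPLIANT': 2})
#   stats.COMPLIANT      -> 10
#   stats.SEEDING        -> None (keys absent from the dictionary default to None)
#   ComplianceStatsDeprecated.from_dictionary({})  -> None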
|
"""
This file defines:
-quad_area_centroid (method)
- CHEXA8 (class)
- f1
- f2
"""
import numpy as np
from numpy import arange, cross, abs, searchsorted, array, ones, eye
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import integer
from pyNastran.dev.bdf_vectorized.cards.elements.solid.solid_element import SolidElement
def quad_area_centroid(n1, n2, n3, n4):
"""
Gets the area, :math:`A`, and centroid of a quad.::
1-----2
| / |
| / |
4-----3
"""
a = n1 - n2
b = n2 - n4
area1 = 0.5 * norm(cross(a, b), axis=1)
c1 = (n1 + n2 + n4) / 3.
a = n2 - n4
b = n2 - n3
area2 = 0.5 * norm(cross(a, b), axis=1)
#area2.reshape(
c2 = (n2 + n3 + n4) / 3.
area = area1 + area2
try:
#centroid = (c1 * area1 + c2 * area2) / area
centroid = ((c1.T * area1 + c2.T * area2) / area).T
except FloatingPointError:
msg = '\nc1=%r\narea1=%r\n' % (c1, area1)
msg += 'c2=%r\narea2=%r' % (c2, area2)
raise FloatingPointError(msg)
except ValueError:
msg = 'c1 = %s\n' % str(c1.shape)
msg += 'c2 = %s\n' % str(c2.shape)
msg += 'area1 = %s\n' % str(area1.shape)
msg += 'area2 = %s\n' % str(area2.shape)
msg += 'area = %s' % str(area.shape)
print(msg)
#dot(c1.T, area1)
raise
n = len(n1)
assert area.shape == (n, ), area.shape
assert centroid.shape == (n, 3), centroid.shape
return(area, centroid)
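# Worked example (illustration only): for a unit square in the z=0 plane passed
# as stacked (1, 3) arrays,
#     n1 = array([[0., 0., 0.]]); n2 = array([[1., 0., 0.]])
#     n3 = array([[1., 1., 0.]]); n4 = array([[0., 1., 0.]])
#     area, centroid = quad_area_centroid(n1, n2, n3, n4)
# the triangles (n1, n2, n4) and (n2, n3, n4) each contribute an area of 0.5,
# so area -> [1.0] and centroid -> [[0.5, 0.5, 0.0]].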
class CHEXA8(SolidElement):
type = 'CHEXA8'
nnodes = 8
def __init__(self, model):
"""
Defines the CHEXA object.
Parameters
----------
model : BDF
the BDF object
"""
SolidElement.__init__(self, model)
def add_card(self, card, comment=''):
#self.model.log.debug('chexa8-add')
i = self.i
#comment = self._comments[i]
eid = integer(card, 1, 'element_id')
if comment:
self.set_comment(eid, comment)
#: Element ID
self.element_id[i] = eid
#: Property ID
self.property_id[i] = integer(card, 2, 'property_id')
#: Node IDs
nids = array([
integer(card, 3, 'node_id_1'),
integer(card, 4, 'node_id_2'),
integer(card, 5, 'node_id_3'),
integer(card, 6, 'node_id_4'),
integer(card, 7, 'node_id_5'),
integer(card, 8, 'node_id_6'),
integer(card, 9, 'node_id_7'),
integer(card, 10, 'node_id_8')
], dtype='int32')
assert 0 not in nids, '%s\n%s' % (nids, card)
self.node_ids[i, :] = nids
assert len(card) == 11, 'len(CHEXA8 card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def update(self, maps):
"""
maps = {
'node_id' : nid_map,
'property' : pid_map,
}
"""
if self.n:
eid_map = maps['element']
nid_map = maps['node']
pid_map = maps['property']
for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id, self.node_ids)):
print(self.print_card(i))
self.element_id[i] = eid_map[eid]
self.property_id[i] = pid_map[pid]
self.node_ids[i, 0] = nid_map[nids[0]]
self.node_ids[i, 1] = nid_map[nids[1]]
self.node_ids[i, 2] = nid_map[nids[2]]
self.node_ids[i, 3] = nid_map[nids[3]]
self.node_ids[i, 4] = nid_map[nids[4]]
self.node_ids[i, 5] = nid_map[nids[5]]
self.node_ids[i, 6] = nid_map[nids[6]]
self.node_ids[i, 7] = nid_map[nids[7]]
def get_mass_matrix(self, i, model, positions, index0s, is_lumped=True):
nnodes = 8
ndof = 3 * nnodes
pid = self.property_id[i]
rho = self.model.elements.properties_solid.psolid.get_density_by_property_id(pid)[0]
n0, n1, n2, n3, n4, n5, n6, n7 = self.node_ids[i, :]
V = volume8(
positions[self.node_ids[i, 0]],
positions[self.node_ids[i, 1]],
positions[self.node_ids[i, 2]],
positions[self.node_ids[i, 3]],
positions[self.node_ids[i, 4]],
positions[self.node_ids[i, 5]],
positions[self.node_ids[i, 6]],
positions[self.node_ids[i, 7]],
)
mass = rho * V
if is_lumped:
mi = mass / 4.
nnodes = 4
M = eye(ndof, dtype='float32')
else:
mi = mass / 20.
M = ones((ndof, ndof), dtype='float32')
for i in range(nnodes):
j = i * 3
M[j:j+3, j:j+3] = 2.
M *= mi
dofs, nijv = self.get_dofs_nijv(index0s, n0, n1, n2, n3, n4, n5, n6, n7)
return M, dofs, nijv
    def get_stiffness_matrix(self, i, model, positions, index0s):
        # The isoparametric stiffness formulation is not implemented yet; see
        # the commented-out scaffolding in get_dofs_nijv.
        raise NotImplementedError('CHEXA8 stiffness matrix is not implemented')
    def get_dofs_nijv(self, index0s, n0, n1, n2, n3, n4, n5, n6, n7):
        # NOTE: the material/volume lookup below is stiffness scaffolding that
        # does not belong in this method and references names (i, positions,
        # volume4) that are not in scope here, so it is left commented out and
        # only the dof/nijv bookkeeping is executed.
        #pid = self.property_id[i]
        #prop = self.model.elements.properties_solid.psolid
        #rho = prop.get_density_by_property_id(pid)[0]
        #n0, n1, n2, n3 = self.node_ids[i, :]
        #xyz1 = positions[self.node_ids[i, 0]]
        #xyz2 = positions[self.node_ids[i, 1]]
        #xyz3 = positions[self.node_ids[i, 2]]
        #xyz4 = positions[self.node_ids[i, 3]]
        #vol = volume4(xyz1, xyz2, xyz3, xyz4)
        #stiffness = rho * vol
        #ki = stiffness / 4.
        #nnodes = 8
        #ndof = nnodes * 3
        #K = np.zeros((ndof, ndof), dtype='float32')
        #mid1 = prop.material_id[0]
        #mat = self.model.materials.get_solid_material(mid1)
        #print(mat)
        #E = mat.E[0]
        #nu = mat.nu[0]
        #G = mat.G[0]
i0 = index0s[n0]
i1 = index0s[n1]
i2 = index0s[n2]
i3 = index0s[n3]
i4 = index0s[n4]
i5 = index0s[n5]
i6 = index0s[n6]
i7 = index0s[n7]
dofs = array([
i0, i0+1, i0+2,
i1, i1+1, i1+2,
i2, i2+1, i2+2,
i3, i3+1, i3+2,
i4, i4+1, i4+2,
i5, i5+1, i5+2,
i6, i6+1, i6+2,
i7, i7+1, i7+2,
], 'int32')
nijv = [
# translation
(n0, 1), (n0, 2), (n0, 3),
(n1, 1), (n1, 2), (n1, 3),
(n2, 1), (n2, 2), (n2, 3),
(n3, 1), (n3, 2), (n3, 3),
(n4, 1), (n4, 2), (n4, 3),
(n5, 1), (n5, 2), (n5, 3),
(n6, 1), (n6, 2), (n6, 3),
(n7, 1), (n7, 2), (n7, 3),
]
uvw = np.array([
[-1, -1, 1,],
[1, -1, -1,],
[1, 1, -1],
[-1, 1, -1],
[-1, -1, 1,],
[1, -1, 1,],
[1, 1, 1],
[-1, 1, 1],
])
#n1 = 0.125 * (1 - u) * (1 - v) * (1 - w)
#n2 = 0.125 * (1 + u) * (1 - v) * (1 - w)
#n3 = 0.125 * (1 + u) * (1 + v) * (1 - w)
#n4 = 0.125 * (1 - u) * (1 + v) * (1 - w)
#n5 = 0.125 * (1 - u) * (1 - v) * (1 + w)
#n6 = 0.125 * (1 + u) * (1 - v) * (1 + w)
#n7 = 0.125 * (1 + u) * (1 + v) * (1 + w)
#n8 = 0.125 * (1 - u) * (1 + v) * (1 + w)
#n1u = 0.125 * (1 - v) * (1 - w) * -1
#n2u = 0.125 * (1 - v) * (1 - w) * 1
#n3u = 0.125 * (1 + v) * (1 - w) * 1
#n4u = 0.125 * (1 + v) * (1 - w) * -1
#n5u = 0.125 * (1 - v) * (1 + w) * -1
#n6u = 0.125 * (1 - v) * (1 + w) * 1
#n7u = 0.125 * (1 + v) * (1 + w) * 1
#n8u = 0.125 * (1 + v) * (1 + w) * -1
#n1v = 0.125 * (1 - u) * (1 - v) * -1
#n2v = 0.125 * (1 + u) * (1 - v) * -1
#n3v = 0.125 * (1 + u) * (1 + v) * 1
#n4v = 0.125 * (1 - u) * (1 + v) * 1
#n5v = 0.125 * (1 - u) * (1 - v) * -1
#n6v = 0.125 * (1 + u) * (1 - v) * -1
#n7v = 0.125 * (1 + u) * (1 + v) * 1
#n8v = 0.125 * (1 - u) * (1 + v) * 1
#n1w = 0.125 * (1 - u) * (1 - v) * -1
#n2w = 0.125 * (1 + u) * (1 - v) * -1
#n3w = 0.125 * (1 + u) * (1 + v) * -1
#n4w = 0.125 * (1 - u) * (1 + v) * -1
#n5w = 0.125 * (1 - u) * (1 - v) * 1
#n6w = 0.125 * (1 + u) * (1 - v) * 1
#n7w = 0.125 * (1 + u) * (1 + v) * 1
#n8w = 0.125 * (1 - u) * (1 + v) * 1
        # Unfinished isoparametric-stiffness scaffolding; it references names
        # (E, nu, vol, natural_derivatives, global_coord) that are not defined
        # in this method, so it is left commented out for reference.
        #Dcoeff = E / ((1. + nu) * (1. - 2. * nu))
        #D = Dcoeff * np.array([
        #    [(1. - nu), nu, nu, 0, 0, 0],
        #    [nu, (1. - nu), nu, 0, 0, 0],
        #    [nu, nu, (1. - nu), 0, 0, 0],
        #    [0, 0, 0, ((1 - 2 * nu) / 2.), 0, 0],
        #    [0, 0, 0, 0, ((1 - 2 * nu) / 2.), 0],
        #    [0, 0, 0, 0, 0, ((1 - 2 * nu) / 2.)],
        #], dtype='float32')
        #integration = 'complete'
        #if integration == 'complete':
        #    # 2x2x2 Gauss quadrature: points at +/- 1/sqrt(3), unit weights
        #    locations = np.array([
        #        [-0.577350269189626, -0.577350269189626, -0.577350269189626],
        #        [0.577350269189626, -0.577350269189626, -0.577350269189626],
        #        [0.577350269189626, 0.577350269189626, -0.577350269189626],
        #        [-0.577350269189626, 0.577350269189626, -0.577350269189626],
        #        [-0.577350269189626, -0.577350269189626, 0.577350269189626],
        #        [0.577350269189626, -0.577350269189626, 0.577350269189626],
        #        [0.577350269189626, 0.577350269189626, 0.577350269189626],
        #        [-0.577350269189626, 0.577350269189626, 0.577350269189626],
        #    ], dtype='float32')
        #    weights = np.array([1, 1, 1, 1, 1, 1, 1, 1], dtype='float32')
        #    jacobian_matrix = natural_derivatives * global_coord
        #    inv_jacobian = np.linalg.inv(jacobian_matrix)
        #    xy_derivatives = inv_jacobian * natural_derivatives
        #    B = array([
        #        #[a1, 0., 0., a2, 0., 0., a3, 0., 0., a4, 0., 0.],
        #        #[0., b1, 0., 0., b2, 0., 0., b3, 0., 0., b4, 0.],
        #        #[0., 0., c1, 0., 0., c2, 0., 0., c3, 0., 0., c4],
        #        #[b1, a1, 0., b2, a2, 0., b3, a3, 0., b4, a4, 0.],
        #        #[0., c1, b1, 0., c2, b2, 0., c3, b3, 0., c4, b4],
        #        #[c1, 0., a1, c2, 0., a2, c3, 0., a3, c4, 0., a4],
        #    ]) / (6 * vol)
        #elif integration == 'reduced':
        #    locations = np.array([0, 0], dtype='float32')
        #    weights = np.array([4])
return dofs, nijv
def _verify(self, xref=True):
eid = self.eid
pid = self.Pid()
nids = self.node_ids
assert isinstance(eid, int)
assert isinstance(pid, int)
for i, nid in enumerate(nids):
assert isinstance(nid, int), 'nid%i is not an integer; nid=%s' %(i, nid)
if xref:
c = self.centroid()
v = self.volume()
assert isinstance(v, float)
for i in range(3):
assert isinstance(c[i], float)
def get_node_indicies(self, i=None):
if i is None:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 3])
i5 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 4])
i6 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 5])
i7 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 6])
i8 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 7])
else:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 3])
i5 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 4])
i6 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 5])
i7 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 6])
i8 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 7])
return i1, i2, i3, i4, i5, i6, i7, i8
def _get_node_locations_by_index(self, i, xyz_cid0):
"""
Parameters
----------
i : (nnodes, ) int ndarray; None -> all
node IDs
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
"""
grid = self.model.grid
get_node_index_by_node_id = self.model.grid.get_node_index_by_node_id
node_ids = self.node_ids
#msg = ', which is required by %s' % self.type
i1, i2, i3, i4, i5, i6, i7, i8 = self.get_node_indicies(i)
n1 = xyz_cid0[i1, :]
n2 = xyz_cid0[i2, :]
n3 = xyz_cid0[i3, :]
n4 = xyz_cid0[i4, :]
n5 = xyz_cid0[i5, :]
n6 = xyz_cid0[i6, :]
n7 = xyz_cid0[i7, :]
n8 = xyz_cid0[i8, :]
return n1, n2, n3, n4, n5, n6, n7, n8
def get_volume_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the volume be summed
Notes
-----
Volume for a CHEXA is the average area of two opposing faces
times the length between the centroids of those points
"""
n1, n2, n3, n4, n5, n6, n7, n8 = self._get_node_locations_by_element_id(element_id, xyz_cid0)
volume = volume8(n1, n2, n3, n4, n5, n6, n7, n8)
if total:
volume = abs(volume).sum()
else:
volume = abs(volume)
return volume
def get_mass_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
        Gets the mass for one or more CHEXA elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the centroid be summed
"""
if element_id is None:
element_id = self.element_id
if xyz_cid0 is None:
xyz_cid0 = self.model.grid.get_position_by_node_index()
n = len(element_id)
V = self.get_volume_by_element_id(element_id, xyz_cid0)
mid = self.model.properties_solid.get_material_id_by_property_id(self.property_id)
assert mid.shape == (n,), 'mid.shape=%s; n=%s' % (str(mid.shape), n)
rho = self.model.materials.get_density_by_material_id(mid)
assert V.shape == (n,), 'V.shape=%s; n=%s' % (str(V.shape), n)
assert rho.shape == (n,), 'rho.shape=%s; n=%s' % (str(rho.shape), n)
mass = V * rho
if total:
mass = mass.sum()
return mass
def get_centroid_volume_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid and volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the volume be summed; centroid be averaged
.. seealso:: CHEXA8.get_volume_by_element_id() and
CHEXA8.get_centroid_by_element_id() for more information.
"""
n1, n2, n3, n4, n5, n6, n7, n8 = self._get_node_locations_by_element_id(element_id, xyz_cid0)
(A1, c1) = quad_area_centroid(n1, n2, n3, n4)
(A2, c2) = quad_area_centroid(n5, n6, n7, n8)
centroid = (c1 * A1 + c2 * A2) / (A1 + A2)
volume = (A1 + A2) / 2. * norm(c1 - c2, axis=1)
if total:
centroid = centroid.mean()
volume = abs(volume).sum()
else:
volume = abs(volume)
assert volume.min() > 0.0, 'volume.min() = %f' % volume.min()
return centroid, volume
def get_centroid_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the centroid be averaged
"""
n1, n2, n3, n4, n5, n6, n7, n8 = self._get_node_locations_by_element_id(
element_id, xyz_cid0)
(A1, c1) = quad_area_centroid(n1, n2, n3, n4)
(A2, c2) = quad_area_centroid(n5, n6, n7, n8)
centroid = (c1 * A1 + c2 * A2) / (A1 + A2)
if total:
centroid = centroid.mean(axis=0)
return centroid
def get_face_nodes(self, nid, nid_opposite):
raise NotImplementedError()
#nids = self.node_ids[:8]
#indx = nids.index(nid_opposite)
#nids.pop(indx)
#return nids
def write_card(self, bdf_file, size=8, element_id=None):
if self.n:
if element_id is None:
i = arange(self.n)
else:
i = searchsorted(self.element_id, element_id)
for (eid, pid, n) in zip(self.element_id[i], self.property_id[i], self.node_ids[i]):
if eid in self._comments:
bdf_file.write(self._comments[eid])
card = ['CHEXA', eid, pid, n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7]]
bdf_file.write(print_card_8(card))
def volume8(n1, n2, n3, n4, n5, n6, n7, n8):
(A1, c1) = quad_area_centroid(n1, n2, n3, n4)
(A2, c2) = quad_area_centroid(n5, n6, n7, n8)
volume = (A1 + A2) / 2. * norm(c1 - c2, axis=1)
return volume
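# Worked example (illustration only): for a unit cube whose bottom face is the
# unit square at z=0 (n1..n4) and whose top face is the same square at z=1
# (n5..n8), both faces have area 1.0 and centroids (0.5, 0.5, 0) and
# (0.5, 0.5, 1), so volume8 returns (1 + 1) / 2. * 1 = [1.0].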
|
def main():
secret_number = 777
print(
"""
+================================+
| Welcome to my game, muggle! |
| Enter an integer number |
| and guess what number I've |
| picked for you. |
| So, what is the secret number? |
+================================+
""")
n = int(input("Enter a number: "))
while (n != secret_number):
print("Ha ha! You're stuck in my loop!")
n = int(input("Enter a number: "))
print("Well done, muggle! You are free now.")
if __name__ == '__main__':
main()
|
#! /usr/bin/python
# -*- coding: utf8 -*-
import deeptensor as dt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import horovod.torch as hvd
# Init horovod
dt.train.init_library()
# Configuration
cfg = dt.config.Config(name="ImageNet")
ARGS = cfg.opt().args
# Datalink over network
#def datalink_recv(socket, packet):
# opt = dt.Opt().loads(packet._data.decode())
# #print(opt)
# # Set learning rate
# if opt.t == 'cmd':
# if opt.a == 'set':
# if opt.key == 'lr':
# dt.train.set_lr_val(opt.val)
# dt.util.datalink().send_opt_sock(socket, dt.Opt(ACK='OK'))
#if ARGS.port > 1000 and hvd.rank() == 0:
# dt.util.datalink_start(port=ARGS.port)
# dt.util.datalink_register_recv(datalink_recv)
class ImageNetEstimator(dt.estimator.ClassEstimator):
def __init__(self, ctx):
super(ImageNetEstimator, self).__init__(ctx)
self.tag = "EST::IMAGENET"
dt.trace(dt.DC.MODEL, "[{}] ({}) __init__".format(self.tag, type(self).__name__))
def build_data(self):
dt.trace(dt.DC.MODEL, "[{}] ({}) build data".format(self.tag, type(self).__name__))
args = self._ctx.args
data = dt.data.ImageNet(data_dir=args.data_dir,
batch_size=args.batch_size, valid_size=args.valid_size,
out_size=args.out_size, num_workers=args.num_workers, pin_memory=self.use_cuda)
data.init_data()
data.load_data()
self._data = data
return True
def build_model(self):
dt.trace(dt.DC.MODEL, "[{}] ({}) build model".format(self.tag, type(self).__name__))
args = self._ctx.args
pretrained = (args.pretrained > 0)
if args.model_name == 'efficientnet':
if args.model_type == 'b0':
self._model = dt.model.efficientnet.efficientnet_b0(pretrained=pretrained)
elif args.model_type == 'b1':
self._model = dt.model.efficientnet.efficientnet_b1(pretrained=pretrained)
elif args.model_type == 'b2':
self._model = dt.model.efficientnet.efficientnet_b2(pretrained=pretrained)
elif args.model_type == 'b3':
self._model = dt.model.efficientnet.efficientnet_b3(pretrained=pretrained)
elif args.model_type == 'b4':
self._model = dt.model.efficientnet.efficientnet_b4(pretrained=pretrained)
elif args.model_type == 'b5':
self._model = dt.model.efficientnet.efficientnet_b5(pretrained=pretrained)
elif args.model_type == 'b6':
self._model = dt.model.efficientnet.efficientnet_b6(pretrained=pretrained)
elif args.model_type == 'b7':
self._model = dt.model.efficientnet.efficientnet_b7(pretrained=pretrained)
elif args.model_name == 'efficientnet_lm':
            if args.model_type in ('b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7'):
model_arch = "efficientnet-{}".format(args.model_type)
if pretrained:
self._model = dt.model.efficientnet.EfficientNetLM.from_pretrained(model_arch)
else:
self._model = dt.model.efficientnet.EfficientNetLM.from_name(model_arch)
elif args.model_name == 'efficientnet_rw':
            if args.model_type in ('b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7'):
model_arch = "efficientnet_{}".format(args.model_type)
self._model = dt.model.timm.create_model(model_arch, pretrained=pretrained)
elif args.model_name == 'fairnas':
if args.model_type == 'a':
self._model = dt.model.fairnas.FairNasA() # 8-gpu
elif args.model_name == 'resnet_rw':
#if dt.train.is_chief():
# dt.print_pp(dt.model.timm.list_models())
if args.model_type == '34':
self._model = dt.model.timm.create_model('resnet34', pretrained=pretrained)
elif args.model_type == '50':
self._model = dt.model.timm.create_model('resnet50', pretrained=pretrained)
else:
#if dt.train.is_chief():
# dt.print_pp(torchvision.models.__dict__)
self._model = torchvision.models.__dict__[args.model_name](pretrained=pretrained)
dt.info(dt.DC.TRAIN, "model {}, type {}, pretrained {}".format(args.model_name, args.model_type, args.pretrained))
return True
def post_model(self):
args = self._ctx.args
if dt.train.is_chief():
dt.summary.summary_model_patch(self._model)
dt.info(dt.DC.TRAIN, "\n{}".format(dt.summary.summary_model_fwd(self._model, (3, args.out_size, args.out_size), device='cpu')))
dt.summary.summary_model_patch(self._model, patch_fn=dt.summary.patch_clear_dt)
def build_optimizer(self):
if self._ctx.optim == 'RMSpropRW':
self._optimizer = dt.optimizer.RMSpropRW(self._model.parameters(), lr=self.trainer.get_lr_val(),
alpha=self._ctx.alpha, eps=self._ctx.opt_eps,
momentum=self._ctx.momentum, weight_decay=self._ctx.weight_decay,
centered=False, decoupled_decay=False, lr_in_momentum=True)
elif self._ctx.optim == 'RMSpropNA':
self._optimizer = dt.optimizer.RMSpropNA(self._model.parameters(), lr=self.trainer.get_lr_val(),
rho=self._ctx.alpha, eps=self._ctx.opt_eps,
momentum=self._ctx.momentum, weight_decay=self._ctx.weight_decay,
warmup=0)
elif self._ctx.optim == 'SDG':
self._optimizer = optim.SGD(self._model.parameters(), lr=self.trainer.get_lr_val(),
momentum=self._ctx.momentum, weight_decay=self._ctx.weight_decay)
else:
self._optimizer = None
return True
# Train
ctx = dt.Opt(args=ARGS,
optim=ARGS.optim, data_format=ARGS.data_format,
lr_initial=ARGS.lr_initial, lr_minimal=ARGS.lr_minimal, lr_curve=ARGS.lr_curve,
batch_size=ARGS.batch_size, valid_size=ARGS.valid_size,
validate_ep=ARGS.validate_ep, max_ep=ARGS.max_ep,
model_dir=ARGS.model_dir, save_interval=ARGS.save_interval,
alpha=ARGS.alpha, beta1=ARGS.beta1, beta2=ARGS.beta2, opt_eps=ARGS.opt_eps,
momentum=ARGS.momentum, weight_decay=ARGS.weight_decay,
random_seed=dt.util.random_int(1, 999999), gpu0=ARGS.gpu0, valid_only=ARGS.valid_only)
est = ImageNetEstimator(ctx)
est.build_flow()
trainer = dt.train.Trainer(ctx).init()
trainer.bind_estimator(est)
trainer.train_setup()
trainer.train_begin()
trainer.train()
trainer.train_end()
#dt.util.datalink_close()
|
import abc
import collections.abc
import socket
__all__ = ['get_socket_type', 'get_server_socket', 'get_client_socket',
           'SocketReader', 'SocketWriter']
def get_socket_type(host=None, ip_type=None):
if ip_type is not None:
return ip_type
if host and ':' in host:
return socket.AF_INET6
return socket.AF_INET
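# For example (illustrative):
#   get_socket_type('::1')        -> socket.AF_INET6  (':' marks an IPv6 literal)
#   get_socket_type('127.0.0.1')  -> socket.AF_INET
#   get_socket_type('x', socket.AF_INET6) -> socket.AF_INET6 (explicit ip_type wins)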
def get_server_socket(host, port, ip_type=None):
sock = socket.socket(get_socket_type(host, ip_type))
sock.bind((host, port))
return sock
def get_client_socket(host, port, ip_type=None):
sock = socket.socket(get_socket_type(host, ip_type))
sock.connect((host, port))
return sock
class SocketReader(collections.abc.Iterator):
@abc.abstractmethod
def close(self):
...
class SocketWriter(abc.ABC):
@abc.abstractmethod
def send(self, message):
...
@abc.abstractmethod
def close(self):
...
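# Hedged usage sketch (illustrative only; the hostname and port are arbitrary):
# a server built with these helpers would typically continue with the standard
# socket calls, e.g.
#   server = get_server_socket('127.0.0.1', 8765)
#   server.listen(1)
#   client = get_client_socket('127.0.0.1', 8765)
#   conn, _ = server.accept()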
|
#!/usr/bin/env python3
from harness import HttpError, UsersClient, PermissionsClient
from common import *
# print('user: {} secret: {}'.format(client_user_id, client_user_secret))
if args.action == 'user-add':
role_set = args.role_set
engine_id = args.engineid
users_client = UsersClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
try:
if role_set == 'client':
res = users_client.create_user(role_set_id=role_set, resource_id=engine_id)
            print_success(res, 'Added user:\n')
elif role_set == 'admin':
res = users_client.create_user(role_set_id=role_set, resource_id='*')
            print_success(res, 'Added user:\n')
else:
print("Whoopsie, bad role")
except HttpError as err:
print_failure(err, 'Error creating new user\n')
elif args.action == 'user-delete':
user_id = args.user_id
users_client = UsersClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
try:
# res = users_client.delete(u)
# print_success(res, 'Deleted user: {} Success:\n'.format(user_id))
print("Deleting user: {}".format(user_id))
print("Not implemented yet, try revoking permissions")
except HttpError as err:
print_failure(err, 'Error deleting user.')
elif args.action == 'grant':
role_set = args.role_set
user_id = args.user_id
if role_set == 'admin':
engine_id = '*'
else:
engine_id = args.engineid # non-positional engine-id passed as a param
permissions_client = PermissionsClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
try:
res = permissions_client.grant_permission(permitted_user_id=user_id, role_set_id=role_set, resource_id=engine_id)
# print_success(res, 'Added permissions for user: {} Success:\n'.format(user_id))
print_success(res, 'Granting permission for user: {} to act as: {} for engine-id: {} '.format(user_id, role_set, engine_id))
except HttpError as err:
print_failure(err, 'Error granting permission for user: {}\n'.format(user_id))
elif args.action == 'revoke':
role_set = args.role_set
user_id = args.user_id
if role_set == 'admin':
engine_id = '*'
else:
engine_id = args.engineid # non-positional engine-id passed as a param
permissions_client = PermissionsClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
try:
res = permissions_client.revoke_permission(permitted_user_id=user_id, role_set_id=role_set)
# print_success(res, 'Added permissions for user: {} Success:\n'.format(user_id))
print_success(res, 'Revoking permission for user: {} role: {} '.format(user_id, role_set))
except HttpError as err:
        print_failure(err, 'Error revoking permission for user: {}\n'.format(user_id))
elif args.action == 'status':
user_id = args.user_id
users_client = UsersClient(
url=url,
user_id=client_user_id,
user_secret=client_user_secret
)
try:
if user_id is not None:
res = users_client.get(user_id)
print_success(res, 'Status for user: {}\n'.format(user_id))
else:
res = users_client.get(user_id)
print_success(res, 'Status for all users:\n')
except HttpError as err:
print_failure(err, 'Error getting status of: {}\n'.format(user_id))
else:
print_warning("Unknown action: %{}".format(args.action))
|
from django.db import models
from django.forms import ModelForm
class Placeholder(models.Model):
    location = models.SlugField(max_length=50, unique=True, help_text=
        """This field must match the argument given in the {% editable %} tag. Use letters, numbers, underscores or hyphens only. Must be a unique name.""")
content = models.TextField(max_length=1500, help_text="Add any text you like!")
def __unicode__(self):
return self.content
class Meta:
ordering = ['location']
class EditForm(ModelForm):
class Meta:
model = Placeholder
fields = ('content',)
|
"""Code for checking if deployment is possible via CLI"""
import logging
import click
from shipping.commands import Process
from shipping.configs.base_config import AppConfig, HostConfig
from shipping.deploy.conda import check_if_deploy_possible
from shipping.environment import get_python_path
from shipping.log import get_log_line
from shipping.package import fetch_package_version
LOG = logging.getLogger(__name__)
@click.command(name="check")
@click.pass_context
def check_cmd(context):
"""Run a check if deployment would be possible"""
LOG.info("Running shipping check")
host_config: HostConfig = context.obj["host_config"]
app_config: AppConfig = context.obj["app_config"]
env_name: str = context.obj["env_name"]
LOG.info(
"%s wants to deploy %s on host %s in environment %s",
context.obj["current_user"],
app_config.tool,
context.obj["current_host"],
env_name,
)
if not check_if_deploy_possible(conda_env_name=env_name):
LOG.info("Please use 'shipping provision' to create valid conda environment")
return
python_process = Process(str(get_python_path(env_name)))
current_version = fetch_package_version(python_process, app_config.tool)
click.echo(
get_log_line(
time_zone=host_config.tz_object,
user=context.obj["current_user"],
tool=app_config.tool,
current_version=current_version,
)
)
|
import torch
import torchaudio
import torchaudio.functional as F
print(torch.__version__)
print(torchaudio.__version__)
import math
import os
import requests
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_sample_data"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_RIR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/room-response/rm1/impulse/Lab41-SRI-VOiCES-rm1-impulse-mc01-stu-clo.wav"
SAMPLE_RIR_PATH = os.path.join(_SAMPLE_DIR, "rir.wav")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_NOISE_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/distractors/rm1/babb/Lab41-SRI-VOiCES-rm1-babb-mc01-stu-clo.wav"
SAMPLE_NOISE_PATH = os.path.join(_SAMPLE_DIR, "bg.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_RIR_URL, SAMPLE_RIR_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_NOISE_URL, SAMPLE_NOISE_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def get_rir_sample(*, resample=None, processed=False):
rir_raw, sample_rate = _get_sample(SAMPLE_RIR_PATH, resample=resample)
if not processed:
return rir_raw, sample_rate
rir = rir_raw[:, int(sample_rate * 1.01) : int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
return rir, sample_rate
def get_noise_sample(*, resample=None):
return _get_sample(SAMPLE_NOISE_PATH, resample=resample)
waveform1, sample_rate1 = get_sample(resample=16000)
effects = [
["lowpass", "-1", "300"],
["speed", "0.8"],
["rate", f"{sample_rate1}"],
["reverb", "-w"],
]
waveform2, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(waveform1, sample_rate1, effects)
plot_waveform(waveform1, sample_rate1, title="Original", xlim=(-0.1, 3.2))
plot_waveform(waveform2, sample_rate2, title="Effects Applied", xlim=(-0.1, 3.2))
print_stats(waveform1, sample_rate=sample_rate1, src="Original")
print_stats(waveform2, sample_rate=sample_rate2, src="Effects Applied")
plot_specgram(waveform1, sample_rate1, title="Original", xlim=(0, 3.04))
play_audio(waveform1, sample_rate1)
plot_specgram(waveform2, sample_rate2, title="Effects Applied", xlim=(0, 3.04))
play_audio(waveform2, sample_rate2)
sample_rate = 8000
rir_raw, _ = get_rir_sample(resample=sample_rate)
plot_waveform(rir_raw, sample_rate, title="Room Impulse Response (raw)", ylim=None)
plot_specgram(rir_raw, sample_rate, title="Room Impulse Response (raw)")
play_audio(rir_raw, sample_rate)
rir = rir_raw[:, int(sample_rate * 1.01) : int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
print_stats(rir)
plot_waveform(rir, sample_rate, title="Room Impulse Response", ylim=None)
speech, _ = get_speech_sample(resample=sample_rate)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
augmented = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
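# Note: `rir` was time-reversed above, so conv1d (which computes cross-correlation)
# effectively performs a true convolution with the impulse response; left-padding
# the speech by rir_len - 1 keeps the output the same length as the input.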
plot_waveform(speech, sample_rate, title="Original", ylim=None)
plot_waveform(augmented, sample_rate, title="RIR Applied", ylim=None)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
plot_specgram(augmented, sample_rate, title="RIR Applied")
play_audio(augmented, sample_rate)
sample_rate = 8000
speech, _ = get_speech_sample(resample=sample_rate)
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
plot_waveform(noise, sample_rate, title="Background noise")
plot_specgram(noise, sample_rate, title="Background noise")
play_audio(noise, sample_rate)
speech_power = speech.norm(p=2)
noise_power = noise.norm(p=2)
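# Mix speech and noise at several target signal-to-noise ratios by scaling the speech
# relative to the noise power.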
for snr_db in [20, 10, 3]:
snr = math.exp(snr_db / 10)
scale = snr * noise_power / speech_power
noisy_speech = (scale * speech + noise) / 2
plot_waveform(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
plot_specgram(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
play_audio(noisy_speech, sample_rate)
waveform, sample_rate = get_speech_sample(resample=8000)
plot_specgram(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)
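# Apply several lossy codecs to the same clip and compare their spectrograms.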
configs = [
({"format": "wav", "encoding": "ULAW", "bits_per_sample": 8}, "8 bit mu-law"),
({"format": "gsm"}, "GSM-FR"),
({"format": "mp3", "compression": -9}, "MP3"),
({"format": "vorbis", "compression": -1}, "Vorbis"),
]
for param, title in configs:
augmented = F.apply_codec(waveform, sample_rate, **param)
plot_specgram(augmented, sample_rate, title=title)
play_audio(augmented, sample_rate)
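# Simulate a phone recording: add room reverberation, mix in background noise,
# band-limit and compand the signal, then pass it through the GSM codec.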
sample_rate = 16000
speech, _ = get_speech_sample(resample=sample_rate)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
rir, _ = get_rir_sample(resample=sample_rate, processed=True)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
speech = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
plot_specgram(speech, sample_rate, title="RIR Applied")
play_audio(speech, sample_rate)
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
snr_db = 8
scale = math.exp(snr_db / 10) * noise.norm(p=2) / speech.norm(p=2)
speech = (scale * speech + noise) / 2
plot_specgram(speech, sample_rate, title="BG noise added")
play_audio(speech, sample_rate)
speech, sample_rate = torchaudio.sox_effects.apply_effects_tensor(
speech,
sample_rate,
effects=[
["lowpass", "4000"],
["compand", "0.02,0.05", "-60,-60,-30,-10,-20,-8,-5,-8,-2,-8", "-8", "-7", "0.05"],
["rate", "8000"],
],
)
plot_specgram(speech, sample_rate, title="Filtered")
play_audio(speech, sample_rate)
speech = F.apply_codec(speech, sample_rate, format="gsm")
plot_specgram(speech, sample_rate, title="GSM Codec Applied")
play_audio(speech, sample_rate)
|
# set this to the full path to the dashvend repository checkout
DASHVEND_DIR = '/home/pi/dashvend'
# note: also update paths in:
# bin/.init.d.dashvend
# bin/_dashvend_control.sh
# bin/dashvend_screens.screenrc
# bin/show_screen_number.sh
# after testing, set this to True to use mainnet
MAINNET = False
# **public** mainnet bip32 seed for vending address generation
BIP32_MAINNET_SEED = 'drkpRxPP5eefb7GqpdDSQKGPjJHHuBqPJJVD2Qx4BHF7CVP1dC8uVxVy6JfDQsn1U1EazDZPa4DWMsmV7pDhMtLTQQypHHc6cFnPPYZvwib5nVi' # noqa
# on a secure machine, generate above with pycoin 'ku' command.
# for testing, ku is already installed on this machine during the 'make'
# install pycoin by doing:
# git clone https://github.com/richardkiss/pycoin.git
# cd pycoin ; sudo python setup.py install
# ku -n DASH -s 0p/0 P:<a unique, unpublished 100 character long sentence>
# and use the **public version** output
# for sane passphrase selection see: https://masternode.me/smart_passwords.html
# **public** testnet bip32 seed for vending address generation
BIP32_TESTNET_SEED = 'DRKPuUbaZSQn2SV5vyTh9DRHcooktYP3TB3NQa8cgMGXxT8znzH5opFtDgY8PVTKzTohyEfitf1TkcxnygJdY7ACJxvbVTvSVn6q6gCEVfydtJ6y' # noqa
# ku -n tDASH -s 0p/0 P:<a unique, unpublished 100 character long sentence>
# require seven (out of ten) masternode locks to accept purchase
IX_LOCK_TRESHOLD = 7
# note: sometimes all ten locks do not fully propagate across the network,
# settings above 9 are not recommended.
# dash value required to trigger sale
VENDING_COST = 0.01
|
files = """
bdg_math.cpp
bdg_math.h
bdg_random.cpp
bdg_random.h
button.cpp
button.h
city.cpp
city.h
constants.h
coord.cpp
coord.h
gameclock.cpp
gameclock.h
hsv.cpp
hsv.h
kruskal.cpp
kruskal.h
layers.h
main.cpp
mode_highway.cpp
mode_highway.h
modes.h
node.cpp
node.h
nodemgr.cpp
nodemgr.h
screen_bg.cpp
screen_bg.h
sound_test.cpp
sparkle.cpp
""".split()
total = 0
for f in files:
fn = f.strip()
print(fn)
with open(fn) as fileobj:
lines = fileobj.readlines()
count = len(lines)
print("line count:", count)
total += count
print ("total line count:", total)
|
# Copyright (c) 2017, Zebula Sampedro, CU Boulder Research Computing
"""
Singularity Spawner
SingularitySpawner provides a mechanism for spawning Jupyter Notebooks inside of Singularity containers. The spawner options form is leveraged such that the user can specify which Singularity image the spawner should use.
A `singularity exec {notebook spawn cmd}` is used to start the notebook inside of the container.
"""
import os, subprocess
import pipes, shutil
from subprocess import Popen
from tornado import gen
from tornado.process import Subprocess
from tornado.iostream import StreamClosedError
from singularity.cli import Singularity
from jupyterhub.spawner import (
LocalProcessSpawner, set_user_setuid
)
from jupyterhub.utils import random_port
from jupyterhub.traitlets import Command
from traitlets import (
Integer, Unicode, Float, Dict, List, Bool, default
)
class SingularitySpawner(LocalProcessSpawner):
"""SingularitySpawner - extends the default LocalProcessSpawner to allow for:
1) User-specification of a singularity image via the Spawner options form
2) Spawning a Notebook server within a Singularity container
"""
singularity_cmd = Command(['/opt/singularity/3.3.0/bin/singularity','-d','exec'],
help="""
This is the singularity command that will be executed when starting the
single-user server. The image path and notebook server args will be concatenated to the end of this command. This is a good place to
specify any site-specific options that should be applied to all users,
such as default mounts.
"""
).tag(config=True)
notebook_cmd = Command(['jupyterhub-singleuser'],
help="""
The command used for starting the single-user server.
Provide either a string or a list containing the path to the startup script command. Extra arguments,
other than this path, should be provided via `args`.
"""
).tag(config=True)
    imagename = Unicode('',
        help="""
        Name of the Singularity image selected for the user. It is exported to
        the container environment and used to build the per-user scratch and
        home directory paths.
        """
    ).tag(config=True)
default_image_path = Unicode('',
help="""
Absolute POSIX filepath to Singularity image that will be used to
execute the notebook server spawn command, if another path is not
specified by the user.
"""
).tag(config=True)
def _build_cmd(self):
cmd = []
cmd.extend(self.singularity_cmd)
cmd.extend([self.default_image_path])
cmd.extend(self.notebook_cmd)
return cmd
@property
def cmd(self):
return self._build_cmd()
def get_env(self):
"""Get the complete set of environment variables to be set in the spawned process."""
env = super().get_env()
env = self.user_env(env)
env['CONTAINER_IMAGE'] = str(self.imagename)
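        # create a per-user, per-image scratch directory on the host and bind it to /tmp inside the container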
tmpdirpath = os.path.join('/tmp',self.user.name,self.imagename)
if not os.path.exists(tmpdirpath):
os.makedirs(tmpdirpath, exist_ok=True)
shutil.chown(tmpdirpath, user=str(self.user.name), group=str(self.user.name))
os.chmod(tmpdirpath, 0o755)
env['SINGULARITY_BINDPATH'] = '/tmp/'+str(self.user.name)+'/'+str(self.imagename)+':/tmp'
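        # resolve the user's BIOJHUBHOME from their login environment; if unset, fall back to a
        # per-user container_cache directory, and ensure it exists before binding it as the container home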
biojhubhome = str(subprocess.check_output('sudo -Hiu '+str(self.user.name)+' env| grep BIOJHUBHOME|cut -f2 -d "="', shell=True),'utf-8').rstrip()
        if biojhubhome == "":
biojhubhome = '/data/users/'+str(self.user.name)+'/container_cache/'+str(self.imagename)
else:
biojhubhome = biojhubhome+'/'+str(self.imagename)
#if not os.path.exists(biojhubhome):
#print(str(subprocess.check_output('sudo -u '+str(self.user.name)+' mkdir -p '+biojhubhome),'utf-8'))
if not os.path.isdir(biojhubhome):
#os.makedirs(biojhubhome, exist_ok=True)
#shutil.chown(biojhubhome, user=str(self.user.name), group=str(self.user.name))
#os.chmod(biojhubhome, 0o755)
subprocess.call('sudo -u '+str(self.user.name)+' mkdir -p '+str(biojhubhome), shell=True)
subprocess.call('sudo -u '+str(self.user.name)+' chmod 755 '+str(biojhubhome), shell=True)
env['SINGULARITY_HOME'] = biojhubhome+":/home/jovyan"
return env
async def start(self):
"""Start the single-user server."""
self.port = random_port()
cmd = []
env = self.get_env()
cmd.extend(self.cmd)
cmd.extend(self.get_args())
if self.shell_cmd:
# using shell_cmd (e.g. bash -c),
# add our cmd list as the last (single) argument:
cmd = self.shell_cmd + [' '.join(pipes.quote(s) for s in cmd)]
self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
popen_kwargs = dict(
preexec_fn=self.make_preexec_fn(self.user.name),
start_new_session=True, # don't forward signals
)
popen_kwargs.update(self.popen_kwargs)
# don't let user config override env
popen_kwargs['env'] = env
try:
self.proc = Popen(cmd, **popen_kwargs)
except PermissionError:
# use which to get abspath
script = shutil.which(cmd[0]) or cmd[0]
self.log.error(
"Permission denied trying to run %r. Does %s have access to this file?",
script,
self.user.name,
)
raise
self.pid = self.proc.pid
if self.__class__ is not LocalProcessSpawner:
# subclasses may not pass through return value of super().start,
# relying on deprecated 0.6 way of setting ip, port,
# so keep a redundant copy here for now.
# A deprecation warning will be shown if the subclass
# does not return ip, port.
if self.ip:
self.server.ip = self.ip
self.server.port = self.port
self.db.commit()
return (self.ip or '127.0.0.1', self.port)
|
import sys
import cftime
import numpy as np
import numpy.matlib as npm
import pytest
import xarray as xr
@pytest.fixture
def dset():
start_date = np.array([0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334], dtype=np.float64)
start_date = np.append(start_date, start_date + 365)
end_date = np.array([31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], dtype=np.float64)
end_date = np.append(end_date, end_date + 365)
ds = xr.Dataset(coords={'time': 24, 'lat': 2, 'lon': 2, 'd2': 2})
ds['time'] = xr.DataArray(end_date, dims='time')
ds['lat'] = xr.DataArray([0, 1], dims='lat')
ds['lon'] = xr.DataArray([0, 1], dims='lon')
ds['d2'] = xr.DataArray([0, 1], dims='d2')
ds['time_bound'] = xr.DataArray(
np.array([start_date, end_date]).transpose(), dims=['time', 'd2']
)
ds['variable_1'] = xr.DataArray(
np.append(
np.zeros([12, 2, 2], dtype='float32'), np.ones([12, 2, 2], dtype='float32'), axis=0
),
dims=['time', 'lat', 'lon'],
)
ds['variable_2'] = xr.DataArray(
np.append(
np.ones([12, 2, 2], dtype='float32'), np.zeros([12, 2, 2], dtype='float32'), axis=0
),
dims=['time', 'lat', 'lon'],
)
ds.time.attrs['units'] = 'days since 0001-01-01 00:00:00'
ds.time.attrs['calendar'] = 'noleap'
ds.time.attrs['bounds'] = 'time_bound'
return ds.copy(True)
def xr_ds_ex(decode_times=True, nyrs=3, var_const=True):
"""return an example xarray.Dataset object, useful for testing functions"""
    # set up values for Dataset, nyrs years of analytic monthly values
days_1yr = np.array([31.0, 28.0, 31.0, 30.0, 31.0, 30.0, 31.0, 31.0, 30.0, 31.0, 30.0, 31.0])
time_edges = np.insert(np.cumsum(npm.repmat(days_1yr, nyrs, 1)), 0, 0)
time_bounds_vals = np.stack((time_edges[:-1], time_edges[1:]), axis=1)
time_vals = np.mean(time_bounds_vals, axis=1)
time_vals_yr = time_vals / 365.0
if var_const:
var_vals = np.ones_like(time_vals_yr)
else:
var_vals = np.sin(np.pi * time_vals_yr) * np.exp(-0.1 * time_vals_yr)
time_units = 'days since 0001-01-01'
calendar = 'noleap'
if decode_times:
time_vals = cftime.num2date(time_vals, time_units, calendar)
time_bounds_vals = cftime.num2date(time_bounds_vals, time_units, calendar)
# create Dataset, including time_bounds
time_var = xr.DataArray(
time_vals,
name='time',
dims='time',
coords={'time': time_vals},
attrs={'bounds': 'time_bounds'},
)
if not decode_times:
time_var.attrs['units'] = time_units
time_var.attrs['calendar'] = calendar
time_bounds = xr.DataArray(
time_bounds_vals, name='time_bounds', dims=('time', 'd2'), coords={'time': time_var}
)
var = xr.DataArray(var_vals, name='var_ex', dims='time', coords={'time': time_var})
ds = var.to_dataset()
ds = xr.merge((ds, time_bounds))
if decode_times:
ds.time.encoding['units'] = time_units
ds.time.encoding['calendar'] = calendar
return ds
|
import os
from flask import Flask
def create_app():
    from . import routes, models
    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)
    # make sure the instance folder exists so SQLite can create the database file in it
    os.makedirs(app.instance_path, exist_ok=True)
    app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{app.instance_path}/url.db"
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    routes.init_app(app)
    models.init_app(app)
    return app
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 17:24:59 2020
@author: Nikki Frazer
Student Number: 1003111116
"""
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
#Question 1
#create the basic plot
def divergent(x: float, y: float, iteration_count: int):
    """Given the data point (x, y) where c_pair = x + iy is in the complex
    plane (x and y values are from -2 to 2 exclusive), return [0] if the
    iteration z -> z*z + c diverges at that point c, and [1] if it stays
    bounded for iteration_count iterations."""
    z = complex(0, 0)
    c_pair = complex(x, y)
    for _ in range(iteration_count):
        z = (z * z) + c_pair
        # once |z| exceeds 2 the orbit is guaranteed to diverge
        if abs(z) > 2:
            # return exactly one value per point so the colour list matches the grid
            return [0]
    return [1]
print("For point (1, 1) in the complex plane, it is divergent if 0, convergent if 1:", divergent(1, 1, 50))
def mandel_plot(threshold, density):
"""
Given a threshold, return a plot for a mandelbrot set,
calling upon the function divergent to supply the
values of when an iteration of z diverges or converges.
"""
x_axis = np.linspace(-2, 2, density)
y_axis = np.linspace(-2, 2, density)
x_length = len(x_axis)
y_length = len(y_axis)
X, Y = np.meshgrid(x_axis, y_axis)
c= []
for i in range(x_length):
for k in range(y_length):
x = X[i,k]
y = Y[i,k]
c = c + divergent(x, y, threshold)
c = np.array(c)
plt.scatter(np.reshape(X, -1), np.reshape(Y, -1), c=np.reshape(c,-1))
plt.title("Basic Mandelbrot Plot")
plt.xlabel("real axis")
plt.ylabel("imaginary axis")
plt.show()
mandel_plot(50, 250)
#Colourbar Plot: count the number of iterations it takes for each point c to diverge or converge (github)
def how_many_iterations(x: float, y: float, iteration_count: int):
    """Count how many iterations it takes to establish that
    a certain point c: x + iy is not part of the Mandelbrot set.
    """
    z = complex(0, 0)
    c_pair = complex(x, y)
    for iteration in range(iteration_count):
        z = (z * z) + c_pair
        # once |z| exceeds 2 the orbit is guaranteed to diverge
        if abs(z) > 2:
            # return exactly one value per point: the iteration at which divergence was detected
            return [iteration]
    # the point never diverged within iteration_count iterations
    return [0]
def mandelbrot_scale(threshold, density):
"""
Given a threshold, return a plot for a mandelbrot set,
calling upon the function how_many_iterations to supply the
values of when an iteration of z diverges or converges.
"""
x_axis = np.linspace(-2, 2, density)
y_axis = np.linspace(-2, 2, density)
x_length = len(x_axis)
y_length = len(y_axis)
X, Y = np.meshgrid(x_axis, y_axis)
c= []
for i in range(x_length):
for k in range(y_length):
x = X[i,k]
y = Y[i,k]
c = c + how_many_iterations(x, y, threshold)
c = np.array(c)
plt.scatter(np.reshape(X, -1), np.reshape(Y, -1), c=np.reshape(c,-1), cmap="Spectral")
plt.colorbar(label="Colour")
plt.title("Colourscale Mandelbrot")
plt.xlabel("real axis")
plt.ylabel("imaginary axis")
plt.show()
mandelbrot_scale(50, 250)
#Question 2
#population
N = 1000
#timeline of 200 days
t = np.linspace(0, 200, 200)
#inital numbers N = I + S + R
#inital number of infected individuals
infected_0 = 1
#initial number of susceptible individuals
sus_0 = 999
#initial value of recovered individuals
recovered_0 = 0
y_naught = sus_0, infected_0, recovered_0
beta = 1
gamma = 0.10
# The SIR model differential equations
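# dS/dt = -beta*S*I/N,  dI/dt = beta*S*I/N - gamma*I,  dR/dt = gamma*I  (with N = 1000)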
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
dS_dt = -(beta * S * I) / 1000
dI_dt = ((beta * S * I) / (1000)) - (gamma * I)
dR_dt = gamma * I
return dS_dt, dI_dt, dR_dt
#integrate the functions using scipy
sol = solve_ivp(SIR, [0,200], y_naught, t_eval=t)
#parameter set one
#plot
plt.plot(sol.t,sol.y[0], color="slateblue")
plt.plot(sol.t,sol.y[1], color="red")
plt.plot(sol.t,sol.y[2], color="mediumseagreen")
plt.title(r'SIR Population Model 1: $\beta$: 1 $\gamma$: 0.10')
plt.xlabel('Time (days)')
plt.ylabel('Population')
plt.legend(["Susceptible","Infected","Recovered"])
plt.figure()
#parameter set two
beta = 0.2
gamma = 0.10
# The SIR model differential equations
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
dS_dt = -(beta * S * I) / 1000
dI_dt = ((beta * S * I) / (1000)) - (gamma * I)
dR_dt = gamma * I
return dS_dt, dI_dt, dR_dt
#integrate the functions using scipy
sol = solve_ivp(SIR, [0,200], y_naught, t_eval=t)
#plot
plt.plot(sol.t,sol.y[0], color="slateblue")
plt.plot(sol.t,sol.y[1], color="red")
plt.plot(sol.t,sol.y[2], color="mediumseagreen")
plt.title(r'SIR Population Model 2: $\beta$: 0.2 $\gamma$: 0.10')
plt.xlabel('Time (days)')
plt.ylabel('Population')
plt.legend(["Susceptible","Infected","Recovered"])
plt.figure()
#parameter set three
beta = 1
gamma = 0.50
# The SIR model differential equations
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
dS_dt = -(beta * S * I) / 1000
dI_dt = ((beta * S * I) / (1000)) - (gamma * I)
dR_dt = gamma * I
return dS_dt, dI_dt, dR_dt
#integrate the functions using scipy
sol = solve_ivp(SIR, [0,200], y_naught, t_eval=t)
#plot
plt.plot(sol.t,sol.y[0], color="slateblue")
plt.plot(sol.t,sol.y[1], color="red")
plt.plot(sol.t,sol.y[2], color="mediumseagreen")
plt.title(r'SIR Population Model 3: $\beta$: 1 $\gamma$: 0.50')
plt.xlabel('Time (days)')
plt.ylabel('Population')
plt.legend(["Susceptible","Infected","Recovered"])
plt.figure()
#parameter set four
beta = 5
gamma = 0.10
# The SIR model differential equations
def SIR(t, y):
S = y[0]
I = y[1]
R = y[2]
dS_dt = -(beta * S * I) / 1000
dI_dt = ((beta * S * I) / (1000)) - (gamma * I)
dR_dt = gamma * I
return dS_dt, dI_dt, dR_dt
#integrate the functions using scipy
sol = solve_ivp(SIR, [0,200], y_naught, t_eval=t)
#plot
plt.plot(sol.t,sol.y[0], color="slateblue")
plt.plot(sol.t,sol.y[1], color="red")
plt.plot(sol.t,sol.y[2], color="mediumseagreen")
plt.title(r'SIR Population Model 4: $\beta$: 5 $\gamma$: 0.10')
plt.xlabel('Time (days)')
plt.ylabel('Population')
plt.legend(["Susceptible","Infected","Recovered"])
plt.figure()
|
# Copyright 2021 UChicago Argonne, LLC
# Author:
# - Haoyu Wang and Roberto Ponciroli, Argonne National Laboratory
# - Andrea Alfonsi, Idaho National Laboratory
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0.txt
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Author: H. Wang
Date : 07/30/2020
"""
from __future__ import division, print_function , unicode_literals, absolute_import
#External Modules---------------------------------------------------------------
import numpy as np
import os
import math
from scipy import signal
from scipy import io
from scipy.interpolate import interp1d
from datetime import datetime
import csv
import sys
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase
#Internal Modules End-----------------------------------------------------------
class RefGov_unparameterized_SIMO(ExternalModelPluginBase):
# External Model plugin class, Reference Governor
#################################
#### RAVEN API methods BEGIN ####
#################################
def _readMoreXML(self, container, xmlNode):
"""
Method to read the portion of the XML that belongs to this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, xmlNode, xml.etree.ElementTree.Element, XML node that needs to be read
@ Out, None
"""
""" Initialization of 4 entries """
container.constants = {}
# Extract the output Variable name: container.outputVariables = ['V1', 'V1min', 'V1max']
outputVarsNode = xmlNode.find("outputVariables")
# print(outputVarsNode)
if outputVarsNode is None: # if cannot find the "outputVariables" tag, return error
raise IOError("RG Plugin: <outputVariables> XML block must be inputted!")
# container.outputVariables = outputVarsNode.text.strip()
container.outputVariables = [var.strip() for var in outputVarsNode.text.split(",")]
# print(container.outputVariables) # ['V1', 'V1min', 'V1max']
for child in xmlNode:
# print(child.tag)
# xmlNode is the Nodes within the section of <ExternalModel>.
# child.tag are the strings containing each node name. child.tag == child.tag.strip()
if child.tag.strip() == "variables":
# get verbosity if it exists
# Extract Variable names: container.variables = ['Vi', 'Pi']
container.variables = [var.strip() for var in child.text.split(",")]
# print(container.variables) # ['V1', 'V1min', 'V1max', 'P1']
# if container.outputVariable not in container.variables:
# raise IOError("RG Plug-in: "+container.outputVariable+" variable MUST be present in the <variables> definition!")
container.constants['Sys_State_x']=[] # place holder
if child.tag.strip() == "constant":
# Extract the constant names and their values: container.constants = {'TimeInterval': 3600.0}
# varName has to be provided in the <constant> entry
if "varName" not in child.attrib:
raise IOError("RG Plug-in: attribute varName must be present in <coefficient> XML node!")
# extract the system state variable, the only vector
if child.attrib['varName'] == "Sys_State_x":
container.constants['Sys_State_x'] = [float(var.strip()) for var in child.text.split(",")]
else:
container.constants[child.attrib['varName']] = float(child.text)
# print(container.constants) # {'TimeInterval': 3600.0}
# print(a)
Min_counter = 0; Max_counter = 0
for key, value in container.constants.items(): # count the inputs
if key.startswith('Min_Target'):
Min_counter += 1
# print(Min_counter,key)
elif key.startswith('Max_Target'):
Max_counter += 1
# print(Max_counter, key)
# print(Min_counter, Max_counter)
container.RG_Min_Targets = []
container.RG_Max_Targets = []
if Min_counter ==0 or Max_counter ==0: # check if Min/Max entry exists
raise IOError("RG Plug-in: Missing 'Min_Target' or 'Max_Target' inputs!")
else:
if Min_counter != Max_counter: # check if Min and Max have the same length
raise IOError("RG Plug-in: 'Min_Target' and 'Max_Target' are different in size!")
else:
for i in range(0,Min_counter):
try:
container.RG_Min_Targets.append(container.constants['Min_Target%d' % (i+1)])
          except KeyError:
raise IOError("RG Plug-in: 'Min_Target%d' does not exist!" % (i+1))
try:
container.RG_Max_Targets.append(container.constants['Max_Target%d' % (i+1)])
          except KeyError:
raise IOError("RG Plug-in: 'Max_Target%d' does not exist!" % (i+1))
# print(container.RG_Min_Targets)
# print(container.RG_Max_Targets)
# print(a)
# check if yMin < yMax is satisfied
a = np.asarray(container.RG_Max_Targets)-np.asarray(container.RG_Min_Targets)
# print(a)
if any(n<=0 for n in a):
# print("negative found")
raise IOError("RG Plug-in: 'Min_Targets < Max_Targets' is not satisfied. Check the <ExternalModel> node!")
inputvariables = set(container.variables)-set(container.outputVariables)
container.variables = inputvariables
# print(container.variables) # {'P1'}
def initialize(self, container, runInfoDict, inputFiles):
"""
Method to initialize this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, runInfoDict, dict, dictionary containing all the RunInfo parameters (XML node <RunInfo>)
@ In, inputFiles, list, list of input files (if any)
@ Out, None
"""
# print("\n###############################################\n")
# # print(runInfoDict['WorkingDir'])
# print(inputFiles[1].__dict__)
# print("\n###############################################\n")
# initialization: ensure each var has an initial value
# for var in container.variables:
# if var not in container.coefficients:
# container.coefficients[var] = 1.0
# print("ExamplePlugin: not found coefficient for variable "+var+". Default value is 1.0!")
# container.stepSize = (container.endValue - container.startValue)/float(container.numberPoints)
def createNewInput(self, container, inputs, samplerType, **Kwargs):
# Extract the matrix file name
for item in inputs:
# print(item)
if 'UserGenerated' in item.__class__.__name__: # look for the file input that contains the path to XML file
# Assemble the filename
MatrixFileName = item.__dict__['_File__path']+item.__dict__['_File__base']+'.'+item.__dict__['_File__ext']
# print(MatrixFileName)
f = open("MatrixFilePath.txt","w")
f.write(MatrixFileName)
f.close()
# Remove this item from inputs list
inputs.remove(item)
if 'MatrixFileName' not in locals():
f = open("MatrixFilePath.txt","r")
MatrixFileName = f.read()
f.close()
# print(MatrixFileName)
# Load the XML file containing the ABC matrices
container.Tss, container.n, container.m, container.p, container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array = read_unparameterized_XML(MatrixFileName)
# Tss is the sampling period of discrete A,B,C matrices
if len(container.RG_Min_Targets)!=container.p or len(container.RG_Max_Targets)!=container.p:
sys.exit('ERROR: Check the size of "Min_Target" ({}) or "Max_Target" ({}). \n\tBoth should contain {} items.\n'.format(len(container.RG_Min_Targets), len(container.RG_Max_Targets), container.p))
""" Keep only the profiles with YNorm within the [y_min, y_max] range """
container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array = check_YNorm_within_Range(
container.RG_Min_Targets, container.RG_Max_Targets, container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array)
if container.YNorm_list == []:
sys.exit('ERROR: No proper linearization point (YNorm) found in Matrix File. \n\tPlease provide a state space profile linearized within the [Min_Target, Max_Target] range\n')
max_eigA_id = container.eig_A_array.argmax()
container.A_m = container.A_list[max_eigA_id]; container.B_m = container.B_list[max_eigA_id]; container.C_m = container.C_list[max_eigA_id]; container.D_m = np.zeros((container.p,container.m)) # all zero D matrix
# print(container.eig_A_array)
# print(max_eigA_id)
# print("\n###############################################\n")
return Kwargs['SampledVars']
def run(self, container, Inputs):
"""
This is a simple example of the run method in a plugin.
This method takes the variables in input and computes
oneOutputOfThisPlugin(t) = var1Coefficient*exp(var1*t)+var2Coefficient*exp(var2*t) ...
@ In, container, object, self-like object where all the variables can be stored
@ In, Inputs, dict, dictionary of inputs from RAVEN
"""
""" Process the input from XML file """
# extract the power setpoint from Inputs, type == <class 'float'>
for var in container.variables:
r_value = Inputs[var]
# print("\n###############################################\n")
# print("r_value=", r_value, type(r_value))
""" MOAS steps Limit """
g = int(container.constants['MOASsteps']) # numbers of steps to look forward
""" Select the correct profile with ABCD matrices """
# Find the correct profile according to r_value
profile_id = (np.abs(container.para_array - r_value)).argmin()
# print(profile_id)
# Retrive the correct A, B, C matrices
A_d = container.A_list[profile_id]; B_d = container.B_list[profile_id]; C_d = container.C_list[profile_id]; D_d = np.zeros((container.p,container.m)) # all zero D matrix
# Retrive the correct y_0, r_0 and X
y_0 = container.YNorm_list[profile_id]; r_0 = float(container.UNorm_list[profile_id]);
xLast=container.XLast_list[profile_id]; xNorm=container.XNorm_list[profile_id]
# print(type(r_0))
""" XLast and r_value """
if container.constants['Sys_State_x']==[]: # if user didn't supply the final system state vector
X_Last_RG = np.asarray(xLast - xNorm)
else:
X_Last_RG = np.asarray(container.constants['Sys_State_x']) - np.asarray(xNorm)
# print("X_Last_RG=", X_Last_RG, type(X_Last_RG))
# print(a)
r_value_RG = float(r_value) - r_0
""" Calculate Maximal Output Admissible Set (MOAS) """
s = [] # type == <class 'list'>
for i in range(0,container.p):
s.append([abs(container.RG_Max_Targets[i] - y_0[i])])
s.append([abs(y_0[i] - container.RG_Min_Targets[i])])
# print(s)
H, h = fun_MOAS_noinf(A_d, B_d, C_d, D_d, s, g) # H and h, type = <class 'numpy.ndarray'>
# print("H:\n", H); print("h:\n", h)
""" Call the Reference Governor to mild the r_value """
v_RG = fun_RG_SISO(0, X_Last_RG, r_value_RG, H, h, container.p) # v_RG: type == <class 'numpy.ndarray'>
""" 2nd adjustment """
# MOAS for the steps "g+1" - step "2g"
Hm, hm = fun_MOAS_noinf(container.A_m, container.B_m, container.C_m, container.D_m, s, g)
# Calculate the max/min for v, ensuring the hm-Hxm*x(g+1) always positive for the next g steps.
v_max, v_min = fun_2nd_gstep_calc(X_Last_RG, Hm, hm, container.A_m, container.B_m, g)
if v_RG < v_min:
v_RG = v_min
elif v_RG > v_max:
v_RG = v_max
# Provide the Output variable Vi with value
container.__dict__[container.outputVariables[0]] = v_RG + r_0
container.__dict__[container.outputVariables[1]] = v_min + r_0
container.__dict__[container.outputVariables[2]] = v_max + r_0
###############################
#### RAVEN API methods END ####
###############################
##################################
#### Sub Functions Definition ####
##################################
def read_unparameterized_XML(MatrixFileName):
tree = ET.parse(MatrixFileName)
root = tree.getroot()
para_array=[]; UNorm_list = []; XNorm_list = []; XLast_list = []; YNorm_list =[]
A_Re_list = []; B_Re_list = []; C_Re_list = []; A_Im_list = []; B_Im_list = []; C_Im_list = []
for child1 in root:
# print(' ',child1.tag) # DMDrom
for child2 in child1:
# print(' > ', child2.tag) # ROM, DMDcModel
for child3 in child2:
# print(' > > ', child3.tag) # dmdTimeScale, UNorm, XNorm, XLast, Atilde, Btilde, Ctilde
if child3.tag == 'dmdTimeScale':
# print(child3.text)
Temp_txtlist = child3.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
TimeScale = np.asarray(Temp_floatlist)
TimeInterval = TimeScale[1]-TimeScale[0]
# print(TimeInterval) #10.0
if child3.tag == 'UNorm':
for child4 in child3:
# print(' > > > ', child4.tag)
# print(' > > > ', child4.attrib)
para_array.append(0)
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
UNorm_list.append(np.asarray(Temp_floatlist))
para_array = np.asarray(para_array)
# print(para_array)
# print(UNorm_list)
# print(np.shape(self.UNorm))
if child3.tag == 'XNorm':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
XNorm_list.append(np.asarray(Temp_floatlist))
# print(XNorm_list)
# print(np.shape(self.XNorm))
if child3.tag == 'XLast':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
XLast_list.append(np.asarray(Temp_floatlist))
# print(XLast_list)
# print(np.shape(self.XLast))
if child3.tag == 'YNorm':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
YNorm_list.append(np.asarray(Temp_floatlist))
# print(YNorm_list)
# print(YNorm_list[0])
# print(np.shape(YNorm_list))
# print(np.shape(self.YNorm))
for child4 in child3:
for child5 in child4:
# print(' > > > ', child5.tag) # real, imaginary, matrixShape, formatNote
if child5.tag == 'real':
Temp_txtlist = child5.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
# print(Temp_txtlist)
# print(Temp_floatlist)
if child3.tag == 'Atilde':
A_Re_list.append(np.asarray(Temp_floatlist))
if child3.tag == 'Btilde':
B_Re_list.append(np.asarray(Temp_floatlist))
if child3.tag == 'Ctilde':
C_Re_list.append(np.asarray(Temp_floatlist))
if child5.tag == 'imaginary':
Temp_txtlist = child5.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
# print(Temp_txtlist)
# print(Temp_floatlist)
if child3.tag == 'Atilde':
A_Im_list.append(np.asarray(Temp_floatlist))
if child3.tag == 'Btilde':
B_Im_list.append(np.asarray(Temp_floatlist))
if child3.tag == 'Ctilde':
C_Im_list.append(np.asarray(Temp_floatlist))
# print(A_Re_list)
# print(C_Im_list)
n = len(XNorm_list[0]) # dimension of x
m = len(UNorm_list[0]) # dimension of u
p = len(YNorm_list[0]) # dimension of y
# Reshape the A, B, C lists
for i in range(len(para_array)):
A_Re_list[i]=np.reshape(A_Re_list[i],(n,n)).T
A_Im_list[i]=np.reshape(A_Im_list[i],(n,n)).T
B_Re_list[i]=np.reshape(B_Re_list[i],(m,n)).T
B_Im_list[i]=np.reshape(B_Im_list[i],(m,n)).T
C_Re_list[i]=np.reshape(C_Re_list[i],(n,p)).T
C_Im_list[i]=np.reshape(C_Im_list[i],(n,p)).T
# print(A_Re_list[19])
# print(B_Re_list[19])
# print(C_Re_list[19])
A_list = A_Re_list
B_list = B_Re_list
C_list = C_Re_list
eig_A_array=[]
# eigenvalue of A
for i in range(len(para_array)):
w,v = np.linalg.eig(A_list[i])
eig_A_array.append(max(w))
eig_A_array = np.asarray(eig_A_array)
# print(eig_A_array)
return TimeInterval, n, m, p, para_array, UNorm_list, XNorm_list, XLast_list, YNorm_list, A_list, B_list, C_list, eig_A_array
def check_YNorm_within_Range(y_min, y_max, para_array, UNorm_list, XNorm_list, XLast_list, YNorm_list, A_list, B_list, C_list, eig_A_array):
UNorm_list_ = []; XNorm_list_ = []; XLast_list_ = []; YNorm_list_ =[]
A_list_ = []; B_list_ = []; C_list_ = []; para_array_ = []; eig_A_array_ =[]
for i in range(len(YNorm_list)):
state = True
for j in range(len(YNorm_list[i])):
if YNorm_list[i][j] < y_min[j] or YNorm_list[i][j] > y_max[j]:
state = False
if state == True:
UNorm_list_.append(UNorm_list[i])
XNorm_list_.append(XNorm_list[i])
XLast_list_.append(XLast_list[i])
YNorm_list_.append(YNorm_list[i])
A_list_.append(A_list[i])
B_list_.append(B_list[i])
C_list_.append(C_list[i])
para_array_.append(para_array[i])
eig_A_array_.append(eig_A_array[i])
para_array_ = np.asarray(para_array_); eig_A_array_ = np.asarray(eig_A_array_)
return para_array_, UNorm_list_, XNorm_list_, XLast_list_, YNorm_list_, A_list_, B_list_, C_list_, eig_A_array_
def fun_MOAS_noinf(A, B, C, D, s, g):
p = len(C) # dimension of y
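  # T = inv(I - A) @ B: steady-state state response to a constant (held) input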
T = np.linalg.solve(np.identity(len(A))-A, B)
""" Build the S matrix"""
S = np.zeros((2*p, p))
for i in range(0,p):
S[2*i, i] = 1.0
S[2*i+1, i] = -1.0
Kx = np.dot(S,C)
# print("Kx", Kx)
Lim = np.dot(S,(np.dot(C,T) + D))
# print("Lim", Lim)
Kr = np.dot(S,D)
# print("Kr", Kr)
""" Build the core of H and h """
# H = np.concatenate((0*Kx, Lim),axis=1); h = s
# NewBlock = np.concatenate((Kx, Kr),axis=1)
# H = np.concatenate((H,NewBlock)); h = np.concatenate((h,s))
H = np.concatenate((Kx, Kr),axis=1); h = s
""" Build the add-on blocks of H and h """
i = 0
while i < g :
i = i + 1
Kx = np.dot(Kx, A)
Kr = Lim - np.dot(Kx,T)
NewBlock = np.concatenate((Kx,Kr), axis=1)
H = np.concatenate((H,NewBlock)); h = np.concatenate((h,s))
""" To Insert the ConstRedunCheck """
return H, h
def fun_RG_SISO(v_0, x, r, H, h, p):
n = len(x) # dimension of x
x = np.vstack(x) # x is horizontal array, must convert to vertical for matrix operation
# because v_0 and r are both scalar, so no need to vstack
Hx = H[:, 0:n]; Hv = H[:, n:]
alpha = h - np.dot(Hx,x) - np.dot(Hv,v_0) # alpha is the system remaining vector
beta = np.dot(Hv, (r-v_0)) # beta is the anticipated response vector with r
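  # kappa in [0, 1] is the largest fraction of the step (r - v_0) that keeps H @ [x; v] <= h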
kappa = 1
for k in range(0,len(alpha)):
if 0 < alpha[k] and alpha[k] < beta[k]:
kappa = min(kappa, alpha[k]/beta[k])
else:
kappa = kappa
v = np.asarray(v_0 + kappa*(r-v_0)).flatten()
return v
def fun_2nd_gstep_calc(x, Hm, hm, A_m, B_m, g):
n = len(x) # dimension of x
# x = np.vstack(x) # x is horizontal array, must convert to vertical for matrix operation
# because v_0 and r are both scalar, so no need to vstack
Hxm = Hm[:, 0:n]; Hvm = Hm[:, n:]
T = np.linalg.solve(np.identity(n)-A_m, B_m)
Ag = np.identity(n)
for k in range(g+1):
Ag = np.dot(Ag,A_m)
alpha = hm - np.dot(Hxm, np.dot(Ag, np.vstack(x)))
beta = np.dot(Hxm, np.dot((np.identity(n)-Ag),T))
# print(np.shape(alpha))
# print(np.shape(beta))
v_st = []; v_bt = []
for k in range(0,len(alpha)):
if beta[k]>0:
v_st.append(alpha[k]/beta[k])
elif beta[k]<0:
v_bt.append(alpha[k]/beta[k])
# print('v_smaller_than,\n',v_st)
v_max = np.asarray(min(v_st))
v_min = np.asarray(max(v_bt))
return v_max, v_min
|
from .attrib_base import AttributeBase, AttributeLengthError
__all__ = ['AttributeDeprecated']
class AttributeDeprecated(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
if self.length:
raise AttributeLengthError
|
'''
Created on 22 Feb 2018
@author: Anna
'''
from setuptools import setup
setup(name="sysinfo",
version="0.1",
description="Basic system information for COMP30670",
author="Anna Ryzova",
author_email="anna.ryzova@ucdconnect.ie",
license="GPL3",
packages=['systeminfo'],
entry_points={
'console_scripts':['comp30670_systeminfo=systeminfo.main:main']
}
)
|
#Bisection method to find a real root of an equation***********
a, b = map(float, input('enter the values of a and b (space separated): ').split())
maxitr = int(input('enter the no. of iterations: '))
itr=0
print("itr, a, b, x, fx")
func= lambda x: x**3+x-1
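# halve the bracket [a, b] each iteration, keeping the half where f changes sign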
while itr<maxitr:
x=(a+b)/2.0
fa=func(a)
fx=func(x)
if fa*fx<0.0:
b=x
else:
a=x
    print([itr, a, b, x, fx])
itr=itr+1
|
import aiohttp
import asyncio
from typing import Optional, List
from .abstractions import TelemetryChannel
from ..exceptions import OperationFailed
from ..utils.json import friendly_dumps
class AiohttpTelemetryChannel(TelemetryChannel):
def __init__(self,
loop:Optional[asyncio.AbstractEventLoop]=None,
client:Optional[aiohttp.ClientSession]=None,
endpoint:Optional[str]=None):
super().__init__()
dispose_client = True
if client is None:
if loop is None:
loop = asyncio.get_event_loop()
client = aiohttp.ClientSession(loop=loop)
else:
dispose_client = False
if not endpoint:
endpoint = 'https://dc.services.visualstudio.com/v2/track'
self._dispose_client = dispose_client
self._http_client = client
self._endpoint = endpoint
self._headers = {'Accept': 'application/json', 'Content-Type': 'application/json; charset=utf-8'}
async def send(self, data: List):
body = friendly_dumps(data)
response = await self._http_client.post(self._endpoint,
data=body.encode('utf8'),
headers=self._headers)
if response.status != 200:
text = await response.text()
raise OperationFailed(f'Response status does not indicate success: {response.status}; response body: {text}')
async def dispose(self):
# NB: the client is disposed only if it was instantiated
if self._dispose_client:
await self._http_client.close()
|
"""
A bit easier than yesterday: I solved the puzzle by using np.where in a loop. The main points are that we need to
take the borders of the array into account, and that I used a separate mask array to keep track of the octopi that
flashed during the current turn. After correctly implementing part 1, part 2 was easy: just wait for the turn in
which the tracking mask is all 1s.
"""
from utils import Solution
from typing import Any
import numpy as np
class DaySolution(Solution):
def __init__(self, day: int = 11, year: int = 2021) -> None:
super().__init__(day, year)
def _parse_data(self, input_data: str) -> Any:
"""
Data parsing was again splitting the lines and then converting them to ints and into a numpy array.
"""
return np.array([[int(x) for x in line] for line in input_data.split("\n") if line])
def _observe_octopi(self, initial_state: np.ndarray, total_steps: int = 0):
octopi = initial_state.copy()
total_flashes = 0
step = 0
while True:
octopi += 1
charged_octopi = np.where(octopi > 9)
octopi_mask = np.zeros((10, 10))
while charged_octopi[0].size > 0:
for row, col in zip(*charged_octopi):
if octopi_mask[row, col] == 1:
continue
octopi_mask[row, col] = 1
min_row = row - 1 if row > 0 else row
max_row = row + 2 if row < 9 else row + 1
min_col = col - 1 if col > 0 else col
max_col = col + 2 if col < 9 else col + 1
octopi[min_row:max_row, min_col: max_col] += 1
charged_octopi = np.where(np.logical_and(octopi > 9, octopi_mask == 0))
for row, col in zip(*np.where(octopi_mask == 1)):
octopi[row, col] = 0
total_flashes += np.sum(octopi_mask)
if np.sum(octopi_mask) == 100 or step + 1 == total_steps:
return total_flashes, step + 1
step += 1
def _solve_part1(self, parsed_data: np.ndarray) -> Any:
"""
        Simply run the _observe_octopi function for a hundred turns and use the returned number of flashes. The
        _observe_octopi function simply +1's the neighborhood around a flashing octopus. Note that we do not need
        to correct for the center pixel as we will reset that to zero anyway.
"""
return self._observe_octopi(parsed_data, 100)[0]
def _solve_part2(self, parsed_data: np.ndarray) -> Any:
"""
As noted above, simply run the function until the flash tracking array sums to 100 and return the turn that
happens (+1 because we start at turn 0, not 1).
"""
return self._observe_octopi(parsed_data)[1]
|
from django_filters.rest_framework import DjangoFilterBackend, FilterSet
from rest_framework import filters, viewsets
from .models import Question
from .pagination import QuestionPagination
from .permissions import QuestionPermission
from .serializers import QuestionSerializer
from .throttles import BurstCommunityRateThrottle, SustainedCommunityRateThrottle
class QuestionFilter(FilterSet):
class Meta:
model = Question
fields = {
"category": ["exact", "in"],
}
class QuestionViewSet(viewsets.ModelViewSet):
queryset = Question.objects.order_by("-ask_time")
serializer_class = QuestionSerializer
permission_classes = [QuestionPermission]
throttle_classes = [BurstCommunityRateThrottle, SustainedCommunityRateThrottle]
filter_backends = [
filters.SearchFilter,
filters.OrderingFilter,
DjangoFilterBackend,
]
search_fields = ["title"]
ordering_fields = ["ask_time"]
pagination_class = QuestionPagination
filterset_class = QuestionFilter
|
#!/usr/bin/python
# BSD 3-Clause License
# Copyright (c) 2019, Noam C. Golombek
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import cv2
def resize_image(height, width, image, interpolation=None):
"""A function that resizes a provided picture.
Inputs: width and height to resize to
image to resize
Outputs: input_image_resized"""
if interpolation is None:
if str(image.dtype).startswith(("int", "bool")):
interpolation = cv2.INTER_NEAREST
else:
interpolation = cv2.INTER_LINEAR # default
image_resized = cv2.resize(image, dsize=(width, height), interpolation=interpolation)
return image_resized
class ResizeAndCrop(object):
"""Resize And Crop to process and back"""
def __init__(self, hypes, original_image_size):
"""A function that provides the indices to start and stop cropping the picture at.
Inputs: hypes file to get crop parameters,
original_image_size
Define: crop_y_from, crop_y_to, crop_x_from, crop_x_to, processing_image_size"""
def _get(h, field):
""" Get field from h if such present, else return False"""
if field in h.keys():
return h[field]
return False
if 'jitter' in hypes.keys():
h_ = hypes['jitter']
else:
h_ = hypes
# ------------- resize_image -----------------------
self.resize_image = _get(h_, 'reseize_image') or _get(h_, 'resize_image')
if self.resize_image:
inter_image_size = (h_['image_width'], h_['image_height'])
else:
inter_image_size = original_image_size[:2] # float
self.inter_image_size = inter_image_size
# ------------- crop_for_processing -----------------------
self.crop_for_processing = _get(h_, 'crop_for_processing')
if self.crop_for_processing:
if 'crop_x_from' in h_.keys():
self.crop_x_from = int(inter_image_size[0] * h_['crop_x_from'])
else:
self.crop_x_from = int(0)
            if 'crop_x_to' in h_.keys():
self.crop_x_to = int(inter_image_size[0] * h_['crop_x_to'])
else:
self.crop_x_to = int(inter_image_size[0])
if 'crop_y_from' in h_.keys():
self.crop_y_from = int(inter_image_size[1] * h_['crop_y_from'])
else:
self.crop_y_from = int(0)
if 'crop_y_to' in h_.keys():
self.crop_y_to = int(inter_image_size[1] * h_['crop_y_to'])
else:
self.crop_y_to = int(inter_image_size[1])
self.processing_image_size = (
self.crop_x_to - self.crop_x_from, self.crop_y_to - self.crop_y_from)
else:
self.processing_image_size = inter_image_size
def preprocess_image(self, image, image_uncropped=None):
"""A function that does all of the image preprocessing
Inputs: image to process
image_uncropped empty image for postprocessing (allocated if is None)
Outputs: preprocessed image, image_uncropped"""
preprocessed_image = image
# Resize the image
if self.resize_image:
#self.inter_image_size = (h_['image_width'], h_['image_height'])
preprocessed_image = resize_image(self.inter_image_size[1], # -> image_height
self.inter_image_size[0], # -> image_width
image)
# Crop the image
if self.crop_for_processing:
if image_uncropped is None:
image_uncropped = np.zeros(
(preprocessed_image.shape[0], preprocessed_image.shape[1]))
preprocessed_image = preprocessed_image[self.crop_y_from:self.crop_y_to, self.crop_x_from:self.crop_x_to]
return preprocessed_image, image_uncropped
def postprocess_image(self, image,
output_image_uncropped,
resulting_image_for_shape, # image shape to resize back, only shape is used
filter_data=None):
"""A function that does all of the image preprocessing for KittiSeg
Inputs: image to process
output_image_uncropped empty image for postprocessing
Outputs: way_prediction"""
#Insert the cropped image into the full sized image
if self.crop_for_processing:
output_image_uncropped[self.crop_y_from:self.crop_y_to, self.crop_x_from:self.crop_x_to] = image
image = output_image_uncropped
#Resize the image to its original size
if self.resize_image:
image = resize_image(resulting_image_for_shape.shape[0], resulting_image_for_shape.shape[1], image)
# Accept all pixel with conf >= threshold as positive prediction
# This creates a `hard` prediction result for class street
if str(image.dtype).startswith("float"):
if filter_data is None:
filter_data = 0.5
way_prediction = image > filter_data
elif str(image.dtype).startswith("int"):
way_prediction = image.copy()
elif str(image.dtype).startswith("bool"):
way_prediction = image.copy()
else:
print(image.dtype)
assert str(image.dtype).startswith(("float", "int", "bool"))
return way_prediction
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DCNS
from element import Element
# Autogenerated
def Creator(**args):
return Element(qname = (DCNS,'creator'), **args)
def Date(**args):
return Element(qname = (DCNS,'date'), **args)
def Description(**args):
return Element(qname = (DCNS,'description'), **args)
def Language(**args):
return Element(qname = (DCNS,'language'), **args)
def Subject(**args):
return Element(qname = (DCNS,'subject'), **args)
def Title(**args):
return Element(qname = (DCNS,'title'), **args)
# The following complete the Dublin Core elements, but there is no
# guarantee a compliant implementation of OpenDocument will preserve
# these elements
#def Contributor(**args):
# return Element(qname = (DCNS,'contributor'), **args)
#def Coverage(**args):
# return Element(qname = (DCNS,'coverage'), **args)
#def Format(**args):
# return Element(qname = (DCNS,'format'), **args)
#def Identifier(**args):
# return Element(qname = (DCNS,'identifier'), **args)
#def Publisher(**args):
# return Element(qname = (DCNS,'publisher'), **args)
#def Relation(**args):
# return Element(qname = (DCNS,'relation'), **args)
#def Rights(**args):
# return Element(qname = (DCNS,'rights'), **args)
#def Source(**args):
# return Element(qname = (DCNS,'source'), **args)
#def Type(**args):
# return Element(qname = (DCNS,'type'), **args)
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Substitute variables of QuadraticProgram."""
import logging
from collections import defaultdict
from dataclasses import dataclass
from math import isclose
from typing import Dict, Optional, Tuple, Union, cast
from ..exceptions import QiskitOptimizationError
from ..infinity import INFINITY
from .constraint import ConstraintSense
from .linear_expression import LinearExpression
from .quadratic_expression import QuadraticExpression
from .quadratic_program import QuadraticProgram
logger = logging.getLogger(__name__)
@dataclass
class SubstitutionExpression:
"""Represents a substitution of a variable with a linear expression.
If `variable` is `None`, it substitutes a variable with the constant value.
Otherwise, it substitutes a variable with (constant + coefficient * new_variable).
"""
const: float = 0.0
"""Constant value"""
coeff: float = 0.0
"""Coefficient of the new variable"""
variable: Optional[str] = None
"""Variable name or `None`"""
def substitute_variables(
quadratic_program: QuadraticProgram,
constants: Optional[Dict[Union[str, int], float]] = None,
variables: Optional[Dict[Union[str, int], Tuple[Union[str, int], float]]] = None,
) -> QuadraticProgram:
"""Substitutes variables with constants or other variables.
Args:
quadratic_program: a quadratic program whose variables are substituted.
constants: replace variable by constant
e.g., {'x': 2} means 'x' is substituted with 2
variables: replace variables by weighted other variable
need to copy everything using name reference to make sure that indices are matched
correctly. The lower and upper bounds are updated accordingly.
e.g., {'x': ('y', 2)} means 'x' is substituted with 'y' * 2
Returns:
An optimization problem by substituting variables with constants or other variables.
If the substitution is valid, `QuadraticProgram.status` is still
`QuadraticProgram.Status.VALID`.
Otherwise, it gets `QuadraticProgram.Status.INFEASIBLE`.
Raises:
QiskitOptimizationError: if the substitution is invalid as follows.
- Same variable is substituted multiple times.
- Coefficient of variable substitution is zero.
"""
# guarantee that there is no overlap between variables to be replaced and combine input
subs = {}
if constants:
for i, v in constants.items():
# substitute i <- v
i_2 = quadratic_program.get_variable(i).name
if i_2 in subs:
raise QiskitOptimizationError(
f"Cannot substitute the same variable twice: {i} <- {v}"
)
subs[i_2] = SubstitutionExpression(const=v)
if variables:
for i, (j, v) in variables.items():
if v == 0:
raise QiskitOptimizationError(f"coefficient must be non-zero: {i} {j} {v}")
# substitute i <- j * v
i_2 = quadratic_program.get_variable(i).name
j_2 = quadratic_program.get_variable(j).name
if i_2 == j_2:
raise QiskitOptimizationError(
f"Cannot substitute the same variable: {i} <- {j} {v}"
)
if i_2 in subs:
raise QiskitOptimizationError(
f"Cannot substitute the same variable twice: {i} <- {j} {v}"
)
if j_2 in subs:
raise QiskitOptimizationError(
"Cannot substitute by variable that gets substituted itself: " f"{i} <- {j} {v}"
)
subs[i_2] = SubstitutionExpression(variable=j_2, coeff=v)
return _SubstituteVariables().substitute_variables(quadratic_program, subs)
class _SubstituteVariables:
"""A class to substitute variables of an optimization problem with constants for other
variables"""
def __init__(self):
self._src: Optional[QuadraticProgram] = None
self._dst: Optional[QuadraticProgram] = None
self._subs: Dict[str, SubstitutionExpression] = {}
def substitute_variables(
self, quadratic_program: QuadraticProgram, subs: Dict[str, SubstitutionExpression]
) -> QuadraticProgram:
"""Substitutes variables with constants or other variables.
Args:
quadratic_program: a quadratic program whose variables are substituted.
subs: substitution expressions as a dictionary.
e.g., {'x': SubstitutionExpression(const=1, coeff=2, variable='y'} means
`x` is substituted with `1 + 2 * y`.
Returns:
An optimization problem by substituting variables with constants or other variables.
If the substitution is valid, `QuadraticProgram.status` is still
`QuadraticProgram.Status.VALID`.
Otherwise, it gets `QuadraticProgram.Status.INFEASIBLE`.
"""
self._src = quadratic_program
self._dst = QuadraticProgram(quadratic_program.name)
self._subs = subs
results = [
self._variables(),
self._objective(),
self._linear_constraints(),
self._quadratic_constraints(),
]
if not all(results):
self._dst._status = QuadraticProgram.Status.INFEASIBLE
return self._dst
@staticmethod
def _feasible(sense: ConstraintSense, rhs: float) -> bool:
"""Checks feasibility of the following condition
0 `sense` rhs
"""
if sense == ConstraintSense.EQ:
if rhs == 0:
return True
elif sense == ConstraintSense.LE:
if rhs >= 0:
return True
elif sense == ConstraintSense.GE:
if rhs <= 0:
return True
return False
def _variables(self) -> bool:
# copy variables that are not replaced
feasible = True
for var in self._src.variables:
name = var.name
vartype = var.vartype
lowerbound = var.lowerbound
upperbound = var.upperbound
if name not in self._subs:
self._dst._add_variable(lowerbound, upperbound, vartype, name)
for i, expr in self._subs.items():
lb_i = self._src.get_variable(i).lowerbound
ub_i = self._src.get_variable(i).upperbound
# substitute x_i <- x_j * coeff + const
# lb_i <= x_i <= ub_i -->
# (lb_i - const) / coeff <= x_j <= (ub_i - const) / coeff if coeff > 0
# (ub_i - const) / coeff <= x_j <= (lb_i - const) / coeff if coeff < 0
# lb_i <= const <= ub_i if coeff == 0
if isclose(expr.coeff, 0.0, abs_tol=1e-10):
if not lb_i <= expr.const <= ub_i:
logger.warning("Infeasible substitution for variable: %s", i)
feasible = False
else:
if abs(lb_i) < INFINITY:
new_lb_i = (lb_i - expr.const) / expr.coeff
else:
new_lb_i = lb_i if expr.coeff > 0 else -lb_i
if abs(ub_i) < INFINITY:
new_ub_i = (ub_i - expr.const) / expr.coeff
else:
new_ub_i = ub_i if expr.coeff > 0 else -ub_i
var_j = self._dst.get_variable(expr.variable)
lb_j = var_j.lowerbound
ub_j = var_j.upperbound
if expr.coeff > 0:
var_j.lowerbound = max(lb_j, new_lb_i)
var_j.upperbound = min(ub_j, new_ub_i)
else:
var_j.lowerbound = max(lb_j, new_ub_i)
var_j.upperbound = min(ub_j, new_lb_i)
for var in self._dst.variables:
if var.lowerbound > var.upperbound:
logger.warning(
"Infeasible lower and upper bounds: %s %f %f",
var,
var.lowerbound,
var.upperbound,
)
feasible = False
return feasible
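# Worked example of the bound update above: substituting x <- 2 * y + 1 where
# 0 <= x <= 5 tightens y to (0 - 1) / 2 = -0.5 <= y <= (5 - 1) / 2 = 2.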
def _linear_expression(self, lin_expr: LinearExpression) -> Tuple[float, LinearExpression]:
const = 0.0
lin_dict: Dict[str, float] = defaultdict(float)
for i, w_i in lin_expr.to_dict(use_name=True).items():
i = cast(str, i)
expr_i = self._subs.get(i, SubstitutionExpression(coeff=1, variable=i))
const += w_i * expr_i.const
if expr_i.variable:
lin_dict[expr_i.variable] += w_i * expr_i.coeff
new_lin = LinearExpression(
quadratic_program=self._dst, coefficients=lin_dict if lin_dict else {}
)
return const, new_lin
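# For example, a term 3 * x under the substitution x <- 1 + 2 * y contributes
# 3 to the returned constant and 6 * y to the returned linear expression.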
def _quadratic_expression(
self, quad_expr: QuadraticExpression
) -> Tuple[float, Optional[LinearExpression], Optional[QuadraticExpression]]:
const = 0.0
lin_dict: Dict[str, float] = defaultdict(float)
quad_dict: Dict[Tuple[str, str], float] = defaultdict(float)
for (i, j), w_ij in quad_expr.to_dict(use_name=True).items():
i = cast(str, i)
j = cast(str, j)
expr_i = self._subs.get(i, SubstitutionExpression(coeff=1, variable=i))
expr_j = self._subs.get(j, SubstitutionExpression(coeff=1, variable=j))
const += w_ij * expr_i.const * expr_j.const
if expr_i.variable:
lin_dict[expr_i.variable] += w_ij * expr_i.coeff * expr_j.const
if expr_j.variable:
lin_dict[expr_j.variable] += w_ij * expr_j.coeff * expr_i.const
if expr_i.variable and expr_j.variable:
quad_dict[expr_i.variable, expr_j.variable] += w_ij * expr_i.coeff * expr_j.coeff
new_lin = LinearExpression(
quadratic_program=self._dst, coefficients=lin_dict if lin_dict else {}
)
new_quad = QuadraticExpression(
quadratic_program=self._dst, coefficients=quad_dict if quad_dict else {}
)
return const, new_lin, new_quad
def _objective(self) -> bool:
obj = self._src.objective
const1, lin1 = self._linear_expression(obj.linear)
const2, lin2, quadratic = self._quadratic_expression(obj.quadratic)
constant = obj.constant + const1 + const2
linear = lin1.coefficients + lin2.coefficients
if obj.sense == obj.sense.MINIMIZE:
self._dst.minimize(constant=constant, linear=linear, quadratic=quadratic.coefficients)
else:
self._dst.maximize(constant=constant, linear=linear, quadratic=quadratic.coefficients)
return True
def _linear_constraints(self) -> bool:
feasible = True
for lin_cst in self._src.linear_constraints:
constant, linear = self._linear_expression(lin_cst.linear)
rhs = lin_cst.rhs - constant
if linear.coefficients.nnz > 0:
self._dst.linear_constraint(
name=lin_cst.name,
linear=linear.coefficients,
sense=lin_cst.sense,
rhs=rhs,
)
else:
if not self._feasible(lin_cst.sense, rhs):
logger.warning("constraint %s is infeasible due to substitution", lin_cst.name)
feasible = False
return feasible
def _quadratic_constraints(self) -> bool:
feasible = True
for quad_cst in self._src.quadratic_constraints:
const1, lin1 = self._linear_expression(quad_cst.linear)
const2, lin2, quadratic = self._quadratic_expression(quad_cst.quadratic)
rhs = quad_cst.rhs - const1 - const2
linear = lin1.coefficients + lin2.coefficients
if quadratic.coefficients.nnz > 0:
self._dst.quadratic_constraint(
name=quad_cst.name,
linear=linear,
quadratic=quadratic.coefficients,
sense=quad_cst.sense,
rhs=rhs,
)
elif linear.nnz > 0:
name = quad_cst.name
lin_names = set(lin.name for lin in self._dst.linear_constraints)
while name in lin_names:
name = "_" + name
self._dst.linear_constraint(name=name, linear=linear, sense=quad_cst.sense, rhs=rhs)
else:
if not self._feasible(quad_cst.sense, rhs):
logger.warning("constraint %s is infeasible due to substitution", quad_cst.name)
feasible = False
return feasible
|
import json
import os
import re
import subprocess
from itertools import groupby
import tensorflow as tf
class Wav2Vec2Processor:
def __init__(
self, is_tokenizer, do_normalize=True, vocab_path="./vocab.json"
):
# whether to use as `feature_extractor` or `tokenizer`
self.is_tokenizer = is_tokenizer
self.do_normalize = do_normalize
self.vocab_path = vocab_path
if self.is_tokenizer:
self._setup_vocab()
self.token_to_id_mapping = self.get_vocab()
self.id_to_token_mapping = {
v: k for k, v in self.token_to_id_mapping.items()
}
self.unk_token = "<unk>"
self.unk_id = self.token_to_id_mapping[self.unk_token]
self.dimiliter_token = "|"
self.dimiliter_id = self.token_to_id_mapping[self.dimiliter_token]
special_tokens = ["<pad>"]
self.special_ids = [self.token_to_id_mapping[k] for k in special_tokens]
def _setup_vocab(self):
"""This method will download & setup the vocab file if it's not on the `vocab_path`"""
if not os.path.isfile(self.vocab_path):
url = "https://github.com/vasudevgupta7/gsoc-wav2vec2/raw/main/data/vocab.json"
print(f"Downloading `vocab.json` from {url} ... ", end="")
try:
subprocess.run(
["wget", url], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
)
except (subprocess.CalledProcessError, OSError) as e:
raise ValueError(f"Couldn't download `vocab.json` from {url}") from e
print("DONE")
self.vocab_path = "./vocab.json"
def __call__(self, input_values):
"""
if is_tokenizer:
input_values (:obj: `str`):
Single string you want to encode to ids
else:
input_values (:obj: `tf.Tensor`):
Tensor which needs to be fed into `model.call()`
"""
if self.is_tokenizer:
input_values = self._tokenize(input_values)
input_values = [
self.token_to_id_mapping.get(k, self.unk_id) for k in input_values
]
else:
if self.do_normalize:
input_values = self._normalize(input_values)
return input_values
def decode(self, input_ids: list, skip_special_tokens=True, group_tokens=True):
"""
Use this method to decode your ids back to string.
Args:
input_ids (:obj: `list`):
input_ids you want to decode to string.
skip_special_tokens (:obj: `bool`, `optional`):
Whether to remove special tokens (like `<pad>`) from string.
group_tokens (:obj: `bool`, `optional`):
Whether to group repeated characters.
"""
if group_tokens:
input_ids = [t[0] for t in groupby(input_ids)]
if skip_special_tokens:
input_ids = [k for k in input_ids if k not in self.special_ids]
tokens = [self.id_to_token_mapping.get(k, self.unk_token) for k in input_ids]
tokens = [k if k != self.dimiliter_token else " " for k in tokens]
return "".join(tokens).strip()
def _tokenize(self, string: str):
string = re.sub("-", " ", string)
string = re.sub("[^A-Z' ]", "", string.upper())
return list(string.replace(" ", self.dimiliter_token))
def get_vocab(self):
with open(self.vocab_path, "r") as f:
vocab = json.load(f)
return vocab
def _normalize(self, x):
"""You must call this before padding."""
# -> (1, seqlen)
mean = tf.reduce_mean(x, axis=-1, keepdims=True)
var = tf.math.reduce_variance(x, axis=-1, keepdims=True)
return tf.squeeze((x - mean) / tf.sqrt(var + 1e-5))
if __name__ == "__main__":
"""Testing Area"""
feature_extractor = Wav2Vec2Processor(is_tokenizer=False)
batch, _ = tf.audio.decode_wav(tf.io.read_file("../data/sample.wav"))
batch = tf.transpose(batch, perm=(1, 0))
batch = tf.concat([batch, batch], axis=0)
out = feature_extractor(batch)
print(out)
print("\n\n")
tokenizer = Wav2Vec2Processor(is_tokenizer=True)
ids = tokenizer("vasudev guptaa is a data scientist.")
print(ids)
print(tokenizer.decode(ids))
print(tokenizer.decode(ids, group_tokens=False))
ids = tokenizer("how is life gooing? what's up.. yayy i got results. it's awe-some")
print(ids)
print(tokenizer.decode(ids))
print(tokenizer.decode(ids, group_tokens=False))
|
# Generated by Django 2.0 on 2018-01-27 07:55
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Feed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('post', models.TextField(max_length=255)),
('post_pic', models.ImageField(blank=True, height_field='height_field', null=True, upload_to='', width_field='width_field')),
('height_field', models.IntegerField(default=0)),
('width_field', models.IntegerField(default=0)),
('user', models.ForeignKey(null=True, on_delete=models.CASCADE, related_name='from_user', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Feed',
'ordering': ('-date',),
'verbose_name_plural': 'Feeds',
},
),
]
|
import click
from natsort import natsorted
from tabulate import tabulate
from swsscommon.swsscommon import SonicV2Connector
import utilities_common.cli as clicommon
CHASSIS_MODULE_INFO_TABLE = 'CHASSIS_MODULE_TABLE'
CHASSIS_MODULE_INFO_KEY_TEMPLATE = 'CHASSIS_MODULE {}'
CHASSIS_MODULE_INFO_DESC_FIELD = 'desc'
CHASSIS_MODULE_INFO_SLOT_FIELD = 'slot'
CHASSIS_MODULE_INFO_OPERSTATUS_FIELD = 'oper_status'
CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD = 'admin_status'
CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE'
CHASSIS_MIDPLANE_INFO_IP_FIELD = 'ip_address'
CHASSIS_MIDPLANE_INFO_ACCESS_FIELD = 'access'
@click.group(cls=clicommon.AliasedGroup)
def chassis_modules():
"""Show chassis-modules information"""
pass
@chassis_modules.command()
@clicommon.pass_db
@click.argument('chassis_module_name', metavar='<module_name>', required=False)
def status(db, chassis_module_name):
"""Show chassis-modules status"""
header = ['Name', 'Description', 'Physical-Slot', 'Oper-Status', 'Admin-Status']
chassis_cfg_table = db.cfgdb.get_table('CHASSIS_MODULE')
state_db = SonicV2Connector(host="127.0.0.1")
state_db.connect(state_db.STATE_DB)
key_pattern = '*'
if chassis_module_name:
key_pattern = '|' + chassis_module_name
keys = state_db.keys(state_db.STATE_DB, CHASSIS_MODULE_INFO_TABLE + key_pattern)
if not keys:
print('Key {} not found in {} table'.format(key_pattern, CHASSIS_MODULE_INFO_TABLE))
return
table = []
for key in natsorted(keys):
key_list = key.split('|')
if len(key_list) != 2: # error data in DB, log it and ignore
print('Warn: Invalid Key {} in {} table'.format(key, CHASSIS_MODULE_INFO_TABLE))
continue
data_dict = state_db.get_all(state_db.STATE_DB, key)
desc = data_dict[CHASSIS_MODULE_INFO_DESC_FIELD]
slot = data_dict[CHASSIS_MODULE_INFO_SLOT_FIELD]
oper_status = data_dict[CHASSIS_MODULE_INFO_OPERSTATUS_FIELD]
admin_status = 'up'
config_data = chassis_cfg_table.get(key_list[1])
if config_data is not None:
admin_status = config_data.get(CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD)
table.append((key_list[1], desc, slot, oper_status, admin_status))
if table:
click.echo(tabulate(table, header, tablefmt='simple', stralign='right'))
else:
click.echo('No data available in CHASSIS_MODULE_TABLE\n')
@chassis_modules.command()
@click.argument('chassis_module_name', metavar='<module_name>', required=False)
def midplane_status(chassis_module_name):
"""Show chassis-modules midplane-status"""
header = ['Name', 'IP-Address', 'Reachability']
state_db = SonicV2Connector(host="127.0.0.1")
state_db.connect(state_db.STATE_DB)
key_pattern = '*'
if chassis_module_name:
key_pattern = '|' + chassis_module_name
keys = state_db.keys(state_db.STATE_DB, CHASSIS_MIDPLANE_INFO_TABLE + key_pattern)
if not keys:
print('Key {} not found in {} table'.format(key_pattern, CHASSIS_MIDPLANE_INFO_TABLE))
return
table = []
for key in natsorted(keys):
key_list = key.split('|')
if len(key_list) != 2: # error data in DB, log it and ignore
print('Warn: Invalid Key {} in {} table'.format(key, CHASSIS_MIDPLANE_INFO_TABLE))
continue
data_dict = state_db.get_all(state_db.STATE_DB, key)
ip = data_dict[CHASSIS_MIDPLANE_INFO_IP_FIELD]
access = data_dict[CHASSIS_MIDPLANE_INFO_ACCESS_FIELD]
table.append((key_list[1], ip, access))
if table:
click.echo(tabulate(table, header, tablefmt='simple', stralign='right'))
else:
click.echo('No data available in CHASSIS_MIDPLANE_TABLE\n')
|
import functools
import urlparse
class require_role(object):
def __init__(self, role):
self.role = role
def __call__(self, method):
@functools.wraps(method)
def wrapper(handler, *args, **kwargs):
if not handler.current_user:
url = handler.get_login_url()
if '?' not in url:
if urlparse.urlsplit(url).scheme:
next_url = handler.request.full_url()
else:
next_url = handler.request.uri
url += '?next=' + next_url
return handler.redirect(url)
user = handler.current_user
if not user.username:
handler.flash_message('Please setup a username', 'warn')
return handler.redirect('/account/setting')
if user.role == 1:
handler.flash_message('Please verify your email', 'warn')
return handler.redirect('/account/setting')
if user.role < 1:
return handler.redirect('/doc/guideline')
if user.role < self.role:
return handler.send_error(403)
return method(handler, *args, **kwargs)
return wrapper
require_user = require_role(2)
require_staff = require_role(6)
require_admin = require_role(9)
def require_system(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.remote_ip != '127.0.0.1':
self.send_error(403)
return
return method(self, *args, **kwargs)
return wrapper
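# Usage sketch (hedged: assumes a Tornado-style RequestHandler subclass; the handler
# name and body are illustrative only):
#
#     class CreateTopicHandler(BaseHandler):
#         @require_user          # role >= 2: logged in, username set, email verified
#         def post(self):
#             ...
#
# require_system is intended for loopback-only endpoints, e.g. internal maintenance hooks.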
|
"""
ID: piraadw1
LANG: PYTHON3
TASK: [Task Name]
"""
fin = open ('[Task Name].in', 'r')
fout = open ('[Task Name].out', 'w')
fout.write([send to server])
fout.close()
|
''' TMP36 temperature sensor
2017-0802 PePo initial version
Note: do not swap Vs and GND on the TMP36!
pin TMP36 NodeMCU Huzzah ESP8266
data-out A0 ADC
Vs 3.3V 3.3V
GND GND GND
#'''
import machine, time
from micropython import const # const() used below requires this import on standard MicroPython
import ssd1306
#import heartbeat #diagnostic tool
# using from machine import ADC is bad practice
# calculate temperature (celsius) from voltage value (mV)
def temp(value):
_OFFSET = const(20) #calibration factor, should be 0
return (value - 500 - _OFFSET) / 10 #Huzzah
#NodeMCU: return (value) / 10
# Celsius to Fahrenheit
def fahrenheit(celsius):
return (celsius * 9/5) + 32
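# Worked example of the two conversions above: an ADC reading of 750 mV gives
# temp(750) = (750 - 500 - 20) / 10 = 23.0 degrees Celsius and fahrenheit(23.0) = 73.4 degrees Fahrenheit.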
_ADC_PIN = 0
_WARNING_LED_PIN = 14
# create i2c for display
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4), freq=100000)
print('i2c.scan: ', i2c.scan()) #[60]
# OLED screen dimensions
__WIDTH = const(128)
__HEIGHT = const(32)
oled = ssd1306.SSD1306_I2C(__WIDTH, __HEIGHT, i2c)
# program
# read voltage (mV)
adc = machine.ADC(_ADC_PIN)
#TEST: print('ADC reading:', adc.read())
alert = machine.Pin(_WARNING_LED_PIN, machine.Pin.OUT)
# alert ON and OFF
def alertOn():
alert.on()
def alertOff():
alert.off()
# run reading T, Ctrl-C to abort
_THRESHOLD = const(30)
def run(dt=2.0):
print('TMP36 demo on OLED')
try:
while True:
oled.fill(0) # clear screen
reading = adc.read()
celsius_temp = temp(reading)
fahrenheit_temp = fahrenheit(celsius_temp)
print('TMP36 reading {}\tDegrees Celsius {}\tDegrees Fahrenheit {}'.format(reading, celsius_temp, fahrenheit_temp))
oled.text('TMP36 {0} '.format(reading),0,0)
oled.text('Celsius {0:0.1f}'.format(celsius_temp),0,10)
oled.text('Fahrenheit {0:0.1f}'.format(fahrenheit_temp),0,20)
if celsius_temp > _THRESHOLD:
alertOn()
else:
alertOff()
oled.show()
time.sleep(dt) #wait > s, see datasheet
except:
print('Exception! Done')
run(5.0)
|
class Solution:
def longestPalindrome(self, s: str) -> int:
str_dict={}
for each in s:
if each not in str_dict:
str_dict[each]=0
str_dict[each]+=1
result=0
odd=0
for k, v in str_dict.items():
if v%2==0:
result+=v
else:
result+=v-1
odd=1
return result+odd
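# Quick check of the counting approach above (illustrative):
# Solution().longestPalindrome("abccccdd") == 7 # four c's + two d's + one odd letter in the middle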
|
from datetime import date
from factory import DjangoModelFactory, Faker, SubFactory, fuzzy, post_generation
from tests.models import Article, EarningsReport, NewsAgency, Place, Publication, Reporter, Restaurant, Waiter
class PlaceFactory(DjangoModelFactory):
address = Faker('street_address')
name = Faker('company')
class Meta:
model = Place
class RestaurantFactory(DjangoModelFactory):
place = SubFactory(PlaceFactory)
serves_hot_dogs = Faker('boolean')
serves_pizza = Faker('boolean')
class Meta:
model = Restaurant
class WaiterFactory(DjangoModelFactory):
restaurant = SubFactory(RestaurantFactory)
name = Faker('name')
class Meta:
model = Waiter
class EarningsReportFactory(DjangoModelFactory):
restaurant = SubFactory(RestaurantFactory)
date = fuzzy.FuzzyDate(date(2000, 1, 1))
amount = fuzzy.FuzzyDecimal(0, 10000)
class Meta:
model = EarningsReport
class NewsAgencyFactory(DjangoModelFactory):
website = Faker('url')
class Meta:
model = NewsAgency
class ReporterFactory(DjangoModelFactory):
email = Faker('email')
first_name = Faker('first_name')
last_name = Faker('last_name')
news_agency = SubFactory(NewsAgencyFactory)
class Meta:
model = Reporter
class PublicationFactory(DjangoModelFactory):
title = Faker('catch_phrase')
class Meta:
model = Publication
@post_generation
def number_of_articles(self, create, extracted, **kwargs):
if not create or extracted is None or extracted < 1:
return
self.article_set.add(*ArticleFactory.create_batch(extracted))
class ArticleFactory(DjangoModelFactory):
headline = Faker('bs')
pub_date = Faker('date')
reporter = SubFactory(ReporterFactory)
class Meta:
model = Article
@post_generation
def number_of_publications(self, create, extracted, **kwargs):
if not create or extracted is None or extracted < 1:
return
self.publications.add(*PublicationFactory.create_batch(extracted))
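# Usage sketch (hedged: assumes the test models above are migrated; counts are illustrative):
#
#     publication = PublicationFactory(number_of_articles=3)   # post_generation attaches 3 articles
#     article = ArticleFactory(number_of_publications=2)       # and 2 publications here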
|
#!/usr/bin/python
from __future__ import unicode_literals
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
from django.core.management.base import BaseCommand, CommandError
import sys
import datetime
from django.utils import timezone
from provision.models import Activity, ActivityFetch, ActivityStat, ActivityRunningStat, ActivityType, ActivityStatComp
class Command(BaseCommand):
help = "Clears the system be removing all 'provision' Activity related objects -- all other qrba objects are not touched"
def handle(self, *args, **options):
qr = ActivityStatComp.objects.all()
qr.delete()
qr = ActivityRunningStat.objects.all()
qr.delete()
qr = ActivityStat.objects.all()
qr.delete()
qr = ActivityFetch.objects.all()
qr.delete()
qr = Activity.objects.all()
qr.delete()
# qr = ActivityType.objects.all()
# qr.delete()
|
import os
import numpy as np
import pandas as pd
import multiprocessing
from PIL import Image
from sklearn.metrics import pairwise_distances
from lib.image_lib import preprocess
from lib.image_lib import center_of_mass
LEVEL = 'level'
PRETRAIN = 'pre-train'
def find_start_end(profile):
start = None
end = None
for i, val in enumerate(profile):
if val != 0:
start = i
break
for i, val in enumerate(profile[::-1]):
if val != 0:
end = len(profile) - i
break
start = start if start is not None else 0
end = end if end is not None else len(profile)
return start, end
def extract_word(img):
x_profile = np.sum(img, axis=0)
y_profile = np.sum(img, axis=1)
x_start, x_end = find_start_end(x_profile)
y_start, y_end = find_start_end(y_profile)
return img[y_start:y_end, x_start:x_end]
def open_image(img_path, clean):
img = Image.open(img_path)
bin_img = preprocess(img)
return extract_word(bin_img)
def create_level_descriptor(img, levels):
width = img.shape[1]
pixel_num = np.sum(img)
center_x, center_y = center_of_mass(img)
sums = []
for level in levels:
l_width = width // level
for i in range(level):
sums.append(np.sum(img[:center_y,
i * l_width:i * l_width + l_width]))
sums.append(np.sum(img[center_y:,
i * l_width:i * l_width + l_width]))
sums = np.array(sums)
# handle special case of empty word image
if pixel_num == 0:
return sums
else:
return sums / float(pixel_num)
def create_level_descriptors(imgs, levels):
descriptors = []
for img in imgs:
descriptors.append(create_level_descriptor(img, levels))
return np.array(descriptors)
def create_work(img_names, img_paths, clean, thread_num):
work = []
img_num = len(img_paths) // thread_num
for i in range(thread_num):
work.append([img_names[i * img_num:i * img_num + img_num],
img_paths[i * img_num:i * img_num + img_num], clean])
# add remaining paths to last worker
work[-1][0] += img_names[img_num * thread_num:len(img_paths)]
work[-1][1] += img_paths[img_num * thread_num:len(img_paths)]
return work
def load(data):
img_names, img_paths, clean = data
imgs = []
names = []
for name, img_path in zip(img_names, img_paths):
img = open_image(img_path, clean)
# ignore empty images
if np.sum(img) > 0:
imgs.append(img)
names.append(name)
else:
print('WARN: Skip image: {}'.format(name))
return imgs, names
def load_imgs(img_dir, clean, thread_num):
img_names = [f for f in os.listdir(img_dir) if f.endswith('.png')]
img_paths = []
if len(img_names) == 0:
labels = [f for f in os.listdir(img_dir)
if os.path.isdir(os.path.join(img_dir, f))]
for label in labels:
path = os.path.join(img_dir, label)
for img in os.listdir(path):
img_names.append('{}_{}'.format(label, img))
img_paths.append(os.path.join(path, img))
else:
img_paths = [os.path.join(img_dir, f) for f in img_names]
imgs = []
names = []
work = create_work(img_names, img_paths, clean, thread_num)
pool = multiprocessing.Pool(processes=thread_num)
results = pool.map(load, work)
for result in results:
imgs += result[0]
names += result[1]
return imgs, names
def write_distances(distances, names, out_file):
df = pd.DataFrame(data=distances, index=names, columns=names)
df.to_csv(out_file)
def load_phocs(phoc_file):
data = np.load(phoc_file)
descriptors = data['output']
img_names = ['{}.png'.format(f.decode('utf-8')) for f in data['img_ids']]
return descriptors, img_names
def main(img_dir, strategy, out_file, clean, levels, metric, thread_num,
phoc_file):
if img_dir is not None:
imgs, img_names = load_imgs(img_dir, clean, thread_num)
elif strategy == LEVEL:
raise ValueError('--img_dir required for strategy: {}'
.format(strategy))
if strategy == PRETRAIN and phoc_file is None:
raise ValueError('--phoc_file required for strategy: {}'
.format(strategy))
if strategy == LEVEL:
descriptors = create_level_descriptors(imgs, levels)
elif strategy == PRETRAIN:
descriptors, img_names = load_phocs(phoc_file)
else:
raise ValueError('Strategy "{}" not supported'.format(strategy))
distances = pairwise_distances(descriptors, metric=metric,
n_jobs=thread_num)
write_distances(distances, img_names, out_file)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Compute distances for '
'given files.')
parser.add_argument('strategy', choices=[LEVEL, PRETRAIN])
parser.add_argument('out_file', help='Path to output file.')
parser.add_argument('--img_dir', help='Path to folder with images to '
'compute distances from.', default=None)
parser.add_argument('--phoc_file', help='Path to NPZ file with predicted '
'PHOC descriptors.', default=None)
parser.add_argument('--clean', help='Remove border noise before computing '
'representations.', action='store_true')
parser.add_argument('--levels', '-l', action='store',
type=lambda x: [int(elem) for elem in x.split(',')],
default='2,3,4,5', help='Comma-separated list of PHOC '
'unigram levels to be used when computing PHOCs. '
'Default: 2,3,4,5')
parser.add_argument('--metric', help='Distance metric to use (must be '
'supported by SciPy).', default='l2')
parser.add_argument('--thread_num', type=int, default=20, help='Number '
'of threads to use for computations (Default: 20).')
args = vars(parser.parse_args())
main(**args)
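# Example invocations (hedged: the script name and data paths below are illustrative):
#   python compute_distances.py level out/distances.csv --img_dir data/words --levels 2,3,4,5 --metric l2
#   python compute_distances.py pre-train out/distances.csv --phoc_file out/phocs.npz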
|
# Copyright 2020- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from datetime import timedelta
from typing import Dict, Optional, Union
from ..base import LibraryComponent
from ..generated.playwright_pb2 import Request
from ..utils import ElementState, keyword, logger
class Waiter(LibraryComponent):
@keyword(tags=("Wait", "PageContent"))
def wait_for_elements_state(
self,
selector: str,
state: ElementState = ElementState.visible,
timeout: Optional[timedelta] = None,
message: Optional[str] = None,
):
"""Waits for the element found by ``selector`` to satisfy state option.
Note that Browser library has `Implicit waiting` mechanisms. Depending on
the situation you might not need to use `Wait for Elements State`.
State options could be either appear/disappear from dom, or become visible/hidden.
If at the moment of calling the keyword, the selector already satisfies the condition,
the keyword will return immediately.
If the selector doesn't satisfy the condition within the timeout the keyword will FAIL.
``selector`` Selector of the corresponding object.
See the `Finding elements` section for details about the selectors.
``state`` See `ElementState` for explanation.
Note that an element without any content or with display:none has an empty bounding box
and is not considered visible.
``timeout`` uses default timeout from library if not set.
``message`` overrides the default error message. The ``message``
argument accepts `{selector}`, `{function}`, and `{timeout}`
[https://docs.python.org/3/library/stdtypes.html#str.format|format] options.
The `{function}` formatter is the ``state`` argument value.
Keyword uses strict mode, see `Finding elements` for more details about strict mode.
Example:
| `Wait For Elements State` //h1 visible timeout=2 s
| `Wait For Elements State` //h1 focused 1s
"""
timeout_as_str = self.millisecs_to_timestr(self.get_timeout(timeout))
funct = {
ElementState.attached: "e => e.isConnected",
ElementState.detached: "e => !e.isConnected",
ElementState.readonly: "e => e.readOnly",
ElementState.selected: "e => e.selected",
ElementState.deselected: "e => !e.selected",
ElementState.focused: "e => document.activeElement === e",
ElementState.defocused: "e => document.activeElement !== e",
ElementState.checked: "e => e.checked",
ElementState.unchecked: "e => !e.checked",
}
if state in [
ElementState.visible,
ElementState.hidden,
ElementState.stable,
ElementState.enabled,
ElementState.disabled,
ElementState.editable,
]:
end = float(
self.convert_timeout(timeout, False) if timeout else self.timeout / 1000
)
end += time.monotonic()
while True:
try:
return self._wait_for_elements_state(
selector, state, timeout, self.strict_mode
)
except Exception as error:
if end > time.monotonic():
logger.debug(f"Suppress error: {error}")
else:
if message:
message = message.format(
selector=selector,
function=state,
timeout=timeout_as_str,
)
raise AssertionError(message)
raise
else:
self.wait_for_function(
funct[state],
selector=selector,
timeout=timeout,
message=message,
)
def _wait_for_elements_state(
self,
selector: str,
state: ElementState = ElementState.visible,
timeout: Optional[timedelta] = None,
strict: bool = True,
):
with self.playwright.grpc_channel() as stub:
options: Dict[str, object] = {"state": state.name}
if timeout:
options["timeout"] = self.get_timeout(timeout)
options_json = json.dumps(options)
response = stub.WaitForElementsState(
Request().ElementSelectorWithOptions(
selector=selector, options=options_json, strict=strict
)
)
logger.info(response.log)
@keyword(tags=("Wait", "PageContent"))
def wait_for_function(
self,
function: str,
selector: str = "",
polling: Union[str, timedelta] = "raf",
timeout: Optional[timedelta] = None,
message: Optional[str] = None,
):
"""Polls JavaScript expression or function in browser until it returns a (JavaScript) truthy value.
``function`` A valid javascript function or a javascript function body. For example
``() => true`` and ``true`` will behave similarly.
``selector`` Selector to resolve and pass to the JavaScript function. This will be the first
argument the function receives. If a selector is given, a full function is required, with an
argument to capture the element handle. For example ``(element) => document.activeElement === element``
See the `Finding elements` section for details about the selectors.
``polling`` Default polling value of "raf" polls in a callback for ``requestAnimationFrame``.
Any other value for polling will be parsed as a robot framework time for interval between polls.
``timeout`` Uses default timeout of 10 seconds if not set.
``message`` overrides the default error message. The ``message``
argument accepts `{selector}`, `{function}`, and `{timeout}`
[https://docs.python.org/3/library/stdtypes.html#str.format|format] options.
Keyword uses strict mode, see `Finding elements` for more details about strict mode.
Example usage:
| ${promise} `Promise To` `Wait For Function` element => element.style.width=="100%" selector=\\#progress_bar timeout=4s
| `Click` \\#progress_bar
| `Wait For` ${promise}
"""
timeout_as_str = self.millisecs_to_timestr(self.get_timeout(timeout))
end = float(
self.convert_timeout(timeout, False) if timeout else self.timeout / 1000
)
end += time.monotonic()
while True:
try:
return self._wait_for_function(
function, selector, polling, timeout, self.strict_mode
)
except Exception as error:
if end > time.monotonic():
logger.debug(f"Suppress {error}")
else:
if message:
message = message.format(
selector=selector, function=function, timeout=timeout_as_str
)
raise AssertionError(message)
raise
def _wait_for_function(
self,
function: str,
selector: str = "",
polling: Union[str, timedelta] = "raf",
timeout: Optional[timedelta] = None,
strict: bool = True,
):
with self.playwright.grpc_channel() as stub:
options: Dict[str, int] = {}
if polling != "raf":
options["polling"] = self.convert_timeout(polling) # type: ignore
if timeout:
options["timeout"] = self.convert_timeout(timeout) # type: ignore
options_json = json.dumps(options)
response = stub.WaitForFunction(
Request().WaitForFunctionOptions(
script=function,
selector=selector,
options=options_json,
strict=strict,
)
)
logger.debug(response.json)
logger.info(response.log)
|
import os
from modules.base_crawler import Base_Crawler
from modules.csv_module.csv_helper import read_clothDB_info
class Tier_1_Crawler(Base_Crawler):
''' Used by generate_tier_1_info and delete_tier_1_temp_files. '''
def get_two_saving_paths(self, original_supplier_name):
supplier_source_path = f"./res/html_source/{original_supplier_name}_source.txt"
tier_1_info_path = f"./output/tier1/json/{original_supplier_name}_tier_1_info.json"
return supplier_source_path, tier_1_info_path
''' Used by main_DB_crawler.py.
Input data: suppliers' names and URLs are obtained from the external csv module.
'''
# Rewritten from generate_tier_1_info: only fetch all HTML sources and save them to their paths.
def get_soups_for_suppliers(self):
supplier_names, supplier_URLs = read_clothDB_info()
ID_counter = 1
for original_supplier_name, original_url in zip(supplier_names, supplier_URLs):
supplier_source_path, _ = self.get_two_saving_paths(original_supplier_name)
is_soup_saved = self.save_soup(original_url, supplier_source_path)
if is_soup_saved:
print(f"Successfully saved the HTML source of supplier #{ID_counter}: {original_supplier_name}\n")
else:
print(f"Failed to save the HTML source of supplier #{ID_counter}: {original_supplier_name}\n")
ID_counter += 1
''' Used by main_DB_crawler.py.
[Just for testing when errors occur]
Input data: given via the function parameters.
'''
def get_soup_for_one_supplier(self, supplier_name, original_url):
print(f"開始抓取服飾商: {supplier_name} 的網頁原始碼")
cw = Tier_1_Crawler()
supplier_source_path, _ = cw.get_two_saving_paths(supplier_name)
save_status = cw.save_soup(original_url, supplier_source_path)
if save_status == False:
print("[008] Error occurs! (func: get_soup_for_one_supplier)")
''' This func is used by main_DB_crawler.py '''
def generate_tier_1_info(self, supplier_name, method=None):
supplier_source_path, tier_1_info_path = self.get_two_saving_paths(supplier_name)
if os.path.exists(supplier_source_path):
# tier_1_info: (<type: dict>)
if method is None:
tier_1_info = self.extract_data(supplier_source_path)
elif method == "NET-only":
tier_1_info = self.extract_data()
if tier_1_info:
try:
self.save_json(tier_1_info_path, tier_1_info) # dict -> json
print(f"Successfully saved tier_1_info (JSON file) for supplier: {supplier_name}")
except Exception:
print("[007] Cannot save tier_1_info.")
else:
print("Because func: extract_data has error, cannot execute func: generate_tier_1_info.")
else:
print("[001] Because func: save_soup has error, cannot execute other func below.")
''' This func is used by main_DB_crawler.py '''
def delete_tier_1_temp_files(self, supplier_name):
supplier_source_path, tier_1_info_path = self.get_two_saving_paths(supplier_name)
if os.path.exists(tier_1_info_path):
while True:
keyin = str(input(f"確定要刪除: {tier_1_info_path} 嗎?\n請輸入(y/n)並按下Enter ==> "))
if keyin.lower() == "y":
os.remove(tier_1_info_path)
print("暫存資料刪除成功!")
break
elif keyin.lower() == "n":
print("OK!操作取消")
break
else:
print("輸入錯誤,請重新輸入!")
else:
print("[006] Fail to remove temporary file.(The path does not exist.)")
''' This func is used by func: extract_data '''
def load_texts(self, source_path):
string_buffer = None
with open(source_path, "r", encoding="utf-8") as fp:
texts = fp.readlines()
string_buffer = ""
for row in texts:
if "<br/>" in row:
row = row.replace("<br/>", "")
row = row.replace("\n", "")
string_buffer += row
#texts = ''.join(texts)
#return texts
return string_buffer
|
import unittest
import qiskit_toqm.native as toqm
class TestTOQM(unittest.TestCase):
def test_version(self):
self.assertEqual(toqm.__version__, "0.1.0")
def test_basic(self):
num_q = 4
gates = [
toqm.GateOp(0, "cx", 0, 1),
toqm.GateOp(1, "cx", 0, 2),
toqm.GateOp(2, "cx", 0, 3),
toqm.GateOp(3, "cx", 1, 2),
toqm.GateOp(4, "cx", 1, 3),
toqm.GateOp(5, "cx", 2, 3)
]
coupling = toqm.CouplingMap(5, {(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4)})
q = toqm.DefaultQueue()
exp = toqm.DefaultExpander()
cf = toqm.CXFrontier()
lat = toqm.Latency_1_2_6()
fs = [toqm.HashFilter(), toqm.HashFilter2()]
nms = []
mapper = toqm.ToqmMapper(q, exp, cf, lat, nms, fs)
mapper.setRetainPopped(0)
result = mapper.run(gates, num_q, coupling, -1)
# Print result
for g in result.scheduledGates:
print(f"{g.gateOp.type} ", end='')
if g.physicalControl >= 0:
print(f"q[{g.physicalControl}],", end='')
print(f"q[{g.physicalTarget}]; ", end='')
print(f"//cycle: {g.cycle}", end='')
if (g.gateOp.type.lower() != "swap"):
print(f" //{g.gateOp.type} ", end='')
if g.gateOp.control >= 0:
print(f"q[{g.gateOp.control}],", end='')
print(f"q[{g.gateOp.target}]; ", end='')
print()
|
"""Unit tests for the Gerrit hosting service."""
from __future__ import unicode_literals
import base64
import re
import sys
from django.utils.six.moves.urllib.request import (HTTPDigestAuthHandler,
OpenerDirector)
from reviewboard.hostingsvcs.errors import (AuthorizationError,
HostingServiceAPIError,
HostingServiceError,
RepositoryError)
from reviewboard.hostingsvcs.gerrit import GerritForm
from reviewboard.hostingsvcs.testing import HostingServiceTestCase
from reviewboard.scmtools.core import Branch, Commit
from reviewboard.scmtools.crypto_utils import encrypt_password
from reviewboard.scmtools.errors import FileNotFoundError
class GerritTestCase(HostingServiceTestCase):
"""Base class for Gerrit unit tests."""
service_name = 'gerrit'
default_account_data = {
'authorized': False,
'gerrit_http_password': encrypt_password('mypass'),
'gerrit_ssh_port': 1234,
}
default_repository_tool_name = 'Git'
default_repository_extra_data = {
'gerrit_url': 'http://gerrit.example.com/',
'gerrit_project_name': 'Project',
}
class GerritFormTests(GerritTestCase):
"""Unit tests for GerritForm."""
def test_clean_sets_gerrit_domain(self):
"""Testing GerritForm.clean sets gerrit_domain"""
form = GerritForm({
'gerrit_project_name': 'test-project',
'gerrit_ssh_port': 12345,
'gerrit_url': 'http://gerrit.example.com:8080',
})
self.assertTrue(form.is_valid())
self.assertIn('gerrit_domain', form.cleaned_data)
self.assertEqual(form.cleaned_data['gerrit_domain'],
'gerrit.example.com')
def test_clean_with_errors(self):
"""Testing GerritForm.clean with errors"""
form = GerritForm({
'gerrit_url': 'invalid',
})
self.assertFalse(form.is_valid())
self.assertEqual(form.cleaned_data, {})
self.assertIn('gerrit_project_name', form.errors)
self.assertIn('gerrit_ssh_port', form.errors)
self.assertIn('gerrit_url', form.errors)
self.assertEqual(form.errors['gerrit_project_name'],
['This field is required.'])
self.assertEqual(form.errors['gerrit_ssh_port'],
['This field is required.'])
self.assertEqual(form.errors['gerrit_url'],
['Enter a valid URL.'])
class GerritClientTests(GerritTestCase):
"""Unit tests for GerritClient."""
def setUp(self):
super(GerritClientTests, self).setUp()
hosting_account = self.create_hosting_account()
self.client = hosting_account.service.client
def test_auth_headers(self):
"""Testing GerritClient.http_request sets auth headers"""
class DummyResponse(object):
headers = {}
def getcode(self):
return 200
def geturl(self):
return 'http://gerrit.example.com/'
def read(self):
return b''
def _open(*args, **kwargs):
_open_args.extend(args)
return DummyResponse()
_open_args = []
self.spy_on(OpenerDirector.open,
owner=OpenerDirector,
call_fake=_open)
self.client.http_request(url='http://gerrit.example.com/',
username='test-user',
password='test-pass')
opener, request = _open_args
handler = opener.handlers[0]
self.assertIsInstance(handler, HTTPDigestAuthHandler)
self.assertEqual(
handler.passwd.find_user_password(None,
'http://gerrit.example.com/'),
('test-user', 'test-pass'))
self.assertEqual(
request.headers,
{
'Authorization': 'Basic dGVzdC11c2VyOnRlc3QtcGFzcw==',
})
class GerritTests(GerritTestCase):
"""Unit tests for the Gerrit hosting service."""
service_name = 'gerrit'
default_account_data = {
'authorized': False,
'gerrit_http_password': encrypt_password('mypass'),
'gerrit_ssh_port': 1234,
}
default_repository_tool_name = 'Git'
default_repository_extra_data = {
'gerrit_url': 'http://gerrit.example.com/',
'gerrit_project_name': 'Project',
}
def test_service_support(self):
"""Testing Gerrit service support capabilities"""
self.assertFalse(self.service_class.supports_bug_trackers)
self.assertTrue(self.service_class.supports_repositories)
self.assertFalse(self.service_class.supports_ssh_key_association)
self.assertTrue(self.service_class.supports_post_commit)
def test_authorize(self):
"""Testing Gerrit.authorize"""
hosting_account = self.create_hosting_account(data={})
with self.setup_http_test(hosting_account=hosting_account,
expected_http_calls=1) as ctx:
ctx.service.authorize(
username='myuser',
password='mypass',
hosting_url='',
credentials={
'username': 'myuser',
'password': 'mypass',
},
local_site_name=None,
gerrit_url='http://gerrit.example.com')
self.assertIn('authorized', hosting_account.data)
self.assertTrue(hosting_account.data['authorized'])
ctx.assertHTTPCall(0, url='http://gerrit.example.com/a/projects/')
def test_authorize_with_error(self):
"""Testing Gerrit.authorize handles authentication failure"""
expected_message = (
'Unable to authenticate to Gerrit at '
'http://gerrit.example.com/a/projects/. The username or password '
'used may be invalid.'
)
def _http_request(client, *args, **kwargs):
raise HostingServiceError('', http_code=401)
with self.setup_http_test(_http_request, expected_http_calls=1) as ctx:
with self.assertRaisesMessage(AuthorizationError,
expected_message):
ctx.service.authorize(
username='myuser',
password='mypass',
hosting_url='',
credentials={
'username': 'myuser',
'password': 'mypass',
},
local_site_name=None,
gerrit_url='http://gerrit.example.com')
ctx.assertHTTPCall(0, url='http://gerrit.example.com/a/projects/')
self.assertFalse(ctx.hosting_account.data['authorized'])
def test_check_repository(self):
"""Testing Gerrit.check_repository"""
payload = self._make_json_rsp({
'gerrit-reviewboard': {
'id': 'gerrit-reviewboard',
'version': self.service_class.REQUIRED_PLUGIN_VERSION_STR,
},
})
with self.setup_http_test(payload=payload,
expected_http_calls=2) as ctx:
ctx.service.check_repository(
gerrit_url='http://gerrit.example.com',
gerrit_project_name='Project')
ctx.assertHTTPCall(0,
url='http://gerrit.example.com/a/projects/Project')
ctx.assertHTTPCall(1, url='http://gerrit.example.com/a/plugins/')
def test_check_repository_with_404(self):
"""Testing Gerrit.check_repository with a non-existent repository"""
def _http_request(client, *args, **kwargs):
raise HostingServiceAPIError('', 404)
expected_message = (
'The project "Project" does not exist or you do not have access '
'to it.'
)
with self.setup_http_test(_http_request,
expected_http_calls=1) as ctx:
with self.assertRaisesMessage(RepositoryError, expected_message):
ctx.service.check_repository(
gerrit_url='http://gerrit.example.com',
gerrit_project_name='Project')
ctx.assertHTTPCall(0,
url='http://gerrit.example.com/a/projects/Project')
def test_check_repository_with_no_plugin(self):
"""Testing Gerrit.check_repository with no plugin"""
expected_message = (
'The "gerrit-reviewboard" plugin is not installed on the server. '
'See https://github.com/reviewboard/gerrit-reviewboard-plugin/ '
'for installation instructions.'
)
with self.setup_http_test(payload=self._make_json_rsp({}),
expected_http_calls=2) as ctx:
with self.assertRaisesMessage(RepositoryError, expected_message):
ctx.service.check_repository(
gerrit_url='http://gerrit.example.com',
gerrit_project_name='Project')
ctx.assertHTTPCall(0,
url='http://gerrit.example.com/a/projects/Project')
ctx.assertHTTPCall(1, url='http://gerrit.example.com/a/plugins/')
def test_check_repository_with_bad_plugin_version(self):
"""Testing Gerrit.check_repository with an outdated plugin"""
payload = self._make_json_rsp({
'gerrit-reviewboard': {
'id': 'gerrit-reviewboard',
'version': '0.0.0',
},
})
expected_message = (
'The "gerrit-reviewboard" plugin on the server is an incompatible '
'version: found 0.0.0 but version %s or higher is required.'
% self.service_class.REQUIRED_PLUGIN_VERSION_STR
)
with self.setup_http_test(payload=payload,
expected_http_calls=2) as ctx:
with self.assertRaisesMessage(RepositoryError, expected_message):
ctx.service.check_repository(
gerrit_url='http://gerrit.example.com',
gerrit_project_name='Project')
ctx.assertHTTPCall(0,
url='http://gerrit.example.com/a/projects/Project')
ctx.assertHTTPCall(1, url='http://gerrit.example.com/a/plugins/')
def test_get_file_exists(self):
"""Testing Gerrit.get_file_exists"""
blob_id = 'a' * 40
payload = self._make_json_rsp({
'blobId': blob_id,
})
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
self.assertTrue(ctx.service.get_file_exists(
repository=ctx.create_repository(),
path='/bogus',
revision=blob_id))
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/blobs/%s/'
% blob_id))
def test_get_file_exists_with_404(self):
"""Testing Gerrit.get_file_exists with a non-existant file"""
def _http_request(client, *args, **kwargs):
raise HostingServiceAPIError('', http_code=404)
blob_id = 'a' * 40
with self.setup_http_test(_http_request, expected_http_calls=1) as ctx:
self.assertFalse(ctx.service.get_file_exists(
repository=ctx.create_repository(),
path='/bogus',
revision=blob_id))
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/blobs/%s/'
% blob_id))
def test_get_file(self):
"""Testing Gerrit.get_file"""
blob_id = 'a' * 40
with self.setup_http_test(payload=base64.b64encode(b'Hello, world!'),
expected_http_calls=1) as ctx:
data = ctx.service.get_file(repository=ctx.create_repository(),
path='/bogus',
revision=blob_id)
self.assertIsInstance(data, bytes)
self.assertEqual(data, b'Hello, world!')
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/blobs/%s/'
'content/'
% blob_id))
def test_get_file_with_404(self):
"""Testing Gerrit.get_file with a non-existent blob ID"""
def _http_request(client, *args, **kwargs):
raise HostingServiceAPIError('', http_code=404)
blob_id = 'a' * 40
with self.setup_http_test(_http_request, expected_http_calls=1) as ctx:
with self.assertRaises(FileNotFoundError):
ctx.service.get_file(repository=ctx.create_repository(),
path='/bogus',
revision=blob_id)
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/blobs/%s/'
'content/'
% blob_id))
def test_get_file_with_undecodable_response(self):
"""Testing Gerrit.get_file with an undecodable response"""
blob_id = 'a' * 40
if sys.version_info[:2] >= (3, 7):
# During Python 3.7.x, this error has changed a couple of times,
# so we're going to match only the prefix of it.
specific_error = 'Invalid base64-encoded string:'
else:
specific_error = 'Incorrect padding'
expected_message = re.escape(
'An error occurred while retrieving "/foo" at revision '
'"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" from Gerrit: the '
'response could not be decoded: %s'
% specific_error
)
with self.setup_http_test(payload=b'?Invalid base64',
expected_http_calls=1) as ctx:
with self.assertRaisesRegexp(HostingServiceAPIError,
expected_message):
ctx.service.get_file(repository=ctx.create_repository(),
path='/foo',
revision=blob_id)
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/blobs/%s/'
'content/'
% blob_id))
def test_get_branches(self):
"""Testing Gerrit.get_branches"""
payload = self._make_json_rsp([
{
'ref': 'HEAD',
'revision': 'master'
},
{
'ref': 'refs/meta/config',
'revision': '7a59a483aeefc8c7d4082f1081c42db817176071'
},
{
'ref': 'refs/heads/master',
'revision': '6854734ef5fc8b2b9d291bf42aa59c344abf5a73'
},
{
'ref': 'refs/heads/release-2.0.x',
'revision': '7f68a001f8e5b77e7355c11385bfbcd2a6d3c077'
},
{
'ref': 'refs/heads/release-2.5.x',
'revision': 'fc8a7ecf288d835ecd9ded086ffaee9412d1da9c'
},
])
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
branches = ctx.service.get_branches(ctx.create_repository())
ctx.assertHTTPCall(
0,
url='http://gerrit.example.com/a/projects/Project/branches/')
self.assertEqual(
branches,
[
Branch(commit='6854734ef5fc8b2b9d291bf42aa59c344abf5a73',
id='master',
default=True),
Branch(commit='7f68a001f8e5b77e7355c11385bfbcd2a6d3c077',
id='release-2.0.x'),
Branch(commit='fc8a7ecf288d835ecd9ded086ffaee9412d1da9c',
id='release-2.5.x'),
]
)
def test_get_commits(self):
"""Testing Gerrit.get_commits"""
payload = self._make_json_rsp([
{
'message': 'Backport a fix for screenshot commenting.\n',
'revision': '77c174669b7018936f16b98547445624c6738e1e',
'author': 'David Trowbridge',
'parents': [
'ecfbf578d31f550a135580cee26fa20fbaea36d9',
],
'time': '2016-09-05T23:28:30-07:00',
},
{
'message': 'Fix some issues with screenshot commenting.'
'\n',
'revision': '8a39b87f0124f27225234014a87914e434b223a9',
'author': 'David Trowbridge',
'parents': [
'3fb32c83993cd8c07fbbb605cf0cc523010da7c8',
],
'time': '2016-09-05T22:58:29-07:00',
},
{
'message': 'Fix draggability of the comment dialog.\n',
'revision': '3fb32c83993cd8c07fbbb605cf0cc523010da7c8',
'author': 'David Trowbridge',
'parents': [
'7619f51371b55bfcdf4cb3fccf5d3c76bf5002c0',
],
'time': '2016-09-05T22:47:55-07:00',
},
])
with self.setup_http_test(payload=payload,
expected_http_calls=1) as ctx:
commits = ctx.service.get_commits(ctx.create_repository())
ctx.assertHTTPCall(
0,
url='http://gerrit.example.com/a/projects/Project/all-commits/')
self.assertEqual(
commits,
[
Commit(author_name='David Trowbridge',
id='77c174669b7018936f16b98547445624c6738e1e',
date='2016-09-05T23:28:30-07:00',
message='Backport a fix for screenshot commenting.\n',
parent='ecfbf578d31f550a135580cee26fa20fbaea36d9'),
Commit(author_name='David Trowbridge',
id='8a39b87f0124f27225234014a87914e434b223a9',
date='2016-09-05T22:58:29-07:00',
message='Fix some issues with screenshot commenting.\n',
parent='3fb32c83993cd8c07fbbb605cf0cc523010da7c8'),
Commit(author_name='David Trowbridge',
id='3fb32c83993cd8c07fbbb605cf0cc523010da7c8',
date='2016-09-05T22:47:55-07:00',
message='Fix draggability of the comment dialog.\n',
parent='7619f51371b55bfcdf4cb3fccf5d3c76bf5002c0'),
])
for commit in commits:
self.assertIsNone(commit.diff)
def test_get_change(self):
"""Testing Gerrit.get_change"""
revision = '77c174669b7018936f16b98547445624c6738e1e'
paths = {
'/a/projects/Project/commits/%s/diff/' % revision: {
'payload': b'fake diff',
},
'/a/projects/Project/all-commits/': {
'payload': self._make_json_rsp([{
'message': 'Backport a fix for screenshot '
'commenting.\n',
'revision': '77c174669b7018936f16b98547445624c6738e1e',
'author': 'David Trowbridge',
'parents': [
'ecfbf578d31f550a135580cee26fa20fbaea36d9',
],
'time': '2016-09-05T23:28:30-07:00',
}]),
},
}
with self.setup_http_test(self.make_handler_for_paths(paths),
expected_http_calls=2) as ctx:
commit = ctx.service.get_change(
repository=ctx.create_repository(),
revision='77c174669b7018936f16b98547445624c6738e1e')
self.assertEqual(
commit,
Commit(author_name='David Trowbridge',
id='77c174669b7018936f16b98547445624c6738e1e',
date='2016-09-05T23:28:30-07:00',
message='Backport a fix for screenshot commenting.\n',
parent='ecfbf578d31f550a135580cee26fa20fbaea36d9'))
self.assertEqual(commit.diff, b'fake diff')
ctx.assertHTTPCall(
0,
url=('http://gerrit.example.com/a/projects/Project/all-commits/'
'?start=77c174669b7018936f16b98547445624c6738e1e&limit=1'))
ctx.assertHTTPCall(
1,
url=('http://gerrit.example.com/a/projects/Project/commits/'
'77c174669b7018936f16b98547445624c6738e1e/diff/'))
def _make_json_rsp(self, data):
"""Return a Gerrit JSON response payload for the given data.
Args:
data (object):
The data to serialize.
Returns:
bytes:
The serialized payload data.
"""
return b")]}'\n%s" % self.dump_json(data)
|
#!/usr/bin/env python
import os
import sys
import math
import numpy
if len(sys.argv) < 5:
print("usage: script.py bwa_depth.tab bwa_depth-pos.tab mode[contigs/complete] suffix")
sys.exit()
name = os.path.basename(sys.argv[1]).split(sys.argv[4])[0]
spec_stats = {}
# recover genome length and total counts per species (for average depth)
with open(sys.argv[1], "r") as f:
for line in f:
if line[0] != "*": # exclude unmapped line
cols = line.strip("\n").split("\t")
if sys.argv[3] == "contigs":
species = "_".join(cols[0].split("_")[:-1]) # species name
elif sys.argv[3] == "complete":
species = cols[0]
clength = int(cols[1]) # contig length
counts = int(cols[2]) # read counts
if species not in spec_stats.keys(): # first time species is found
spec_stats[species] = [clength, counts, []] # dict of length, counts and covPos
else:
spec_stats[species][0] += clength
spec_stats[species][1] += counts
# recover covered positions (for calc mean depth, coverage and evenness)
with open(sys.argv[2], "r") as f:
for line in f:
if line[0] != "*": # exclude unmapped line
cols = line.strip("\n").split("\t")
if sys.argv[3] == "contigs":
species = "_".join(cols[0].split("_")[:-1])
elif sys.argv[3] == "complete":
species = cols[0]
depth = int(cols[2]) # depth of covered position
spec_stats[species][2].append(depth)
print(f"Genome\t{name}_Length\t{name}_Counts\t{name}_MeanDepth\t{name}_Coverage\t{name}_ExpCoverage\t{name}_CoeffVar")
# combine stats and print per species
for species in spec_stats.keys():
length = spec_stats[species][0]
counts = spec_stats[species][1]
covPos = spec_stats[species][2]
covBases = len(covPos)
coverage = float(covBases)/length*100
if covBases < 1:
covSd = 0
else:
covSd = numpy.std(covPos)
meanDepth = float(sum(covPos))/max(length,1)
if float(meanDepth) > 0:
expCov = (1.00 - numpy.exp(-0.883*meanDepth))*100
meanDepth_covered = float(sum(covPos))/max(len(covPos),1)
cV = covSd/meanDepth_covered
else:
expCov = 0
cV = 0
print(f"{species}\t{length}\t{counts}\t{meanDepth:.2f}\t{coverage:.2f}\t{expCov:.2f}\t{cV:.2f}")
|
import matplotlib.pyplot as plt
import numpy
from matplotlib.figure import Figure
from tifffile import imread
from hylfm.detect_beads import get_bead_pos
def plot_bead_hist(bead_pos_tgt, bead_pos_pred, bi=0) -> Figure:
z_tgt_counts, bin_edges = numpy.histogram(bead_pos_tgt[bi][:, 0], bins=49, range=(0, 49))
z_pred_counts, bin_edges = numpy.histogram(bead_pos_pred[bi][:, 0], bins=49, range=(0, 49))
correct = numpy.minimum(z_pred_counts, z_tgt_counts)
diff = z_tgt_counts - z_pred_counts
missing = numpy.clip(diff, a_min=0, a_max=numpy.inf)
extra = numpy.clip(-diff, a_min=0, a_max=numpy.inf)
fig, ax = plt.subplots()
width = 0.35 # the width of the bars
x = numpy.arange(0.5, 49.5)
# ax.bar(x - width/2, z_tgt_counts, width, label="tgt")
# ax.bar(x + width/2, z_pred_counts, width, label="pred")
# ax.bar(x - width/2, z_pred_counts - z_tgt_counts, width, label="diff abs")
# ax.bar(x + width/2, (z_pred_counts - z_tgt_counts) / z_tgt_counts, width, label="diff rel")
ax.bar(x, correct, 1, color="g", label="correct")
ax.bar(x, missing, 1, bottom=correct, color="r", label="missing")
ax.bar(x, extra, 1, bottom=correct, color="b", label="extra")
return fig
if __name__ == "__main__":
tgt = (
imread("K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/target/0000.tif")[None, ...]
/ numpy.iinfo(numpy.uint16).max
)
pred = (
imread("K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/prediction/0000.tif")[
None, ...
]
/ numpy.iinfo(numpy.uint16).max
)
bead_pos_tgt = get_bead_pos(tgt)
bead_pos_pred = get_bead_pos(pred)
plot_bead_hist(bead_pos_tgt, bead_pos_pred)
plt.show()
|
from django.apps import AppConfig
class LeagueConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'league'
|
# import unittest
# import json
# from cloud_guardrails.shared import utils
# from cloud_guardrails.terraform.terraform import TerraformTemplateNoParams
# from cloud_guardrails.iam_definition.services import Services, Service
#
#
# class TerraformTemplateNoParamsTestCase(unittest.TestCase):
# def test_terraform_single_service(self):
# service = Services(service_names=["Key Vault"])
# policy_names = service.get_display_names_sorted_by_service_no_params()
# subscription_name = "example"
# management_group = ""
# enforcement_mode = False
# terraform_template = TerraformTemplateNoParams(policy_names=policy_names, subscription_name=subscription_name,
# management_group=management_group,
# enforcement_mode=enforcement_mode)
# result = terraform_template.rendered()
# print(result)
# self.assertListEqual(list(terraform_template.policy_names.keys()), ["Key Vault"])
# self.assertTrue("Key vaults should have soft delete enabled" in terraform_template.policy_names.get("Key Vault"))
# self.assertTrue("example_noparams" in result)
#
# def test_terraform_all_services(self):
# services = Services()
# subscription_name = "example"
# management_group = ""
# enforcement_mode = False
# policy_names = services.get_display_names_sorted_by_service_no_params()
# terraform_template = TerraformTemplateNoParams(policy_names=policy_names, subscription_name=subscription_name,
# management_group=management_group,
# enforcement_mode=enforcement_mode)
# result = terraform_template.rendered()
# policy_name_keys = list(terraform_template.policy_names.keys())
# all_services = utils.get_service_names()
# print(f"Length of Policy name keys: {len(policy_name_keys)}")
# print(f"Length of All Services list: {len(all_services)}")
# self.assertTrue(len(policy_name_keys) >= 39)
# for service in policy_name_keys:
# self.assertTrue(service in all_services)
# # print(result)
#
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import pytest
from sagemaker_sklearn_extension.externals import Header
@pytest.mark.parametrize("names, col_idx, feature_idx", [(["a"], [0], [0]), (["a", "c"], [0, 2], [0, 1])])
def test_header_happy(names, col_idx, feature_idx):
h = Header(column_names=["a", "b", "c"], target_column_name="b")
assert h.target_column_index == 1
assert h.as_feature_indices(names) == feature_idx
assert h.as_column_indices(names) == col_idx
assert h.num_features == 2
assert h.num_columns == 3
def test_header_errors_target_missing():
with pytest.raises(ValueError):
Header(column_names=["a", "b"], target_column_name="c")
@pytest.mark.parametrize("column_names, target_column", [(["a", "b", "b", "c"], "c"), (["a", "b", "c", "c"], "c")])
def test_header_errors_duplicate_columns(column_names, target_column):
with pytest.raises(ValueError):
Header(column_names=column_names, target_column_name=target_column)
@pytest.mark.parametrize(
"names, error_regex",
[(["unknown"], "'unknown' is an unknown feature name"), (["b"], "'b' is the target column name.")],
)
def test_header_error_as_feature_indices(names, error_regex):
h = Header(column_names=["a", "b", "c"], target_column_name="b")
assert h.target_column_index == 1
with pytest.raises(ValueError) as err:
h.as_feature_indices(names)
err.match(error_regex)
def test_header_error_as_column_index():
h = Header(column_names=["a", "b", "c"], target_column_name="b")
assert h.target_column_index == 1
with pytest.raises(ValueError):
h.as_column_indices(["unknown"])
def test_header_feature_column_index_order():
h = Header(column_names=["a", "b", "c", "d"], target_column_name="c")
assert h.feature_column_indices == [0, 1, 3]
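# Illustrative sketch (added; mirrors the parametrized happy-path case above,
# not part of the original suite): feature indices skip the target column,
# while column indices are raw positions in the file.
def _header_usage_sketch():
    h = Header(column_names=["a", "b", "c"], target_column_name="b")
    assert h.as_feature_indices(["a", "c"]) == [0, 1]  # "b" (target) excluded
    assert h.as_column_indices(["a", "c"]) == [0, 2]   # positions in the file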
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from aiohttp.test_utils import unittest_run_loop
from components import Mocks, BaseApiTest
from sawtooth_rest_api.protobuf.validator_pb2 import Message
from sawtooth_rest_api.protobuf import client_pb2
class BatchListTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_connection(
Message.CLIENT_BATCH_LIST_REQUEST,
client_pb2.ClientBatchListRequest,
client_pb2.ClientBatchListResponse)
handlers = self.build_handlers(loop, self.connection)
return self.build_app(loop, '/batches', handlers.list_batches)
@unittest_run_loop
async def test_batch_list(self):
"""Verifies a GET /batches without parameters works properly.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids of '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '2'
- a link property that ends in '/batches?head=2'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
@unittest_run_loop
async def test_batch_list_with_validator_error(self):
"""Verifies a GET /batches with a validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.connection.preset_response(self.status.INTERNAL_ERROR)
response = await self.get_assert_status('/batches', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_batch_list_with_no_genesis(self):
"""Verifies a GET /batches with validator not ready breaks properly.
It will receive a Protobuf response with:
- a status of NOT_READY
It should send back a JSON response with:
- a status of 503
- an error property with a code of 15
"""
self.connection.preset_response(self.status.NOT_READY)
response = await self.get_assert_status('/batches', 503)
self.assert_has_valid_error(response, 15)
@unittest_run_loop
async def test_batch_list_with_head(self):
"""Verifies a GET /batches with a head parameter works properly.
It will receive a Protobuf response with:
- a head id of '1'
- a paging response with a start of 0, and 2 total resources
            - two batches with ids of '1' and '0'
It should send a Protobuf request with:
- a head_id property of '1'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '1'
- a link property that ends in '/batches?head=1'
- a paging property that matches the paging response
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids '1' and '0'
"""
paging = Mocks.make_paging_response(0, 2)
batches = Mocks.make_batches('1', '0')
self.connection.preset_response(head_id='1', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?head=1')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(head_id='1', paging=controls)
self.assert_has_valid_head(response, '1')
self.assert_has_valid_link(response, '/batches?head=1')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], '1', '0')
@unittest_run_loop
async def test_batch_list_with_bad_head(self):
"""Verifies a GET /batches with a bad head breaks properly.
It will receive a Protobuf response with:
- a status of NO_ROOT
It should send back a JSON response with:
- a response status of 404
- an error property with a code of 50
"""
self.connection.preset_response(self.status.NO_ROOT)
response = await self.get_assert_status('/batches?head=bad', 404)
self.assert_has_valid_error(response, 50)
@unittest_run_loop
async def test_batch_list_with_ids(self):
"""Verifies GET /batches with an id filter works properly.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 2 total resources
- two batches with ids of '0' and '2'
It should send a Protobuf request with:
- a batch_ids property of ['0', '2']
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '2', the latest
- a link property that ends in '/batches?head=2&id=0,2'
- a paging property that matches the paging response
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids '0' and '2'
"""
paging = Mocks.make_paging_response(0, 2)
batches = Mocks.make_batches('0', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?id=0,2')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(batch_ids=['0', '2'], paging=controls)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2&id=0,2')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], '0', '2')
@unittest_run_loop
async def test_batch_list_with_bad_ids(self):
"""Verifies GET /batches with a bad id filter breaks properly.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
- a head id of '2'
It should send back a JSON response with:
- a response status of 200
- a head property of '2', the latest
- a link property that ends in '/batches?head=2&id=bad,notgood'
- a paging property with only a total_count of 0
- a data property that is an empty list
"""
paging = Mocks.make_paging_response(None, 0)
self.connection.preset_response(
self.status.NO_RESOURCE,
head_id='2',
paging=paging)
response = await self.get_assert_200('/batches?id=bad,notgood')
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response, '/batches?head=2&id=bad,notgood')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 0)
@unittest_run_loop
async def test_batch_list_with_head_and_ids(self):
"""Verifies GET /batches with head and id parameters work properly.
It should send a Protobuf request with:
- a head_id property of '1'
- a paging response with a start of 0, and 1 total resource
- a batch_ids property of ['0']
It will receive a Protobuf response with:
- a head id of '1'
- one batch with an id of '0'
- empty paging controls
It should send back a JSON response with:
- a response status of 200
- a head property of '1'
- a link property that ends in '/batches?head=1&id=0'
- a paging property that matches the paging response
- a data property that is a list of 1 dict
- and that dict is a full batch with an id of '0'
"""
paging = Mocks.make_paging_response(0, 1)
batches = Mocks.make_batches('0')
self.connection.preset_response(head_id='1', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?id=0&head=1')
controls = Mocks.make_paging_controls()
self.connection.assert_valid_request_sent(
head_id='1',
batch_ids=['0'],
paging=controls)
self.assert_has_valid_head(response, '1')
self.assert_has_valid_link(response, '/batches?head=1&id=0')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 1)
self.assert_batches_well_formed(response['data'], '0')
@unittest_run_loop
async def test_batch_list_paginated(self):
"""Verifies GET /batches paginated by min id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 1, and 4 total resources
- one batch with the id 'c'
It should send a Protobuf request with:
- paging controls with a count of 1, and a start_index of 1
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=1&count=1'
- paging that matches the response, with next and previous links
- a data property that is a list of 1 dict
- and that dict is a full batch with the id 'c'
"""
paging = Mocks.make_paging_response(1, 4)
batches = Mocks.make_batches('c')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=1&count=1')
controls = Mocks.make_paging_controls(1, start_index=1)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=1&count=1')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=2&count=1',
'/batches?head=d&min=0&count=1')
self.assert_has_valid_data_list(response, 1)
self.assert_batches_well_formed(response['data'], 'c')
@unittest_run_loop
async def test_batch_list_with_zero_count(self):
"""Verifies a GET /batches with a count of zero breaks properly.
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 53
"""
response = await self.get_assert_status('/batches?min=2&count=0', 400)
self.assert_has_valid_error(response, 53)
@unittest_run_loop
async def test_batch_list_with_bad_paging(self):
"""Verifies a GET /batches with a bad paging breaks properly.
It will receive a Protobuf response with:
- a status of INVALID_PAGING
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 54
"""
self.connection.preset_response(self.status.INVALID_PAGING)
response = await self.get_assert_status('/batches?min=-1', 400)
self.assert_has_valid_error(response, 54)
@unittest_run_loop
async def test_batch_list_paginated_with_just_count(self):
"""Verifies GET /batches paginated just by count works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 0, and 4 total resources
- two batches with the ids 'd' and 'c'
It should send a Protobuf request with:
- paging controls with a count of 2
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&count=2'
- paging that matches the response with a next link
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'd' and 'c'
"""
paging = Mocks.make_paging_response(0, 4)
batches = Mocks.make_batches('d', 'c')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?count=2')
controls = Mocks.make_paging_controls(2)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&count=2')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=2&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'd', 'c')
@unittest_run_loop
async def test_batch_list_paginated_without_count(self):
"""Verifies GET /batches paginated without count works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 2, and 4 total resources
- two batches with the ids 'b' and 'a'
It should send a Protobuf request with:
- paging controls with a start_index of 2
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=2'
- paging that matches the response, with a previous link
- a data property that is a list of 2 dicts
            - and those dicts are full batches with ids 'b' and 'a'
"""
paging = Mocks.make_paging_response(2, 4)
batches = Mocks.make_batches('b', 'a')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=2')
controls = Mocks.make_paging_controls(None, start_index=2)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=2')
self.assert_has_valid_paging(response, paging,
previous_link='/batches?head=d&min=0&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'b', 'a')
@unittest_run_loop
async def test_batch_list_paginated_by_min_id(self):
"""Verifies GET /batches paginated by a min id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with:
* a start_index of 1
* total_resources of 4
* a previous_id of 'd'
- three batches with the ids 'c', 'b' and 'a'
It should send a Protobuf request with:
- paging controls with a count of 5, and a start_id of 'c'
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&min=c&count=5'
- paging that matches the response, with a previous link
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids 'c', 'b', and 'a'
"""
paging = Mocks.make_paging_response(1, 4, previous_id='d')
batches = Mocks.make_batches('c', 'b', 'a')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?min=c&count=5')
controls = Mocks.make_paging_controls(5, start_id='c')
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&min=c&count=5')
self.assert_has_valid_paging(response, paging,
previous_link='/batches?head=d&max=d&count=5')
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], 'c', 'b', 'a')
@unittest_run_loop
async def test_batch_list_paginated_by_max_id(self):
"""Verifies GET /batches paginated by a max id works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with:
* a start_index of 1
* a total_resources of 4
* a previous_id of 'd'
* a next_id of 'a'
- two batches with the ids 'c' and 'b'
It should send a Protobuf request with:
- paging controls with a count of 2, and an end_id of 'b'
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
- a link property that ends in '/batches?head=d&max=b&count=2'
- paging that matches the response, with next and previous links
- a data property that is a list of 2 dicts
- and those dicts are full batches with ids 'c' and 'b'
"""
paging = Mocks.make_paging_response(1, 4, previous_id='d', next_id='a')
batches = Mocks.make_batches('c', 'b')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?max=b&count=2')
controls = Mocks.make_paging_controls(2, end_id='b')
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&max=b&count=2')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=a&count=2',
'/batches?head=d&max=d&count=2')
self.assert_has_valid_data_list(response, 2)
self.assert_batches_well_formed(response['data'], 'c', 'b')
@unittest_run_loop
async def test_batch_list_paginated_by_max_index(self):
"""Verifies GET /batches paginated by a max index works properly.
It will receive a Protobuf response with:
- a head id of 'd'
- a paging response with a start of 0, and 4 total resources
- three batches with the ids 'd', 'c' and 'b'
It should send a Protobuf request with:
            - paging controls with a count of 3, and a start_index of 0
It should send back a JSON response with:
- a response status of 200
- a head property of 'd'
            - a link property that ends in '/batches?head=d&max=2&count=7'
- paging that matches the response, with a next link
            - a data property that is a list of 3 dicts
- and those dicts are full batches with ids 'd', 'c', and 'b'
"""
paging = Mocks.make_paging_response(0, 4)
batches = Mocks.make_batches('d', 'c', 'b')
self.connection.preset_response(head_id='d', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?max=2&count=7')
controls = Mocks.make_paging_controls(3, start_index=0)
self.connection.assert_valid_request_sent(paging=controls)
self.assert_has_valid_head(response, 'd')
self.assert_has_valid_link(response, '/batches?head=d&max=2&count=7')
self.assert_has_valid_paging(response, paging,
'/batches?head=d&min=3&count=7')
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], 'd', 'c', 'b')
@unittest_run_loop
async def test_batch_list_sorted(self):
"""Verifies GET /batches can send proper sort controls.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'header_signature'
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=header_signature'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=header_signature')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('header_signature')
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=header_signature')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_with_bad_sort(self):
"""Verifies a GET /batches with a bad sort breaks properly.
It will receive a Protobuf response with:
            - a status of INVALID_SORT
It should send back a JSON response with:
- a response status of 400
- an error property with a code of 57
"""
self.connection.preset_response(self.status.INVALID_SORT)
response = await self.get_assert_status('/batches?sort=bad', 400)
self.assert_has_valid_error(response, 57)
@unittest_run_loop
async def test_batch_list_sorted_with_nested_keys(self):
"""Verifies GET /batches can send proper sort controls with nested keys.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with keys of 'header' and 'signer_pubkey'
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link ending in '/batches?head=2&sort=header.signer_pubkey'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200(
'/batches?sort=header.signer_pubkey')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('header', 'signer_pubkey')
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=header.signer_pubkey')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_sorted_in_reverse(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'header_signature' that is reversed
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=-header_signature'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=-header_signature')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls(
'header_signature', reverse=True)
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=-header_signature')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
@unittest_run_loop
async def test_batch_list_sorted_by_length(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '0', '1', and '2'
It should send a Protobuf request with:
- empty paging controls
- sort controls with a key of 'transactions' sorted by length
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- a link property ending in '/batches?head=2&sort=transactions.length'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '0', '1', and '2'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('0', '1', '2')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200('/batches?sort=transactions.length')
page_controls = Mocks.make_paging_controls()
sorting = Mocks.make_sort_controls('transactions', compare_length=True)
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=transactions.length')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '0', '1', '2')
@unittest_run_loop
async def test_batch_list_sorted_by_many_keys(self):
"""Verifies a GET /batches can send proper sort parameters.
It will receive a Protobuf response with:
- a head id of '2'
- a paging response with a start of 0, and 3 total resources
- three batches with ids '2', '1', and '0'
It should send a Protobuf request with:
- empty paging controls
- multiple sort controls with:
* a key of 'header_signature' that is reversed
* a key of 'transactions' that is sorted by length
It should send back a JSON response with:
- a status of 200
- a head property of '2'
- link with '/batches?head=2&sort=-header_signature,transactions.length'
- a paging property that matches the paging response
- a data property that is a list of 3 dicts
- and those dicts are full batches with ids '2', '1', and '0'
"""
paging = Mocks.make_paging_response(0, 3)
batches = Mocks.make_batches('2', '1', '0')
self.connection.preset_response(head_id='2', paging=paging, batches=batches)
response = await self.get_assert_200(
'/batches?sort=-header_signature,transactions.length')
page_controls = Mocks.make_paging_controls()
sorting = (Mocks.make_sort_controls('header_signature', reverse=True) +
Mocks.make_sort_controls('transactions', compare_length=True))
self.connection.assert_valid_request_sent(
paging=page_controls,
sorting=sorting)
self.assert_has_valid_head(response, '2')
self.assert_has_valid_link(response,
'/batches?head=2&sort=-header_signature,transactions.length')
self.assert_has_valid_paging(response, paging)
self.assert_has_valid_data_list(response, 3)
self.assert_batches_well_formed(response['data'], '2', '1', '0')
class BatchGetTests(BaseApiTest):
async def get_application(self, loop):
self.set_status_and_connection(
Message.CLIENT_BATCH_GET_REQUEST,
client_pb2.ClientBatchGetRequest,
client_pb2.ClientBatchGetResponse)
handlers = self.build_handlers(loop, self.connection)
return self.build_app(loop, '/batches/{batch_id}', handlers.fetch_batch)
@unittest_run_loop
async def test_batch_get(self):
"""Verifies a GET /batches/{batch_id} works properly.
It should send a Protobuf request with:
- a batch_id property of '1'
It will receive a Protobuf response with:
- a batch with an id of '1'
It should send back a JSON response with:
- a response status of 200
- no head property
- a link property that ends in '/batches/1'
- a data property that is a full batch with an id of '1'
"""
self.connection.preset_response(batch=Mocks.make_batches('1')[0])
response = await self.get_assert_200('/batches/1')
self.connection.assert_valid_request_sent(batch_id='1')
self.assertNotIn('head', response)
self.assert_has_valid_link(response, '/batches/1')
self.assertIn('data', response)
self.assert_batches_well_formed(response['data'], '1')
@unittest_run_loop
async def test_batch_get_with_validator_error(self):
"""Verifies GET /batches/{batch_id} w/ validator error breaks properly.
It will receive a Protobuf response with:
- a status of INTERNAL_ERROR
It should send back a JSON response with:
- a status of 500
- an error property with a code of 10
"""
self.connection.preset_response(self.status.INTERNAL_ERROR)
response = await self.get_assert_status('/batches/1', 500)
self.assert_has_valid_error(response, 10)
@unittest_run_loop
async def test_batch_get_with_bad_id(self):
"""Verifies a GET /batches/{batch_id} with unfound id breaks properly.
It will receive a Protobuf response with:
- a status of NO_RESOURCE
It should send back a JSON response with:
- a response status of 404
- an error property with a code of 71
"""
self.connection.preset_response(self.status.NO_RESOURCE)
response = await self.get_assert_status('/batches/bad', 404)
self.assert_has_valid_error(response, 71)
|
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import errno
import re
from collections import namedtuple
import argparse
import time
DS_CIFAR = 'cifar10'
distiller_root = os.path.realpath('..')
examples_root = os.path.join(distiller_root, 'examples')
script_path = os.path.realpath(os.path.join(examples_root, 'classifier_compression',
'compress_classifier.py'))
###########
# Some Basic Logging Mechanisms
###########
class Colors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
WHITE = '\033[37m'
BG_RED = '\033[41m'
BG_GREEN = '\033[42m'
BG_YELLOW = '\033[43m'
BG_BLUE = '\033[44m'
BG_PURPLE = '\033[45m'
BG_CYAN = '\033[30;46m'
BG_WHITE = '\x1b[30;47m'
BG_RESET = '\033[49m'
BOLD = '\033[1m'
UNDERLINE_ON = '\033[4m'
UNDERLINE_OFF = '\033[24m'
END = '\033[0m'
def colorize(string, color):
return color + string + Colors.END
def error(string):
print(colorize('ERROR: ' + string, Colors.RED))
def test_progress(string):
print(colorize(string, Colors.BLUE))
def success(string):
print(colorize(string, Colors.GREEN))
###########
# Checkers
###########
def compare_values(name, expected, actual):
print('Comparing {0}: Expected = {1} ; Actual = {2}'.format(name, expected, actual))
if expected != actual:
error('Mismatch on {0}'.format(name))
return False
else:
return True
def accuracy_checker(log, run_dir, expected_top1, expected_top5):
tops = re.findall(r"Top1: (?P<top1>\d*\.\d*) *Top5: (?P<top5>\d*\.\d*)", log)
if not tops:
error('No accuracy results in log')
return False
if not compare_values('Top-1', expected_top1, float(tops[-1][0])):
return False
return compare_values('Top-5', expected_top5, float(tops[-1][1]))
def collateral_checker(log, run_dir, *collateral_list):
"""Test that the test produced the expected collaterals.
A collateral_list is a list of tuples, where tuple elements are:
0: file name
1: expected file size
"""
def relaxed_size_equal(a, b, relaxation):
        return abs(a - b) <= relaxation
for collateral in collateral_list:
file_path = os.path.join(run_dir, collateral[0])
statinfo = os.stat(file_path)
if not relaxed_size_equal(statinfo.st_size, collateral[1], 2):
return False
return True
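# Illustrative call (added, not part of the original script; mirrors the
# resnet20 sensitivity config below): passes when 'sensitivity.csv' in run_dir
# is within 2 bytes of the expected size, per relaxed_size_equal above.
#
#   collateral_checker(log, run_dir, ('sensitivity.csv', 3175))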
###########
# Test Configurations
###########
TestConfig = namedtuple('TestConfig', ['args', 'dataset', 'checker_fn', 'checker_args'])
test_configs = [
TestConfig('--arch simplenet_cifar --epochs 2', DS_CIFAR, accuracy_checker, [44.460, 91.230]),
TestConfig('-a resnet20_cifar --resume {0} --quantize-eval --evaluate --qe-clip-acts avg --qe-no-clip-layers {1}'.
format(os.path.join(examples_root, 'ssl', 'checkpoints', 'checkpoint_trained_dense.pth.tar'), 'fc'),
DS_CIFAR, accuracy_checker, [91.64, 99.63]),
TestConfig('-a preact_resnet20_cifar --epochs 2 --compress {0}'.
format(os.path.join('full_flow_tests', 'preact_resnet20_cifar_pact_test.yaml')),
DS_CIFAR, accuracy_checker, [44.370, 89.640]),
TestConfig('-a resnet20_cifar --resume {0} --sense=filter --sense-range 0 0.10 0.05'.
format(os.path.join(examples_root, 'ssl', 'checkpoints', 'checkpoint_trained_dense.pth.tar')),
DS_CIFAR, collateral_checker, [('sensitivity.csv', 3175), ('sensitivity.png', 96157)])
]
###########
# Tests Execution
###########
def process_failure(msg, test_idx, cmd, log_path, failed_tests, log):
error(msg)
if not log_path:
test_progress('Log file not created. Full output from test:')
print(log)
else:
test_progress('Test log file: {0}'.format(colorize(log_path, Colors.YELLOW)))
failed_tests.append((test_idx, cmd, log_path))
def validate_dataset_path(path, default, name):
if path:
path = os.path.expanduser(path)
if not os.path.isdir(path):
error("Path provided to {0} dataset doesn't exist".format(name))
exit(1)
return path
test_progress('Path to {0} dataset not provided, defaulting to: {1}'.format(name,
colorize(os.path.abspath(default),
Colors.WHITE)))
try:
os.makedirs(default)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return default
def run_tests():
parser = argparse.ArgumentParser()
parser.add_argument('--cifar10-path', dest='cifar10_path', metavar='DIR', help='Path to CIFAR-10 dataset')
args = parser.parse_args()
cifar10_path = validate_dataset_path(args.cifar10_path, default='data.cifar10', name='CIFAR-10')
datasets = {DS_CIFAR: cifar10_path}
total_configs = len(test_configs)
failed_tests = []
for idx, tc in enumerate(test_configs):
print('')
test_progress('-------------------------------------------------')
test_progress('Running Test {0} / {1}'.format(idx + 1, total_configs))
dataset_dir = datasets[tc.dataset]
# Run with '--det -j 1' to ensure deterministic results
# Run with single GPU (lowest common denominator...)
cmd = 'python3 {script} {tc_args} --det -j 1 --gpus 0 {data}'.format(script=script_path, tc_args=tc.args,
data=dataset_dir)
test_progress('Executing command: ' + colorize(cmd, Colors.YELLOW))
p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
# Poll for completion
waiting_chars = ['-', '\\', '|', '/']
cnt = 0
while p.poll() is None:
print(waiting_chars[cnt] * 5, end='\r', flush=True)
cnt = (cnt + 1) % len(waiting_chars)
time.sleep(0.5)
log = p.stdout.read()
log_path = re.match(r"Log file for this run: (.*)", log)
log_path = log_path.groups()[0] if log_path else ''
if p.returncode != 0:
process_failure('Command returned with exit status {0}'.
format(p.returncode), idx, cmd, log_path, failed_tests, log)
continue
test_progress('Running checker: ' + colorize(tc.checker_fn.__name__, Colors.YELLOW))
if not tc.checker_fn(log, os.path.split(log_path)[0], *tc.checker_args):
process_failure('Checker failed', idx, cmd, log_path, failed_tests, log)
continue
success('TEST PASSED')
test_progress('Test log file: {0}'.format(colorize(log_path, Colors.YELLOW)))
print('')
test_progress('-------------------------------------------------')
test_progress('-------------------------------------------------')
test_progress('All tests completed')
    test_progress('# Tests run: {0} ; # Tests passed: {1} ; # Tests failed: {2}'.
format(total_configs, total_configs - len(failed_tests), len(failed_tests)))
if failed_tests:
print('')
print(colorize('Failed tests summary:', Colors.RED))
for idx, cmd, log_path in failed_tests:
print(colorize(' Test Index:', Colors.YELLOW), idx + 1)
print(colorize(' Command Line:', Colors.YELLOW), cmd)
print(colorize(' Log File Path:', Colors.YELLOW), log_path)
exit(1)
print('')
success('ALL TESTS PASSED')
print('')
exit(0)
if __name__ == '__main__':
run_tests()
|
"""
Django settings for ecssweb project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'secret_key'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SERVER_EMAIL = 'ecssweb@example.com'
EMAIL_SUBJECT_PREFIX = '[ECSSWEB] '
ADMINS = [('Example', 'example@example.com')]
ALLOWED_HOSTS = ['localhost']
# Sites
SITE_ID = 1
# Set to None to use session-based CSRF cookies
# https://docs.djangoproject.com/en/2.0/ref/settings/#csrf-cookie-age
CSRF_COOKIE_AGE = None
CSRF_COOKIE_SECURE = False
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'website.apps.WebsiteConfig',
'ecsswebauth.apps.EcsswebauthConfig',
'ecsswebadmin.apps.EcsswebadminConfig',
'portal.apps.PortalConfig',
'feedback.apps.FeedbackConfig',
'auditlog.apps.AuditlogConfig',
'fbevents.apps.FbeventsConfig',
'jumpstart.apps.JumpstartConfig',
'shop.apps.ShopConfig',
'election.apps.ElectionConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecssweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecssweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Auth
AUTHENTICATION_BACKENDS = [
'ecsswebauth.backends.SamlBackend',
'django.contrib.auth.backends.ModelBackend',
]
LOGIN_REDIRECT_URL = 'portal:overview'
LOGIN_URL = 'ecsswebauth:auth'
LOGOUT_REDIRECT_URL = 'ecsswebauth:auth'
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Sessions
SESSION_COOKIE_SECURE = False
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Logging
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'console': {
# 'class': 'logging.StreamHandler',
# },
# 'mail_admins': {
# 'level': 'ERROR',
# 'class': 'django.utils.log.AdminEmailHandler',
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console', 'mail_admins'],
# 'level': 'WARN',
# 'propagate': True,
# },
# },
# }
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# SAML
# SAML config file folders
SAML_FOLDER = os.path.join(BASE_DIR, 'ecsswebauth', 'saml_config')
SAML_GROUP_PREFIX = 'saml_'
# FB
FB_PAGE_ID = ''
FB_ACCESS_TOKEN = ''
# Face Detection
FACE_DETECT_ENABLED = False
FACE_DETECT_API = ''
|
class Constants:
SERVICE_ID = "Manga Scraper"
SETTINGS_FILE = "settings.json"
BASE_URI = "https://api.mangadex.org"
|
#!/usr/bin/python
from PyQt5.QtWidgets import QWidget, QLabel, QMessageBox, QTableWidget, QAbstractItemView, QPushButton, QCheckBox, \
QVBoxLayout, QDialog, QDialogButtonBox, QTableWidgetItem, QHeaderView, QFormLayout, \
QTabWidget, QLineEdit, QScrollArea, QListWidget
from PyQt5.QtCore import Qt
from convenience.server_apis import make_http_request
from ui_widgets.user.user_vehicles import UserVehicles
from entities.user import User
from entities.vehicle import Vehicle
from entities.user_category import UserCategory
from entities.vehicle_type import VehicleType
class DetailsDialog(QDialog):
def __init__(self, user, https_session, parent = None):
super().__init__(parent=parent)
self.user = user
self.https_session = https_session
self.initUI()
def initUI(self):
self.state_changed = False
self.setWindowTitle("User details")
QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
buttonBox = QDialogButtonBox(QBtn)
buttonBox.accepted.connect(self.edit_user)
buttonBox.rejected.connect(self.reject)
layout = QVBoxLayout()
tab_widget = QTabWidget()
update_info_widget = QWidget()
form_layout = QFormLayout()
self.password_le = QLineEdit()
form_layout.addRow("<b>Password: </b> ", self.password_le)
self.disability_cb = QCheckBox()
self.disability_cb.setChecked(self.user.get_disability())
self.disability_cb.stateChanged.connect(self.set_state_changed)
form_layout.addRow("<b>Disability: </b> ", self.disability_cb)
self.active_account_cb = QCheckBox()
self.active_account_cb.setChecked(self.user.get_active_account())
self.active_account_cb.stateChanged.connect(self.set_state_changed)
form_layout.addRow("<b>Active account: </b> ", self.active_account_cb)
self.delete_btn = QPushButton("Delete")
self.delete_btn.clicked.connect(self.delete_user)
form_layout.addRow("<b>Delete user: </b> ", self.delete_btn)
update_info_widget.setLayout(form_layout)
tab_widget.addTab(update_info_widget, "User info")
tab_widget.addTab(UserVehiclesWidget(self.user, self.https_session), "Vehicles")
layout.addWidget(tab_widget)
layout.addSpacing(20)
layout.addWidget(buttonBox)
self.setLayout(layout)
def set_state_changed(self):
self.state_changed = True
def edit_user(self):
if self.state_changed or self.password_le.text() != "":
if self.password_le.text() != "" and len(self.password_le.text()) < 8:
QMessageBox.information(self, "uPark tip", "Password must contains at least 8 characters!")
return
user = {
"disability": self.disability_cb.isChecked(),
"password": None if self.password_le.text() == "" else self.password_le.text(),
"active_account": self.active_account_cb.isChecked()
}
response = make_http_request(self.https_session, "put", "users/" + str(self.user.get_id()), json = user)
if response:
QMessageBox.information(self, "Server response", response.text)
self.accept()
return
self.reject()
def delete_user(self):
        reply = QMessageBox.question(self, 'Delete user', f"Are you sure you want to delete {self.user.get_name()}?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
response = make_http_request(self.https_session, "delete", "users/" + str(self.user.get_id()))
if response:
QMessageBox.information(self, "Server response", response.text)
self.accept()
class UserVehiclesWidget(QWidget):
def __init__(self, user, https_session, parent = None):
super().__init__(parent=parent)
self.user = user
self.https_session = https_session
self.initUI()
def initUI(self):
self.vbox = QVBoxLayout()
self.scroll_area = QScrollArea()
self.scroll_area.setWidgetResizable(True)
self.scroll_area_content = QWidget()
self.vbox_grp = QVBoxLayout()
UserVehicles.get_user_vehicles(self) # return self.user_vehicles
UserVehicles.get_vehicle_types(self) # return self.vehicle_types
self.user_vehicle_types_id = {vehicle.get_id_vehicle_type() for vehicle in self.user_vehicles} # set comprehension; no order
self.user_vehicle_types_id = sorted(self.user_vehicle_types_id)
for vehicle_type_id in self.user_vehicle_types_id:
try:
vehicle_type_index = self.vehicle_types.index(VehicleType(id = vehicle_type_id))
except ValueError:
continue
else:
vehicle_type = self.vehicle_types[vehicle_type_index]
vehicle_type_label = QLabel(f"Type: <b>{vehicle_type.get_name()}</b>")
self.vbox_grp.addWidget(vehicle_type_label, 1, Qt.AlignBottom | Qt.AlignHCenter)
vehicle_list = QListWidget()
vehicle_list.setStyleSheet("margin-bottom: 20px;")
vehicle_list.setFocus(Qt.MouseFocusReason)
filtered_vehicles = [f"{vehicle.get_license_plate()} - {vehicle.get_brand()} - {vehicle.get_model()}" for vehicle in self.user_vehicles if vehicle.get_id_vehicle_type() == vehicle_type_id]
vehicle_list.addItems(filtered_vehicles)
self.vbox_grp.addWidget(vehicle_list, 1, Qt.AlignTop | Qt.AlignVCenter)
self.scroll_area_content.setLayout(self.vbox_grp)
self.scroll_area.setWidget(self.scroll_area_content)
self.vbox.addWidget(self.scroll_area, 8)
self.setLayout(self.vbox)
self.show()
class UserManagement(QWidget):
def __init__(self, https_session):
super().__init__()
self.https_session = https_session
self.initUI()
def initUI(self):
user_info = ["Email", "Name", "Surname", "Password", "Wallet", "Disability", "Active account", "User category"]
vbox_main = QVBoxLayout()
title = "User management"
title_lbl = QLabel(title)
title_lbl.setStyleSheet("font-family: Ubuntu; font-size: 30px;")
vbox_main.addWidget(title_lbl, 1, Qt.AlignTop | Qt.AlignHCenter)
self.users_table = QTableWidget(0, len(user_info)+1)
hheader_labels = list(user_info)
hheader_labels.append("Details")
self.users_table.setHorizontalHeaderLabels(hheader_labels)
        self.users_table.setEditTriggers(QAbstractItemView.NoEditTriggers)  # not editable
        self.users_table.horizontalHeader().setStretchLastSection(True)
vbox_main.addWidget(self.users_table, 9)
vbox_main.addStretch(1)
self.setLayout(vbox_main)
self.setWindowTitle(title)
self.show()
def get_user_categories(self):
response = make_http_request(self.https_session, "get", "user_categories")
if response.json():
self.user_categories = [UserCategory(**user_category) for user_category in response.json()]
else:
self.user_categories = []
def get_user_category(self, user):
try:
id_user_category = user.get_id_user_category()
user_category_index = self.user_categories.index(UserCategory(id = id_user_category))
return self.user_categories[user_category_index].get_name()
except ValueError:
return "N/A"
def get_users(self):
self.users_table.clearContents()
self.users_table.setRowCount(0)
admin_row = None
response = make_http_request(self.https_session, "get", "users")
if response.json():
self.users = [User(**user) for user in response.json()]
else:
self.users = []
for row, user in enumerate(self.users):
user_methods = [user.get_email, user.get_name, user.get_surname, user.get_password, user.get_wallet,
user.get_disability, user.get_active_account, self.get_user_category]
self.users_table.insertRow(row)
for column in range(self.users_table.columnCount() - 1):
method = user_methods[column]
                if column != len(user_methods) - 1:
item = QTableWidgetItem(str(method()))
item.setTextAlignment(Qt.AlignCenter)
self.users_table.setItem(row, column, item)
else:
user_category_name = method(user)
if user_category_name == "Admin":
admin_row = row
item = QTableWidgetItem(user_category_name)
item.setTextAlignment(Qt.AlignCenter)
self.users_table.setItem(row, column, item)
for row in range(self.users_table.rowCount()):
item = QPushButton("Show")
self.users_table.setCellWidget(row, self.users_table.columnCount() - 1, item)
            item.clicked.connect(lambda _, row=row: self.show_user_details(row))
        if admin_row is not None:
self.users_table.removeRow(admin_row)
def show_user_details(self, row):
user = self.users[row]
details_dialog = DetailsDialog(user, self.https_session, self)
# if clicked OK button
if details_dialog.exec_():
self.get_users()
def showEvent(self, event):
self.get_user_categories()
self.get_users()
|
from django.db import models
class Library(models.Model):
name = models.CharField(max_length=45, default="나의 서재")
class Meta:
db_table = "libraries"
class Shelf(models.Model):
name = models.CharField(max_length=100)
library = models.ForeignKey(
Library, on_delete=models.CASCADE, related_name="shelves"
)
class Meta:
db_table = "shelves"
|
import tensorflow as tf
def preprocess_image(image_data, centroid, bbox_size, cropsize):
"""
    Performs preprocessing on a single image for feeding into the network.
:param image_data: raw image data as a bytestring
:param centroid: the center of the bounding box to crop to, in pixel coordinates
:param bbox_size: the side length of the bbox to crop to, in pixels
:param cropsize: the output size of the cropped image
:return: the decoded image cropped to a [bbox_size, bbox_size] square centered around centroid
and resized to [cropsize, cropsize]
"""
image = tf.io.decode_image(image_data, channels=3)
# this rescales inputs to the range [-1, 1], which should be what the model expects
image = tf.keras.applications.mobilenet_v2.preprocess_input(
tf.cast(image, tf.float32)
)
# ensure types
bbox_size = tf.cast(bbox_size, tf.float32)
centroid = tf.cast(centroid, tf.float32)
# convert to [0, 1] relative coordinates
imdims = tf.cast(tf.shape(image)[:2], tf.float32)
centroid /= imdims
bbox_size /= imdims # will broadcast to shape [2]
# crop to (bbox_size, bbox_size) centered around centroid and resize to (cropsize, cropsize)
bbox_size /= 2
image = tf.squeeze(
tf.image.crop_and_resize(
tf.expand_dims(image, 0),
[
[
centroid[0] - bbox_size[0],
centroid[1] - bbox_size[1],
centroid[0] + bbox_size[0],
centroid[1] + bbox_size[1],
]
],
[0],
[cropsize, cropsize],
extrapolation_value=-1,
)
)
image = tf.ensure_shape(image, [cropsize, cropsize, 3])
return imdims, image
def encode_displacement_field(keypoints, dfdims):
"""
:param keypoints: a shape (b, n, 2) Tensor with N keypoints normalized to (-1, 1)
:param dfdims: a shape [2] Tensor with the dimensions of the displacement field
:return: a shape (b, height, width, 2n) Tensor
"""
delta = 2 / tf.convert_to_tensor(dfdims, dtype=tf.float32)
y_range = tf.range(-1, 1, delta[0]) + (delta[0] / 2)
x_range = tf.range(-1, 1, delta[1]) + (delta[1] / 2)
mgrid = tf.stack(
tf.meshgrid(y_range, x_range, indexing="ij"), axis=-1
) # shape (y, x, 2)
df = keypoints[:, :, None, None, :] - mgrid # shape (b, n, y, x, 2)
df = tf.transpose(df, [0, 2, 3, 1, 4]) # shape (b, y, x, n, 2)
return tf.reshape(df, [tf.shape(keypoints)[0], dfdims[0], dfdims[1], -1])
def decode_displacement_field(df):
"""
:param df: a shape (b, height, width, 2n) displacement field
:return: a shape (b, height * width, n, 2) tensor where each keypoint has height * width predictions
"""
dfdims = tf.shape(df)[1:3]
df = tf.reshape(
df, [tf.shape(df)[0], dfdims[0], dfdims[1], -1, 2]
) # shape (b, y, x, n, 2)
delta = tf.cast(2 / dfdims, tf.float32)
y_range = tf.range(-1, 1, delta[0]) + (delta[0] / 2)
x_range = tf.range(-1, 1, delta[1]) + (delta[1] / 2)
mgrid = tf.stack(
tf.meshgrid(y_range, x_range, indexing="ij"), axis=-1
) # shape (y, x, 2)
keypoints = df + mgrid[:, :, None, :] # shape (b, y, x, n, 2)
return tf.reshape(
keypoints, [tf.shape(df)[0], dfdims[0] * dfdims[1], -1, 2]
) # shape (b, y*x, n, 2)
def preprocess_keypoints(parsed_kps, centroid, bbox_size, img_size, nb_keypoints):
"""Normalizes keypoints to the [-1, 1] range for training"""
keypoints = tf.reshape(parsed_kps, [-1, 2])[:nb_keypoints]
keypoints *= img_size
keypoints = (keypoints - centroid) / (bbox_size / 2)
return tf.reshape(keypoints, [nb_keypoints * 2])
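# Round-trip sketch (added for illustration, not part of the original module):
# encode_displacement_field stores "keypoint - cell center" at every spatial
# cell, so decode_displacement_field adds the centers back and each of the
# y*x cells predicts the keypoint exactly; averaging over the predictions
# therefore recovers the input keypoints.
if __name__ == "__main__":
    kps = tf.constant([[[0.1, -0.2], [0.5, 0.5]]], dtype=tf.float32)  # (b=1, n=2, 2)
    df = encode_displacement_field(kps, [8, 8])   # shape (1, 8, 8, 4)
    decoded = decode_displacement_field(df)       # shape (1, 64, 2, 2)
    print(tf.reduce_mean(decoded, axis=1))        # ~= kps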
|
"""Resources module."""
import datetime
import enum
import os
import pathlib
import shutil
import typing as t
import urllib.request as request
from contextlib import closing
from io import BytesIO
import attr
import numpy as np
import pandas as pd
import requests
import xarray as xr
from .units import format_missing_carats_units
from .units import ureg
from .util import to_data_set
@attr.s
class Resource:
"""Resource class.
This class represents a data set publicly available on the Web.
The class provides a link between the Web resource and the actual data set.
It provides methods to:
* save the data set in a cache
* fetch the data set from the cache
* fetch the data set from the Web
"""
name: str = attr.ib()
url: t.Union[str, t.List[str]] = attr.ib() # may be one or multiple URLs
transform: t.Callable[[t.Union[str, t.List[str]]], xr.Dataset] = attr.ib()
@property
def cache_path(self) -> pathlib.Path:
"""Path to data set in cache."""
cache_dir = pathlib.Path(".tengen_cache/")
filename = f"{self.name}.nc"
return cache_dir / filename
@property
def in_cache(self) -> bool:
"""``True`` if the resource is in the cache, ``False`` otherwise."""
return self.cache_path.exists()
def fetch_from_web(self) -> xr.Dataset:
"""Fetch the data set from the Web.
If the data set is not already in the cache, it is added to the cache.
Returns
-------
:class:`~xarray.Dataset`:
Data set.
"""
dataset = self.transform(self.url)
if not self.in_cache:
dataset.to_netcdf(self.cache_path)
return dataset
def fetch_from_cache(self) -> xr.Dataset:
"""Fetch the data set from the cache.
Returns
-------
:class:`~xarray.Dataset`:
Data set.
"""
if self.in_cache:
ds = xr.open_dataset(self.cache_path) # type: ignore[no-untyped-call]
return ds # type: ignore[no-any-return]
else:
raise ValueError("data set is not in the cache.")
def get(self) -> xr.Dataset:
"""Get the data set.
Try to fetch the resource from the Web.
In case of a connection error, fetch from the cache.
Raises
------
ValueError
            If the data set could not be fetched from either the Web or the
            cache. This can happen, for example, when the data set cannot be
            fetched from the Web due to a connection error and the cache does
            not already include the data set.
Returns
-------
:class:`~xarray.Dataset`:
Data set.
"""
try:
return self.fetch_from_web()
except requests.ConnectionError as e:
if self.in_cache:
return self.fetch_from_cache()
else:
raise ValueError(
"could not fetch data set from the Web and could not fetch "
"from the cache because data set is in not in the cache"
) from e
# ------------------------------------------------------------------------------
# Thuillier (2003)
# ------------------------------------------------------------------------------
THUILLIER_2003_URL = "https://oceancolor.gsfc.nasa.gov/docs/rsr/f0.txt"
def transform_thuillier_2003(url: t.Union[str, t.List[str]]) -> xr.Dataset:
"""Transform function for Thuillier (2003).
Transform the HTTP response to :class:`xarray.Dataset` for the Thuillier
(2003) solar irradiance spectrum data set.
Parameters
----------
url: str or list of str
URL.
Returns
-------
:class:`xarray.Dataset`
Thuillier (2003) solar irradiance spectrum data set.
"""
response = requests.get(str(url))
data = np.loadtxt( # type: ignore[no-untyped-call]
fname=BytesIO(response.content),
comments=["/", "!"],
)
w = data[:, 0] * ureg.nm
ssi = ureg.Quantity(data[:, 1], "microwatt/cm^2/nm")
attrs = dict(
title="Thuillier (2003) solar irradiance spectrum",
institution=(
"Service d'Aéronomie du CNRS, F91371, Verrières-le-Buisson, France."
),
source=(
"Combined observations from the SOLSPEC instrument during "
"the ATLAS-1 mission (from 1992-03-24 to 1992-04-02) and the SOSP "
"instrument onboard the EURECA satellite (from 1992-8-7 to "
"1993-7-1), with the Kurucz and Bell (1995) synthetic "
"spectrum"
),
references="https://doi.org/10.1023/A:1024048429145",
)
return to_data_set(
w=w,
ssi=ssi,
attrs=attrs,
url=str(url),
)
thuillier_2003 = Resource(
name="thuillier_2003",
url=THUILLIER_2003_URL,
transform=transform_thuillier_2003,
)
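# Illustrative usage (added): get() tries the Web first and falls back to the
# local cache, so repeated calls keep working offline once the file is cached.
#
#   ds = thuillier_2003.get()   # -> xarray.Dataset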
# ------------------------------------------------------------------------------
# WHI (2008)
# ------------------------------------------------------------------------------
WHI_2008_URL = (
"https://lasp.colorado.edu/lisird/resources/whi_ref_spectra/data/"
"ref_solar_irradiance_whi-2008_ver2.dat"
)
WHI_2008_TIME_PERIOD = {
"sunspot active": (datetime.date(2008, 3, 25), datetime.date(2008, 3, 29)),
"faculae active": (datetime.date(2008, 3, 29), datetime.date(2008, 4, 4)),
"quiet sun": (datetime.date(2008, 4, 10), datetime.date(2008, 4, 16)),
}
def transform_whi_2008(
identifier: str,
) -> t.Callable[[t.Union[str, t.List[str]]], xr.Dataset]:
"""Creates the WHI (2008) transform method.
.. list-table::
:widths: 1 1
:header-rows: 1
* - Time period
- File name
* - 2008-03-25 - 2008-03-29
- ``whi_2008_1``
* - 2008-03-29 - 2008-04-04
- ``whi_2008_2``
* - 2008-04-10 - 2008-04-16
- ``whi_2008_3``
Parameters
----------
identifier: str
WHI (2008) spectrum variant identifier.
Returns
-------
callable
Associated transform method.
"""
def f(url: t.Union[str, t.List[str]]) -> xr.Dataset:
        r = requests.get(str(url))
data = np.loadtxt( # type: ignore[no-untyped-call]
fname=BytesIO(r.content),
comments=";",
skiprows=142,
)
wavelength = data[:, 0]
mask = wavelength > 116.0
time_period = WHI_2008_TIME_PERIOD[identifier]
start, end = time_period
time_period_index = list(WHI_2008_TIME_PERIOD.keys()).index(identifier)
return to_data_set(
ssi=ureg.Quantity(data[mask, time_period_index], "W/m^2/nm"),
w=ureg.Quantity(wavelength[mask], "nm"),
url=WHI_2008_URL,
attrs=dict(
                title=f"Whole Heliosphere Interval (WHI) solar "
                f"irradiance reference spectrum (2008) for time "
                f"period {time_period} ('{identifier}' spectrum)",
source=f"Combination of satellite observations from "
f"the SEE and SORCE instruments (from {start} to {end}) "
f"onboard the TIMED satellite and a prototype EVE "
f"instrument onboard a sounding rocket launched on "
f"2008-04-14.",
ref="https://doi.org/10.1029/2008GL036373",
observation_period=" to ".join(
[x.strftime("%Y-%m-%d") for x in [start, end]]
),
                comment="The original data covers the range from 0.05 to "
                "2399.95 nm; the present data set includes only the part of "
                "the original data where the wavelength > 116 nm.",
),
)
return f
whi_2008_sunspot_active = Resource(
name="whi_2008_sunspot_active",
url=WHI_2008_URL,
transform=transform_whi_2008(identifier="sunspot active"),
)
whi_2008_faculae_active = Resource(
name="whi_2008_faculae_active",
url=WHI_2008_URL,
transform=transform_whi_2008(identifier="faculae active"),
)
whi_2008_quiet_sun = Resource(
name="whi_2008_quiet_sun",
url=WHI_2008_URL,
transform=transform_whi_2008(identifier="quiet sun"),
)
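# Illustrative sketch (the mapping below is hypothetical, not part of the
# module): the three WHI (2008) resources differ only in the time-period
# identifier passed to transform_whi_2008, so a small lookup makes the
# selection explicit.
#
#     _WHI_2008_BY_IDENTIFIER = {
#         "sunspot active": whi_2008_sunspot_active,
#         "faculae active": whi_2008_faculae_active,
#         "quiet sun": whi_2008_quiet_sun,
#     }
#     ds = _WHI_2008_BY_IDENTIFIER["quiet sun"].get()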
# ------------------------------------------------------------------------------
# Meftah (2018)
# ------------------------------------------------------------------------------
MEFTAH_2018_URL = "http://cdsarc.u-strasbg.fr/ftp/J/A+A/611/A1/spectrum.dat.gz"
def transform_meftah_2018(url: t.Union[str, t.List[str]]) -> xr.Dataset:
"""Transform function for Meftah (2018).
Transform the HTTP response to :class:`xarray.Dataset` for the Meftah
(2018) solar irradiance spectrum data set.
Parameters
----------
url: str or list of str
URL.
Returns
-------
:class:`xarray.Dataset`
Meftah (2018) solar irradiance spectrum data set.
"""
filename = "spectrum.dat.gz"
with closing(request.urlopen(str(url))) as r: # noqa: S310
with open(filename, "wb") as f:
shutil.copyfileobj(r, f)
data = np.genfromtxt( # type: ignore[no-untyped-call]
fname=filename,
missing_values="---",
filling_values=np.nan,
)
wavelength = data[:, 0]
spectral_irradiance = data[:, 1]
# The raw data covers the 0.5 to 3000.10 nm range whereas the range
# indicated by Meftah (2018) in:
# https://doi.org/10.1051/0004-6361/201731316
# is 165 to 3000 nm.
    # Therefore, we ignore wavelengths < 165 nm and keep the 3000.10 nm point.
mask = wavelength >= 165.0
start = datetime.date(2008, 4, 5)
end = datetime.date(2016, 12, 31)
ds = to_data_set(
ssi=ureg.Quantity(spectral_irradiance[mask], "W/m^2/nm"),
w=ureg.Quantity(wavelength[mask], "nm"),
url=MEFTAH_2018_URL,
attrs=dict(
title="Meftah et al (2018) solar irradiance reference spectrum",
source=(
"Observations from the SOLSPEC instrument of the SOLAR payload "
"onboard the international space station"
),
ref="https://doi.org/10.1051/0004-6361/201731316",
observation_period=" to ".join(
[x.strftime("%Y-%m-%d") for x in [start, end]]
),
),
)
os.remove(filename)
return ds
meftah_2018 = Resource(
name="meftah_2018",
url=MEFTAH_2018_URL,
transform=transform_meftah_2018,
)
# ------------------------------------------------------------------------------
# SOLID (2017)
# ------------------------------------------------------------------------------
SOLID_2017_FTP_FOLDER = (
"ftp://ftp.pmodwrc.ch/pub/projects/SOLID/database/composite_published/"
"SOLID_1978_published/"
)
SOLID_2017_FILES = [
"solid_0_100.nc",
"solid_100_100.nc",
"solid_200_100.nc",
"solid_300_100.nc",
"solid_400_100.nc",
"solid_500_100.nc",
"solid_600_100.nc",
"solid_700_100.nc",
"solid_800_100.nc",
"solid_900_100.nc",
"solid_1000_100.nc",
"solid_1100_100.nc",
"solid_1200_100.nc",
"solid_1300_100.nc",
"solid_1400_100.nc",
"solid_1500_100.nc",
"solid_1600_100.nc",
"solid_1700_100.nc",
"solid_1800_100.nc",
"solid_1900_100.nc",
]
SOLID_2017_URL = [SOLID_2017_FTP_FOLDER + file for file in SOLID_2017_FILES]
def transform_solid_2017(url: t.Union[str, t.List[str]]) -> xr.Dataset:
"""Transform function for SOLID (2017).
Transform the HTTP response to :class:`xarray.Dataset` for the SOLID
(2017) solar irradiance spectrum data set.
Parameters
----------
url: str or list of str
URL.
Returns
-------
:class:`xarray.Dataset`
SOLID (2017) solar irradiance spectrum data set.
"""
filenames = []
for x in url:
with closing(request.urlopen(x)) as r: # noqa: S310
filename = x.split("/")[-1]
filenames.append(filename)
with open(filename, "wb") as f:
shutil.copyfileobj(r, f)
ds = xr.open_mfdataset("solid_*.nc") # type: ignore[no-untyped-call]
end = datetime.date(2014, 12, 31)
start = end - datetime.timedelta(ds.time.size - 1)
formatted = to_data_set(
w=ureg.Quantity(ds.wavelength.values, ds.wavelength.units),
t=pd.date_range(start, end),
ssi=ureg.Quantity(
ds.data.values.transpose(), format_missing_carats_units(ds.data.units)
),
url=SOLID_2017_FTP_FOLDER,
attrs=dict(
title="SOLID solar irradiance composite spectrum",
source="Combined original SSI observations from 20 different"
" instruments",
observation_period=" to ".join(
[x.strftime("%Y-%m-%d") for x in [start, end]]
),
ref="https://doi.org/10.1002/2016JA023492",
),
)
for filename in filenames:
os.remove(filename)
return formatted
solid_2017 = Resource(
name="solid_2017", url=SOLID_2017_URL, transform=transform_solid_2017
)
# ------------------------------------------------------------------------------
# Coddington (2021)
# ------------------------------------------------------------------------------
def transform_coddington_2021(url: t.Union[str, t.List[str]]) -> xr.Dataset:
"""Transform function for Coddington (2021).
Transform the HTTP response to :class:`xarray.Dataset` for the Coddington
(2021) solar irradiance spectrum data set.
Parameters
----------
url: str or list of str
URL.
Returns
-------
:class:`xarray.Dataset`
Coddington (2021) solar irradiance spectrum data set.
"""
response = requests.get(str(url))
raw = xr.open_dataset(BytesIO(response.content), engine="h5netcdf") # type: ignore
w = ureg.Quantity(raw["Vacuum Wavelength"].values, raw["Vacuum Wavelength"].units)
ssi = ureg.Quantity(
raw["SSI"].values, format_missing_carats_units(raw["SSI"].units)
)
attrs = dict(
title="TSIS-1 Hybrid Solar Reference Spectrum (HSRS)",
institution="Laboratory for Atmospheric and Space Physics",
source=(
"TSIS-1 Spectral Irradiance Monitor (SIM), CubeSat Compact SIM "
"(CSIM), Air Force Geophysical Laboratory ultraviolet solar "
"irradiance balloon observations, ground-based Quality Assurance "
"of Spectral Ultraviolet Measurements In Europe Fourier transform "
"spectrometer solar irradiance observations, Kitt Peak National "
"Observatory solar transmittance atlas and the semi-empirical "
"Solar Pseudo-Transmittance Spectrum atlas."
),
references="https://doi.org/10.1029/2020GL091709",
)
return to_data_set(w=w, ssi=ssi, attrs=attrs, url=str(url))
class Coddington2021Resolution(enum.Enum):
"""Coddington (2021) spectral resolution variant enumeration."""
HIGH_RESOLUTION = ""
ZP005 = "p005nm_resolution_"
ZP025 = "p025nm_resolution_"
ZP1 = "p1nm_resolution_"
Z1 = "1nm_resolution_"
def coddington_2021_url(
resolution: Coddington2021Resolution = Coddington2021Resolution.HIGH_RESOLUTION,
) -> str:
"""Get URL corresponding to Coddington (2021) spectral resolution variant.
Parameters
----------
resolution: Coddington2021Resolution
Coddington (2021) spectral resolution variant.
"""
root_url = "https://lasp.colorado.edu/lisird/resources/lasp/hsrs/"
filename = f"hybrid_reference_spectrum_{resolution.value}c2021-03-04_with_unc.nc"
    return f"{root_url}{filename}"
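# For reference, a sketch of the URLs the helper above produces (derived from
# the f-string; whether these exact files exist on the LASP server is not
# verified here):
#
#     coddington_2021_url(Coddington2021Resolution.HIGH_RESOLUTION)
#     # -> .../lasp/hsrs/hybrid_reference_spectrum_c2021-03-04_with_unc.nc
#     coddington_2021_url(Coddington2021Resolution.Z1)
#     # -> .../lasp/hsrs/hybrid_reference_spectrum_1nm_resolution_c2021-03-04_with_unc.nc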
coddington_2021_high_resolution = Resource(
name="coddington_2021-high_resolution",
url=coddington_2021_url(resolution=Coddington2021Resolution.HIGH_RESOLUTION),
transform=transform_coddington_2021,
)
coddington_2021_p005 = Resource(
name="coddington_2021-p005",
url=coddington_2021_url(resolution=Coddington2021Resolution.ZP005),
transform=transform_coddington_2021,
)
coddington_2021_p025 = Resource(
name="coddington_2021-p025",
url=coddington_2021_url(resolution=Coddington2021Resolution.ZP025),
transform=transform_coddington_2021,
)
coddington_2021_p1 = Resource(
name="coddington_2021-p1",
url=coddington_2021_url(resolution=Coddington2021Resolution.ZP1),
transform=transform_coddington_2021,
)
coddington_2021_1 = Resource(
name="coddington_2021-1",
url=coddington_2021_url(resolution=Coddington2021Resolution.Z1),
transform=transform_coddington_2021,
)
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class L10nLatamIdentificationType(models.Model):
_inherit = "l10n_latam.identification.type"
l10n_ar_afip_code = fields.Char("AFIP Code")
|
#!/usr/bin/env python
import rospy
import roslib
import serial
from time import time, sleep
from std_msgs.msg import String, Float64
from sensor_msgs.msg import Image, Imu
import tf
#from geometry_msgs.msg import PoseStamped
#TODO need to look at the signs for the pitch, yaw, and thrust commands
class SerialBridge():
#MEASURE_TOPIC = "measurements"
#IMU_TOPIC = "imu_data"
#COMPASS_TOPIC = "angle_to_true_north"
#COMMAND_TOPIC = "command"
def __init__(self, mbedPort='/dev/serial0', mbedBaud = 115200, mbedUpdateInterval=1.25):
#rospy.loginfo("Serial node started.")
print('Serial node started.')
# self.imu_pub = rospy.Publisher("imu_data", Imu, queue_size=1)
# self.compass_pub = rospy.Publisher("angle_to_true_north", Float64, queue_size=1)
# rospy.Subscriber("command", String, self.callback)
self.cmd_arr_order = ['start', 'pitch', 'yaw', 'thrust', 'frequency']
self._mbedSerial = serial.Serial(mbedPort, baudrate=mbedBaud, timeout=0, bytesize=serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE)
self._mbedUpdateInterval = mbedUpdateInterval
self.CMD_MAX = 255
self.CMD_MIN = 1
self.buf = []
self.incomingStringLength = 31
self.cmd_received = False
self.pitch = 128
self.yaw = 128
self.thrust = 255
self.frequency = 128
self.DO_NOTHING = [255,135,135,1,85]
def writeCmdArray(self, cmd):
bytecmds = self.safeCmdToBytes(cmd)
self.writeBytes(bytecmds)
def writeBytes(self, bytecmds):
self._mbedSerial.write(bytecmds)
if bytecmds[-1] != 0:
self._mbedSerial.write(bytearray([0]))
self._mbedSerial.flush()
def safeCmdToBytes(self, cmd, cmdType='byteArray', nullTerminate=False):
if cmdType == "byteArray":
for i,val in enumerate(cmd):
cmd[i] = max(min(cmd[i],self.CMD_MAX),self.CMD_MIN)
elif cmdType == "dict":
            for k in self.cmd_arr_order:
cmd[k] = max(min(cmd[k], self.CMD_MAX), self.CMD_MIN)
return self.cmdToBytes(cmd, cmdType, nullTerminate)
def cmdToBytes(self, cmd, cmdType='byteArray', nullTerminate=False):
if cmdType == "dict":
res = [cmd[cmd_key] for cmd_key in self.cmd_arr_order]
else:
res = cmd
assert(len(res) == len(self.cmd_arr_order))
if nullTerminate:
res.append(0)
return bytearray(res)
def writeOnce(self, cmd):
self._mbedSerial.flushInput()
self._mbedSerial.flushOutput()
self.writeBytes(cmd)
def convert(self, s):
str1 = ""
return(str1.join(s))
def parseSensorData(self, data):
data = data.replace(" ","")
arr = data.split(",")
values = [float(i) for i in arr]
return values
# def listen(self):
# while not rospy.is_shutdown():
# if self._mbedSerial.inWaiting():
# #bytesToRead = self._mbedSerial.inWaiting()
# x = self._mbedSerial.read_until()
# self.buf.append(x)
# if len(self.buf) > self.incomingStringLength:
# self._mbedSerial.flush()
# msg = self.convert(self.buf)
# data = self.parseSensorData(msg)
# rospy.loginfo(data)
# quat_array = tf.transformations.quaternion_from_euler(data[1], data[0], data[2])
# imu_msg = Imu()
# imu_msg.orientation.w = quat_array[0]
# imu_msg.orientation.x = quat_array[1]
# imu_msg.orientation.y = quat_array[2]
# imu_msg.orientation.z = quat_array[3]
# self.imu_pub.publish(imu_msg)
# angle_to_true_north = Float64()
# angle_to_true_north.data = data[3]
# self.compass_pub.publish(angle_to_true_north)
# self.buf = []
def heading_callback(self, ros_data):
self.cmd_received = True
self.yaw = int(128 + (127 *ros_data.data))
def pitch_callback(self, ros_data):
self.pitch = int(128 - (127 * ros_data.data))
def thrust_callback(self, ros_data):
self.thrust = int(128 - (127 * ros_data.data))
#self.frequency = ?? (should frequency be dependent on the distance between the fish and the target?)
def write(self):
rate = rospy.Rate(24)
while not rospy.is_shutdown():
#if new commands have been received, write an updated cmd array, otherwise write (do nothing)
if self.cmd_received:
arr = [255, self.pitch, self.yaw, self.thrust, self.frequency]
self.writeOnce(arr)
#print(arr)
#rospy.loginfo(arr)
self.cmd_received = False
#else:
# self.writeOnce(self.DO_NOTHING)
rate.sleep()
if __name__ == '__main__':
import sys
# update_hz = 24
rospy.init_node('serial', anonymous=True)
piSerial = SerialBridge()
rospy.Subscriber('heading_cmd', Float64, piSerial.heading_callback)
rospy.Subscriber('pitch_cmd', Float64, piSerial.pitch_callback)
rospy.Subscriber('thrust_cmd', Float64, piSerial.thrust_callback)
print("\nSerial Node: Beginning at 24hz")
piSerial.write()
print("\nSerial Node: done\n")
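# Standalone sketch (not executed by the node): how a command dictionary maps
# to the 5-byte packet written over serial, given the clamping and ordering in
# safeCmdToBytes/cmdToBytes above. The values below are illustrative.
#
#     bridge = SerialBridge()            # opens /dev/serial0 at 115200 baud
#     cmd = {'start': 255, 'pitch': 300, 'yaw': 0, 'thrust': 128, 'frequency': 85}
#     bridge.safeCmdToBytes(cmd, cmdType='dict')
#     # -> bytearray([255, 255, 1, 128, 85]): each value clamped to [1, 255],
#     #    ordered as [start, pitch, yaw, thrust, frequency]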
|
#!/usr/bin/python3
import sys
blocks = []
#
# blocks is an array of paths under which we want to block by
# default.
#
# blocks[0] = ['path' = '/sys', 'children' = [A,B] ]
# blocks[1] = ['path' = '/proc/sys', 'children' = [ E ] ]
# A = [ 'path' = 'fs', children = [C] ]
# C = [ 'path' = 'cgroup', children = [F] ]
# B = [ 'path' = 'class', children = [D] ]
# D = [ 'path' = 'net', children = [F] ]
# E = [ 'path' = 'shm*' ]
# F = [ 'path' = '**' ]
def add_block(path):
for b in blocks:
if b['path'] == path:
# duplicate
return
blocks.append({'path': path.strip(), 'children': []})
# @prev is an array of dicts which containing 'path' and
# 'children'. @path is a string. We are looking for an entry
# in @prev which contains @path, and will return its
# children array.
def child_get(prev, path):
for p in prev:
if p['path'] == path:
return p['children']
return None
def add_allow(path):
# find which block we belong to
found = None
for b in blocks:
l = len(b['path'])
if len(path) <= l:
continue
# TODO - should we find the longest match?
if path[0:l] == b['path']:
found = b
break
if found is None:
print("allow with no previous block at %s" % path)
sys.exit(1)
p = path[l:].strip()
while p[:1] == "/":
p = p[1:]
    prev = found['children']
for s in p.split('/'):
n = {'path': s.strip(), 'children': []}
tmp = child_get(prev, n['path'])
if tmp is not None:
prev = tmp
else:
prev.append(n)
prev = n['children']
config = "config"
if len(sys.argv) > 1:
config = sys.argv[1]
with open(config) as f:
for x in f.readlines():
        x = x.strip()
if x[:1] == '#':
continue
try:
(cmd, path) = x.split(' ')
except: # blank line
continue
if cmd == "block":
add_block(path)
elif cmd == "allow":
add_allow(path)
else:
print("Unknown command: %s" % cmd)
sys.exit(1)
denies = []
def collect_chars(children, ref, index):
r = ""
for c in children:
if index >= len(c['path']):
continue
if ref[0:index] != c['path'][0:index]:
continue
if c['path'][index] not in r:
r = r + c['path'][index]
return r
def append_deny(s):
s = "%s wklx," % s
if s not in denies:
denies.append(s)
def gen_denies(pathsofar, children):
for c in children:
for char in range(len(c['path'])):
if char == len(c['path'])-1 and c['path'][char] == '*':
continue
if char == len(c['path'])-2:
if c['path'][char:char+2] == '**':
continue
x = collect_chars(children, c['path'], char)
newdeny = "deny %s/%s[^%s]*{,/**}" % (pathsofar,
c['path'][0:char], x)
append_deny(newdeny)
if c['path'] != '**' and c['path'][len(c['path'])-1] != '*':
newdeny = "deny %s/%s?*{,/**}" % (pathsofar, c['path'])
append_deny(newdeny)
elif c['path'] != '**':
newdeny = "deny %s/%s/**" % (pathsofar, c['path'])
append_deny(newdeny)
if len(c['children']) != 0:
newpath = "%s/%s" % (pathsofar, c['path'])
gen_denies(newpath, c['children'])
for b in blocks:
gen_denies(b['path'], b['children'])
denies.sort()
genby = " # generated by: lxc-generate-aa-rules.py"
for a in sys.argv[1:]:
genby += " %s" % a
print(genby)
for d in denies:
print(" %s" % d)
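# Example input, as a sketch of the expected config format (paths are
# illustrative): each "block" line starts a default-deny subtree and each
# "allow" line punches a hole beneath a previously declared block.
#
#     block /sys
#     allow /sys/class/net/**
#     block /proc/sys
#     allow /proc/sys/shm*
#
# Parsing the "allow" lines builds the nested children structure described in
# the comment near the top of this script, which gen_denies() then walks to
# emit "deny ... wklx," AppArmor rules for everything outside the allowed
# paths.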
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import io
import random
import paddle
from paddle.io import IterableDataset
class RecDataset(IterableDataset):
def __init__(self, file_list, config):
super(RecDataset, self).__init__()
self.file_list = file_list
self.config = config
self.init()
self.item_count = config.get("hyper_parameters.item_count")
def init(self):
self.res = []
self.max_len = 0
self.neg_candidate_item = []
self.neg_candidate_cat = []
self.max_neg_item = 10000
self.max_neg_cat = 1000
for file in self.file_list:
with open(file, "r") as fin:
for line in fin:
line = line.strip().split(';')
hist = line[0].split()
self.max_len = max(self.max_len, len(hist))
fo = open("tmp.txt", "w")
fo.write(str(self.max_len))
fo.close()
self.batch_size = self.config.get("runner.train_batch_size")
self.item_count = self.config.get("hyper_parameters.item_count", 63001)
self.cat_count = self.config.get("hyper_parameters.cat_count", 801)
self.group_size = (self.batch_size) * 20
def __iter__(self):
file_dir = self.file_list
res0 = []
for train_file in file_dir:
with open(train_file, "r") as fin:
for line in fin:
line = line.strip().split(';')
if len(line) != 5:
continue
hist = line[0].split()
tmp = [int(x) for x in hist]
if max(tmp) > self.item_count:
continue
cate = line[1].split()
tmp = [int(x) for x in cate]
if max(tmp) > self.cat_count:
continue
res0.append([hist, cate, line[2], line[3], float(line[4])])
data_set = res0
random.seed(12345)
random.shuffle(data_set)
reader, batch_size, group_size = data_set, self.batch_size, self.group_size
bg = []
for line in reader:
bg.append(line)
if len(bg) == group_size:
sortb = sorted(bg, key=lambda x: len(x[0]), reverse=False)
bg = []
for i in range(0, group_size, batch_size):
b = sortb[i:i + batch_size]
max_len = max(len(x[0]) for x in b)
if max_len < 2:
continue
itemInput = [x[0] for x in b]
itemRes0 = np.array(
[x + [0] * (max_len - len(x)) for x in itemInput])
item = itemRes0.astype("int64").reshape([-1, max_len])
catInput = [x[1] for x in b]
catRes0 = np.array(
[x + [0] * (max_len - len(x)) for x in catInput])
cat = catRes0.astype("int64").reshape([-1, max_len])
len_array = [len(x[0]) for x in b]
mask = np.array([[0] * x + [-1e9] * (max_len - x) for x in
len_array]).reshape([-1, max_len, 1])
target_item_seq = np.array(
[[x[2]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len])
target_cat_seq = np.array(
[[x[3]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len])
neg_item = [None] * len(item)
neg_cat = [None] * len(cat)
for i in range(len(b)):
neg_item[i] = []
neg_cat[i] = []
if len(self.neg_candidate_item) < self.max_neg_item:
self.neg_candidate_item.extend(b[i][0])
                            if len(self.neg_candidate_item) > self.max_neg_item:
self.neg_candidate_item = self.neg_candidate_item[
0:self.max_neg_item]
else:
len_seq = len(b[i][0])
start_idx = random.randint(
0, self.max_neg_item - len_seq - 1)
self.neg_candidate_item[start_idx:start_idx +
len_seq + 1] = b[i][0]
if len(self.neg_candidate_cat) < self.max_neg_cat:
self.neg_candidate_cat.extend(b[i][1])
if len(self.neg_candidate_cat) > self.max_neg_cat:
self.neg_candidate_cat = self.neg_candidate_cat[
0:self.max_neg_cat]
else:
len_seq = len(b[i][1])
start_idx = random.randint(
0, self.max_neg_cat - len_seq - 1)
                            self.neg_candidate_cat[
                                start_idx:start_idx + len_seq + 1] = b[i][1]
for _ in range(max_len):
neg_item[i].append(self.neg_candidate_item[
random.randint(
0, len(self.neg_candidate_item) - 1)])
for _ in range(max_len):
neg_cat[i].append(self.neg_candidate_cat[
random.randint(
0, len(self.neg_candidate_cat) - 1)])
for i in range(len(b)):
res = []
# res0 = []
res.append(np.array(item[i]))
res.append(np.array(cat[i]))
res.append(np.array(int(b[i][2])))
res.append(np.array(int(b[i][3])))
res.append(np.array(b[i][4]).astype('float32'))
res.append(np.array(mask[i]).astype('float32'))
res.append(np.array(target_item_seq[i]))
res.append(np.array(target_cat_seq[i]).astype('int64'))
res.append(np.array(neg_item[i]).astype('int64'))
res.append(np.array(neg_cat[i]).astype('int64'))
yield res
len_bg = len(bg)
if len_bg != 0:
sortb = sorted(bg, key=lambda x: len(x[0]), reverse=False)
bg = []
remain = len_bg % batch_size
for i in range(0, len_bg - remain, batch_size):
b = sortb[i:i + batch_size]
max_len = max(len(x[0]) for x in b)
if max_len < 2: continue
itemInput = [x[0] for x in b]
itemRes0 = np.array(
[x + [0] * (max_len - len(x)) for x in itemInput])
item = itemRes0.astype("int64").reshape([-1, max_len])
catInput = [x[1] for x in b]
catRes0 = np.array(
[x + [0] * (max_len - len(x)) for x in catInput])
cat = catRes0.astype("int64").reshape([-1, max_len])
len_array = [len(x[0]) for x in b]
mask = np.array([[0] * x + [-1e9] * (max_len - x) for x in
len_array]).reshape([-1, max_len, 1])
target_item_seq = np.array(
[[x[2]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len])
target_cat_seq = np.array(
[[x[3]] * max_len for x in b]).astype("int64").reshape(
[-1, max_len])
neg_item = [None] * len(item)
neg_cat = [None] * len(cat)
for i in range(len(b)):
neg_item[i] = []
neg_cat[i] = []
if len(self.neg_candidate_item) < self.max_neg_item:
self.neg_candidate_item.extend(b[i][0])
if len(self.neg_candidate_item) > self.max_neg_item:
self.neg_candidate_item = self.neg_candidate_item[
0:self.max_neg_item]
else:
len_seq = len(b[i][0])
start_idx = random.randint(
0, self.max_neg_item - len_seq - 1)
self.neg_candidate_item[start_idx:start_idx + len_seq +
1] = b[i][0]
if len(self.neg_candidate_cat) < self.max_neg_cat:
self.neg_candidate_cat.extend(b[i][1])
if len(self.neg_candidate_cat) > self.max_neg_cat:
self.neg_candidate_cat = self.neg_candidate_cat[
0:self.max_neg_cat]
else:
len_seq = len(b[i][1])
start_idx = random.randint(
0, self.max_neg_cat - len_seq - 1)
                        self.neg_candidate_cat[
                            start_idx:start_idx + len_seq + 1] = b[i][1]
for _ in range(max_len):
neg_item[i].append(self.neg_candidate_item[
random.randint(0, len(self.neg_candidate_item) -
1)])
for _ in range(max_len):
neg_cat[i].append(self.neg_candidate_cat[
random.randint(0, len(self.neg_candidate_cat) -
1)])
for i in range(len(b)):
res = []
res.append(np.array(item[i]))
res.append(np.array(cat[i]))
res.append(np.array(int(b[i][2])))
res.append(np.array(int(b[i][3])))
res.append(np.array(b[i][4]).astype('float32'))
res.append(np.array(mask[i]).astype('float32'))
res.append(np.array(target_item_seq[i]))
res.append(np.array(target_cat_seq[i]))
res.append(np.array(neg_item[i]).astype('int64'))
res.append(np.array(neg_cat[i]).astype('int64'))
yield res
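# Input format sketch (inferred from the parsing in __iter__; the sample line
# is illustrative): each data line is expected to carry five ';'-separated
# fields,
#
#     <history item ids> ; <history category ids> ; <target item> ; <target category> ; <label>
#     e.g. "1 2 3;4 5 6;7;8;1.0"
#
# and each yielded sample is a 10-element list: padded item/category history,
# target item, target category, float label, additive mask (0 for real
# positions, -1e9 for padding), repeated target item/category sequences, and
# negative item/category samples of length max_len.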
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Wrapper code for a recsim environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any
import attr
import core
@attr.s
class Params(core.Params):
"""Params object for recsim wrapper."""
recsim_env = attr.ib()
@attr.s(cmp=False)
class State(core.State):
"""State object for recsim wrapper."""
recsim_env = attr.ib()
observation = attr.ib(default=None)
is_done = attr.ib(default=False)
class RecsimWrapper(core.FairnessEnv):
"""Wraps a recsim environment as a FairnessEnv."""
def __init__(self, params=None):
"""Initializes RecsimWrapper."""
super(RecsimWrapper, self).__init__(
params,
initialize_observation_space=False,
init_action_space_random_state=False)
self.state = State(recsim_env=params.recsim_env)
# gym.Space.np_random is created lazily, make sure it is created here.
_ = self.action_space.np_random
# The use of @property here is intentional. RecsimGym objects have
# action_space and observation_space as properties because they are updated
# over the course of the simulation. In order to keep up to date with the
# current spaces, this wrapper must do the same thing.
@property
def action_space(self):
return self.state.recsim_env.action_space
@property
def observation_space(self):
return self.state.recsim_env.observation_space
def _step_impl(self, state, action):
obs, _, done, _ = state.recsim_env.step(action)
state.observation = obs
state.is_done = done
return state
def _get_observable_state(self):
return self.state.observation
def _is_done(self):
return self.state.is_done
def reset(self):
"""Resets the environment."""
observation = self.state.recsim_env.reset()
self.state.observation = observation
return observation
def wrap(environment):
"""Wrap a Recsim Environment to be an ML Fairness Gym Environment."""
return RecsimWrapper(Params(environment))
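# Minimal usage sketch (assumes an existing recsim environment instance; the
# public step() signature comes from the core.FairnessEnv base class, which is
# not shown here):
#
#     env = wrap(my_recsim_env)
#     obs = env.reset()
#     action = env.action_space.sample()
#     result = env.step(action)   # delegates to _step_impl above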
|
tree_map = """.......#................#......
...#.#.....#.##.....#..#.......
..#..#.#......#.#.#............
....#...#...##.....#..#.....#..
....#.......#.##......#...#..#.
...............#.#.#.....#..#..
...##...#...#..##.###...##.....
##..#.#...##.....#.#..........#
.#....#..#..#......#....#....#.
...........................#...
..........#.......#..#.....#.#.
..#.......###..#.#.......#.#...
....#..#....#....#..........#..
..##..#.......#.#...#..........
.....#.......#.....#....#......
..........##..#................
....##.#..###...#..##.....#.#..
..#..#.#.#...#......#...#.....#
....#.#....#...####.##.........
..#.........##...##.#..#..#....
.#......#...#..#..##.#.........
.#....#.......#..##..##..#.#.#.
...........#....#......#.......
..#....#....#...............#..
..#.....#....###.##.....#.#..#.
#..........#.#......#.#....#...
....###...#.#.....#....#.####.#
........#......#...#...#..##..#
...##..............##.#.......#
#..........#...........#.#....#
#...#....#..####..#............
###....#........#..............
...#.##....................#.##
...#..#.....#.....##...#....#..
.......###.#...#.........#.....
.#..#.....#.#..#.....#.........
#................#.............
...#......#.#.....##.#.#....#..
...#..#.#..#.....#...#....#....
.......#......#........#.....#.
.#.##..##.....#.#......#.#.#...
#...............#.....#....#...
.....#...........#..##.........
.....#..#........##..#..#.....#
..###.#.#.......#.#...........#
##....##....#.#....##...#.##.##
..................##.#.#.....#.
.#...........###...#...........
.#.#....#......#....###.#......
.......#.##...#...#..#.#.......
..#.....#.#....#..#............
.....#..#..#....#..#.........#.
..##.#......#.....#...#.#..#.#.
.........#......#....##.......#
#........#..#.#......#...#.#..#
...#....#.#..#....##.......###.
..#...#......#.##..........#...
........#..#..#...#.......#....
.##.#..#...#..#........#.#.####
#..#..#..........#....##...#...
....#...#........##........#...
.#......#.......#..#..#........
#...#.#......#....#............
#........#..##.#...##..........
...#..##.....#......##.#..#.#..
.#.#.....#.....#.####.#..##....
..........###....#.##...#......
.......#.......#..#.#.#.##.#..#
..#.#....#......#.#...#.......#
.#...#....#......#...#.........
.#....#..#....#.##.#....#..##..
...#..#.#..................#...
.##..#.............##.........#
...#.#.#................#.....#
...###..###..................#.
........##.##..#.#...#.....#...
.##...##...#...#....#...#......
#..#....#..#..#.#....#..####...
.#...............##....##.#....
#..#................#...#..#...
.#....#.....#..#.#........#....
...............##.#..##..##....
.#......#........#....#.#...#.#
.#.....#...##.#........#.##.#.#
..###............#..#.#....#...
..#.....#.........#....#..#.#..
.##.....#.#..........#.#....##.
...#...#....#..#......#.#.#..#.
#.....#...#....##...#.......##.
.......#.#.........##..........
............##.#.##...#.......#
.....#........##...#........#..
.#........#.#.#.#....#.........
#....#..#....#.#..#...#.#......
....##...........#...#...##.#.#
......#...##.###.....#.........
............#..##....##......#.
......##....#...#.#....#......#
#..#..#..#.#.#.........#...##.#
...#.........#...#.........##.#
#.#.....#.......#.##..#..#.....
##................#......#....#
....#..#.......#....##.....#...
.....#..#...#...#......#.#....#
..#....#.....#.........#.....#.
..#..#..........#.....#........
.......#..##.#......#.#........
.............##.....#....#.....
...#....#..#.#.#...............
........#....##..#...#........#
..##...............#.....#....#
........##.#.##.#......#..#....
..#.##.......#..........##..#..
.#..............#.#.##.........
.#.......#....#....#.#.#.......
.#.##.......#....#......###.#..
.......#...#............##.....
........#.#..........##..#.....
...###..#......#.....##..#..#..
...........##......#....#......
..............#....#..#..#.#..#
....#...#......#.##...#........
.#.............#..#......###.#.
#...#..#.#..............##..#.#
....................#.........#
..##..#......#.###.....#...#.#.
.#....#.#........#...#........#
..#....#.....#..............#..
##..........#..#..#...#........
...........#..##...#.......#...
........##.............#.......
#....#........#..#.#.###..#....
...........##..........##......
#......#.....##.#.##......##...
..#......#.........#.......#..#
......#.#....##..##.#...#.#...#
......#..................##....
...#....#.#...#.#.......##.....
#.#...##...##........#...##....
..#.......#.#.#...#............
.......#......#..#...#.........
#...#..#...........##..........
......#....#.........#.#....#..
#......#........#...#..##....#.
.....#.......##..#.#......#..#.
...........#......#...#......#.
#.#.##.....#....#.....##......#
.....##..#.#.#.###........#.#..
...#...#.#......#......#.......
......###....#..##...#.#.##....
#.....#.....#..................
...#...#......#...............#
..#............##..#.....#.....
.#....#...#...#...#...#..#.....
.##......#.........#.###.#.....
#.#.##.......##...#........##.#
.##.#.#......#.....#...#.....#.
....####.##.......#..##..##.#..
#.#.......#..##....###..#...#..
..#..#....#...#.#.#.#...#......
##.........#.##................
........#.....................#
..#...........#..#..##.#..#.#..
#...#...................#.###..
##..#............#.........#..#
...............##...#...##....#
#.#.....#..#.......#......#....
.#...#......#............#.....
#.......#...#..#....#.......#..
...#....#.##.#....#....#.#.....
...#..#..............#..#.#..#.
.........#.....#.#...#..#....#.
..#..#..#...##.....##.#.....#..
.#.#..........#........#.......
...............#........#.#.#..
.#......#.....#..............#.
........#.#..............#.#...
.......#.#....#..#.#.#..#.#.##.
...##..#...#.#..#...........#..
#...###.#.....#..#........#....
.#...##...##...##.#.....###....
.........#......#.#..##.#.#....
#....#.#..#...#.#.#....#..#..#.
.#.#...#......###.....#........
#.....#.#.......#..#.#...#.....
.................#.#....#..##..
#...........#....###..#......#.
##.#..#....#.#.#.#.............
#.....#..#...#........#........
..#..#......#..#.##.#..........
...#....#..#..........#.#.##.##
#........#...#.......#..##.#...
.#.#..#....#.#....#......#.....
##.......##.#........#...#..##.
##.##.....#.......#####.#....#.
..#..###.#.#..#....###..#.##..#
#.........#.............#.#...#
..#...##.#..................#..
.....#.#....#.#..#.#........#.#
......#.......#.#..##.#.#..#...
..#......#.#..##......#..#....#
..##..#..#.##.#..#....#...##...
###....#...##....##.........#..
#........##.........#......#..#
...#.........#......#.##.......
.....#.#.#....#......#.........
..#...........#....#......#.#..
##........#...##.....######....
....#..#..##.......#..#..#.....
..#....#..##....#......##....#.
...##....#........##......#....
.#.#...###...#......#..........
#....#..#.##.........#...#.....
......#..#.........#.##.....#..
...#............##....#......#.
...#.....##.....#........#.#..#
......#.#..#......#.....#..##..
#.#.........##..........#......
..###.....#..#....##..........#
.............##..#....#..##....
....#.#....##..#......#...#....
....###.....#..#.......#.......
............#..#...............
......#........#..#......#.....
.#........#.......#.##.......#.
..#.........#..#.#.....##....#.
...#.......#.......#.......##.#
#......##.#.....#......##.#..#.
#..........#.................#.
....#..##...........#.....#.#..
#.###...#............#.#....#.#
....#......#.#..###....##..#...
....#...#..........##..........
..#.#............#...#...###...
......#...#......#..#.#........
.#.......#..#...........##...#.
##...#...##....##.#..#..#.#....
.......#........#............##
.#......#...#.#................
#.#........#.#....#..#.##......
.......#.#...#....##.......##..
........#.#.#.........##..##...
..##...............#.#.###.#...
......#.#....#..#......##.....#
###.........#.....#.#.....##...
.#.#....#.....#.#.##..#.......#
..#..#.#......#...##..##.#..#..
...#........#..#....#..........
#...#.#...#..##....##..........
.........#........#.##....#..#.
..#...#.#.......##..........##.
###...........##.#......#.#..#.
...#....#...#..#..#......#.....
.....##.......###.#....###..##.
...#...#..........#.#......#...
....#.....##...##..#.#........#
.....#...#..#.....##...##....#.
................##.#.##....##.#
.#..#..#....#.....#....#..#...#
.....###.....#.................
#...#..##..#.........#.........
.....#..#................#.....
.#..#...#......#..#............
...#...#.#....#....##...#...##.
..........#....#.#..#.#.....#..
....#...###.##...#..#..#......#
#...#.......#..........#..#....
.#............#..##.......#...#
....#..#...#............#..#.#.
.#....#.......#..#.#......#....
...#...#............#...#.....#
....#.#.#..##.#.....#...#.#....
......#.#.#......#..#...#.....#
......##.....#.............#...
..#...#..#.#....#..............
.#.#..#....#.#..##....###.##...
..#...........#....#.###.#....#
.....#.........#.#.............
...#.#.....#......###......##..
...#...#.....#.................
...#..#...##.....##.........#..
..#...#..#..##..#...#........#.
##..#.#.##.#....#...........#..
.......#....##....#...##..#..#.
#.......##.#...##...##..#.....#
....#.#...............#......#.
....#.#...#.....#....#......#..
.#.........#.#....###........#.
.#.#.....#.....#.#.#....#.#....
............#...........#.#..##
#...#......#..#......#.#.......
...#.#.#.....#..#...#..##......
...#.#..#...#....#.........#.#.
........#..#......##.....#...#.
...#..#..............#..#......
.........#.......#...#......#..
.#......#.....#.....#......#...
......#.......#....#...#.#.....
.#.....#.##..#........#...#....
#.....##..##....#.#.......#..#.
.#..#...#..#.......#...........
..#..#...#.....##....#.....#...
#.#..............#....#..#.....
.........##...#......#.##...##.
.###...#.#...#.....#.........#.
.....#..........##...#..#....##
.#..#......#....##.#...#.......
.............###.#.#..#.#.#...#
.......#...##..#..#.....###....
##.......#...........#....#.#..
##......#...#.#................
.#.####..##.#...............#..
..#...#.#.#..#...#........#...#
.##..##.##.....#.......#..#.#..
...................#......#.#..
#.##..#..........#.............
##..#......#....#.#............
.#........#.....##...#.........
.##....#..#..##..........#...#.
#..........##........#..#..#.#.
####.###.#.....#....#..#.#....#
..#...#...#.#.......#....#...#.
......##.###..##.#.###......#.#"""
position = 0
trees = 0
for line in tree_map.split("\n"):
if line[position % len(line)] == "#":
trees += 1
position += 3
print(trees)
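# A small generalization sketch (hypothetical helper, same counting logic as
# the loop above) for arbitrary right/down slopes:
def count_trees(grid, right, down):
    # Walk every `down`-th row; on the i-th visited row the column is
    # i * right, wrapped around the row width.
    rows = grid.split("\n")
    return sum(
        1
        for i, row in enumerate(rows[::down])
        if row[(i * right) % len(row)] == "#"
    )
# count_trees(tree_map, 3, 1) reproduces the count printed above.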
|
from enum import Enum
class TaskResultStatus(Enum):
FAILURE = 50
WARNING = 40
SUCCESS = 30
INFO = 20
UNKNOWN = 10
def __str__(self):
return self.name
|
"""
Django models for qcm app.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/db/models/
"""
from .branch import Branch
from .choice import Choice
from .question import Question
from .questionsset import QuestionsSet
from .questionssubset import QuestionsSubset
from .training import Training
|
name = " yang yahu "
print(name)
print(name.lstrip() + "\n" + name.rstrip() + "\t" + name.strip())
|
import torch.nn as nn
import torch
import torch.nn.functional as F
# from torch_geometric.nn import GCNConv
#from torch_geometric.nn.conv.gatv2_conv import GATv2Conv as GATConv
# from torch_geometric.nn import SuperGATConv as GATConv
# from torch_geometric.nn import
from torch_geometric.nn.norm import LayerNorm
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GATConv
#from torch_geometric.nn.conv.gatv2_conv import GATv2Conv as GATConv
from torch_geometric.nn import TransformerConv as GATConv
from torch_geometric.nn import HGTConv
class GAT(nn.Module):
def __init__(self, meta):
super(GAT, self).__init__()
self.hid = 512
self.in_head = 8
self.out_head = 1
num_classes = 2
self.conv1 = HGTConv(-1, self.hid, meta, heads=self.in_head)
self.conv5 = HGTConv(-1, num_classes, meta,
heads=self.out_head)
# TODO (Nikhil, Saloni) -> Apply Edge loss on a different Attention head..
def forward(self, x, edge_index):
x = self.conv1(x, edge_index)
x = self.conv5(x, edge_index)
return x
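# Usage sketch (assumes a torch_geometric HeteroData object named `data`):
# because both layers are HGTConv, `x` and `edge_index` are expected to be the
# per-node-type / per-edge-type dictionaries rather than plain tensors.
#
#     model = GAT(meta=data.metadata())
#     out = model(data.x_dict, data.edge_index_dict)
#     # out maps each node type to a [num_nodes, 2] score tensor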
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_voc_wAnchor.py', '../_base_/datasets/voc0712OS.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=15)))
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[4, 6])
# runtime settings
runner = dict(
type='EpochBasedRunner', max_epochs=7)
|
from os import path, mkdir, listdir
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import tensorflow as tf
tf.set_random_seed(1)
import timeit
import cv2
from models import get_inception_resnet_v2_unet_softmax
from tqdm import tqdm
test_folder = path.join('..', 'data_test')
models_folder = 'nn_models'
test_pred = path.join('..', 'predictions', 'inception_test_pred_4')
all_ids = []
all_images = []
all_masks = []
def preprocess_inputs(x):
x = np.asarray(x, dtype='float32')
x /= 127.5
x -= 1.
return x
def bgr_to_lab(img):
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
lab = clahe.apply(lab[:, :, 0])
if lab.mean() > 127:
lab = 255 - lab
return lab[..., np.newaxis]
if __name__ == '__main__':
t0 = timeit.default_timer()
if not path.isdir(test_pred):
mkdir(test_pred)
print('Loading models')
models = []
for it in range(4):
model = get_inception_resnet_v2_unet_softmax((None, None), weights=None)
model.load_weights(path.join(models_folder, 'inception_resnet_v2_weights_{0}.h5'.format(it)))
models.append(model)
print('Predicting test')
for d in tqdm(listdir(test_folder)):
if not path.isdir(path.join(test_folder, d)):
continue
final_mask = None
for scale in range(3):
fid = d
img = cv2.imread(path.join(test_folder, fid, 'images', '{0}.png'.format(fid)), cv2.IMREAD_COLOR)
if final_mask is None:
final_mask = np.zeros((img.shape[0], img.shape[1], 3))
if scale == 1:
img = cv2.resize(img, None, fx=0.75, fy=0.75)
elif scale == 2:
img = cv2.resize(img, None, fx=1.25, fy=1.25)
elif scale == 3:
img = cv2.resize(img, None, fx=1.5, fy=1.5)
x0 = 16
y0 = 16
x1 = 16
y1 = 16
if (img.shape[1] % 32) != 0:
x0 = int((32 - img.shape[1] % 32) / 2)
x1 = (32 - img.shape[1] % 32) - x0
x0 += 16
x1 += 16
if (img.shape[0] % 32) != 0:
y0 = int((32 - img.shape[0] % 32) / 2)
y1 = (32 - img.shape[0] % 32) - y0
y0 += 16
y1 += 16
img0 = np.pad(img, ((y0,y1), (x0,x1), (0, 0)), 'symmetric')
img0 = np.concatenate([img0, bgr_to_lab(img0)], axis=2)
inp0 = []
inp1 = []
for flip in range(2):
for rot in range(4):
if flip > 0:
img = img0[::-1, ...]
else:
img = img0
if rot % 2 == 0:
inp0.append(np.rot90(img, k=rot))
else:
inp1.append(np.rot90(img, k=rot))
inp0 = np.asarray(inp0)
inp0 = preprocess_inputs(inp0)
inp1 = np.asarray(inp1)
inp1 = preprocess_inputs(inp1)
mask = np.zeros((img0.shape[0], img0.shape[1], 3))
for model in models:
pred0 = model.predict(inp0, batch_size=1)
pred1 = model.predict(inp1, batch_size=1)
j = -1
for flip in range(2):
for rot in range(4):
j += 1
if rot % 2 == 0:
pr = np.rot90(pred0[int(j / 2)], k=(4-rot))
else:
pr = np.rot90(pred1[int(j / 2)], k=(4-rot))
if flip > 0:
pr = pr[::-1, ...]
mask += pr
mask /= (8 * len(models))
mask = mask[y0:mask.shape[0]-y1, x0:mask.shape[1]-x1, ...]
if scale > 0:
mask = cv2.resize(mask, (final_mask.shape[1], final_mask.shape[0]))
final_mask += mask
final_mask /= 3
final_mask = final_mask * 255
final_mask = final_mask.astype('uint8')
cv2.imwrite(path.join(test_pred, '{0}.png'.format(fid)), final_mask, [cv2.IMWRITE_PNG_COMPRESSION, 9])
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60))
|
#this class will encapsulate all the necessary logic to extract our 3D HSV color histogram from our images
#import the necessary packages
import numpy as np #numerical processing
import cv2 #opencv
import imutils #check opencv ver.
class ColorDescriptor:
def __init__(self, bins):
#store the number of bins for the 3D histogram
self.bins = bins
def describe(self, image):
        #convert the image to the HSV color space and initialize
#the features used to quantify the image
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
features = [] #initialize list of features
#grab the dimensions and compute the center of the image
(h,w) = image.shape[:2]
(cX, cY) = (int(w * 0.5), int(h * 0.5))
#divide the image into four rectangles/segments (top-left, top-right, bottom-right, bottom-left)
segments = [(0, cX, 0 , cY),(cX, w, 0, cY),(cX, w, cY, h),(0, cX, cY, h)]
#construct an elliptical mask representing the center of image
(axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2) #ellipse radius that is 75% of the width and height of the image
ellipMask = np.zeros(image.shape[:2], dtype = "uint8") #fill ellipse with zeroes
cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1) #draw ellipse
#loop over the segments
for (startX, endX, startY, endY) in segments:
#construct a mask for each corner of the image, subtracting the elliptical center from it
cornerMask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
cornerMask = cv2.subtract(cornerMask, ellipMask)
#extract a color histogram from the image, then update the feature vector
hist = self.histogram(image, cornerMask)
features.extend(hist)
#extract a color histogram from the elliptical region and update the feature vector
hist = self.histogram(image, ellipMask)
features.extend(hist)
#return the feature vector
return features
def histogram(self, image, mask):
#extract a 3D color histogram from the masked region of the image, using the supplied number of bins per channel
hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 180, 0, 256, 0, 256])
#normalize the histogram if we are using OpenCV 2.4
if imutils.is_cv2():
#print('2.4')
hist = cv2.normalize(hist).flatten()
#otherwise handle for OpenCV 3+
else:
#hist = cv2.normalize(hist, hist).flatten()
#print('3')
cv2.normalize(hist, hist)
hist = hist.flatten()
#return the histogram
return hist
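# Usage sketch (the bin counts are an illustrative choice, not prescribed by
# this class): describe() returns one flattened, normalized 3D HSV histogram
# per region (four corners plus the center ellipse), concatenated.
#
#     cd = ColorDescriptor((8, 12, 3))   # 8 hue, 12 saturation, 3 value bins
#     image = cv2.imread("query.png")
#     features = cd.describe(image)      # length = 5 * 8 * 12 * 3 = 1440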
|
# -*- coding: utf-8 -*-
"""flask views file"""
from __future__ import absolute_import, unicode_literals, division
import logging
from datetime import datetime
from flask import Blueprint, render_template, Response
from vsphere_ds_exporter.libs import vsphere
metrics = Blueprint('metrics', __name__)
logger = logging.getLogger('ds_exporter')
@metrics.route('/')
def index():
return render_template('index.html')
@metrics.route('/metrics', methods=['GET'])
def details():
start_time = datetime.now()
si = vsphere.connect_vc()
if not si:
# if login vc error, return success 0
return Response("vsphere_ds_exporter_success 0", mimetype='text/plain')
logger.info('vCenter login success~')
content = si.content
vc_datacenters = vsphere.get_vc_dc(content)
# Get exporter datastores
datastore = [n for n in vsphere.get_datastore(vc_datacenters)]
logger.info('Datastore Query Starting')
    # Create per-datastore lists of capacity/freespace/uncommitted
    # size metric strings
capacity_list = ('''vsphere_datastore_capacity_bytes{datastore="%s"} %s'''
% (ds.name, vsphere.get_ds_capacity(ds)) for ds in datastore)
logger.info('Get datastore capacity size ok')
free_list = ('''vsphere_datastore_freespace_bytes{datastore="%s"} %s'''
% (ds.name, vsphere.get_ds_freespace(ds)) for ds in datastore)
logger.info("Get datastore freeSpace size ok")
    uncmtd_list = ('''vsphere_datastore_uncommited_bytes{datastore="%s"} %s'''
% (ds.name, vsphere.get_ds_uncommitted(ds)) for ds in datastore)
    logger.info("Get datastore uncommitted size ok")
vsphere.disconnect_vc(si)
end_time = datetime.now()
q_time = (end_time - start_time).seconds
logger.info('Datastore Query Ending')
logger.info('The Query time is {0}s'.format(q_time))
return render_template('metrics.txt', capacity_list=capacity_list,
free_list=free_list, ucmtd_list=uncmtd_list), {'Content-Type': 'text/plain'}
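# Example of the exposition format produced by the three generators above
# (datastore names and byte counts are illustrative; the response body itself
# is rendered by the metrics.txt template, which is not shown here):
#
#     vsphere_datastore_capacity_bytes{datastore="datastore1"} 1099511627776
#     vsphere_datastore_freespace_bytes{datastore="datastore1"} 549755813888
#     vsphere_datastore_uncommited_bytes{datastore="datastore1"} 0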
|