import numpy as np
import torch
DNA_SIZE = 16
POP_SIZE = 10
CROSSOVER_RATE = 0.8
MUTATION_RATE = 0.005
N_GENERATIONS = 10
class GA(object):
def setF(self,func):
self.F = func
def get_fitness(self,pop):
weight = self.translateDNA(pop)
pred = []
for i in range(len(weight)):
pred.append(self.F(weight[i]))
return (pred - np.min(pred)) + 1e-3 # subtract the minimum fitness so none is negative; fitness then lies in [0, np.max(pred)-np.min(pred)], and a small constant is added so no fitness is exactly 0
def translateDNA(self,pop): # pop is the population matrix: each row is one binary-encoded DNA string, and the number of rows equals the population size
weights = []
for i in range(np.size(pop,0)):
weight = torch.tensor(pop[i], dtype=torch.double)
weight = weight.view(2, -1) # reshape the flat DNA of length 2*DNA_SIZE into a (2, DNA_SIZE) tensor; view() returns a new tensor, so the result must be kept
weights.append(weight)
return weights
def crossover_and_mutation(self,pop, CROSSOVER_RATE = 0.8):
new_pop = []
for father in pop: # iterate over every individual in the population, treating it as the father
child = father # the child first inherits all of the father's genes (each 0/1 in the binary string is a gene)
if np.random.rand() < CROSSOVER_RATE: # crossover is not guaranteed; it happens with this probability
mother = pop[np.random.randint(POP_SIZE)] # pick another individual from the population to act as the mother
cross_points = np.random.randint(low=0, high=DNA_SIZE*2) # pick a random crossover point
child[cross_points:] = mother[cross_points:] # the child takes the mother's genes after the crossover point
self.mutation(child) # each offspring mutates with a certain probability
new_pop.append(child)
return new_pop
def mutation(self,child, MUTATION_RATE=0.003):
if np.random.rand() < MUTATION_RATE: # mutate with probability MUTATION_RATE
mutate_point = np.random.randint(0, DNA_SIZE*2) # pick a random index for the gene to mutate
child[mutate_point] = child[mutate_point]^1 # flip the bit at the mutation point
def select(self,pop, fitness): # nature selection wrt pop's fitness
idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,
p=(fitness)/(fitness.sum()) )
return pop[idx]
def print_info(self,pop):
fitness = self.get_fitness(pop)
max_fitness_index = np.argmax(fitness)
print("max_fitness:", fitness[max_fitness_index])
weights = self.translateDNA(pop)
print("best gene:", pop[max_fitness_index])
print("weights:", weights[max_fitness_index])
return weights[max_fitness_index],fitness[max_fitness_index]
if __name__ == "__main__":
ga = GA()
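# get_fitness() calls self.F, so a fitness function must be registered before the loop runs.
# A minimal sketch with a hypothetical objective (sum of the decoded 0/1 weights);
# the real objective depends on the surrounding project.
ga.setF(lambda weight: float(weight.sum()))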
pop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE*2)) # matrix of shape (POP_SIZE, DNA_SIZE*2)
for _ in range(N_GENERATIONS): # iterate for N generations
weights = ga.translateDNA(pop)
pop = np.array(ga.crossover_and_mutation(pop, CROSSOVER_RATE))
#F_values = F(translateDNA(pop)[0], translateDNA(pop)[1])#x, y --> Z matrix
fitness = ga.get_fitness(pop)
pop = ga.select(pop, fitness) # selection produces the new population
good_weight ,best_acc = ga.print_info(pop)
|
import unittest
from jgi_mg_assembly.pipeline_steps.step import Step
import util
class StepTest(unittest.TestCase):
"""
Tests the pipeline step base class.
"""
def test_step_bad_fn(self):
name = "Foo"
version_name = "Bar"
base_command = "foo"
output_dir = "output_dir"
mystep = Step(name, version_name, base_command, util.get_config()["scratch"], output_dir, False)
with util.captured_stdout() as (out, err):
params = ["flag1", "flag2"]
(exit_code, command) = mystep.run(*params)
self.assertIn("raised an OSError", out.getvalue())
self.assertEqual("foo flag1 flag2", command)
self.assertNotEqual(exit_code, 0)
def test_step_ok(self):
name = "Just ls"
version_name = "ls"
base_command = "ls"
output_dir = "output_dir"
mystep = Step(name, version_name, base_command, util.get_config()["scratch"], output_dir, True)
with util.captured_stdout() as (out, err):
(exit_code, command) = mystep.run()
self.assertIn("Successfully ran {}".format(name), out.getvalue())
self.assertEqual(exit_code, 0)
self.assertEqual("ls", command)
|
import os
import subprocess
import argparse
# prefix = os.path.join(os.path.expanduser("~"), "Projects", "pandaPI")
prefix = os.path.join(os.path.expanduser("~"), "projects", "pandaPI")
# planFilePath = os.path.join(prefix, "ipc-2020-plans", "po-plans", "IPC-2020")
# planFilePath = os.path.join(prefix, "ipc-2020-plans", "inval-po")
# planFilePath = os.path.join(prefix, "ipc-2020-plans", "inval-to")
planFilePath = os.path.join(prefix, "ipc-2020-plans", "plans", "IPC-2020")
# parser = argparse.ArgumentParser(description='Process Command Line Arguments')
# parser.add_argument('--file', type=str)
numFile = 1000
# domainDir = os.path.join(prefix, "HTN-po-domains")
# domainDir = os.path.join(prefix, "HTN-po-domains-invalid")
# domainDir = os.path.join(prefix, "HTN-to-domains-invalid")
domainDir = os.path.join(prefix, "HTN-to-domains-valid")
if not os.path.exists(domainDir):
os.mkdir(domainDir)
visitedNumFile = 0
for filename in os.listdir(planFilePath):
visitedNumFile = visitedNumFile + 1
absFilePath = os.path.join(planFilePath, filename)
with open(absFilePath) as f:
domainFile = f.readline().strip()
problemFile = f.readline().strip()
plan = f.readline()
domainFileName = domainFile.split("/")[-1].split(".")[0]
pfileName = problemFile.split("/")[-1].split(".")[0]
if "domain" in pfileName:
pfileName = domainFileName
temp = domainFile
domainFile = problemFile
problemFile = temp
# print("problem file name>>>>>>>>>>>>>>>> " + problemFile + "\n")
domainName = problemFile.split("/")[-2]
# print("domain file name>>>>>>>>>>>>>>>>> " + domainFile + "\n")
parentDir = os.path.join(domainDir, domainName)
if not os.path.exists(parentDir):
os.mkdir(parentDir)
pfileDir = os.path.join(parentDir, pfileName)
newDomainDir = os.path.join(pfileDir, "domain")
newProblemDir = os.path.join(pfileDir, "problem")
parsedDir = os.path.join(pfileDir, "parsed")
groundedDir = os.path.join(pfileDir, "grounded")
planDir = os.path.join(parentDir, pfileDir, "plans")
if not os.path.exists(pfileDir):
os.mkdir(pfileDir)
os.mkdir(parsedDir)
os.mkdir(groundedDir)
if not os.path.exists(planDir):
os.mkdir(planDir)
absParseOutputPath = os.path.join(parsedDir, pfileName + "." + "htn")
absGroundOutputPath = os.path.join(groundedDir, pfileName + "." + "sas")
absDomainPath = os.path.join(prefix, domainFile)
absProblemPath = os.path.join(prefix, problemFile)
grounderPath = os.path.join(prefix, "pandaPIgrounder/pandaPIgrounder")
parserPath = os.path.join(prefix, "pandaPIparser/pandaPIparser")
if not os.path.exists(newDomainDir):
os.mkdir(newDomainDir)
if not os.path.exists(newProblemDir):
os.mkdir(newProblemDir)
subprocess.run(["cp", absDomainPath, newDomainDir])
subprocess.run(["cp", absProblemPath, newProblemDir])
execGrounder = "./{}".format(os.path.relpath(grounderPath))
execParser = "./{}".format(os.path.relpath(parserPath))
subprocess.run([execParser, absDomainPath, absProblemPath, absParseOutputPath])
subprocess.run([execGrounder, "-D", absParseOutputPath, absGroundOutputPath])
numExistedPlans = len(os.listdir(planDir))
planFileName = "plan_{n}.txt".format(n = numExistedPlans + 1)
absPlanFilePath = os.path.join(planDir, planFileName)
with open(absPlanFilePath, "w") as f:
f.write(plan)
if visitedNumFile >= numFile:
break
print("Number of instances read: ")
print(visitedNumFile)
|
import unittest, math, sys, os
from datetime import date
from dateutil.relativedelta import relativedelta
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pdgatools import Rating, RoundRating
class TestRating(unittest.TestCase):
def setUp(self):
# Create list of round ratings starting at 2020-01-01 with a result from the first day of each month up
# to and including 2022-01-01. The first rating is 900 and it increases by 5 for each date so that the
# last rating is 1020.
self.round_ratings = [RoundRating(date=date(year=2020 + x // 12, month=x % 12 + 1, day=1), rating=900 + x * 5) for x in range(0, 25)]
def test_round_ratings_in_date_range(self):
# Return all within a 12 month period
update_date = date(year=2020, month=12, day=1)
expected = self.round_ratings[0:12]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
# Use date of most recent round in self.round_ratings (2022-01-01)
expected = self.round_ratings[12:]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, order=Rating.DataOrder.RECENT_LAST))
def test_round_ratings_in_date_range_8_records_12_months_back(self):
# There are 8 records 12 months back from this date, so it should return from 12 months back.
update_date = date(year=2022, month=6, day=1)
expected = self.round_ratings[17:]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
def test_round_ratings_in_date_range_less_than_8_records_12_months_back(self):
# There are only 7 records 12 months back from this date, so it should return 1 additional round.
update_date = date(year=2022, month=7, day=1)
expected = self.round_ratings[17:]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
# There are only 6 records 12 months back from this date, so it should return 1 additional round.
update_date = date(year=2022, month=8, day=1)
expected = self.round_ratings[17:]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
def test_round_ratings_in_date_range_less_than_8_records_24_months_back(self):
# There are only 7 records 24 months back from this date, so it should only return those 7.
update_date = date(year=2023, month=7, day=1)
expected = self.round_ratings[18:]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
def test_round_ratings_in_date_range_less_than_8_records_available_before_date(self):
# There are only 7 records available back from this date, so it should return those 7.
update_date = date(year=2020, month=7, day=1)
expected = self.round_ratings[:7]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(self.round_ratings, update_date, order=Rating.DataOrder.RECENT_LAST))
def test_round_ratings_in_date_range_unordered_list(self):
# The input list is not sorted by date; the 8 rounds within range should still be returned in date order.
order = [6, 7, 5, 8, 4, 9, 3, 10]
scrambled_round_ratings = [self.round_ratings[i] for i in order]
update_date = scrambled_round_ratings[order.index(min(order))].date + relativedelta(months=24)
expected = self.round_ratings[3:11]
self.assertListEqual(expected, Rating.round_ratings_in_date_range(scrambled_round_ratings, update_date, order=Rating.DataOrder.UNSORTED))
def test_remove_outliers_100pts(self):
# 10 ratings, mean=1000, 2.5sd~181. Last three are > 100 pts below average and should be removed.
round_ratings = [RoundRating(date=date.today(), rating=x) for x in [1045] * 7 + [895] * 3]
expected = round_ratings[:-3]
self.assertListEqual(expected, Rating.remove_outliers(round_ratings))
def test_remove_outliers_2p5sd(self):
# 11 ratings, mean=900, 2.5sd~41.5. Last one is > 2.5sd below average.
round_ratings = [RoundRating(date=date.today(), rating=x) for x in [905] * 10 + [850]]
expected = round_ratings[:-1]
self.assertListEqual(expected, Rating.remove_outliers(round_ratings))
def test_remove_outliers_too_few_ratings(self):
# 8 ratings, mean=1000. First 2 are > 100 pts below average, but 7 must remain so only the worst should be removed.
round_ratings = [RoundRating(date=date.today(), rating=x) for x in [894, 896] + [1035] * 6]
expected = round_ratings[1:]
self.assertListEqual(expected, Rating.remove_outliers(round_ratings))
def test_double_most_recent_quarter(self):
round_ratings = [RoundRating(date=date.today(), rating=x) for x in [900 + y for y in range(9)]]
i = math.ceil(len(round_ratings) * 0.75)
expected = round_ratings + round_ratings[i:]
self.assertListEqual(expected, Rating.double_most_recent_quarter(round_ratings))
def test_double_most_recent_quarter_less_than_9_rounds(self):
round_ratings = [RoundRating(date=date.today(), rating=x) for x in [900 + y for y in range(8)]]
expected = round_ratings
self.assertListEqual(expected, Rating.double_most_recent_quarter(round_ratings))
def test_calculate_paul_mcbeth(self):
# Test if we can match actual rating from the PDGA website
round_ratings = [
RoundRating(date=date(2021, 9, 26), rating=1010), RoundRating(date=date(2021, 9, 26), rating=1053), RoundRating(date=date(2021, 9, 26), rating=1055),
RoundRating(date=date(2021, 9, 26), rating=1033), RoundRating(date=date(2021, 9, 19), rating=1071), RoundRating(date=date(2021, 9, 19), rating=1049),
RoundRating(date=date(2021, 9, 19), rating=1060), RoundRating(date=date(2021, 9, 12), rating=1048), RoundRating(date=date(2021, 9, 12), rating=1072),
RoundRating(date=date(2021, 9, 12), rating=1048), RoundRating(date=date(2021, 9, 12), rating=1044), RoundRating(date=date(2021, 9, 5), rating=1027),
RoundRating(date=date(2021, 9, 5), rating=1072), RoundRating(date=date(2021, 9, 5), rating=1027), RoundRating(date=date(2021, 8, 15), rating=1053),
RoundRating(date=date(2021, 8, 15), rating=1066), RoundRating(date=date(2021, 8, 15), rating=1007), RoundRating(date=date(2021, 8, 8), rating=1077),
RoundRating(date=date(2021, 8, 8), rating=1011), RoundRating(date=date(2021, 8, 8), rating=1020), RoundRating(date=date(2021, 8, 1), rating=1067),
RoundRating(date=date(2021, 8, 1), rating=1052), RoundRating(date=date(2021, 8, 1), rating=1059), RoundRating(date=date(2021, 7, 25), rating=1032),
RoundRating(date=date(2021, 7, 25), rating=1047), RoundRating(date=date(2021, 7, 25), rating=1047), RoundRating(date=date(2021, 7, 11), rating=1067),
RoundRating(date=date(2021, 7, 11), rating=1074), RoundRating(date=date(2021, 7, 11), rating=1045), RoundRating(date=date(2021, 6, 26), rating=1065),
RoundRating(date=date(2021, 6, 26), rating=1053), RoundRating(date=date(2021, 6, 26), rating=1079), RoundRating(date=date(2021, 6, 26), rating=1065),
RoundRating(date=date(2021, 6, 26), rating=1047), RoundRating(date=date(2021, 6, 6), rating=1064), RoundRating(date=date(2021, 6, 6), rating=1052),
RoundRating(date=date(2021, 6, 6), rating=1035), RoundRating(date=date(2021, 5, 30), rating=1054), RoundRating(date=date(2021, 5, 30), rating=1047),
RoundRating(date=date(2021, 5, 30), rating=1033), RoundRating(date=date(2021, 5, 16), rating=1051), RoundRating(date=date(2021, 5, 16), rating=1038),
RoundRating(date=date(2021, 5, 16), rating=1064), RoundRating(date=date(2021, 5, 9), rating=1035), RoundRating(date=date(2021, 5, 9), rating=1022),
RoundRating(date=date(2021, 5, 9), rating=1035), RoundRating(date=date(2021, 5, 9), rating=1052), RoundRating(date=date(2021, 5, 1), rating=1041),
RoundRating(date=date(2021, 5, 1), rating=1073), RoundRating(date=date(2021, 5, 1), rating=1066), RoundRating(date=date(2021, 5, 1), rating=1066),
RoundRating(date=date(2021, 4, 18), rating=1057), RoundRating(date=date(2021, 4, 18), rating=1065), RoundRating(date=date(2021, 4, 18), rating=1012),
RoundRating(date=date(2021, 3, 28), rating=1049), RoundRating(date=date(2021, 3, 28), rating=1056), RoundRating(date=date(2021, 3, 28), rating=1041),
RoundRating(date=date(2021, 3, 21), rating=1072), RoundRating(date=date(2021, 3, 21), rating=1080), RoundRating(date=date(2021, 3, 21), rating=1070),
RoundRating(date=date(2021, 3, 14), rating=1026), RoundRating(date=date(2021, 3, 14), rating=1043), RoundRating(date=date(2021, 3, 14), rating=1057),
RoundRating(date=date(2021, 3, 7), rating=1041), RoundRating(date=date(2021, 3, 7), rating=1051), RoundRating(date=date(2021, 3, 7), rating=1090),
RoundRating(date=date(2021, 3, 7), rating=1082), RoundRating(date=date(2021, 2, 28), rating=1075), RoundRating(date=date(2021, 2, 28), rating=1038),
RoundRating(date=date(2021, 2, 28), rating=1013), RoundRating(date=date(2021, 2, 28), rating=1071), RoundRating(date=date(2021, 1, 7), rating=1029),
RoundRating(date=date(2020, 11, 8), rating=1019), RoundRating(date=date(2020, 11, 8), rating=1037), RoundRating(date=date(2020, 11, 8), rating=1033),
RoundRating(date=date(2020, 10, 3), rating=1073), RoundRating(date=date(2020, 10, 3), rating=1058), RoundRating(date=date(2020, 10, 3), rating=1044),
# This last line was not included in the ratings calculation
RoundRating(date=date(2020, 9, 13), rating=1056), RoundRating(date=date(2020, 9, 13), rating=1064), RoundRating(date=date(2020, 9, 13), rating=1086)
]
expected_included = sorted(round_ratings[:78], key=lambda e: (e.date, e.rating))
expected_rating = 1050
r = Rating()
r.update(round_ratings, date(2021, 10, 12))
self.assertEqual(expected_rating, r.rating)
self.assertListEqual(expected_included, r.included)
def test_kevin(self):
round_ratings = [
RoundRating(date=date(2021, 9, 26), rating=1070), RoundRating(date=date(2021, 9, 26), rating=1027), RoundRating(date=date(2021, 9, 26), rating=1062),
RoundRating(date=date(2021, 9, 26), rating=1026), RoundRating(date=date(2021, 9, 12), rating=1037), RoundRating(date=date(2021, 9, 12), rating=1040),
RoundRating(date=date(2021, 9, 12), rating=1017), RoundRating(date=date(2021, 9, 12), rating=1024), RoundRating(date=date(2021, 9, 5), rating=1019),
RoundRating(date=date(2021, 9, 5), rating=1012), RoundRating(date=date(2021, 9, 5), rating=1042), RoundRating(date=date(2021, 8, 15), rating=1059),
RoundRating(date=date(2021, 8, 15), rating=1040), RoundRating(date=date(2021, 8, 15), rating=1040), RoundRating(date=date(2021, 8, 8), rating=1053),
RoundRating(date=date(2021, 8, 8), rating=1034), RoundRating(date=date(2021, 8, 8), rating=1015), RoundRating(date=date(2021, 8, 1), rating=1037),
RoundRating(date=date(2021, 8, 1), rating=1045), RoundRating(date=date(2021, 8, 1), rating=1037), RoundRating(date=date(2021, 7, 25), rating=1076),
RoundRating(date=date(2021, 7, 25), rating=1032), RoundRating(date=date(2021, 7, 25), rating=1040), RoundRating(date=date(2021, 7, 11), rating=1024),
RoundRating(date=date(2021, 7, 11), rating=1038), RoundRating(date=date(2021, 7, 11), rating=1067), RoundRating(date=date(2021, 7, 4), rating=997),
RoundRating(date=date(2021, 7, 4), rating=1052), RoundRating(date=date(2021, 6, 26), rating=1031), RoundRating(date=date(2021, 6, 26), rating=1064),
RoundRating(date=date(2021, 6, 26), rating=1052), RoundRating(date=date(2021, 6, 26), rating=1061), RoundRating(date=date(2021, 6, 26), rating=1065),
RoundRating(date=date(2021, 6, 6), rating=1058), RoundRating(date=date(2021, 6, 6), rating=1046), RoundRating(date=date(2021, 6, 6), rating=1040),
RoundRating(date=date(2021, 5, 30), rating=1061), RoundRating(date=date(2021, 5, 30), rating=1068), RoundRating(date=date(2021, 5, 30), rating=1013),
RoundRating(date=date(2021, 5, 16), rating=1024), RoundRating(date=date(2021, 5, 16), rating=1031), RoundRating(date=date(2021, 5, 16), rating=1031),
RoundRating(date=date(2021, 5, 1), rating=1008), RoundRating(date=date(2021, 5, 1), rating=1008), RoundRating(date=date(2021, 5, 1), rating=1027),
RoundRating(date=date(2021, 5, 1), rating=1067), RoundRating(date=date(2021, 4, 25), rating=1029), RoundRating(date=date(2021, 4, 25), rating=1019),
# This record was removed because it was > 2.5sd below average.
RoundRating(date=date(2021, 4, 25), rating=970),
RoundRating(date=date(2021, 4, 18), rating=1065), RoundRating(date=date(2021, 4, 18), rating=1050), RoundRating(date=date(2021, 4, 18), rating=1027),
RoundRating(date=date(2021, 4, 10), rating=1009), RoundRating(date=date(2021, 4, 10), rating=1006), RoundRating(date=date(2021, 4, 10), rating=987),
RoundRating(date=date(2021, 3, 28), rating=1026), RoundRating(date=date(2021, 3, 28), rating=1072), RoundRating(date=date(2021, 3, 28), rating=1019),
RoundRating(date=date(2021, 3, 21), rating=1015), RoundRating(date=date(2021, 3, 21), rating=1028), RoundRating(date=date(2021, 3, 21), rating=1048),
RoundRating(date=date(2021, 3, 14), rating=996), RoundRating(date=date(2021, 3, 14), rating=1043), RoundRating(date=date(2021, 3, 14), rating=989),
RoundRating(date=date(2021, 2, 28), rating=1082), RoundRating(date=date(2021, 2, 28), rating=1028), RoundRating(date=date(2021, 2, 28), rating=1041),
RoundRating(date=date(2021, 2, 28), rating=1046), RoundRating(date=date(2020, 11, 15), rating=1085), RoundRating(date=date(2020, 11, 15), rating=1027),
RoundRating(date=date(2020, 11, 15), rating=1003), RoundRating(date=date(2020, 11, 1), rating=1002), RoundRating(date=date(2020, 11, 1), rating=1032),
RoundRating(date=date(2020, 11, 1), rating=1018), RoundRating(date=date(2020, 10, 3), rating=1006), RoundRating(date=date(2020, 10, 3), rating=1051),
RoundRating(date=date(2020, 10, 3), rating=999)
]
expected_included = sorted(round_ratings[:48] + round_ratings[49:], key=lambda e: (e.date, e.rating))
# PDGA actually reports a rating of 1036 from this data. I E-mailed them and asked why and it turns out
# that they a) keep minor details secret and b) use raw, non-rounded round-ratings while we can only
# access rounded values. Thus we can't expect to always be perfect. In this case we get ~1035.4.
expected_rating = 1035
r = Rating()
r.update(round_ratings, date(2021, 10, 12))
self.assertEqual(expected_rating, r.rating)
self.assertListEqual(expected_included, r.included)
if __name__ == '__main__':
unittest.main()
|
# FIXME: Merge this with fixfmt.table!
from . import palide
from .lib import ansi, box
from .table import _get_header_position # FIXME
#-------------------------------------------------------------------------------
class _Table:
"""
Base class for tables.
"""
@staticmethod
def _normalize_column(col):
try:
name, fmt = col
except TypeError:
if callable(col):
# col itself is the formatter; fall back to its name attribute for the header
fmt = col
name = getattr(fmt, "name", "col")
else:
raise TypeError(
"column is not a pair or callable: {}".format(col))
return fmt, name
def __init__(self, columns):
columns = ( self._normalize_column(c) for c in columns )
self._fmts, names = zip(*columns)
self._names = [
palide(n, f.width, pad_position=_get_header_position(f))
for f, n in zip(self._fmts, names)
]
def generate(self, rows):
def gen():
yield self.top()
yield self.header()
yield self.line()
for row in rows:
yield self.row(*row)
yield self.bottom()
return ( l for l in gen() if l is not None )
def print(self, rows, print=print):
for line in self.generate(rows):
print(line)
class TextTable(_Table):
"""
Simple table using plain ASCII decorations.
"""
def top(self):
return None
def header(self):
return " ".join(self._names)
def line(self):
return " ".join( "-" * f.width for f in self._fmts )
def row(self, *values):
return " ".join( f(v) for f, v in zip(self._fmts, values) )
def bottom(self):
return None
class BoxTable(_Table):
"""
Table framed with Unicode box drawing characters.
"""
DEFAULT_CONFIG = dict(
box_line=(box.SINGLE, None),
sep_line=box.SINGLE,
style=ansi.style(fg="light_gray"),
pad=" ",
)
def __init__(self, columns, config={}):
super().__init__(columns)
self.__cfg = dict(self.DEFAULT_CONFIG)
self.__cfg.update(config)
self.__frame = box.Frame(
( box.Frame.Column(
f.width, sep=self.__cfg["sep_line"], pad=self.__cfg["pad"])
for f in self._fmts ),
edge=self.__cfg["box_line"],
style=self.__cfg["style"])
def top(self):
return self.__frame.top()
def header(self):
return self.__frame.row(self._names)
def line(self):
return self.__frame.line(box.SINGLE)
def row(self, *values):
return self.__frame.row([ f(v) for f, v in zip(self._fmts, values) ])
def bottom(self):
return self.__frame.bottom()
|
import numpy as np
import pyrender
import trimesh
# render with gl camera
def render_glcam(model_in, # model name or trimesh
K = None,
Rt = None,
scale=1.0,
rend_size=(512, 512),
flat_shading=False):
# Mesh creation
if isinstance(model_in, str):
mesh = trimesh.load(model_in, process=False)
else:
mesh = model_in.copy()
pr_mesh = pyrender.Mesh.from_trimesh(mesh)
# Scene creation
scene = pyrender.Scene()
# Adding objects to the scene
face_node = scene.add(pr_mesh)
# Calculate fx, fy, cx, cy from K
fx, fy = K[0][0] * scale, K[1][1] * scale
cx, cy = K[0][2] * scale, K[1][2] * scale
# Camera Creation
cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy,
znear=0.1, zfar=100000)
cam_pose = np.eye(4)
cam_pose[:3, :3] = Rt[:3, :3].T
cam_pose[:3, 3] = -Rt[:3, :3].T.dot(Rt[:, 3])
scene.add(cam, pose=cam_pose)
# Set up the light
light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=10.0)
light_pose = cam_pose.copy()
light_pose[0:3, :] += np.array([[0], [100], [0]])
scene.add(light, pose=light_pose)
# Rendering offscreen from that camera
r = pyrender.OffscreenRenderer(viewport_width=rend_size[1],
viewport_height=rend_size[0],
point_size=1.0)
if flat_shading:
color, depth = r.render(scene, flags=pyrender.constants.RenderFlags.FLAT)
else:
color, depth = r.render(scene)
# rgb to bgr for cv2
color = color[:, :, [2, 1, 0]]
return depth, color
# render with cv camera
def render_cvcam(model_in, # model name or trimesh
K = None,
Rt = None,
scale=1.0,
rend_size=(512, 512),
flat_shading=False):
if K is None:
K = np.array([[2000, 0, 256],
[0, 2000, 256],
[0, 0, 1]], dtype=np.float64)
if Rt is None:
Rt = np.array([[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 1200]], dtype=np.float64)
# define R to transform from cvcam to glcam
R_cv2gl = np.array([[1, 0, 0],
[0, -1, 0],
[0, 0, -1]])
Rt_cv = R_cv2gl.dot(Rt)
return render_glcam(model_in, K, Rt_cv, scale, rend_size, flat_shading)
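# A minimal usage sketch (hedged: "mesh.obj" is a placeholder path; with K and Rt
# left as None the defaults above are used, and a depth map plus a BGR color image
# are returned, matching render_glcam):
#   depth, color = render_cvcam("mesh.obj")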
|
# Generated by Django 2.2 on 2022-01-08 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_category_course_image_instructor_review_video'),
]
operations = [
migrations.AlterField(
model_name='image',
name='image_path',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='video',
name='video_path',
field=models.CharField(max_length=255),
),
]
|
# encoding: utf8
import env
from senti_analysis.train import train
from senti_analysis.models.model_v1 import get_model
if __name__ == '__main__':
model = get_model()
train(model)
|
from datetime import datetime
from typing import cast
import discord
from redbot.core import commands, i18n, checks, Config
from redbot.core.utils.common_filters import (
filter_invites,
filter_various_mentions,
escape_spoilers_and_mass_mentions,
)
from redbot.core.utils.mod import get_audit_reason
from .abc import MixinMeta
_ = i18n.Translator("Mod", __file__)
class ModInfo(MixinMeta):
"""
Commands regarding names, userinfo, etc.
"""
async def get_names_and_nicks(self, user):
names = await self.config.user(user).past_names()
nicks = await self.config.member(user).past_nicks()
if names:
names = [escape_spoilers_and_mass_mentions(name) for name in names if name]
if nicks:
nicks = [escape_spoilers_and_mass_mentions(nick) for nick in nicks if nick]
return names, nicks
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_nicknames=True)
@checks.admin_or_permissions(manage_nicknames=True)
async def rename(self, ctx: commands.Context, user: discord.Member, *, nickname: str = ""):
"""Change a user's nickname.
Leaving the nickname empty will remove it.
"""
nickname = nickname.strip()
me = cast(discord.Member, ctx.me)
if not nickname:
nickname = None
elif not 2 <= len(nickname) <= 32:
await ctx.send(_("Nicknames must be between 2 and 32 characters long."))
return
if not (
(me.guild_permissions.manage_nicknames or me.guild_permissions.administrator)
and me.top_role > user.top_role
and user != ctx.guild.owner
):
await ctx.send(
_(
"I do not have permission to rename that member. They may be higher than or "
"equal to me in the role hierarchy."
)
)
else:
try:
await user.edit(reason=get_audit_reason(ctx.author, None), nick=nickname)
except discord.Forbidden:
# Just in case we missed something in the permissions check above
await ctx.send(_("I do not have permission to rename that member."))
except discord.HTTPException as exc:
if exc.status == 400: # BAD REQUEST
await ctx.send(_("That nickname is invalid."))
else:
await ctx.send(_("An unexpected error has occured."))
else:
await ctx.send(_("Done."))
def handle_custom(self, user):
a = [c for c in user.activities if c.type == discord.ActivityType.custom]
if not a:
return None, discord.ActivityType.custom
a = a[0]
c_status = None
if not a.name and not a.emoji:
return None, discord.ActivityType.custom
elif a.name and a.emoji:
c_status = _("Custom: {emoji} {name}").format(emoji=a.emoji, name=a.name)
elif a.emoji:
c_status = _("Custom: {emoji}").format(emoji=a.emoji)
elif a.name:
c_status = _("Custom: {name}").format(name=a.name)
return c_status, discord.ActivityType.custom
def handle_playing(self, user):
p_acts = [c for c in user.activities if c.type == discord.ActivityType.playing]
if not p_acts:
return None, discord.ActivityType.playing
p_act = p_acts[0]
act = _("Playing: {name}").format(name=p_act.name)
return act, discord.ActivityType.playing
def handle_streaming(self, user):
s_acts = [c for c in user.activities if c.type == discord.ActivityType.streaming]
if not s_acts:
return None, discord.ActivityType.streaming
s_act = s_acts[0]
if isinstance(s_act, discord.Streaming):
act = _("Streaming: [{name}{sep}{game}]({url})").format(
name=discord.utils.escape_markdown(s_act.name),
sep=" | " if s_act.game else "",
game=discord.utils.escape_markdown(s_act.game) if s_act.game else "",
url=s_act.url,
)
else:
act = _("Streaming: {name}").format(name=s_act.name)
return act, discord.ActivityType.streaming
def handle_listening(self, user):
l_acts = [c for c in user.activities if c.type == discord.ActivityType.listening]
if not l_acts:
return None, discord.ActivityType.listening
l_act = l_acts[0]
if isinstance(l_act, discord.Spotify):
act = _("Listening: [{title}{sep}{artist}]({url})").format(
title=discord.utils.escape_markdown(l_act.title),
sep=" | " if l_act.artist else "",
artist=discord.utils.escape_markdown(l_act.artist) if l_act.artist else "",
url=f"https://open.spotify.com/track/{l_act.track_id}",
)
else:
act = _("Listening: {title}").format(title=l_act.name)
return act, discord.ActivityType.listening
def handle_watching(self, user):
w_acts = [c for c in user.activities if c.type == discord.ActivityType.watching]
if not w_acts:
return None, discord.ActivityType.watching
w_act = w_acts[0]
act = _("Watching: {name}").format(name=w_act.name)
return act, discord.ActivityType.watching
def handle_competing(self, user):
w_acts = [c for c in user.activities if c.type == discord.ActivityType.competing]
if not w_acts:
return None, discord.ActivityType.competing
w_act = w_acts[0]
act = _("Competing in: {competing}").format(competing=w_act.name)
return act, discord.ActivityType.competing
def get_status_string(self, user):
string = ""
for a in [
self.handle_custom(user),
self.handle_playing(user),
self.handle_listening(user),
self.handle_streaming(user),
self.handle_watching(user),
self.handle_competing(user),
]:
status_string, status_type = a
if status_string is None:
continue
string += f"{status_string}\n"
return string
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def userinfo(self, ctx, *, user: discord.Member = None):
"""Show information about a user.
This includes fields for status, discord join date, server
join date, voice state and previous names/nicknames.
If the user has no roles, previous names or previous nicknames,
these fields will be omitted.
"""
author = ctx.author
guild = ctx.guild
if not user:
user = author
# A special case for a special someone :^)
special_date = datetime(2016, 1, 10, 6, 8, 4, 443000)
is_special = user.id == 96130341705637888 and guild.id == 133049272517001216
roles = user.roles[-1:0:-1]
names, nicks = await self.get_names_and_nicks(user)
joined_at = user.joined_at if not is_special else special_date
since_created = (ctx.message.created_at - user.created_at).days
if joined_at is not None:
since_joined = (ctx.message.created_at - joined_at).days
user_joined = joined_at.strftime("%d %b %Y %H:%M")
else:
since_joined = "?"
user_joined = _("Unknown")
user_created = user.created_at.strftime("%d %b %Y %H:%M")
voice_state = user.voice
member_number = (
sorted(guild.members, key=lambda m: m.joined_at or ctx.message.created_at).index(user)
+ 1
)
created_on = _("{}\n({} days ago)").format(user_created, since_created)
joined_on = _("{}\n({} days ago)").format(user_joined, since_joined)
if any(a.type is discord.ActivityType.streaming for a in user.activities):
statusemoji = "\N{LARGE PURPLE CIRCLE}"
elif user.status.name == "online":
statusemoji = "\N{LARGE GREEN CIRCLE}"
elif user.status.name == "offline":
statusemoji = "\N{MEDIUM WHITE CIRCLE}\N{VARIATION SELECTOR-16}"
elif user.status.name == "dnd":
statusemoji = "\N{LARGE RED CIRCLE}"
elif user.status.name == "idle":
statusemoji = "\N{LARGE ORANGE CIRCLE}"
activity = _("Chilling in {} status").format(user.status)
status_string = self.get_status_string(user)
if roles:
role_str = ", ".join([x.mention for x in roles])
# 400 BAD REQUEST (error code: 50035): Invalid Form Body
# In embed.fields.2.value: Must be 1024 or fewer in length.
if len(role_str) > 1024:
# Alternative string building time.
# This is not the most optimal, but if you're hitting this, you are losing more time
# to every single check running on users than the occasional user info invoke
# We don't start by building this way, since the number of times we hit this should be
# infinitesimally small compared to when we don't across all uses of Red.
continuation_string = _(
"and {numeric_number} more roles not displayed due to embed limits."
)
available_length = 1024 - len(continuation_string) # do not attempt to tweak, i18n
role_chunks = []
remaining_roles = 0
for r in roles:
chunk = f"{r.mention}, "
chunk_size = len(chunk)
if chunk_size < available_length:
available_length -= chunk_size
role_chunks.append(chunk)
else:
remaining_roles += 1
role_chunks.append(continuation_string.format(numeric_number=remaining_roles))
role_str = "".join(role_chunks)
else:
role_str = None
data = discord.Embed(description=status_string or activity, colour=user.colour)
data.add_field(name=_("Joined Discord on"), value=created_on)
data.add_field(name=_("Joined this server on"), value=joined_on)
if role_str is not None:
data.add_field(
name=_("Roles") if len(roles) > 1 else _("Role"), value=role_str, inline=False
)
if names:
# May need sanitizing later, but mentions do not ping in embeds currently
val = filter_invites(", ".join(names))
data.add_field(
name=_("Previous Names") if len(names) > 1 else _("Previous Name"),
value=val,
inline=False,
)
if nicks:
# May need sanitizing later, but mentions do not ping in embeds currently
val = filter_invites(", ".join(nicks))
data.add_field(
name=_("Previous Nicknames") if len(nicks) > 1 else _("Previous Nickname"),
value=val,
inline=False,
)
if voice_state and voice_state.channel:
data.add_field(
name=_("Current voice channel"),
value="{0.mention} ID: {0.id}".format(voice_state.channel),
inline=False,
)
data.set_footer(text=_("Member #{} | User ID: {}").format(member_number, user.id))
name = str(user)
name = " ~ ".join((name, user.nick)) if user.nick else name
name = filter_invites(name)
avatar = user.avatar_url_as(static_format="png")
data.set_author(name=f"{statusemoji} {name}", url=avatar)
data.set_thumbnail(url=avatar)
if await self.bot.is_mod(author):
advancedlog = Config.get_conf(self, 544974305445019651, True, cog_name="AdvancedLog")
notes = await advancedlog.member(user).notes()
# If the user has notes, add them to the embed
if notes:
# Enumerate and format the notes
notes = "\n".join(f"{i+1}. <@{n['author']}>: {n['note']}" for i, n in enumerate(notes))
# trim if too long and add ...
if len(notes) > 1024:
notes = notes[:990] + "... ([p]notes <user>)"
data.add_field(
name=_("Mod Notes"),
value=notes,
inline=False,
)
await ctx.send(embed=data)
@commands.command()
async def names(self, ctx: commands.Context, *, user: discord.Member):
"""Show previous names and nicknames of a user."""
names, nicks = await self.get_names_and_nicks(user)
msg = ""
if names:
msg += _("**Past 20 names**:")
msg += "\n"
msg += ", ".join(names)
if nicks:
if msg:
msg += "\n\n"
msg += _("**Past 20 nicknames**:")
msg += "\n"
msg += ", ".join(nicks)
if msg:
msg = filter_various_mentions(msg)
await ctx.send(msg)
else:
await ctx.send(_("That user doesn't have any recorded name or nickname change."))
|
__all__ = ['Coffee', 'Widget', 'widgets', 'kill', 'pid', 'start', 'restart']
import os
from ubersicht._coffee import Coffee
import listify
from ubersicht._widgets import Widget
WIDGETS = "%s/Library/Application Support/Übersicht/widgets" % os.environ[
"HOME"]
def widgets():
"""return a list with widgets paths"""
for l in os.listdir(WIDGETS):
path = os.path.join(WIDGETS, l)
if os.path.splitext(path)[1] == ".widget" and os.path.isdir(path):
yield path
"""Übersicht.app process functions"""
def kill():
"""kill Übersicht.app process"""
_pid = pid()
if _pid:
os.system("kill -9 %s &> /dev/null" % _pid)
def pid():
"""return Übersicht.app pid"""
for l in os.popen("ps -ax").read().splitlines():
if "Übersicht.app/Contents/MacOS/Übersicht" in l:
return int(list(filter(None, l.split(" ")))[0])
def start():
"""open Übersicht.app"""
if not pid():
return os.system("open -a Übersicht")
def restart():
"""restart Übersicht.app"""
kill()
start()
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
from nncf.common.graph.layer_attributes import BaseLayerAttributes
class TFNodeAttributes(BaseLayerAttributes):
"""
Contains the TF-specific attributes of the NNCFNode.
"""
def __init__(self, data_format: str):
"""
Initializes the TF-specific attributes of the NNCFNode.
:param data_format: The data format of the input and
output data of the node. One of the following:
`channels_last` or `channels_first`.
"""
self._data_format = data_format
def get_data_format(self) -> str:
"""
Returns the data format of the input and output data of the node.
:return: The data format of the input and output data of the node.
"""
return self._data_format
class TFWeightedNodeAttributes(TFNodeAttributes):
"""
Contains the TF-specific attributes of the NNCFNode with weight.
"""
def __init__(self, data_format: str, weight_shape: List[int]):
"""
Initializes the TF-specific attributes of the NNCFNode.
:param data_format: The data format of the input and
output data of the node. One of the following:
`channels_last` or `channels_first`.
:param weight_shape: The shape of the weight tensor.
"""
super().__init__(data_format)
self._weight_shape = weight_shape
def get_weight_shape(self) -> List[int]:
"""
Returns shape of the weight tensor.
:return: Shape of the weight tensor.
"""
return self._weight_shape
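# A minimal usage sketch (values below are illustrative only, not taken from NNCF):
#   attrs = TFWeightedNodeAttributes('channels_last', [3, 3, 64, 128])
#   attrs.get_data_format()   # -> 'channels_last'
#   attrs.get_weight_shape()  # -> [3, 3, 64, 128]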
|
#!/usr/bin/python
#Makefile is driving me crazy. For now i will use python
import glob
import re
import os
from subprocess import call
sources = []
RebuildAll = True
#compiler and args to use
CXX = "g++"
CXXFLAGS = "-std=c++11 -Wc++11-extensions"
#this function will scan the filesystem and add all .cpp files in the specified directory to a list.
#it will also store where the object file will be created
def addSources(sourcedir, objdir): #adds all source files to a list, together with the name of the object file that they will correspond to
files = glob.glob(sourcedir + "/*.cpp") #look for cpp files in the specified dir
files.extend(glob.glob(sourcedir + "/**/*.cpp")) #also look in all subdirs
for src in files:
entry = {"src": src, "obj": re.sub('.cpp$','.o',src.replace(sourcedir, objdir,1)) }
sources.append(entry)
#determine if we need to rebuild a file
def determineRebuild(src, obj):
if RebuildAll:
return True
if not os.path.isfile(obj):
return True
if os.path.getmtime(src) > os.path.getmtime(obj):
return True
return False
# folder and name of the application to generate
TARGETDIR = "bin"
TARGET = "application"
addSources("../../../../src", "obj/MultiNode")
addSources("../../../../lib", "obj/lib")
addSources("../../implementations", "obj/implementations")
addSources(".", "obj")
#build object files
for src in sources:
command = CXX + " " + CXXFLAGS + " -c -o " + src["obj"] + " " + src["src"]
call("mkdir -p "+os.path.dirname(src["obj"]), shell=True)
if determineRebuild(src["src"],src["obj"]):
print command
call(command, shell=True)
#link application
call("mkdir -p "+TARGETDIR, shell=True)
objectfiles = " ".join(src["obj"] for src in sources)
command = CXX + " -o " + TARGETDIR + "/" + TARGET + " " + objectfiles + " "
print command
call(command, shell=True)
|
import json
import random
import string
from SoftLayer import SoftLayerAPIError, SshKeyManager
from jumpgate.common.error_handling import bad_request, duplicate, not_found
NULL_KEY = "AAAAB3NzaC1yc2EAAAABIwAAAIEArkwv9X8eTVK4F7pMlSt45pWoiakFk" \
"ZMwG9BjydOJPGH0RFNAy1QqIWBGWv7vS5K2tr+EEO+F8WL2Y/jK4ZkUoQgoi+n7" \
"DWQVOHsRijcS3LvtO+50Np4yjXYWJKh29JL6GHcp8o7+YKEyVUMB2CSDOP99eF9g5Q0d+1U" \
"2WVdBWQM="
class KeypairsV2(object):
def on_get(self, req, resp, tenant_id):
client = req.env['sl_client']
mgr = SshKeyManager(client)
keypairs = mgr.list_keys()
resp.body = {
'keypairs': [{
'keypair': format_keypair(keypair)} for keypair in keypairs]}
def on_post(self, req, resp, tenant_id):
body = json.loads(req.stream.read().decode())
try:
name = body['keypair']['name']
key = body['keypair'].get('public_key', generate_random_key())
except (KeyError, TypeError):
return bad_request(resp, 'Not all fields exist to create keypair.')
validate_result = validate_keypair_name(resp, name)
if not validate_result:
return
client = req.env['sl_client']
mgr = SshKeyManager(client)
# Make sure the key with that label doesn't already exist
existing_keys = mgr.list_keys(label=name)
if existing_keys:
return duplicate(resp, 'Duplicate key by that name')
try:
keypair = mgr.add_key(key, name)
resp.body = {'keypair': format_keypair(keypair)}
except SoftLayerAPIError as e:
if 'Unable to generate a fingerprint' in e.faultString:
return bad_request(resp, e.faultString)
if 'SSH key already exists' in e.faultString:
return duplicate(resp, e.faultString)
raise
class KeypairV2(object):
def on_get(self, req, resp, tenant_id, keypair_name):
client = req.env['sl_client']
mgr = SshKeyManager(client)
keys = mgr.list_keys(label=keypair_name)
if len(keys) == 0:
return not_found(resp, 'KeyPair not found')
keypair = mgr.get_key(keys[0]['id'])
resp.body = {'keypair': format_keypair(keypair)}
def on_delete(self, req, resp, tenant_id, keypair_name):
# keypair_name
client = req.env['sl_client']
mgr = SshKeyManager(client)
keys = mgr.list_keys(label=keypair_name)
if len(keys) == 0:
return not_found(resp, 'KeyPair not Found')
mgr.delete_key(keys[0]['id'])
resp.status = 202
def format_keypair(keypair):
return {
'fingerprint': keypair['fingerprint'],
'name': keypair['label'],
'public_key': keypair['key'],
'user': None
}
def generate_random_key():
chars = string.digits + string.ascii_letters
key = "".join([random.choice(chars) for _ in range(8)])
return "ssh-rsa %s %s@invalid" % (NULL_KEY, key)
def validate_keypair_name(resp, key_name):
safechars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safechars)
if clean_value != key_name:
bad_request(
resp, 'Keypair name contains unsafe characters')
return False
if not 0 < len(key_name) < 256:
bad_request(
resp, 'Keypair name must be between 1 and 255 characters long')
return False
return True
|
import hashlib
from django.db import migrations
def create_hashes_from_ip_addresses(apps, schema_editor):
Commenter = apps.get_model('blog', 'Commenter')
for commenter in Commenter.objects.all():
commenter.ip_hash = (
hashlib.sha256(commenter.ip_address.encode("utf-8"))
.digest()
.hex()
)
commenter.save(update_fields=['ip_hash'])
Commenter.objects.filter(comments__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_add_ip_hash_field'),
]
operations = [
migrations.RunPython(
create_hashes_from_ip_addresses,
migrations.RunPython.noop,
),
]
|
""" Uncertainty Sampling
This module contains a class that implements two of the most well-known uncertainty sampling
query strategies, which are least confidence and smallest margin (margin sampling).
"""
import numpy as np
from libact.base.interfaces import QueryStrategy, ContinuousModel
class UncertaintySampling(QueryStrategy):
"""Uncertainty Sampling
This class implements Uncertainty Sampling active learning algorithm [1]_.
Parameters
----------
model: libact.model.* object instance
The base model used for training; this model should support predict_real.
method: {'lc', 'sm'}, optional (default='lc')
least confidence (lc), it queries the instance whose posterior
probability of being positive is nearest 0.5 (for binary classification);
smallest margin (sm), it queries the instance whose posterior
probability gap between the most and the second probable labels is minimal;
Attributes
----------
References
----------
.. [1] Settles, Burr. "Active learning literature survey." University of
Wisconsin, Madison 52.55-66 (2010): 11.
"""
def __init__(self, *args, **kwargs):
"""Currently only LogisticRegression is supported."""
super(UncertaintySampling, self).__init__(*args, **kwargs)
self.model = kwargs.pop('model', None)
if self.model is None:
raise TypeError(
"__init__() missing required keyword-only argument: 'model'"
)
if not isinstance(self.model, ContinuousModel):
raise TypeError(
"model has to be a ContinuousModel"
)
self.model.train(self.dataset)
self.method = kwargs.pop('method', 'lc')
if self.method not in ['lc', 'sm']:
raise TypeError(
"supported methods are ['lc', 'sm'], the given one is: " + \
self.method
)
def make_query(self):
"""
Choices for method (default 'lc'):
'lc' (Least Confident), 'sm' (Smallest Margin)
"""
dataset = self.dataset
self.model.train(dataset)
unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())
if self.method == 'lc': # least confident
ask_id = np.argmin(
np.max(self.model.predict_real(X_pool), axis=1)
)
elif self.method == 'sm': # smallest margin
dvalue = self.model.predict_real(X_pool)
if np.shape(dvalue)[1] > 2:
# Find 2 largest decision values
dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2])
margin = np.abs(dvalue[:, 0] - dvalue[:, 1])
ask_id = np.argmin(margin)
return unlabeled_entry_ids[ask_id]
def get_model(self):
"""Returns the model used by the last query"""
return self.model
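# A minimal usage sketch (hedged: assumes a libact Dataset with unlabeled entries and a
# ContinuousModel wrapper such as libact.models.LogisticRegression; names are illustrative):
#   from libact.models import LogisticRegression
#   qs = UncertaintySampling(trn_ds, model=LogisticRegression(), method='sm')
#   ask_id = qs.make_query()  # index of the unlabeled entry to query next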
|
from typing import Optional, List
import copy
import random
from math import sqrt, log
import numpy as np
from environments import DiscreteEnv, MazeEnv
from agents import AbstractAgent
BIG_NUMBER = 999999999999999999999
class Node:
def __init__(self, action: Optional[int] = None, parent: Optional[object] = None):
self.action = action # action taken
self.parent: Optional[Node] = parent # states of time step before
self.future_rewards: float = 0
self.reward: float = 0
self.visits: int = 0
self.children: List[Node] = [] # possible following state, action pairs
def get_action_chain(self) -> List[int]:
"""
:return: list of actions, from root node, to this action-node
"""
action_chain = [self.action] if self.action is not None else [] # action 0 is a valid action, so compare against None
node_to_add_action = self.parent
while node_to_add_action and node_to_add_action.parent:
action_chain.append(node_to_add_action.action)
node_to_add_action = node_to_add_action.parent
return list(reversed(action_chain))
def get_depth(self) -> int:
"""
:return: depth of node from root
"""
depth = 1
depth_of_children = []
for child in self.children:
depth_of_children.append(child.get_depth())
depth += max(depth_of_children) if depth_of_children else 0
return depth
def generate_tree(self) -> dict:
"""
for easier debugging, generate tree as json
"""
root_dict = {}
for child in self.children:
root_dict[child.action] = {
'visits': child.visits,
'future_rewards': child.future_rewards,
'children': child.generate_tree()
}
return root_dict
class MCTreeSearchAgent(AbstractAgent):
def __init__(self, env: DiscreteEnv, alpha: float = 0.01, alpha_min: float = 0,
alpha_reduction: float = 0.0, gamma: float = 0.99, playouts_per_action: int = 10000, c: float = 1.41,
rollout_policy_agent: Optional[AbstractAgent] = None, visualize: bool = False,
name: str = 'MCTreeSearchAgent'):
super().__init__(env, alpha=alpha, alpha_min=alpha_min, alpha_reduction=alpha_reduction, name=name)
self.gamma = gamma
self.playouts_per_action = playouts_per_action # for given state, how many playouts in total for the decision
self.c = c # exploration factor of uct formula, sqrt(2) in literature, but can be changed depending on env
self.a = None # action which was chosen by act function
self.root_node: Optional[Node] = None
self.simulation_counter = 0 # counts amount of simulation playouts
self.visualize = visualize # shows path of nodes, if env is supported
# Agent used to choose actions in simulation. Epsilon and alpha are not reduced, so their minimums are unnecessary.
self.rollout_policy_agent = rollout_policy_agent
def reset(self) -> None:
self.a = None # action which was chosen by act function
self.root_node: Optional[Node] = None
self.simulation_counter = 0 # counts amount of simulation playouts
def get_possible_actions(self, node: Node) -> range:
# return actions possible for env, probably depending on node
return range(self.env.action_space.n)
def act(self, observation: int) -> int:
while self.simulation_counter < self.playouts_per_action:
# 1. Selection: choose promising leaf node, that is not end of game
if self.root_node:
promising_leaf = self.choose_promising_leaf_node()
else:
promising_leaf = self.root_node = Node()
# 2. Expansion: expand promising node
for action in self.get_possible_actions(promising_leaf):
promising_leaf.children.append(Node(action=action, parent=promising_leaf))
# 3. Simulation: choose one of the new expanded nodes, simulate playouts
actions_to_promising = promising_leaf.get_action_chain()
# start from root node, execute action until the node
root_env_copy = copy.deepcopy(self.env)
is_done = False
for action in actions_to_promising:
_, _, done, _ = root_env_copy.step(action)
is_done = is_done or done
self.simulation_counter += 1
if not is_done:
for child in promising_leaf.children:
root_env_copy_for_child = copy.deepcopy(root_env_copy)
state, reward, done, _ = root_env_copy_for_child.step(child.action)
if child.reward != 0:
child.reward = (1 - self.alpha) * child.reward + self.alpha * reward
else:
child.reward = reward
future_rewards = 0
if not done:
future_rewards = self.gamma * self.rollout_policy_agent.get_state_value(state)
child.visits += 1
if child.future_rewards != 0:
child.future_rewards = (
(1 - self.alpha) * child.future_rewards + self.alpha * future_rewards)
else:
child.future_rewards = future_rewards
# 4. Backpropagation: Update all parent nodes in the chain
update_node = child
while update_node.parent:
update_node = update_node.parent
update_node.visits += 1
if self.visualize and isinstance(self.env, MazeEnv):
self.env.visualize_mcts_tree(self.root_node)
# choose action with highest estimated reward
children_values = self._get_child_values()
self.a = int(np.argmax(children_values))
self.simulation_counter = 0 # reset simulation_counter for next action decision
print(self.a, ": ", children_values)
return self.a
def train(self, s_next: int, reward: float, done: bool) -> None:
if self.root_node.children:
for child in self.root_node.children:
if child.action == self.a:
self.root_node = child
self.root_node.parent = None
else: # either state was not detected in expansion phase, or something went wrong
self.root_node = Node()
def choose_promising_leaf_node(self) -> Node:
node = self.root_node
while node.children:
# choose node with highest uct value
children = sorted(node.children, key=self.uct, reverse=True)
node = children[0] # node with highest uct value
return node
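# uct() below implements the UCB1 selection rule: the estimated node value (exploitation)
# plus c * sqrt(ln(parent visits) / visits) (exploration); unvisited nodes get a huge
# value so they are expanded first.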
def uct(self, node: Node) -> float:
if node.visits == 0:
return BIG_NUMBER
else:
return self._get_node_value(node) + self.c * sqrt(log(node.parent.visits) / node.visits)
def _get_child_values(self) -> List[float]:
child_values = []
for child in self.root_node.children:
child_values.append(self._get_node_value(child))
return child_values
def _get_node_value(self, node: Node) -> float:
if not node.children:
return node.reward + node.future_rewards
else:
child_values = list([self._get_node_value(child) for child in node.children])
return node.reward + self.gamma * max(child_values)
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse cloudbuild config files.
"""
import base64
import os
from apitools.base.protorpclite import messages as proto_messages
from apitools.base.py import encoding as apitools_encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.core import exceptions
import yaml
import yaml.parser
# Don't apply camel case to keys for dict or list values with these field names.
# These correspond to map fields in our proto message, where we expect keys to
# be sent exactly as the user typed them, without transformation to camelCase.
_SKIP_CAMEL_CASE = ['secretEnv', 'secret_env', 'substitutions']
class NotFoundException(exceptions.Error):
def __init__(self, path):
msg = '{path} could not be found'.format(
path=path or 'Cloud Build configuration',
)
super(NotFoundException, self).__init__(msg)
class FileReadException(exceptions.Error):
def __init__(self, path):
msg = '{path} could not be read'.format(
path=path or 'Cloud Build configuration',
)
super(FileReadException, self).__init__(msg)
class ParserError(exceptions.Error):
def __init__(self, path, msg):
msg = 'parsing {path}: {msg}'.format(
path=path or 'Cloud Build configuration',
msg=msg,
)
super(ParserError, self).__init__(msg)
class BadConfigException(exceptions.Error):
def __init__(self, path, msg):
msg = '{path}: {msg}'.format(
path=path or 'Cloud Build configuration',
msg=msg,
)
super(BadConfigException, self).__init__(msg)
def _SnakeToCamelString(field_name):
"""Change a snake_case string into a camelCase string.
Args:
field_name: str, the string to be transformed.
Returns:
str, the transformed string.
"""
parts = field_name.split('_')
if not parts:
return field_name
# Handle field_name with leading '_'s by collapsing them into the next part.
# Legit field names will never look like this, but completeness of the
# function is important.
leading_blanks = 0
for p in parts:
if not p:
leading_blanks += 1
else:
break
if leading_blanks:
parts = parts[leading_blanks:]
if not parts:
# If they were all blanks, then we over-counted by one because of split
# behavior.
return '_'*(leading_blanks-1)
parts[0] = '_'*leading_blanks + parts[0]
return ''.join(parts[:1] + [s.capitalize() for s in parts[1:]])
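# For example (illustrative inputs): _SnakeToCamelString('secret_env') returns 'secretEnv',
# and leading underscores are preserved: _SnakeToCamelString('__foo_bar') returns '__fooBar'.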
def _SnakeToCamel(msg):
"""Transform all dict field names that are snake_case to camelCase.
If a field is in _SKIP_CAMEL_CASE then its value is not further transformed.
Args:
msg: dict, list, or other. If 'other', the function returns immediately.
Returns:
Same type as message, except all field names except "secrets" that were
snake_case are now camelCase.
"""
if isinstance(msg, dict):
return {
_SnakeToCamelString(key):
_SnakeToCamel(val) if key not in _SKIP_CAMEL_CASE else val
for key, val in msg.iteritems()
}
elif isinstance(msg, list):
return [_SnakeToCamel(elem) for elem in msg]
else:
return msg
def _UnpackCheckUnused(obj, msg_type):
"""Stuff a dict into a proto message, and fail if there are unused values.
Args:
obj: dict(), The structured data to be reflected into the message type.
msg_type: type, The proto message type.
Raises:
ValueError: If there is an unused value in obj.
Returns:
Proto message, The message that was created from obj.
"""
msg = apitools_encoding.DictToMessage(obj, msg_type)
def _CheckForUnusedFields(obj):
"""Check for any unused fields in nested messages or lists."""
if isinstance(obj, proto_messages.Message):
unused_fields = obj.all_unrecognized_fields()
if unused_fields:
if len(unused_fields) > 1:
# Because this message shows up in a dotted path, use braces.
# eg .foo.bar.{x,y,z}
unused_msg = '{%s}' % ','.join(sorted(unused_fields))
else:
# For single items, omit the braces.
# eg .foo.bar.x
unused_msg = unused_fields[0]
raise ValueError('.%s: unused' % unused_msg)
for used_field in obj.all_fields():
try:
field = getattr(obj, used_field.name)
_CheckForUnusedFields(field)
except ValueError as e:
raise ValueError('.%s%s' % (used_field.name, e))
if isinstance(obj, list):
for i, item in enumerate(obj):
try:
_CheckForUnusedFields(item)
except ValueError as e:
raise ValueError('[%d]%s' % (i, e))
_CheckForUnusedFields(msg)
return msg
def LoadCloudbuildConfigFromStream(stream, messages, params=None,
path=None):
"""Load a cloudbuild config file into a Build message.
Args:
stream: file-like object containing the JSON or YAML data to be decoded
messages: module, The messages module that has a Build type.
params: dict, parameters to substitute into the Build spec.
path: str or None. Optional path to be used in error messages.
Raises:
NotFoundException: If the file does not exist.
ParserError: If there was a problem parsing the file.
BadConfigException: If the config file has illegal values.
Returns:
Build message, The build that got decoded.
"""
# Turn the data into a dict
try:
structured_data = yaml.safe_load(stream)
if not isinstance(structured_data, dict):
raise ParserError(path, 'Could not parse into a message.')
except yaml.parser.ParserError as pe:
raise ParserError(path, pe)
# Transform snake_case into camelCase.
structured_data = _SnakeToCamel(structured_data)
# Then, turn the dict into a proto message.
try:
build = _UnpackCheckUnused(structured_data, messages.Build)
except ValueError as e:
raise BadConfigException(path, '%s' % e)
subst = structured_data.get('substitutions', {})
if params:
subst.update(params)
build.substitutions = cloudbuild_util.EncodeSubstitutions(subst, messages)
# Re-base64-encode secrets[].secretEnv values, which apitools' DictToMessage
# "helpfully" base64-decodes since it can tell it's a bytes field. We need to
# send a base64-encoded string in the JSON request, not raw bytes.
for s in build.secrets:
for i in s.secretEnv.additionalProperties:
i.value = base64.b64encode(i.value)
# Some problems can be caught before talking to the cloudbuild service.
if build.source:
raise BadConfigException(path, 'config cannot specify source')
if not build.steps:
raise BadConfigException(path, 'config must list at least one step')
return build
def LoadCloudbuildConfigFromPath(path, messages, params=None):
"""Load a cloudbuild config file into a Build message.
Args:
path: str. Path to the JSON or YAML data to be decoded.
messages: module, The messages module that has a Build type.
params: dict, parameters to substitute into a templated Build spec.
Raises:
NotFoundException: If the file does not exist.
ParserError: If there was a problem parsing the file.
BadConfigException: If the config file has illegal values.
Returns:
Build message, The build that got decoded.
"""
if not os.path.exists(path):
raise NotFoundException(path)
try:
with open(path) as f:
return LoadCloudbuildConfigFromStream(f, messages, params, path=path)
except EnvironmentError:
# EnvironmentError is parent of IOError, OSError and WindowsError.
# Raised when file does not exist or can't be opened/read.
raise FileReadException(path)
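# A minimal config sketch (illustrative) of what this loader accepts; snake_case
# keys are converted to camelCase before being packed into the Build message:
#
#   steps:
#   - name: 'gcr.io/cloud-builders/docker'
#     args: ['build', '-t', 'gcr.io/$PROJECT_ID/my-image', '.']
#   substitutions:
#     _MY_VAR: 'some-value'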
|
class Gear(object):
def __init__(self, chainring, cog, observer, wheel=None):
self.__chainring = chainring
self.__cog = cog
self.__wheel = wheel
self.__observer = observer
@property
def chainring(self):
return self.__chainring
@property
def cog(self):
return self.__cog
@property
def wheel(self):
return self.__wheel
@property
def observer(self):
return self.__observer
@property
def ratio(self):
return self.chainring / self.cog
@property
def gear_inches(self):
return self.ratio * self.wheel.diameter
def set_cog(self, new_cog):
self.__cog = new_cog
self.changed()
def set_chainring(self, chainring):
self.__chainring = chainring
self.changed()
def changed(self):
self.observer.changed(self.chainring, self.cog)
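# Minimal usage sketch; Wheel and the printing observer below are illustrative
# stand-ins and are not defined in this module:
if __name__ == '__main__':
    class Wheel(object):
        def __init__(self, diameter):
            self.diameter = diameter

    class PrintingObserver(object):
        def changed(self, chainring, cog):
            print('gear changed: chainring={}, cog={}'.format(chainring, cog))

    gear = Gear(52, 11, PrintingObserver(), wheel=Wheel(26))
    print(gear.gear_inches)  # ratio * wheel diameter
    gear.set_cog(27)         # triggers the observer callback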
|
#!/usr/bin/env python
import os
import sys
import subprocess
import re
PATCHESDIR = "patches"
QUILT_PC = ".pc"
def execute_command_line(arguments, cwd = None):
    # universal_newlines=True makes communicate() return str instead of bytes,
    # so the version regex below can match the output directly.
    process = subprocess.Popen(arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               cwd=cwd, universal_newlines=True)
stdoutstring, stderrstring = process.communicate()
returncode = process.poll()
return stdoutstring, stderrstring, returncode
def which(executablename):
stdoutstring, stderrstring, returncode = execute_command_line(['which', executablename])
if not returncode == 0:
return None
else:
return stdoutstring
def is_quilt_installed():
if sys.platform == 'win32':
return False
path = which('quilt')
if path is None:
return False
stdoutstring, stderrstring, returncode = execute_command_line(['quilt', '--version'])
if not returncode == 0:
return False
    version_re = re.compile(r'(\d+)\.(\d+)')
match = version_re.match(stdoutstring)
if not match:
return False
return True
def apply_patches_using_quilt():
returncode = subprocess.call(['quilt', 'push', '-a'])
if not returncode == 0:
raise Exception("error in applying the patches, please apply by hand using quilt push")
def undo_patches_using_quilt():
returncode = subprocess.call(['quilt', 'pop', '-a'])
if not returncode == 0:
raise Exception("error in undoing the patches, please undo by hand using quilt pop -a")
def run_patch(patchname, patchfile):
arguments = ['patch', '-p1', '--backup', '--prefix={0}/{1}/'.format(QUILT_PC, patchname), '-E', '-i', patchfile]
returncode = subprocess.call(arguments)
if not returncode == 0:
raise Exception("could not apply patch {0}".format(patchname))
def apply_patches_using_patch():
with open("patches/series", "r") as f:
lines = f.readlines()
patches = [x.strip() for x in lines]
patches = [x for x in patches if len(x) > 0]
for patch in patches:
path = os.path.join(PATCHESDIR, patch)
run_patch(patch, path)
def main(undo_patches = False):
print("checking if quilt is installed ... ")
if not is_quilt_installed():
print("... no")
if undo_patches:
print("quilt is not installed, cannot undo the patches")
sys.exit(1)
else:
print("applying patches to source code")
apply_patches_using_patch()
else:
print("... yes")
        if undo_patches:
            print("quilt is installed, will try to undo the patches")
            undo_patches_using_quilt()
            print("all patches undone")
            return
        else:
            print("applying patches to source code")
            apply_patches_using_quilt()
    print("all patches applied")
if __name__ == '__main__':
main()
|
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
from src.data.dataset import GPReviewDataset
def create_dataloaders(df, tokenizer, batch_size):
    df_train, df_test = train_test_split(df, test_size=0.1, random_state=42)
    # Split the held-out 10% further into validation and test halves.
    df_val, df_test = train_test_split(df_test, test_size=0.5, random_state=42)
    # Build one dataset and loader per split; GPReviewDataset is assumed to accept
    # the reviews, their sentiment targets and a tokenizer (adjust to its actual signature).
    def make_loader(split_df):
        ds = GPReviewDataset(review=split_df, target=split_df.sentiment.to_numpy(), tokenizer=tokenizer)
        return DataLoader(ds, batch_size=batch_size, num_workers=4)
    return [make_loader(split_df) for split_df in (df_train, df_val, df_test)]
|
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2020-03-08 11:30
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test19.py
# ----------------------------------------------
# Several ways to implement the singleton pattern: no matter where the program
# asks for the class, it can and will only ever get the same single instance.
# Implementation using a function decorator
def singleton(cls):
_instance = {}
def inner():
if cls not in _instance:
            # use cls as the key and the instance of cls as the value
_instance[cls] = cls()
return _instance[cls]
return inner
@singleton
class Cls(object):
def __init__(self):
print("__init__")
# Implementation using a class decorator
class Singleton:
def __init__(self, cls):
self._cls = cls
self._instance = {}
def __call__(self, *args, **kwargs):
if self._cls not in self._instance:
self._instance[self._cls] = self._cls()
return self._instance[self._cls]
@Singleton
class Cls2:
def __init__(self):
print("__init__2")
# Implementation using __new__
class Singleton1(object):
    # class attribute, shared by all instances
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
            # object.__new__() takes no extra arguments; pass only the class
            cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
print("__init__3")
# Implementation using a metaclass
class Singleton3(type):
_instance = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instance:
cls._instance[cls] = super(Singleton3, cls).__call__(*args, **kwargs)
return cls._instance[cls]
class Singleton4(metaclass=Singleton3):
def __init__(self):
print("__init__4")
if __name__ == "__main__":
c = Cls()
d = Cls()
print(id(c) == id(d))
e = Cls2()
f = Cls2()
print(id(e) == id(f))
g = Singleton1()
h = Singleton1()
print(id(g) == id(h))
i = Singleton4()
j = Singleton4()
print(id(i) == id(j))
|
import logging
from retrying import retry
from sqlalchemy import (
Column,
Integer,
String,
create_engine,
MetaData,
Table
)
from sqlalchemy.orm import mapper, scoped_session, sessionmaker
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.util import class_mapper
from atomicpuppy.atomicpuppy import EventCounter, counter_circuit_breaker
metadata = MetaData()
counters_table = Table(
'atomicpuppy_counters', metadata,
Column('key', String(4000), primary_key=True),
Column('position', Integer),
)
class SqlCounter(EventCounter):
class Counter:
def __init__(self, key, position):
self.key = key
self.position = position
_logger = logging.getLogger(__name__)
def __init__(self, connection_string, instance):
self._logger = logging.getLogger(__name__)
self._engine = create_engine(connection_string)
self._ensured_schema = False
self._instance_name = instance
self._setup_mapper()
self._start_session = scoped_session(sessionmaker(bind=self._engine))
@classmethod
def _setup_mapper(cls):
try:
class_mapper(cls.Counter)
except UnmappedClassError:
mapper(cls.Counter, counters_table)
@retry(wait_exponential_multiplier=1000, wait_exponential_max=1000, stop_max_delay=6000)
def __getitem__(self, stream):
self._logger.debug("Fetching last read event for stream " + stream)
key = self._key(stream)
val = self._read_position(key)
if val is None:
return -1
val = int(val)
self._logger.info(
"Last read event for stream %s is %d",
stream,
val)
return val
@counter_circuit_breaker
def __setitem__(self, stream, val):
# insert or update where instance = xxx and stream = xxx
key = self._key(stream)
# make sure the schema is there
self._ensure_schema()
s = self._start_session()
counter = s.query(self.Counter).filter_by(key=key).first()
if counter:
counter.position = val
else:
counter = self.Counter(key=key, position=val)
s.add(counter)
s.commit()
s.close()
def _read_position(self, key):
# make sure the schema is there
self._ensure_schema()
s = self._start_session()
counter = s.query(SqlCounter.Counter).filter_by(key=key).first()
if counter:
pos = counter.position
else:
pos = None
s.close()
return pos
def _key(self, stream):
return '{}:{}'.format(self._instance_name, stream)
def _ensure_schema(self):
if self._ensured_schema:
return
counters_table.create(self._engine, checkfirst=True)
self._ensured_schema = True
|
n1 = int(input('Enter a number: '))
print('Your number is: {}, its predecessor is: {}, and its successor is: {}'.format(n1, n1 - 1, n1 + 1))
|
#!/usr/bin/env python3
import sys
from os import path, linesep
from glob import iglob
from configparser import ConfigParser
from datetime import datetime
def fatal(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
sys.exit(1)
def to_stringlist(arg):
if len(arg) == 0:
return []
return [v.strip() for v in arg.split(',')]
def wrap_literals(arg, sep=' '):
return (',' + sep).join([f'QStringLiteral("{v}")' for v in arg])
def wrap_list(arg):
return f'QStringList{{{wrap_literals(arg)}}}'
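# For example, wrap_list(['a', 'b']) produces:
#   QStringList{QStringLiteral("a"), QStringLiteral("b")}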
def need_exclude(arg):
res = []
for v in arg:
if path.basename(v).startswith('harbour-'):
res.append(v)
return res
def main():
if len(sys.argv) != 2:
fatal('Usage: generator <output_dir>')
output_dir = sys.argv[1]
if not path.isdir(output_dir):
fatal('The argument must be a directory:', output_dir)
known_apps = {}
config = ConfigParser()
for inipath in iglob(path.join(path.dirname(__file__), '*.ini')):
config.read(inipath)
for app in config.sections():
if app in known_apps:
source = known_apps[app]['source']
                fatal(f'Duplicate entry "{app}": first occurrence was "{source}" then "{inipath}"')
app_dict = config[app]
if not ('config' in app_dict or 'cache' in app_dict or 'local_data' in app_dict):
                fatal(f'{inipath}: none of config, cache or local_data is specified for "{app}"')
known_apps[app] = {
                'source': inipath,
'config': to_stringlist(app_dict.get('config', fallback='')),
'cache': to_stringlist(app_dict.get('cache', fallback='')),
'local_data': to_stringlist(app_dict.get('local_data', fallback=''))
}
config.clear()
config = None
with open(path.join(output_dir, 'known_apps.hpp'), 'w') as writer:
writer.write(f'''\
// Generated by "harbour-mashka/known_apps/generator.py" at {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}.
// Do not edit manually!
#pragma once
inline static QList<KnownApp> known_apps_initializer() {{
return {{
''')
exclude = []
for name in sorted(known_apps.keys()):
app = known_apps[name]
writer.write(f'''\
{{
{wrap_literals([name])},
{wrap_list(app['config'])},
{wrap_list(app['cache'])},
{wrap_list(app['local_data'])}
}},
''')
exclude += need_exclude(app['config'])
exclude += need_exclude(app['cache'])
exclude += need_exclude(app['local_data'])
writer.write(f'''\
}};
}}
inline static QStringList exclude_paths_initializer() {{
return {{
{wrap_literals(exclude, linesep + ' ')}
}};
}}
''')
if __name__ == '__main__':
main()
|
#! /usr/bin/env python3
"""sort a given (text) file, line by line, alphabetically
depending on the value of the second parameter, the resulting file is either saved
in the same directory as the original (with the suffix "sorted") or in the system's
temporary directory
"""
import sys
import os
TMP = '/tmp'
if sys.platform.startswith('win'):
TMP = "C:\\Windows\\Temp"
def main(fn, tmp=False):
"""sorting the lines of the file and write the result to a new file"""
if tmp:
fnew = os.path.join(TMP, os.path.basename(fn))
else:
fnew = '_sorted'.join(os.path.splitext(fn))
with open(fn) as _in, open(fnew, "w") as _out:
regels = _in.readlines()
regels.sort()
for x in regels:
_out.write(x)
return fnew
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) > 1:
fn = main(sys.argv[1])
else:
        fn = main(input("Enter the name of the file to sort: "))
    print("done, output in", fn)
|
"""
A component with a large number of inputs is finite differenced.
"""
import numpy as np
from openmdao.lib.optproblems.scalable import Discipline
from openmdao.main.api import Assembly, Component, set_as_top
N = 200
np.random.seed(12345)
class Model(Assembly):
def configure(self):
self.add('comp', Discipline(prob_size=N))
self.comp.C_y = np.random.random((N, N))
if __name__ == "__main__":
from time import time
top = set_as_top(Model())
top.run()
inputs = ['comp.y_in']
outputs = ['comp.y_out']
inputs = ['comp.y_in[%d, 0]'%n for n in range(N)]
outputs = ['comp.y_out[%d, 0]'%n for n in range(N)]
import sys
if len(sys.argv) > 1 and '-prof' in sys.argv:
import cProfile
import pstats
sys.argv.remove('-prof') #unittest doesn't like -prof
cProfile.run('J = top.driver.calc_gradient(inputs=inputs, outputs=outputs, mode = "forward")', 'profout')
p = pstats.Stats('profout')
p.strip_dirs()
p.sort_stats('cumulative', 'time')
p.print_stats()
print '\n\n---------------------\n\n'
p.print_callers()
print '\n\n---------------------\n\n'
p.print_callees()
else:
t0 = time()
J = top.driver.calc_gradient(inputs=inputs,
outputs=outputs,
mode = 'forward')
print 'Time elapsed', time() - t0
# python -m cProfile -s time fd_scalable.py >z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for data preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
def filter_sparse_rows(sp_mat, max_cols):
"""Filter rows of a CSR sparse matrix to retain upto max_cols."""
all_remove_idx = []
num_removed = np.zeros((sp_mat.indptr.shape[0] - 1,), dtype=np.int64)
for ii in range(sp_mat.shape[0]):
row_st = sp_mat.indptr[ii]
row_en = sp_mat.indptr[ii + 1]
my_scores = sp_mat.data[row_st:row_en]
if len(my_scores) > max_cols:
remove = np.argpartition(-my_scores, max_cols)[max_cols:]
all_remove_idx.extend(remove + row_st)
num_removed[ii] = len(all_remove_idx)
new_data = np.delete(sp_mat.data, all_remove_idx)
new_indices = np.delete(sp_mat.indices, all_remove_idx)
new_indptr = np.copy(sp_mat.indptr)
new_indptr[1:] -= num_removed
return sp.csr_matrix((new_data, new_indices, new_indptr), shape=sp_mat.shape)
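# Usage sketch (illustrative): keep at most the 2 highest-scoring entries per row.
if __name__ == "__main__":
  example = sp.csr_matrix(np.array([[0.1, 0.5, 0.3, 0.0],
                                    [0.9, 0.0, 0.2, 0.4]]))
  print(filter_sparse_rows(example, max_cols=2).toarray())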
|
import random
import yaml
from enums import ChallengeType, TeamLevel
with open("src/challenges.yml", "r") as challenge_file:
challenges = yaml.load(challenge_file, Loader=yaml.Loader)
class SkillChallenge:
"""Represents a challenge of a team"""
def __init__(self, challenge: ChallengeType):
self.challenge = challenge
    def calculate_raw_score(self, team_level: TeamLevel) -> float:
        """Calculate the raw score for this challenge based on the team's skill level"""
        # Get the predicted score range for this skill level
predicted_range = challenges[self.challenge.name]["predicted_points"][
team_level.name
]
        # If it's a timed score, it can be a float, so use random.uniform instead of random.randint
if challenges[self.challenge.name]["is_timed_score"]:
return round(
random.uniform(predicted_range["min"], predicted_range["max"]), 2
)
else:
return random.randint(predicted_range["min"], predicted_range["max"])
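# Usage sketch (the ChallengeType and TeamLevel members named here are
# placeholders; use values that exist in enums.py and challenges.yml):
#
#   challenge = SkillChallenge(ChallengeType.AUTONOMOUS)
#   raw_score = challenge.calculate_raw_score(TeamLevel.ROOKIE)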
|
from pkg_resources import require
|
import math
import random
import re
import time
import aiohttp
import discord
from discord.ext import commands
import pandas as pd
pd.set_option('display.max_rows', 1000)
import datetime
from fuzzywuzzy import process
class WaitTimes(commands.Cog):
def __init__(self, client):
self._CACHE_TIME = 60 * 3 # minutes
self.client = client
self.parks = ["WaltDisneyWorldMagicKingdom",
"WaltDisneyWorldEpcot",
"WaltDisneyWorldHollywoodStudios",
"WaltDisneyWorldAnimalKingdom",
"UniversalIslandsOfAdventure",
"UniversalStudiosFlorida"]
self.df_parks_waittime = pd.DataFrame()
self.spellings = []
self.last_retrieve = 0
@commands.Cog.listener()
async def on_ready(self):
print('Wait Times cog ready')
async def get_parks_waittime(self):
for park in self.parks:
d = await get_park_async(park)
df = pd.json_normalize(d)
df = df[~(df["meta.type"] == 'RESTAURANT')]
df['park'] = park
columns = ["park", "name", "waitTime", "status", "active", "lastUpdate"]
df = df.filter(columns)
df['lastUpdate'] = pd.to_datetime(df['lastUpdate']).dt.tz_convert('America/New_York')
self.df_parks_waittime = self.df_parks_waittime.append(df)
self.make_spellings()
self.last_retrieve = time.time()
return self
def make_spellings(self):
self.spellings = self.df_parks_waittime['name'].astype(str).values.tolist()
@commands.command(aliases=['wait', 'queue', 'queues', 'ride', 'rides', 'attraction', 'attractions'])
async def waits(self, ctx, *, ride=None):
"""
Get wait times for Orlando theme park attractions.
"""
if ride is None:
await ctx.send('Please choose an attraction.')
return
if ride.lower() == "update":
await ctx.send('Wait times updated.')
if self.last_retrieve != 0:
await self.get_parks_waittime()
return
if time.time() - self.last_retrieve > self._CACHE_TIME:
await self.get_parks_waittime()
got_cache = True
else:
got_cache = False
extract_spelling = process.extract(ride, self.spellings, limit=1)
closest_word = extract_spelling[0][0]
data = self.df_parks_waittime
if got_cache:
data.set_index("name", inplace=True)
data.head()
ride_embed = self.make_ride_embed(data.loc[closest_word])
await ctx.send(embed=ride_embed)
def make_ride_embed(self, ride_df):
park = re.split('(?=[A-Z])', ride_df.park)
name = ride_df.name
wait_time = ride_df.waitTime
status = ride_df.status
active = ride_df.active
last_update = ride_df.lastUpdate.to_pydatetime()
if not pd.isnull(last_update):
last_update = datetime.datetime(last_update.year, last_update.month, last_update.day,
last_update.hour, last_update.minute, last_update.second)
else:
last_update = ''
desc = ""
if (status is None and not active) or wait_time is None or math.isnan(wait_time):
desc = f'Status: Closed'
elif status == "Closed" and math.isnan(wait_time):
desc = f'Status: {status}'
else:
desc = f'**{int(wait_time)}** minutes\n' \
f'Status: {status}'
desc += f'\n\n{" ".join(park).strip()}'
embed = discord.Embed(
title=name,
description=desc,
color=random.randint(1, 16777215)
)
embed.set_footer(text=f'{last_update}')
return embed
async def get_park_async(park: str):
async with aiohttp.ClientSession() as session:
url_waittime = f"https://api.themeparks.wiki/preview/parks/{park}/waittime"
async with session.get(url_waittime) as response:
assert 200 == response.status, response.reason
return await response.json()
def setup(client):
client.add_cog(WaitTimes(client))
|
from collections import OrderedDict
# list of supported video modes and, for each, resolution, default target bitrate for recording, supported profiles and bitrate limits
VIDEO_MODES = OrderedDict ([
("4K DCI", {
"width": 4096,
"height": 2048,
"previewDownsamplingFactor": 2,
"recording" : { "bitrate": 60000},
"profiles" : {
"baseline": {"min_bitrate": 500, "max_bitrate": 50000},
"main": {"min_bitrate": 500, "max_bitrate": 50000},
"high": {"min_bitrate": 500, "max_bitrate": 50000}
}
}),
("4K UHD", {
"width": 3840,
"height": 1920,
"previewDownsamplingFactor": 2,
"recording" : { "bitrate": 60000},
"profiles" : {
"baseline": {"min_bitrate": 500, "max_bitrate": 50000},
"main": {"min_bitrate": 500, "max_bitrate": 50000},
"high": {"min_bitrate": 500, "max_bitrate": 50000}
}
}),
("2.8K", {
"width": 2880,
"height": 1440,
"previewDownsamplingFactor": 2,
"recording" : { "bitrate": 45000},
"profiles" : {
"baseline": {"min_bitrate": 500, "max_bitrate": 50000},
"main": {"min_bitrate": 500, "max_bitrate": 50000},
"high": {"min_bitrate": 500, "max_bitrate": 50000}
}
}),
("2K", {
"width": 2048,
"height": 1024,
"previewDownsamplingFactor": 2,
"recording" : { "bitrate": 30000},
"profiles" : {
"baseline": {"min_bitrate": 500, "max_bitrate": 50000},
"main": {"min_bitrate": 500, "max_bitrate": 50000},
"high": {"min_bitrate": 500, "max_bitrate": 50000}
}
}),
("HD", {
"width": 1920,
"height": 960,
"previewDownsamplingFactor": 2,
"recording" : { "bitrate": 30000},
"profiles" : {
"baseline": {"min_bitrate": 500, "max_bitrate": 50000},
"main": {"min_bitrate": 500, "max_bitrate": 50000},
"high": {"min_bitrate": 500, "max_bitrate": 50000}
}
})
])
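# Example helper (illustrative): clamp a requested bitrate to the limits of a
# given mode/profile combination defined above.
def clamp_bitrate(mode_name, profile, requested_kbps):
    limits = VIDEO_MODES[mode_name]["profiles"][profile]
    return max(limits["min_bitrate"], min(requested_kbps, limits["max_bitrate"]))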
|
import os
import time
from abc import ABC, abstractmethod
from pathlib import PurePosixPath
from typing import Dict, Iterable, List, NamedTuple, Optional, Union
from atlassian import Bitbucket
from gitlab import DEVELOPER_ACCESS, Gitlab, GitlabError, GitlabHttpError, \
OWNER_ACCESS, REPORTER_ACCESS
from gitlab.v4.objects import Group, Project, User
from tqdm import tqdm
# -----------------------------------------------------------------------------
# please provide credentials through these environment variables
# -----------------------------------------------------------------------------
GITLAB_URL = os.getenv('GITLAB_URL')
GITLAB_TOKEN = os.getenv('GITLAB_TOKEN')
BITBUCKET_URL = os.getenv('BITBUCKET_URL')
BITBUCKET_USER = os.getenv('BITBUCKET_USER')
BITBUCKET_TOKEN = os.getenv('BITBUCKET_TOKEN')
# -----------------------------------------------------------------------------
# please change these config options according to your needs
# -----------------------------------------------------------------------------
# how to handle duplicates. one of 'error' (raise an exception), 'ignore' (don't import),
# 'rename' (import under a different name)
on_duplicate = 'ignore'
# common prefix to add before all groups. supports subgroups like namespace/subgroup (Optional)
group_prefix = ''
# the max. number of imports to run at the same time
# (one import per CPU core on your GitLab server should work fine)
parallel_imports = 4
# don't import projects with these Bitbucket project keys (optional)
project_blacklist = []
# map bitbucket permissions to these gitlab access levels
permission_map = {
'PROJECT_READ': REPORTER_ACCESS,
'REPO_READ': REPORTER_ACCESS,
'PROJECT_WRITE': DEVELOPER_ACCESS,
'REPO_WRITE': DEVELOPER_ACCESS,
'PROJECT_ADMIN': OWNER_ACCESS,
'REPO_ADMIN': OWNER_ACCESS,
}
# -----------------------------------------------------------------------------
class ProjectMapping(NamedTuple):
bb_project: str
"""the Bitbucket project name (in GitLab: group name)"""
bb_repo: str
"""the Bitbucket repository name (in GitLab: project name)"""
gl_group: str
"""the new group name in GitLab (may point to a subgroup, e.g. "group/subgroup")"""
gl_project: str
"""the new project slug in GitLab (under which URL the project will be accessible)"""
@property
def gitlab_path(self):
return f"{self.gl_group}/{self.gl_project}"
class BitbucketRepoGenerator(ABC):
def __init__(self):
# check params
check_env('BITBUCKET_URL')
check_env('BITBUCKET_USER')
check_env('BITBUCKET_TOKEN')
# connect to bitbucket
self.bitbucket = Bitbucket(
url=BITBUCKET_URL, username=BITBUCKET_USER, password=BITBUCKET_TOKEN)
self.group_count: Optional[int] = None
@abstractmethod
def yield_repos(self) -> Iterable[ProjectMapping]:
pass
class BitbucketMainRepoGenerator(BitbucketRepoGenerator):
def __init__(self):
super().__init__()
print(f"requesting all repos from {BITBUCKET_URL} that are visible to {BITBUCKET_USER}")
self.projects = list(self.bitbucket.project_list())
self.group_count = len(self.projects)
def yield_repos(self):
# iterate over all projects (groups) and repos (projects) in bitbucket
counter = 0
for bb_project in tqdm(self.projects, unit='project groups'):
bb_project_slug = bb_project['key']
if bb_project_slug in project_blacklist:
continue
gl_group = get_gitlab_group(bb_project_slug)
# list all repos in this group
for bb_repo in self.bitbucket.repo_list(bb_project_slug):
bb_repo_slug = bb_repo['slug']
project = ProjectMapping(
bb_project=bb_project_slug,
bb_repo=bb_repo_slug,
gl_group=gl_group,
gl_project=bb_repo_slug,
)
yield project
counter += 1
tqdm.write(f"{counter} Bitbucket repos have been returned")
class BitbucketPersonalRepoGenerator(BitbucketRepoGenerator):
def __init__(self):
super().__init__()
print(f"requesting all users from {BITBUCKET_URL} that are visible to {BITBUCKET_USER}")
self.users = list(self.bitbucket.get_users_info(limit=None))
self.group_count = len(self.users)
def yield_repos(self) -> Iterable[ProjectMapping]:
counter = 0
for bb_user in tqdm(self.users, unit='users'):
bb_user_slug = bb_user['slug']
if bb_user_slug in project_blacklist:
continue
bb_user_path = f'~{bb_user_slug}'
# list all repos in this group
bb_repos = list(self.bitbucket.repo_list(bb_user_path))
if not bb_repos:
tqdm.write(f"skipping {bb_user_slug}, no personal projects found")
for bb_repo in bb_repos:
bb_repo_slug = bb_repo['slug']
project = ProjectMapping(
bb_project=bb_user_path,
bb_repo=bb_repo_slug,
gl_group=bb_user_slug,
gl_project=bb_repo_slug,
)
yield project
counter += 1
tqdm.write(f"{counter} Bitbucket repos have been returned")
def check_env(env: str):
if not os.getenv(env):
raise ValueError(f"please provide {env} as environment variable")
def get_gitlab_group(bitbucket_project):
if group_prefix:
return str(PurePosixPath(group_prefix.strip('/')) / bitbucket_project)
else:
return bitbucket_project
def copy_permissions(dry_run=False):
# prepare bitbucket & gitlab
bitbucket = Bitbucket(url=BITBUCKET_URL, username=BITBUCKET_USER, password=BITBUCKET_TOKEN)
gitlab = Gitlab(GITLAB_URL, private_token=GITLAB_TOKEN)
user_map = {}
gitlab.auth()
current_user = gitlab.user
# go through all bitbucket projects
project_list = list(bitbucket.project_list())
for bb_project in tqdm(project_list, unit='project'):
bb_project_slug = bb_project['key']
tqdm.write(f"----- {bb_project_slug} -----")
# skip when blacklisted
if bb_project_slug in project_blacklist:
tqdm.write(f"skipping blacklisted project {bb_project_slug}")
continue
# skip when there are no repos
bb_repo_list = list(bitbucket.repo_list(bb_project_slug))
if not bb_repo_list:
tqdm.write(f"skipping empty project {bb_project_slug}")
continue
# copy group permissions
bb_project_users = list(bitbucket.project_users(bb_project_slug))
gl_group_path = get_gitlab_group(bb_project_slug)
gl_group = gitlab.groups.get(gl_group_path)
copy_permissions_for(
gitlab, user_map, bb_project_users, gl_group, current_user, dry_run=dry_run)
# copy project permissions
for bb_repo in bb_repo_list:
repo_slug = bb_repo['slug']
bb_repo_users = list(bitbucket.repo_users(bb_project_slug, repo_slug))
gl_project = gitlab.projects.get(f'{gl_group_path}/{repo_slug}')
copy_permissions_for(
gitlab, user_map, bb_repo_users, gl_project, current_user, dry_run=dry_run)
print("finished fixing permissions")
def copy_permissions_for(gitlab: Gitlab, user_map: Dict[str, User], bb_users: List[Dict],
gl_entity: Union[Group, Project], current_user: User, dry_run=False):
# break early if there are no users
entity_type = type(gl_entity).__name__
if not bb_users:
tqdm.write(f"no permissions to copy for {entity_type} {gl_entity.path}")
return
# try to map permissions for all users
users_granted = {}
for bb_user in bb_users:
bb_user_name = bb_user['user']['slug']
bb_user_access = bb_user['permission']
if bb_user_name not in user_map:
response = gitlab.users.list(username=bb_user_name)
user_map[bb_user_name] = response[0] if response else None
gl_user = user_map[bb_user_name]
gl_user_access = permission_map[bb_user_access]
if gl_user:
users_granted[gl_user.username] = gl_user_access
tqdm.write(f"adding {gl_user.username} to {entity_type} {gl_entity.path} as {bb_user_access}")
if not dry_run:
try:
gl_entity.members.create({'user_id': gl_user.id, 'access_level': gl_user_access})
except GitlabError as e:
try:
gl_entity.members.create({'user_id': gl_user.id, 'access_level': gl_user_access - 10})
except GitlabError as e:
if "already exists" in str(e):
tqdm.write(f"user {gl_user.username} already exists in {entity_type} {gl_entity.path}")
elif "inherited membership from group" in str(e):
tqdm.write(f"ignoring lower access to {entity_type} {gl_entity.path} for {gl_user.username}")
else:
tqdm.write(f"failed to add {gl_user.username} to {entity_type} {gl_entity.path}: {e}")
# remove the current user, if someone else was added as admin
admin_added = any(level >= 50 for level in users_granted.values())
if admin_added:
tqdm.write(f"deleting {current_user.username} from {entity_type} {gl_entity.path}")
if not dry_run:
try:
gl_entity.members.delete(current_user.id)
except GitlabError as e:
if "404" not in str(e):
tqdm.write(f"failed to delete {current_user.username} from {entity_type} {gl_entity.path}: {e}")
else:
tqdm.write(f"no new owner was added to {gl_entity.path}, keeping {current_user.username} as owner")
def import_main_projects():
repo_generator = BitbucketMainRepoGenerator()
import_projects(repo_generator)
def import_personal_projects():
repo_generator = BitbucketPersonalRepoGenerator()
import_projects(repo_generator)
def import_projects(repo_generator: BitbucketRepoGenerator):
# import all projects
print(f"importing {repo_generator.group_count} project groups in GitLab at {GITLAB_URL}")
gitlab = Gitlab(GITLAB_URL, private_token=GITLAB_TOKEN)
projects_iter = repo_generator.yield_repos()
processing: List[Project] = []
counter = 0
# imports are running asynchronously and in parallel. we frequently check the status
# of each import and queue new imports until we run out of jobs to process
while True:
if len(processing) < parallel_imports:
try:
project: ProjectMapping = next(projects_iter)
tqdm.write(f"importing {project.gitlab_path}")
job = trigger_import(gitlab, project)
if job:
processing.append(job)
counter += 1
except StopIteration:
tqdm.write(f"all imports were triggered, waiting for running jobs to finish")
break
else:
processing = check_and_sleep(gitlab, processing)
# almost finished, just wait for the last few jobs
while processing:
processing = check_and_sleep(gitlab, processing)
print(f"{counter} projects were imported in GitLab")
def check_and_sleep(gitlab: Gitlab, processing: List[Project], sleep_time=1.0) -> List[Project]:
updated = []
for job in processing:
status = gitlab.projects.get(job.id)
if status.import_status == 'started':
updated.append(status)
else:
if status.import_status == 'finished':
tqdm.write(f"import of {status.path_with_namespace} finished successfully")
else:
tqdm.write(f"warning: import of {status.path_with_namespace} finished "
f"with status {status.import_status}")
if len(updated) >= parallel_imports:
time.sleep(sleep_time)
return updated
def trigger_import(gitlab: Gitlab, project: ProjectMapping) -> Optional[Project]:
if on_duplicate == 'error':
return _trigger_import(gitlab, project)
elif on_duplicate in ('ignore', 'rename'):
try:
return _trigger_import(gitlab, project)
except GitlabHttpError as e:
if e.response_code == 422 and "Path has already been taken" in str(e):
if on_duplicate == 'ignore':
tqdm.write(f"repo {project.gitlab_path} already exists, skipping")
elif on_duplicate == 'rename':
# TODO find a way to try suffixes until it works...
tqdm.write(f"repo {project.gitlab_path} already exists, renaming")
return _trigger_import(gitlab, project, suffix="_BB")
else:
print(f"there was an unexpected error while importing {project}. {e}")
raise e
else:
raise ValueError(f"unexpected value {on_duplicate} for on_duplicate")
def _trigger_import(gitlab: Gitlab, project: ProjectMapping, suffix: str = None) -> Project:
# define the namespace
gl_project_slug = project.gl_project
if suffix:
gl_project_slug += suffix
# start the import process
result = gitlab.projects.import_bitbucket_server(
bitbucket_server_url=BITBUCKET_URL,
bitbucket_server_username=BITBUCKET_USER,
personal_access_token=BITBUCKET_TOKEN,
bitbucket_server_project=project.bb_project,
bitbucket_server_repo=project.bb_repo,
new_name=gl_project_slug,
target_namespace=project.gl_group,
)
job = gitlab.projects.get(result['id'])
return job
def main():
# import all projects in the main namespace
print("== importing Bitbucket projects from the main namespace ==")
import_main_projects()
# now we copy all permissions (these are not covered by the gitlab import)
print("== copying members and permissions for all projects that were migrated ==")
copy_permissions()
# import all personal projects (permissions are set correctly here)
print("== importing Bitbucket projects from the user namespace ==")
import_personal_projects()
if __name__ == '__main__':
main()
|
import os
from functools import partial
import numpy
import pyproj
import shapely.geometry as shp
from shapely.ops import transform
from triangle import triangulate
from vistas.core.color import RGBColor
from vistas.core.gis.elevation import ElevationService, TILE_SIZE, meters_per_px
from vistas.core.graphics.factory import MapMeshFactory, MeshFactoryWorker, use_event_loop
from vistas.core.graphics.feature.geometry import FeatureGeometry
from vistas.core.graphics.feature.shader import FeatureShaderProgram
from vistas.core.graphics.mesh import Mesh
from vistas.core.plugins.data import FeatureDataPlugin
class FeatureFactoryWorker(MeshFactoryWorker):
task_name = "Building Features"
@use_event_loop
def run(self):
verts, indices, normals, colors = [None] * 4
if self.factory.needs_vertices and not self.task.should_stop:
verts, indices, normals = self.factory.generate_meshes(self.task)
self.factory.needs_vertices = False
if self.factory.needs_color and not self.task.should_stop:
colors = self.factory.generate_colors(self.task)
self.factory.needs_color = False
self.sync_with_main(
self.factory.update_features, kwargs=dict(vertices=verts, indices=indices, normals=normals, colors=colors),
block=True
)
class FeatureFactory(MapMeshFactory):
""" A MapMeshFactory for handling polygon rendering. """
worker_class = FeatureFactoryWorker
def __init__(self, extent, data_src: FeatureDataPlugin, shader=None, plugin=None, initial_zoom=10):
super().__init__(extent, shader or FeatureShaderProgram(), plugin, initial_zoom)
self._color_func = None
self._render_thread = None
if not isinstance(data_src, FeatureDataPlugin):
raise TypeError("data_src is not of type FeatureDataPlugin!")
self.data_src = data_src
self.use_cache = self.data_src is not None
if self.use_cache:
ext = '.{}'.format(self.data_src.path.split('.')[-1])
self._cache = self.data_src.path.replace(ext, '.ttt')
self._npz_path = self._cache + '.npz'
self.offsets = None
self.needs_vertices = True
self.needs_color = False
@property
def zoom(self):
return self._zoom
@zoom.setter
def zoom(self, zoom):
if zoom != self._zoom:
self._zoom = zoom
self.needs_vertices = True
tiles = self.extent.tiles(self.zoom)
self._ul = tiles[0]
self._br = tiles[-1]
self.build()
def update_features(self, vertices=None, indices=None, normals=None, colors=None):
# Update geometry information
if all(x is not None for x in (vertices, indices, normals)):
if not self.items:
num_indices = num_vertices = len(indices)
geometry = FeatureGeometry(num_indices, num_vertices, indices=indices, vertices=vertices)
geometry.normals = normals
mesh = Mesh(geometry, self.shader)
self.items.append(mesh)
else:
mesh = self.items[0]
geometry = mesh.geometry
geometry.vertices = vertices
geometry.indices = indices
geometry.compute_bounding_box()
mesh.update()
# Update color buffer
if self.items and colors is not None:
self.items[0].geometry.colors = colors
self.update()
def generate_meshes(self, task=None):
""" Generates polygon mesh vertices for a feature collection """
mercator = pyproj.Proj(init='EPSG:3857')
mbounds = self.mercator_bounds
# Check if a cache for this feature exists
if self.use_cache and os.path.exists(self._npz_path):
if task:
task.status = task.INDETERMINATE
nfile = numpy.load(self._npz_path)
verts = nfile['verts']
# Build it out
        else:
            if task:
                task.progress = 0
                task.target = self.data_src.get_num_features()
project = partial(pyproj.transform, self.extent.projection, mercator) # Our projection method
tris = []
offsets = []
offset = 0
for feature in self.data_src.get_features():
shape = transform(project, shp.shape(feature['geometry']))
if task:
task.inc_progress()
if isinstance(shape, shp.Polygon):
polys = [list(shape.exterior.coords)[:-1]]
elif isinstance(shape, shp.MultiPolygon):
polys = [list(p.exterior.coords)[:-1] for p in shape]
else:
raise ValueError("Can't render non polygons!")
for p in polys:
triangulation = triangulate(dict(vertices=numpy.array(p)))
t = triangulation.get('vertices')[triangulation.get('triangles')].reshape(-1, 2)
offset += t.size
tris.append(t)
offsets.append(offset)
triangles = numpy.concatenate(tris)
offsets = numpy.array(offsets)
# Make room for elevation info
xs = triangles[:, 0]
ys = triangles[:, 1]
verts = numpy.dstack((ys, xs, numpy.zeros_like(xs)))[0]
verts = verts.astype(numpy.float32)
# cache the vertices
if self.use_cache:
numpy.savez(self._cache, verts=verts, offsets=offsets)
# Translate vertices to scene coordinates
# Scale vertices according to current mercator_bounds
verts[:, 1] = (verts[:, 1] - mbounds.left) / (mbounds.right - mbounds.left)
verts[:, 0] = (1 - (verts[:, 0] - mbounds.bottom) / (mbounds.top - mbounds.bottom))
# Get data DEM to sample elevation from.
e = ElevationService()
dem = e.create_data_dem(self.extent, self.zoom, merge=True)
dheight, dwidth = dem.shape
# Index into current DEM and assign heights
us = numpy.floor(verts[:, 1] * dwidth).astype(int)
vs = numpy.floor(verts[:, 0] * dheight).astype(int)
verts[:, 2] = dem[vs, us].ravel() / meters_per_px(self.zoom)
# Scale vertices based on tile size
verts[:, 0] *= (self._br.y - self._ul.y + 1) * TILE_SIZE
verts[:, 1] *= (self._br.x - self._ul.x + 1) * TILE_SIZE
normals = e.create_data_dem(
self.extent, self.zoom, merge=True, src=ElevationService.AWS_NORMALS
)[vs, us].ravel()
# Vertex indices are assumed to be unique
indices = numpy.arange(verts.shape[0])
return verts, indices, normals
@staticmethod
def _default_color_function(feature, data):
"""
Base color function for coloring features.
:param feature: The feature to color
:param data: Persistent data throughout the life of a single render
:return: The RGBColor to color the feature
"""
return RGBColor(0.5, 0.5, 0.5)
def set_color_function(self, func):
self._color_func = func
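    # Example color function (illustrative): color features by an assumed
    # 'category' property, caching one color per category in the per-render
    # `data` dict so repeated features reuse the same color.
    #
    #   def color_by_category(feature, data):
    #       category = feature['properties'].get('category')
    #       if category not in data:
    #           data[category] = RGBColor(*numpy.random.random(3))
    #       return data[category]
    #
    #   factory.set_color_function(color_by_category)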
def generate_colors(self, task=None):
""" Generates a color buffer for the feature collection """
# Color indices are stored in the cache
if self.offsets is None:
self.offsets = numpy.load(self._npz_path)['offsets']
color_func = self._color_func
if not color_func:
color_func = self._default_color_function
colors = []
if task:
task.progress = 0
task.target = self.data_src.get_num_features()
# We use a mutable data structure that is limited to this thread's scope and can be mutated
# based on color_func's scope. This allows multiple color threads to occur without locking.
mutable_color_data = {}
for i, feature in enumerate(self.data_src.get_features()):
if i == 0:
left = 0
else:
left = self.offsets[i - 1]
right = self.offsets[i]
if task:
if task.should_stop:
break
task.inc_progress()
num_vertices = (right - left) // 2
color = numpy.array(color_func(feature, mutable_color_data).rgb.rgb_list, dtype=numpy.float32)
for v in range(num_vertices):
colors.append(color)
colors = numpy.stack(colors)
return colors
|
class Tomcat(object):
    def __init__(self, home, ver):
        self.home = home
        self.version = ver
def display(self):
print(self.home)
print(self.version)
class Apache(Tomcat):
    def __init__(self, home, ver):
        # delegate to the parent initializer instead of duplicating it
        super(Apache, self).__init__(home, ver)
tom_ob = Tomcat('/home/tomcat9' , '7.97')
apa_ob = Apache('/etc/httpd' , '2.4')
tom_ob.display()
apa_ob.display()
|
import pytest
from clean_architecture.rest.app import create_app
from clean_architecture.rest.settings import TestConfig
@pytest.fixture
def app():
app = create_app(TestConfig)
return app
|
from __future__ import print_function
from __future__ import division
from . import _C
import numpy as np
import matplotlib.pyplot as plt
import fuzzytools.matplotlib.plots as cplots
import fuzzytools.matplotlib.colors as cc
from fuzzytools.datascience.statistics import dropout_extreme_percentiles
import pandas as pd
###################################################################################################################################################
def plot_class_distribution(lcdataset, lcset_names,
figsize=None,
uses_log_scale=1,
caption=None,
):
#for ks,lcset_name in enumerate([lcset_name1, lcset_name2]):
#ax = axs[ks]
lcset = lcdataset[lcset_names[0]]
lcobj_classes = lcset.get_lcobj_classes()
pop_dict = {lcset_name:lcdataset[lcset_name].get_lcobj_classes() for lcset_name in lcset_names}
title = ''
title += 'SNe class distribution'+'\n'
title += f'survey={lcset.survey}-{"".join(lcset.band_names)}'+'\n'
plt_kwargs = {
#'ylabel':'' if ks>0 else None,
'title':title[:-1],
#'cmap':cc.colorlist_to_cmap([cc.NICE_COLORS_DICT['nice_gray']]),
'uses_log_scale':uses_log_scale,
'figsize':figsize,
}
fig, ax = cplots.plot_hist_labels(pop_dict, lcset.class_names, **plt_kwargs)
fig.tight_layout()
fig.text(.1,.1, caption)
	plt.show()
def plot_sigma_distribution(lcdataset, set_name:str,
figsize:tuple=(15,10),
):
attr = 'obse'
return plot_values_distribution(lcdataset, set_name, attr,
figsize,
)
def plot_values_distribution(lcdataset, set_name:str, attr:str,
title='?',
xlabel='?',
p=0.5,
figsize:tuple=(15,10),
):
lcset = lcdataset[set_name]
fig, axes = plt.subplots(len(lcset.class_names), len(lcset.band_names), figsize=figsize)
for kb,b in enumerate(lcset.band_names):
for kc,c in enumerate(lcset.class_names):
ax = axes[kc,kb]
plot_dict = {c:dropout_extreme_percentiles(lcset.get_lcset_values_b(b, attr, c), p, mode='upper')[0]}
plot_df = pd.DataFrame.from_dict(plot_dict, orient='columns')
kwargs = {
'fig':fig,
'ax':ax,
'xlabel':xlabel if kc==len(lcset.class_names)-1 else None,
'ylabel':'' if kb==0 else None,
'title':f'band={b}' if kc==0 else '',
#'xlim':(None if c=='SLSN' else (0, 150)) if attr=='obs' else None,
#'xlim':[0, 80],
#'bins':500,#100 if c=='SLSN' else 500,
'uses_density':True,
'legend_loc':'upper right',
'cmap':cc.get_cmap(cc.get_default_colorlist()[kc:])
}
fig, ax = cplots.plot_hist_bins(plot_df, **kwargs)
### multiband colors
ax.grid(color=_C.COLOR_DICT[b])
[ax.spines[border].set_color(_C.COLOR_DICT[b]) for border in ['bottom', 'top', 'right', 'left']]
[ax.spines[border].set_linewidth(2) for border in ['bottom', 'top', 'right', 'left']]
fig.suptitle(title, va='bottom', y=.99)#, fontsize=14)
fig.tight_layout()
plt.show()
|
#------------------------------------
# Logging
#------------------------------------
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("luna")
#------------------------------------
# Generate RAW data
#------------------------------------
from luna.datatypes.dimensional import DataTimePoint
from luna.datatypes.dimensional import DataTimePointSeries
# Generate a point every 6 seconds for about 10 minutes
dataTimePointSeries = DataTimePointSeries()
for i in range(104):
data = {'temperature_C': [20.0+i]}
dataTimePoint = DataTimePoint(t = 1436021994 + (i*6),
tz = "Europe/Rome",
data = data)
dataTimePointSeries.append(dataTimePoint)
print('DEMO: Generated dataTimeSeries: {}'.format(dataTimePointSeries))
#------------------------------------
# Initialize a (temporary) storage
#------------------------------------
from luna.storages.sqlite.storage import SQLiteStorage
storage = SQLiteStorage(in_memory=True, can_initialize=True)
#------------------------------------
# Store Point data
#------------------------------------
storage.put(dataTimePointSeries, id='328or6c269')
print('DEMO: Stored dataTimeSeries: ok')
#------------------------------------
# Get (all) RAW data
#------------------------------------
dataTimePointSeries = storage.get(id='328or6c269', tz='Europe/London')
print('DEMO: Loaded dataTimeSeries: {}'.format(dataTimePointSeries))
dataTimePointSeries.force_load()
print('DEMO: Loaded dataTimeSeries (load forced): {}'.format(dataTimePointSeries))
for dataTimePoint in dataTimePointSeries:
print('DEMO: dataTimePoint @ t={} (dt={}), data="{}"'.format(dataTimePoint.t, dataTimePoint.dt, dataTimePoint.data))
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for c_format.py.
"""
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import util
from grit.tool import build
class CFormatUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="IDS_QUESTIONS">Do you want to play questions?</message>
<message name="IDS_QUOTES">
"What's in a name, <ph name="NAME">%s<ex>Brandon</ex></ph>?"
</message>
<message name="IDS_LINE_BREAKS">
Was that rhetoric?
No.
Statement. Two all. Game point.
</message>
<message name="IDS_NON_ASCII">
\xc3\xb5\\xc2\\xa4\\\xc2\xa4\\\\xc3\\xb5\xe4\xa4\xa4
</message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('c_format', 'en'), buf)
output = util.StripBlankLinesAndComments(buf.getvalue())
self.assertEqual(u"""\
#include "resource.h"
const char* GetString(int id) {
switch (id) {
case IDS_QUESTIONS:
return "Do you want to play questions?";
case IDS_QUOTES:
return "\\"What\\'s in a name, %s?\\"";
case IDS_LINE_BREAKS:
return "Was that rhetoric?\\nNo.\\nStatement. Two all. Game point.";
case IDS_NON_ASCII:
return "\\303\\265\\xc2\\xa4\\\\302\\244\\\\xc3\\xb5\\344\\244\\244";
default:
return 0;
}
}""", output)
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zone Purge List reference model.
The grants from FAD CBSD objects of SAS UUT and SAS test harnesses are purged if either:
- PPA/GWPZ: the CBSD lies within a list of PPA areas or GWPZ areas
- FSS+GWBL: within 150 KMs from a list of FSSs
and the grant frequency range overlaps with the protected entity's frequency range.
"""
import logging
from reference_models.common import data
from reference_models.pre_iap_filtering import pre_iap_util
def zonePurgeReferenceModel(sas_uut_fad, sas_test_harness_fads,
ppa_records, pal_records, gwpz_records, fss_records):
"""Executes the Zone PPA, GWPZ and FSS+GWBL purge reference model.
CBSDs from the FAD objects of SAS UUT and SAS test harnesses that lie within the
PPA area or GWPZ area or within 150 KMs from the FSS site will have their grant
purged if the grant overlaps with frequency range of the protected entities.
A frequency range of 3650-3700 MHz is considered for all the FSSs.
It is assumed that the FSS records passed as input have a GWBL station within
150 KMs and thus the FSS exclusion zone is activated.
Grants are purged directly from the input FADs.
Args:
sas_uut_fad: A |FullActivityDump| object containing the FAD records of SAS UUT.
sas_test_harness_fads: A list of |FullActivityDump| objects containing the FAD records
from SAS test harnesses.
ppa_records: List of PPA record dictionaries.
pal_records: List of PAL record dictionaries.
gwpz_records: List of GWPZ record dictionaries.
fss_records: List of FSS record dictionaries. All the FSSs in this list should have
at least one GWBL within 150KMs.
"""
logging.info('Performing zone purge with PPA (%s), PAL (%s), GWPZ (%s), and FSS with GWBL (%s)',
ppa_records, pal_records, gwpz_records, fss_records)
# Get the list of all CBSDs from the FAD objects of UUT and test harness
cbsds = sas_uut_fad.getCbsdRecords()
for fad in sas_test_harness_fads:
cbsds.extend(fad.getCbsdRecords())
# Perform purge for each PPA
for ppa_record in ppa_records:
logging.info('Purging in PPA (%s)', ppa_record)
# Get all the CBSDs within the PPA polygon
cbsds_within_ppa = pre_iap_util.getCbsdsWithinPolygon(cbsds, ppa_record['zone'])
logging.info('CBSDs within PPA: %s', cbsds_within_ppa)
if cbsds_within_ppa:
# Get all the cbsds that are not part of the PPA cluster list
cbsds_not_part_of_ppa_cluster = data.getCbsdsNotPartOfPpaCluster(
cbsds_within_ppa, ppa_record)
logging.info('CBSDs which are not part of the PPA cluster list: %s',
cbsds_not_part_of_ppa_cluster)
if cbsds_not_part_of_ppa_cluster:
# Get the frequency of the PPA
ppa_frequency_range = pre_iap_util.getPpaFrequencyRange(ppa_record, pal_records)
logging.info('PPA frequency range: %s', ppa_frequency_range)
# Purge the grants of CBSDs that are overlapping PPA frequency
pre_iap_util.purgeOverlappingGrants(cbsds_not_part_of_ppa_cluster,
ppa_frequency_range)
# Perform purge for each GWPZ
for gwpz_record in gwpz_records:
logging.info('Purging in GWPZ (%s)', gwpz_record)
    # Get the CBSDs that are within the GWPZ polygon
cbsds_within_gwpz = pre_iap_util.getCbsdsWithinPolygon(cbsds, gwpz_record['zone'])
logging.info('CBSDs within GWPZ: %s', cbsds_within_gwpz)
if cbsds_within_gwpz:
# Purge the overlapping grants
pre_iap_util.purgeOverlappingGrants(cbsds_within_gwpz,
gwpz_record['record']['deploymentParam'][0]
['operationParam']['operationFrequencyRange'])
# Perform GWBL+FSS purge for each FSS
for fss_record in fss_records:
logging.info('Purging near FSS (%s)', fss_record)
fss_point, fss_info, _ = data.getFssInfo(fss_record)
# Get the CBSDs that are present within 150kms of FSS entity
cbsds_neighboring_fss = pre_iap_util.getFssNeighboringCbsdsWithGrants(
cbsds, fss_point, pre_iap_util.FSS_GWBL_PROTECTION_DISTANCE_KM)
logging.info('CBSDs near the FSS: %s', cbsds_neighboring_fss)
if cbsds_neighboring_fss:
# Purge the overlapping grants
pre_iap_util.purgeOverlappingGrants(cbsds_neighboring_fss,
pre_iap_util.FSS_GWBL_PROTECTION_FREQ_RANGE)
|
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import PermissionDenied
from django.utils.decorators import method_decorator
from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from guardian.mixins import LoginRequiredMixin
from service_catalog.filters.support_filter import SupportFilter
from service_catalog.models import Support
from service_catalog.tables.support_tables import SupportTable
@method_decorator(login_required, name='dispatch')
class SupportListView(LoginRequiredMixin, SingleTableMixin, FilterView):
table_pagination = {'per_page': 10}
table_class = SupportTable
model = Support
template_name = 'generics/list.html'
filterset_class = SupportFilter
def dispatch(self, *args, **kwargs):
if not self.request.user.is_superuser:
raise PermissionDenied
return super(SupportListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = "Supports"
return context
|
"""
Apply automatic fixes for known errors in cmorized data
All functions in this module will work even if no fixes are available
for the given dataset. Therefore it is recommended to apply them to all
variables to be sure that all known errors are fixed.
"""
from ._fixes.fix import Fix
from .check import _get_cmor_checker
def fix_file(filename, short_name, project, dataset, output_dir):
"""
Fix files before ESMValTool can load them
    These fixes are only for issues that prevent iris from loading the cube or
that cannot be fixed after the cube is loaded.
Original files are not overwritten.
Parameters
----------
filename: str
Path to the original file
short_name: str
Variable's short name
project: str
    dataset: str
output_dir: str
Output directory for fixed files
Returns
-------
str:
Path to the fixed file
"""
for fix in Fix.get_fixes(
project=project, dataset=dataset, variable=short_name):
filename = fix.fix_file(filename, output_dir)
return filename
def fix_metadata(cube, short_name, project, dataset, cmor_table=None,
mip=None):
"""
Fix cube metadata if fixes are required and check it anyway
This method collects all the relevant fixes for a given variable, applies
them and checks the resulting cube (or the original if no fixes were
needed) metadata to ensure that it complies with the standards of its
project CMOR tables.
Parameters
----------
cube: iris.cube.Cube
Cube to fix
    short_name: str
Variable's short name
project: str
dataset: str
cmor_table: str, optional
CMOR tables to use for the check, if available
mip: str, optional
Variable's MIP, if available
Returns
-------
iris.cube.Cube:
Fixed and checked cube
Raises
------
CMORCheckError:
If the checker detects errors in the metadata that it can not fix.
"""
for fix in Fix.get_fixes(
project=project, dataset=dataset, variable=short_name):
cube = fix.fix_metadata(cube)
if cmor_table and mip:
checker = _get_cmor_checker(
table=cmor_table,
mip=mip,
short_name=short_name,
fail_on_error=False,
automatic_fixes=True)
checker(cube).check_metadata()
return cube
def fix_data(cube, short_name, project, dataset, cmor_table=None, mip=None):
"""
    Fix cube data if fixes are present and check it anyway.
This method assumes that metadata is already fixed and checked.
This method collects all the relevant fixes for a given variable, applies
them and checks resulting cube (or the original if no fixes were
needed) metadata to ensure that it complies with the standards of its
project CMOR tables.
Parameters
----------
cube: iris.cube.Cube
Cube to fix
    short_name: str
Variable's short name
project: str
dataset: str
cmor_table: str, optional
CMOR tables to use for the check, if available
mip: str, optional
Variable's MIP, if available
Returns
-------
iris.cube.Cube:
Fixed and checked cube
Raises
------
CMORCheckError:
If the checker detects errors in the data that it can not fix.
"""
for fix in Fix.get_fixes(
project=project, dataset=dataset, variable=short_name):
cube = fix.fix_data(cube)
if cmor_table and mip:
checker = _get_cmor_checker(
table=cmor_table,
mip=mip,
short_name=short_name,
fail_on_error=False,
automatic_fixes=True)
checker(cube).check_data()
return cube
|
from django.shortcuts import render_to_response
# Create your views here.
def home(request, template_name='home.html'):
data = {
'name': '<Your Name Here>'
}
return render_to_response(template_name,
data)
|
"""Main plotting module."""
import math
import locale
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def init_params(german_labels=True, font_size=20, font_family='Carlito',
pdf_padding=0.1, pdf_bbox='tight', pdf_fonttype=42,
deact_warnings=True):
"""Initialize RC parameters for matplotlib plots."""
if german_labels:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
mpl.rcParams['font.size'] = font_size
mpl.rcParams['font.family'] = font_family
mpl.rcParams['savefig.pad_inches'] = pdf_padding
mpl.rcParams['savefig.bbox'] = pdf_bbox
plt.rcParams['pdf.fonttype'] = pdf_fonttype
mpl.rcParams['hatch.linewidth'] = 2
if deact_warnings:
mpl.rcParams.update({'figure.max_open_warning': 0})
def znes_colors(n=None):
"""Return dict with ZNES colors.
Examples
--------
>>> znes_colors().keys() # doctest: +ELLIPSIS
dict_keys(['darkblue', 'red', 'lightblue', 'orange', 'grey',...
Original author: @ckaldemeyer
"""
colors = {
'darkblue': '#00395B',
'red': '#B54036',
'lightblue': '#74ADC0',
'orange': '#EC6707',
'grey': '#BFBFBF',
'dimgrey': 'dimgrey',
'lightgrey': 'lightgrey',
'slategrey': 'slategrey',
'darkgrey': '#A9A9A9'
}
# allow for a dict of n colors
if n is not None:
if n > len(colors):
raise IndexError('Number of requested colors is too big.')
else:
return {k: colors[k] for k in list(colors)[:n]}
else:
return colors
def znes_colors_hatched(n, diff_colors=4):
"""Return list of dicts with ZNES colors with hatches."""
colors = list(znes_colors().values())
hatches = ['//', '\\\\', '////', '\\\\\\\\']
return_list = list()
for i in range(n):
if i < diff_colors:
return_list += [{'color': colors[i % diff_colors],
'edgecolor': 'w'}]
else:
return_list += [{'color': colors[i % diff_colors],
'hatch': hatches[((math.floor(i/diff_colors) - 1)
% 4)],
'edgecolor': 'w'}]
return return_list
def get_colors(nr_cols, **kwargs):
"""Get color parameters list of dictionaries."""
color_params = list()
if 'colors' in kwargs:
for color in kwargs['colors']:
color_params += [{'color': color}]
elif 'hatches' in kwargs:
if 'diff_colors' in kwargs:
color_params = znes_colors_hatched(
nr_cols, diff_colors=kwargs['diff_colors'])
else:
color_params = znes_colors_hatched(nr_cols)
else:
colors = list(znes_colors(nr_cols).values())
for color in colors:
color_params += [{'color': color}]
return color_params
def create_multipage_pdf(file_name='plots.pdf', figs=None, dpi=300,
mute=False):
"""Save all open matplotlib figures into a multipage pdf-file.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>>
>>> df1 = pd.DataFrame(np.random.randn(24, 2))
>>> ax1 = df1.plot(kind='line')
>>>
>>> df2 = pd.DataFrame(np.random.randn(24, 2))
>>> ax2 = df2.plot(kind='scatter', x=0, y=1)
>>>
    >>> # mute is set to True to suppress writing a pdf file
>>> create_multipage_pdf(file_name='plots.pdf', dpi=300, mute=True)
False
Original author: @ckaldemeyer
"""
if mute is True:
# set return flag to false if no output is written
flag = False
else:
pp = PdfPages(file_name)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
# close all existing figures
for fig in figs:
plt.close(fig)
# set return flag
flag = True
return flag
def monthlyBar(data, figsize=[12, 5.5], legend_loc='best', legend=True,
return_objs=False, **kwargs):
"""Create bar chart of sum of monthly unit commitment."""
monSum = data.resample('M').sum()/1e3
monSum.rename(index=lambda x: x.strftime('%b'), inplace=True)
nr_cols = len(monSum.columns)
if 'color_params' in kwargs:
color_params = kwargs['color_params']
else:
color_params = get_colors(nr_cols, **kwargs)
fig, ax = plt.subplots(figsize=figsize)
pos_bottom = 0
neg_bottom = 0
for col, color_param in zip(monSum.columns, color_params):
mean_val = monSum[col].mean()
if mean_val >= 0:
ax.bar(monSum.index, monSum[col],
bottom=pos_bottom, **color_param)
pos_bottom += monSum[col]
elif mean_val < 0:
ax.bar(monSum.index, monSum[col],
bottom=neg_bottom, **color_param)
neg_bottom += monSum[col]
if 'demand' in kwargs:
monDemand = kwargs['demand'].resample('M').sum()/1e3
monDemand.rename(index=lambda x: x.strftime('%b'), inplace=True)
ax.bar(monSum.index, monDemand,
width=0.25, color=znes_colors()['lightgrey'], alpha=0.75,
linewidth=0)
ax.grid(linestyle='--', which='major', axis='y')
if 'ylabel' in kwargs:
ax.set_ylabel(kwargs['ylabel'])
else:
ax.set_ylabel('Gesamtwärmemenge in GWh')
if 'xlabel' in kwargs:
ax.set_xlabel(kwargs['xlabel'])
if 'title' in kwargs:
ax.set_title(kwargs['title'])
if 'suptitle' in kwargs:
fig.suptitle(kwargs['suptitle'])
if legend:
if 'labels' in kwargs:
labels = kwargs['labels']
else:
labels = monSum.columns.to_list()
if legend_loc[:7] == 'outside':
if legend_loc[8:] == 'right':
ax.legend(labels=labels, loc='upper right',
bbox_to_anchor=(1.27, 1),
ncol=1)
elif legend_loc[8:] == 'bottom':
ax.legend(labels=labels, loc='lower left',
bbox_to_anchor=(0, -0.265),
ncol=nr_cols)
else:
ax.legend(labels=labels, loc=legend_loc)
if return_objs:
return fig, ax
def load_curve(data, figsize=[8, 5], linewidth=2.5, legend_loc='best', return_objs=False,
**kwargs):
"""Plot the sorted (annual) load curves of units."""
data = data.apply(lambda x: x.sort_values(ascending=False).values)
data.reset_index(drop=True, inplace=True)
nr_cols = len(data.columns)
color_params = get_colors(nr_cols, **kwargs)
fig, ax = plt.subplots(figsize=figsize)
for col, color_param in zip(data.columns, color_params):
ax.plot(data[col], linewidth=linewidth, **color_param)
ax.grid(linestyle='--')
if 'ylabel' in kwargs:
ax.set_ylabel(kwargs['ylabel'])
else:
ax.set_ylabel(r'Wärmestrom $\dot{Q}$ in MW')
if 'xlabel' in kwargs:
ax.set_xlabel(kwargs['xlabel'])
else:
ax.set_xlabel('Stunden')
if 'title' in kwargs:
ax.set_title(kwargs['title'])
if 'suptitle' in kwargs:
fig.suptitle(kwargs['suptitle'])
if 'labels' in kwargs:
labels = kwargs['labels']
else:
labels = data.columns.to_list()
if legend_loc[:7] == 'outside':
if legend_loc[8:] == 'right':
ax.legend(labels=labels, loc='upper right',
bbox_to_anchor=(1.33, 1),
ncol=1)
elif legend_loc[8:] == 'bottom':
anchor = (0, -0.35)
if nr_cols > 4:
nr_cols = round(nr_cols/2)
anchor = (0, -0.45)
ax.legend(labels=labels, loc='lower left',
bbox_to_anchor=anchor,
ncol=nr_cols)
else:
ax.legend(labels=labels, loc=legend_loc)
if return_objs:
return fig, ax
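if __name__ == "__main__":
    # Minimal sketch with synthetic data; the column names and values are
    # placeholders and this is not an official example of the module.
    import numpy as np
    import pandas as pd
    demo_index = pd.date_range('2019-01-01', periods=8760, freq='H')
    demo_data = pd.DataFrame(np.abs(np.random.randn(8760, 2)),
                             index=demo_index, columns=['Unit A', 'Unit B'])
    fig, ax = monthlyBar(demo_data, ylabel='Heat in GWh', return_objs=True)
    plt.show()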
|
from sanic_testing.manager import TestManager
url = "/api/skill/all_skills"
def test_all_skills(app: TestManager):
_, response = app.test_client.get(url)
assert response.status == 200
|
from flask import request, jsonify
from api.index import home_blu
from api.index.utils.handle_json import handle_json
@home_blu.route('/home')
def home():
path = request.args.get('path')
json_dict = handle_json(path)
return jsonify(json_dict)
|
"""
In the Core module you can find all basic classes and functions which form the backbone of the toolbox.
"""
import warnings
import numbers
import numpy as np
import numpy.ma as ma
import collections
from copy import copy, deepcopy
from numbers import Number
from scipy import integrate
from scipy.linalg import block_diag
from scipy.optimize import root
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline, RegularGridInterpolator
from .registry import get_base
__all__ = ["Domain", "EvalData", "Parameters",
"Base", "BaseFraction", "StackedBase",
"Function", "ConstantFunction", "ComposedFunctionVector",
"find_roots", "sanitize_input", "real", "dot_product_l2",
"normalize_base", "project_on_base", "change_projection_base",
"back_project_from_base",
"calculate_scalar_product_matrix",
"calculate_base_transformation_matrix",
"calculate_expanded_base_transformation_matrix",
]
def sanitize_input(input_object, allowed_type):
"""
Sanitizes input data by testing if *input_object* is an array of type *allowed_type*.
Args:
input_object: Object which is to be checked.
allowed_type: desired type
Return:
input_object
"""
input_object = np.atleast_1d(input_object)
for obj in np.nditer(input_object, flags=["refs_ok", "zerosize_ok"]):
if not isinstance(obj.item(), allowed_type):
raise TypeError("Only objects of type: {0} accepted.".format(allowed_type))
return input_object
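if __name__ == "__main__":
    # Quick illustration (only runs when this file is executed directly):
    # homogeneous input is returned as an (at least 1d) array, while a mixed
    # list such as [1.0, "a"] would raise a TypeError.
    print(sanitize_input([1.0, 2.0, 3.0], Number))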
class BaseFraction:
"""
Abstract base class representing a basis that can be used to describe functions of several variables.
"""
def __init__(self, members):
self.members = members
def scalar_product_hint(self):
"""
Empty Hint that can return steps for scalar product calculation.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
"""
pass
def function_space_hint(self):
"""
Empty Hint that can return properties which uniquely define
the function space of the :py:class:`.BaseFraction`.
Note:
Overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`.
"""
pass
def derive(self, order):
"""
Basic implementation of derive function.
Empty implementation, overwrite to use this functionality.
For an example implementation see :py:class:`.Function`
Args:
order (:class:`numbers.Number`): derivative order
Return:
:py:class:`.BaseFraction`: derived object
"""
if order == 0:
return self
else:
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def scale(self, factor):
"""
Factory method to obtain instances of this base fraction, scaled by the
given factor. Empty function, overwrite to implement custom
functionality. For an example implementation see :py:class:`.Function`.
Args:
factor: Factor to scale the vector.
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def raise_to(self, power):
"""
Raises this fraction to the given *power*.
Args:
power (:obj:`numbers.Number`): power to raise the fraction onto
Return:
raised fraction
"""
if power == 1:
return self
else:
raise NotImplementedError("Implement this functionality to make use of it.")
def get_member(self, idx):
"""
Getter function to access members.
Empty function, overwrite to implement custom functionality.
For an example implementation see :py:class:`.Function`
Note:
Empty function, overwrite to implement custom functionality.
Args:
idx: member index
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def __call__(self, *args, **kwargs):
"""
Spatial evaluation of the base fraction.
Args:
*args: Positional arguments.
**kwargs: Keyword arguments.
Returns:
"""
raise NotImplementedError("This is an empty function."
" Overwrite it in your implementation to use this functionality.")
def add_neutral_element(self):
"""
Return the neutral element of addition for this object.
In other words: `self + ret_val == self`.
"""
raise NotImplementedError(
"This is an empty function. Overwrite it in your implementation "
"to use this functionality.")
def mul_neutral_element(self):
"""
Return the neutral element of multiplication for this object.
In other words: `self * ret_val == self`.
"""
raise NotImplementedError(
"This is an empty function. Overwrite it in your implementation "
"to use this functionality.")
def evaluation_hint(self, values):
"""
If evaluation can be accelerated by using special properties of a function, this function can be
        overwritten to perform that computation. It gets passed an array of places where the caller
wants to evaluate the function and should return an array of the same length, containing the results.
Note:
This implementation just calls the normal evaluation hook.
Args:
values: places to be evaluated at
Returns:
numpy.ndarray: Evaluation results.
"""
return self(values)
def _apply_operator(self, operator, additive=False):
"""
Return a new base fraction with the given operator applied.
Args:
operator: Object that can be applied to the base fraction.
additive: Define if the given operator is additive. Default: False.
For an additive operator G and two base fractions f, h the
relation G(f + h) = G(f) + G(h) holds. If the operator is
not additive the derivatives will be discarded.
"""
raise NotImplementedError(
"This is an empty function. Overwrite it in your implementation "
"to use this functionality.")
def real(self):
"""
Return the real part of the base fraction.
"""
return self._apply_operator(np.real, additive=True)
def imag(self):
"""
        Return the imaginary part of the base fraction.
"""
return self._apply_operator(np.imag, additive=True)
def conj(self):
"""
Return the complex conjugated base fraction.
"""
return self._apply_operator(np.conj, additive=True)
class Function(BaseFraction):
"""
Most common instance of a :py:class:`.BaseFraction`.
This class handles all tasks concerning derivation and evaluation of
functions. It is used broad across the toolbox and therefore incorporates
    some very specific attributes. For example, to ensure the accuracy of
    numerical handling, functions may only be evaluated in areas where they
    provide nonzero return values. Also, their domain has to be taken into account.
Therefore the attributes *domain* and *nonzero* are provided.
    To save implementation time, ready-to-go versions like
:py:class:`.LagrangeFirstOrder` are provided in the
:py:mod:`pyinduct.simulation` module.
For the implementation of new shape functions subclass this implementation
or directly provide a callable *eval_handle* and callable
*derivative_handles* if spatial derivatives are required for the
application.
Args:
eval_handle (callable): Callable object that can be evaluated.
domain((list of) tuples): Domain on which the eval_handle is defined.
nonzero(tuple): Region in which the eval_handle will return
nonzero output. Must be a subset of *domain*
derivative_handles (list): List of callable(s) that contain
derivatives of eval_handle
"""
# TODO: overload add and mul operators
def __init__(self, eval_handle, domain=(-np.inf, np.inf), nonzero=(-np.inf, np.inf), derivative_handles=None):
super().__init__(None)
self._vectorial = False
self._function_handle = None
self._derivative_handles = None
self.domain = set()
self.nonzero = set()
for kw, val in zip(["domain", "nonzero"], [domain, nonzero]):
if not isinstance(val, set):
if isinstance(val, tuple):
val = {val}
else:
raise TypeError("(Set of) or tuple(s) has to be provided "
"for {0}".format(kw))
setattr(self, kw, domain_simplification(val))
self.function_handle = eval_handle
self.derivative_handles = derivative_handles
@property
def derivative_handles(self):
return self._derivative_handles
@derivative_handles.setter
def derivative_handles(self, eval_handle_derivatives):
if eval_handle_derivatives is None:
eval_handle_derivatives = []
if not isinstance(eval_handle_derivatives, collections.abc.Iterable):
eval_handle_derivatives = [eval_handle_derivatives]
for der_handle in eval_handle_derivatives:
            if not isinstance(der_handle, collections.abc.Callable):
raise TypeError("derivative_handles must be callable")
self._derivative_handles = eval_handle_derivatives
@property
def function_handle(self):
return self._function_handle
@function_handle.setter
def function_handle(self, eval_handle):
# handle must be callable
        if not isinstance(eval_handle, collections.abc.Callable):
raise TypeError("callable has to be provided as function_handle")
# handle must return scalar when called with scalar
test_value = next(iter(self.domain))[1]
if test_value is np.inf:
test_value = 1
if not isinstance(eval_handle(test_value), Number):
print(test_value)
print(type(eval_handle(test_value)))
raise TypeError("callable must return number when called with scalar")
self._function_handle = eval_handle
# test vectorial input
test_data = np.array([test_value] * 10)
try:
res = eval_handle(test_data)
except BaseException as e:
# looks like the function does _not_ handle vectorial input
self._vectorial = False
return
if not isinstance(res, np.ndarray):
# raise TypeError("callable must return np.ndarray when called with vector")
self._vectorial = False
return
if res.shape != test_data.shape:
# raise TypeError("result of call with vector must be of same shape")
self._vectorial = False
return
self._vectorial = True
def _check_domain(self, values):
"""
Checks if values fit into domain.
Args:
values (array_like): Point(s) where function shall be evaluated.
Raises:
ValueError: If values exceed the domain.
"""
values = np.atleast_1d(values)
if values.dtype == complex:
raise TypeError("Only real valued arguments considered for "
"pyinduct function. \nProvide value: {}\n"
"Data type: {}".format(values, values.dtype))
mask = np.full(len(values), False)
for interval in self.domain:
d_mask = np.logical_and(values >= interval[0],
values <= interval[1])
np.logical_or(mask, d_mask, out=mask)
if not all(mask):
raise ValueError("Function evaluated outside it's domain {} with {}"
"".format(self.domain,
values[np.logical_not(mask)]))
def __call__(self, argument):
"""
Handle that is used to evaluate the function on a given point.
Args:
argument: Function parameter
Return:
function value
"""
self._check_domain(argument)
if self._vectorial:
if not isinstance(argument, np.ndarray):
# a little convenience helper here
argument = np.array(argument)
return self._function_handle(argument)
else:
try:
ret_val = []
for arg in argument:
ret_val.append(self._function_handle(arg))
return np.array(ret_val)
except TypeError as e:
return self._function_handle(argument)
def get_member(self, idx):
"""
Implementation of the abstract parent method.
Since the :py:class:`.Function` has only one member (itself) the
parameter *idx* is ignored and *self* is returned.
Args:
idx: ignored.
Return:
self
"""
return self
def raise_to(self, power):
"""
Raises the function to the given *power*.
Warning:
Derivatives are lost after this action is performed.
Args:
power (:obj:`numbers.Number`): power to raise the function to
Return:
raised function
"""
if power == 1:
return self
def raise_factory(func):
def _raised_func(z):
return np.power(func(z), power)
return _raised_func
new_obj = deepcopy(self)
new_obj.derivative_handles = None
new_obj.function_handle = raise_factory(self.function_handle)
return new_obj
def scale(self, factor):
"""
Factory method to scale a :py:class:`.Function`.
Args:
factor : :obj:`numbers.Number` or a callable.
"""
if factor == 1:
return self
def scale_factory(func):
def _scaled_func(z):
                if isinstance(factor, collections.abc.Callable):
return factor(z) * func(z)
else:
return factor * func(z)
return _scaled_func
new_obj = deepcopy(self)
        if isinstance(factor, collections.abc.Callable):
# derivatives are lost
new_obj.derivative_handles = None
new_obj.function_handle = scale_factory(self._function_handle)
else:
# derivatives can be scaled
new_obj.derivative_handles = [scale_factory(der_handle) for der_handle in self.derivative_handles]
new_obj.function_handle = scale_factory(self._function_handle)
return new_obj
def derive(self, order=1):
r"""
Spatially derive this :py:class:`.Function`.
        This is done by neglecting *order* derivative handles and selecting
        handle :math:`\text{order} - 1` as the new evaluation_handle.
Args:
order (int): the amount of derivations to perform
Raises:
TypeError: If *order* is not of type int.
ValueError: If the requested derivative order is higher than the
provided one.
Returns:
:py:class:`.Function` the derived function.
"""
if not isinstance(order, int):
raise TypeError("only integer allowed as derivation order")
if order == 0:
return self
if order < 0 or order > len(self.derivative_handles):
raise ValueError("function cannot be differentiated that often.")
new_obj = deepcopy(self)
new_obj.derivative_handles = self.derivative_handles[order - 1:]
new_obj.function_handle = new_obj.derivative_handles.pop(0)
return new_obj
def scalar_product_hint(self):
"""
        Return the hint that :py:func:`.dot_product_l2` has to be
        calculated to obtain the scalar product.
"""
return dot_product_l2
def function_space_hint(self):
"""
        Return the hint that this function is an element of a
        scalar product space which is uniquely defined by
the scalar product :py:meth:`.scalar_product_hint`.
Note:
If you are working on different function spaces, you have
to overwrite this hint in order to provide more properties
which characterize your specific function space. For
example the domain of the functions.
"""
return self.scalar_product_hint(), self.domain
@staticmethod
def from_data(x, y, **kwargs):
"""
Create a :py:class:`.Function` based on discrete data by
interpolating.
The interpolation is done by using :py:class:`interp1d` from scipy,
the *kwargs* will be passed.
Args:
            x (array-like): Places where the function has been evaluated.
            y (array-like): Function values at *x*.
            **kwargs: All remaining kwargs get passed to :py:class:`interp1d`.
Returns:
:py:class:`.Function`: An interpolating function.
"""
dom = kwargs.pop("domain", (min(x), max(x)))
nonzero = kwargs.pop("nonzero", dom)
der_handles = kwargs.pop("derivative_handles", None)
interp = interp1d(x, y, **kwargs)
# TODO fix this behaviour
def wrapper(z):
res = interp(z)
if res.size == 1:
                return float(res)
return res
func = Function(eval_handle=wrapper,
domain=dom,
nonzero=nonzero,
derivative_handles=der_handles)
return func
def add_neutral_element(self):
return ConstantFunction(0, domain=self.domain)
def mul_neutral_element(self):
return ConstantFunction(1, domain=self.domain)
def _apply_operator(self, operator, additive=False):
"""
Return a new function with the given operator applied.
See docstring of :py:meth:`.BaseFraction._apply_operator`.
"""
def apply(func):
def handle(z):
return operator(func(z))
return handle
new_obj = deepcopy(self)
new_obj.function_handle = apply(self.function_handle)
if additive:
new_obj.derivative_handles = [
apply(f) for f in self.derivative_handles]
else:
new_obj.derivative_handles = None
return new_obj
class ConstantFunction(Function):
"""
A :py:class:`.Function` that returns a constant value.
    This function can be differentiated arbitrarily often.
Args:
constant (number): value to return
Keyword Args:
**kwargs: All other kwargs get passed to :py:class:`.Function`.
"""
def __init__(self, constant, **kwargs):
self._constant = constant
func_kwargs = dict(eval_handle=self._constant_function_handle)
if "nonzero" in kwargs:
if constant == 0:
if kwargs["nonzero"] != set():
raise ValueError("Constant Function with constant 0 must have an"
" empty set nonzero area.")
if "domain" in kwargs:
if kwargs["nonzero"] != kwargs["domain"]:
raise ValueError(
"Constant Function is expected to be constant on the complete "
"domain. Nonzero argument is prohibited")
else:
func_kwargs["domain"] = kwargs["nonzero"]
func_kwargs["nonzero"] = kwargs["nonzero"]
else:
if "domain" in kwargs:
func_kwargs["domain"] = kwargs["domain"]
func_kwargs["nonzero"] = kwargs["domain"]
if constant == 0:
func_kwargs["nonzero"] = set()
if "derivative_handles" in kwargs:
warnings.warn(
"Derivative handles passed to ConstantFunction are discarded")
super().__init__( **func_kwargs)
def _constant_function_handle(self, z):
return self._constant * np.ones_like(z)
def derive(self, order=1):
if not isinstance(order, int):
raise TypeError("only integer allowed as derivation order")
if order == 0:
return self
if order < 0:
raise ValueError("only derivative order >= 0 supported")
zero_func = deepcopy(self)
zero_func._constant = 0
zero_func.nonzero = set()
return zero_func
class ComposedFunctionVector(BaseFraction):
r"""
Implementation of composite function vector :math:`\boldsymbol{x}`.
.. math::
\boldsymbol{x} = \begin{pmatrix}
x_1(z) \\
\vdots \\
x_n(z) \\
\xi_1 \\
\vdots \\
\xi_m \\
\end{pmatrix}
"""
def __init__(self, functions, scalars):
funcs = sanitize_input(functions, Function)
scals = sanitize_input(scalars, Number)
BaseFraction.__init__(self, {"funcs": funcs, "scalars": scals})
def __call__(self, arguments):
f_res = np.array([f(arguments) for f in self.members["funcs"]])
s_res = self.members["scalars"]
if f_res.ndim > 1:
s_res = s_res[:, None] * np.ones_like(f_res)
res = np.concatenate((f_res, s_res))
return res
def scalar_product_hint(self):
func_hints = [f.scalar_product_hint() for f in self.members["funcs"]]
scalar_hints = [dot_product for s in self.members["scalars"]]
return func_hints + scalar_hints
def function_space_hint(self):
"""
        Return the hint that this function is an element of a
        scalar product space which is uniquely defined by
* the scalar product
:py:meth:`.ComposedFunctionVector.scalar_product`
* :code:`len(self.members["funcs"])` functions
* and :code:`len(self.members["scalars"])` scalars.
"""
func_hints = [f.function_space_hint() for f in self.members["funcs"]]
scalar_hints = [dot_product for s in self.members["scalars"]]
return func_hints + scalar_hints
def get_member(self, idx):
if idx < len(self.members["funcs"]):
return self.members["funcs"][idx]
elif idx - len(self.members["funcs"]) < len(self.members["scalars"]):
return self.members["scalars"][idx - len(self.members["funcs"])]
else:
raise ValueError("wrong index")
def scale(self, factor):
if isinstance(factor, ComposedFunctionVector):
if not len(self.members["funcs"]) == len(factor.members["funcs"]):
raise ValueError
if not len(self.members["scalars"]) == len(factor.members["scalars"]):
raise ValueError
return self.__class__(np.array(
[func.scale(scale) for func, scale in
zip(self.members["funcs"], factor.members["funcs"])]),
[scalar * scale for scalar, scale in
zip(self.members["scalars"], factor.members["scalars"])],
)
elif isinstance(factor, Number):
return self.__class__(
np.array([func.scale(factor) for func in self.members["funcs"]]),
np.array([scal * factor for scal in self.members["scalars"]])
)
else:
raise TypeError("ComposedFunctionVector can only be scaled with "
"compatible ComposedFunctionVector of with a"
"constant scalar")
def mul_neutral_element(self):
"""
Create neutral element of multiplication that is compatible to this
object.
Returns: Comp. Function Vector with constant functions returning 1 and
scalars of value 1.
"""
funcs = [f.mul_neutral_element() for f in self.members["funcs"]]
scalar_constants = [1 for f in self.members["scalars"]]
neut = ComposedFunctionVector(funcs, scalar_constants)
return neut
def add_neutral_element(self):
"""
Create neutral element of addition that is compatible to this
object.
Returns: Comp. Function Vector with constant functions returning 0 and
scalars of value 0.
"""
funcs = [f.add_neutral_element() for f in self.members["funcs"]]
scalar_constants = [0 for f in self.members["scalars"]]
neut = ComposedFunctionVector(funcs, scalar_constants)
return neut
def _apply_operator(self, operator, additive=False):
"""
Return a new composed function vector with the given operator applied.
See docstring of :py:meth:`.BaseFraction._apply_operator`.
"""
funcs = [f._apply_operator(operator, additive=additive)
for f in self.members["funcs"]]
scalar_constants = [operator(s) for s in self.members["scalars"]]
new_obj = ComposedFunctionVector(funcs, scalar_constants)
return new_obj
class ConstantComposedFunctionVector(ComposedFunctionVector):
r"""
Constant composite function vector :math:`\boldsymbol{x}`.
.. math::
\boldsymbol{x} = \begin{pmatrix}
z \mapsto x_1(z) = c_1 \\
\vdots \\
z \mapsto x_n(z) = c_n \\
d_1 \\
\vdots \\
            d_m \\
\end{pmatrix}
Args:
func_constants (array-like): Constants for the functions.
scalar_constants (array-like): The scalar constants.
**func_kwargs: Keyword args that are passed to the ConstantFunction.
"""
def __init__(self, func_constants, scalar_constants, **func_kwargs):
func_constants = sanitize_input(func_constants, Number)
scalars = sanitize_input(scalar_constants, Number)
funcs = [ConstantFunction(c, **func_kwargs) for c in func_constants]
super().__init__(funcs, scalars)
class ApproximationBasis:
"""
Base class for an approximation basis.
An approximation basis is formed by some objects on which given distributed
variables may be projected.
"""
def scalar_product_hint(self):
"""
Hint that returns steps for scalar product calculation with elements of
this base.
Note:
Overwrite to implement custom functionality.
"""
raise NotImplementedError()
def function_space_hint(self):
"""
Hint that returns properties that characterize the functional
space of the fractions.
It can be used to determine if function spaces match.
Note:
Overwrite to implement custom functionality.
"""
raise NotImplementedError()
def is_compatible_to(self, other):
"""
        Helper function that checks compatibility between two approximation
bases.
In this case compatibility is given if the two bases live in the same
function space.
Args:
            other (:py:class:`.ApproximationBasis`): Approximation basis to
compare with.
Returns: True if bases match, False if they do not.
"""
return self.function_space_hint() == other.function_space_hint()
class Base(ApproximationBasis):
"""
Base class for approximation bases.
    In general, a :py:class:`.Base` is formed by a certain amount of
    :py:class:`.BaseFraction`'s and therefore forms a finite-dimensional subspace
of the distributed problem's domain. Most of the time, the user does not
need to interact with this class.
Args:
fractions (iterable of :py:class:`.BaseFraction`): List, array or
dict of :py:class:`.BaseFraction`'s
matching_base_lbls (list of str): List of labels from exactly matching
bases, for which no transformation is necessary.
Useful for transformations from bases that 'live' in
different function spaces but evolve with the same time
dynamic/coefficients (e.g. modal bases).
intermediate_base_lbls (list of str): If it is certain that this base
instance will be asked (as destination base) to return a
transformation to a source base, whose implementation is
cumbersome, its label can be provided here. This will trigger the
            generation of the transformation using built-in features.
            The algorithm, implemented in :py:func:`.get_weight_transformation`,
is then called again with the intermediate base as destination base
and the 'old' source base. With this technique arbitrary long
transformation chains are possible, if the provided intermediate
bases again define intermediate bases.
"""
def __init__(self, fractions,
matching_base_lbls=None, intermediate_base_lbls=None):
fractions = sanitize_input(fractions, BaseFraction)
# check type
base_space = fractions[0].function_space_hint()
if not all([frac.function_space_hint() == base_space
for frac in fractions]):
raise ValueError("Provided fractions must be compatible!")
self.fractions = fractions
self.matching_base_lbls = matching_base_lbls
if self.matching_base_lbls is None:
self.matching_base_lbls = []
if isinstance(self.matching_base_lbls, str):
self.matching_base_lbls = [self.matching_base_lbls]
self.intermediate_base_lbls = intermediate_base_lbls
if self.intermediate_base_lbls is None:
self.intermediate_base_lbls = []
if isinstance(self.intermediate_base_lbls, str):
self.intermediate_base_lbls = [self.intermediate_base_lbls]
def __iter__(self):
return iter(self.fractions)
def __len__(self):
return len(self.fractions)
def __getitem__(self, item):
return self.fractions[item]
@staticmethod
def _transformation_factory(info, equivalent=False):
mat = calculate_expanded_base_transformation_matrix(info.src_base,
info.dst_base,
info.src_order,
info.dst_order,
use_eye=equivalent)
def handle(weights):
return np.dot(mat, weights)
return handle
def transformation_hint(self, info):
"""
        Method that provides information about how to transform weights from
        one :py:class:`.BaseFraction` into another.
        In detail, this function has to return a callable, which will take the
weights of the source- and return the weights of the target system. It
may have keyword arguments for other data which is required to perform
the transformation. Information about these extra keyword arguments
should be provided in form of a dictionary whose keys are keyword
arguments of the returned transformation handle.
Note:
This implementation covers the most basic case, where the two
:py:class:`.BaseFraction`'s are of same type. For any other case it
will raise an exception. Overwrite this Method in your
implementation to support conversion between bases that differ from
yours.
Args:
info: :py:class:`.TransformationInfo`
Raises:
NotImplementedError:
Returns:
Transformation handle
"""
if info.src_lbl == info.dst_lbl:
# trivial case
return self._transformation_factory(info, equivalent=True), None
# check for matching bases
match_cond_src = (self is info.src_base
and info.dst_lbl in self.matching_base_lbls)
match_cond_dst = (self is info.dst_base
and info.src_lbl in self.matching_base_lbls)
if match_cond_src or match_cond_dst:
# bases are a match
if len(info.dst_base) != len(info.src_base):
msg = "Base length mismatch: len({})={} != len({})={}"
raise ValueError(msg.format(info.src_lbl, len(info.src_base),
info.dst_lbl, len(info.dst_base)))
if info.src_order >= info.dst_order:
# forward weights
return self._transformation_factory(info, True), None
# check for compatible base
compat_cond_src = (self is info.src_base
and self.is_compatible_to(info.dst_base))
compat_cond_dst = (self is info.dst_base
and self.is_compatible_to(info.src_base))
if compat_cond_src or compat_cond_dst:
# bases are compatible, use standard approach
return self._transformation_factory(info), None
if self.intermediate_base_lbls is not None:
# try intermediate bases
for inter_lbl in self.intermediate_base_lbls:
trafo, hint = self._get_intermediate_transform(info, inter_lbl)
if trafo is not None:
return trafo, hint
# No Idea what to do.
return None, None
def _get_intermediate_transform(self, info, inter_lbl):
if self is info.src_base:
# build trafo from us to middleman
intermediate_info = get_transformation_info(
info.src_lbl, inter_lbl,
info.src_order, info.src_order
)
handle = get_weight_transformation(intermediate_info)
if info.dst_lbl == inter_lbl:
                # middleman is the destination -> we are finished
return handle, None
# create hint from middleman to dst
hint = get_transformation_info(
inter_lbl, info.dst_lbl,
info.src_order, info.dst_order
)
return handle, hint
if self is info.dst_base:
# build trafo from middleman to us
intermediate_info = get_transformation_info(
inter_lbl, info.dst_lbl,
info.src_order, info.dst_order
)
handle = get_weight_transformation(intermediate_info)
if info.src_lbl == inter_lbl:
# middleman is the source -> we are finished
return handle, None
# create hint from src to middleman
hint = get_transformation_info(
info.src_lbl, inter_lbl,
info.src_order, info.src_order
)
return handle, hint
# No Idea what to do.
return None, None
def scalar_product_hint(self):
"""
Hint that returns steps for scalar product calculation with elements of
this base.
Note:
Overwrite to implement custom functionality.
"""
return self.fractions[0].scalar_product_hint()
def function_space_hint(self):
"""
Hint that returns properties that characterize the functional
space of the fractions.
It can be used to determine if function spaces match.
Note:
Overwrite to implement custom functionality.
"""
return self.fractions[0].function_space_hint()
def derive(self, order):
"""
Basic implementation of derive function.
Empty implementation, overwrite to use this functionality.
Args:
order (:class:`numbers.Number`): derivative order
Return:
:py:class:`.Base`: derived object
"""
if order == 0:
return self
else:
return self.__class__([f.derive(order) for f in self.fractions])
def scale(self, factor):
"""
Return a scaled instance of this base.
If factor is iterable, each element will be scaled independently.
Otherwise, a common scaling is applied to all fractions.
Args:
factor: Single factor or iterable of factors (float or callable) to
scale this base with.
"""
try:
if len(factor) != len(self.fractions):
raise ValueError("If factor is an iterable, its length has to"
"match the number of base fractions. "
"len(factor)={} but len(fractions)={}"
.format(len(factor), len(self.fractions)))
return self.__class__([
f.scale(s) for f, s in zip(self.fractions, factor)
])
except TypeError:
# factor is not iterable
if factor == 1:
return self
else:
return self.__class__([f.scale(factor) for f in self.fractions])
def raise_to(self, power):
"""
Factory method to obtain instances of this base, raised by the given power.
Args:
power: power to raise the basis onto.
"""
if power == 1:
return self
else:
raise ValueError("This funcionality is deprecated.")
def get_attribute(self, attr):
"""
Retrieve an attribute from the fractions of the base.
Args:
attr(str): Attribute to query the fractions for.
Returns:
:py:class:`np.ndarray`: Array of ``len(fractions)`` holding the
attributes. With `None` entries if the attribute is missing.
"""
return np.array([getattr(frac, attr, None) for frac in self.fractions])
class StackedBase(ApproximationBasis):
"""
Implementation of a basis vector that is obtained by stacking different
bases onto each other. This typically occurs when the bases of coupled
systems are joined to create a unified system.
Args:
base_info (OrderedDict): Dictionary with `base_label` as keys and
dictionaries holding information about the bases as values.
            In detail, this information must contain:
- sys_name (str): Name of the system the base is associated with.
- order (int): Highest temporal derivative order with which the
base shall be represented in the stacked base.
- base (:py:class:`.ApproximationBase`): The actual basis.
"""
def __init__(self, base_info):
self.base_lbls = []
self.system_names = []
self.orders = []
self._bases = []
self._cum_frac_idxs = [0]
self._cum_weight_idxs = [0]
for lbl, info in base_info.items():
# public properties
self.base_lbls.append(lbl)
self.system_names.append(info["sys_name"])
order = info["order"]
self.orders.append(order)
base = info["base"]
# internal properties
self._bases.append(base)
self._cum_frac_idxs.append(self._cum_frac_idxs[-1] + len(base))
self._cum_weight_idxs.append(self._cum_weight_idxs[-1]
+ (order + 1) * len(base))
self.fractions = np.concatenate([b.fractions for b in self._bases])
self._size = self._cum_frac_idxs.pop(-1)
self._weight_size = self._cum_weight_idxs.pop(-1)
def scalar_product_hint(self):
return NotImplemented
def function_space_hint(self):
return hash(self)
def is_compatible_to(self, other):
return False
def scale(self, factor):
raise NotImplementedError("Stacked base should not be scaled.")
def transformation_hint(self, info):
"""
        If *info.src_lbl* is a member, just return it, using the correct
derivative transformation, otherwise return `None`
Args:
info (:py:class:`.TransformationInfo`): Information about the
requested transformation.
Return:
transformation handle
"""
if info.src_order != 0:
# this can be implemented but is not really meaningful
return None, None
# we only know how to get from a stacked base to one of our parts
if info.src_base != self:
return None, None
if info.dst_lbl not in self.base_lbls:
return None, None
# check maximum available derivative order
dst_idx = self.base_lbls.index(info.dst_lbl)
init_src_ord = self.orders[dst_idx]
if info.dst_order > init_src_ord:
return None, None
# get transform
trans_mat = calculate_expanded_base_transformation_matrix(
info.dst_base,
info.dst_base,
init_src_ord,
info.dst_order,
use_eye=True)
start_idx = self._cum_weight_idxs[dst_idx]
length = (init_src_ord + 1) * len(self._bases[dst_idx])
def selection_func(weights):
assert len(weights) == self._weight_size
return trans_mat @ weights[start_idx: start_idx + length]
return selection_func, None
def domain_simplification(domain):
"""
Simplify a domain, given by possibly overlapping subdomains.
Args:
domain (set): Set of tuples, defining the (start, end) points of the
subdomains.
Returns:
list: Simplified domain.
"""
new_dom = set()
temp_dom = list()
# sort sub domains
for idx, sub_dom in enumerate(domain):
if sub_dom[0] > sub_dom[1]:
temp_dom.append(sub_dom[::-1])
else:
temp_dom.append(sub_dom)
# look for overlapping sub domains
for s_idx, start_dom in enumerate(temp_dom):
candidates = []
for e_idx, end_dom in enumerate(temp_dom):
if s_idx == e_idx:
continue
if start_dom[0] > end_dom[0]:
# second one starts earlier, postpone
continue
if start_dom[1] > end_dom[0]:
# two domains overlap
candidates.append(e_idx)
if not candidates:
continue
greatest_idx = candidates[np.argmax([temp_dom[idx][1]
for idx in candidates])]
if start_dom[1] >= temp_dom[greatest_idx][1]:
# the second domain is a real sub set of the first one
# save only the first
new_dom.add(start_dom)
else:
# second one goes further -> join them
new_dom.add((start_dom[0], temp_dom[greatest_idx][1]))
if new_dom and new_dom != domain:
return domain_simplification(new_dom)
else:
return set(temp_dom)
def domain_intersection(first, second):
"""
Calculate intersection(s) of two domains.
Args:
first (set): (Set of) tuples defining the first domain.
second (set): (Set of) tuples defining the second domain.
Return:
set: Intersection given by (start, end) tuples.
"""
if isinstance(first, tuple):
first = [first]
if isinstance(first, set):
first = list(first)
if isinstance(second, tuple):
second = [second]
if isinstance(second, set):
second = list(second)
intersection = set()
first_idx = 0
second_idx = 0
last_first_idx = 0
last_second_idx = 0
last_first_upper = None
last_second_upper = None
while first_idx < len(first) and second_idx < len(second):
# TODO remove interval and boundary checking? should be done before
if last_first_upper is not None and first_idx is not last_first_idx:
if last_first_upper >= first[first_idx][0]:
raise ValueError("Intervals not ordered!")
if last_second_upper is not None and second_idx is not last_second_idx:
if last_second_upper >= second[second_idx][0]:
raise ValueError("Intervals not ordered!")
if first[first_idx][0] > first[first_idx][1]:
raise ValueError("Interval boundaries given in wrong order")
if second[second_idx][0] > second[second_idx][1]:
raise ValueError("Interval boundaries given in wrong order")
# backup for interval order check
last_first_idx = first_idx
last_second_idx = second_idx
last_first_upper = first[first_idx][1]
last_second_upper = second[second_idx][1]
# no common domain -> search
if second[second_idx][0] <= first[first_idx][0] <= second[second_idx][1]:
# common start found in 1st domain
start = first[first_idx][0]
elif first[first_idx][0] <= second[second_idx][0] <= first[first_idx][1]:
# common start found in 2nd domain
start = second[second_idx][0]
else:
# intervals have no intersection
first_idx += 1
continue
# add end
if first[first_idx][1] <= second[second_idx][1]:
end = first[first_idx][1]
first_idx += 1
else:
end = second[second_idx][1]
second_idx += 1
# complete domain found
if not np.isclose(start, end):
intersection.add((start, end))
return intersection
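if __name__ == "__main__":
    # Small illustration of the two domain helpers above (only runs when this
    # file is executed directly): overlapping subdomains are merged and
    # intersections are clipped to the common part.
    print(domain_simplification({(0, 2), (1, 3)}))   # -> {(0, 3)}
    print(domain_intersection({(0, 2)}, {(1, 3)}))   # -> {(1, 2)}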
def integrate_function(func, interval):
"""
Numerically integrate a function on a given interval using
:func:`.complex_quadrature`.
Args:
func(callable): Function to integrate.
interval(list of tuples): List of (start, end) values of the intervals
to integrate on.
Return:
tuple: (Result of the Integration, errors that occurred during the
integration).
"""
result = 0
err = 0
for area in interval:
res = complex_quadrature(func, area[0], area[1])
result += res[0]
err += res[1]
return np.real_if_close(result), err
def complex_quadrature(func, a, b, **kwargs):
"""
    Wraps the scipy QUADPACK routines to handle complex-valued functions.
Args:
func (callable): function
a (:obj:`numbers.Number`): lower limit
b (:obj:`numbers.Number`): upper limit
        **kwargs: Arbitrary keyword arguments for the underlying :py:func:`scipy.integrate.quad` calls.
Return:
tuple: (real part, imaginary part)
"""
def real_func(x):
return np.real(func(x))
def imag_func(x):
return np.imag(func(x))
real_integral = integrate.quad(real_func, a, b, **kwargs)
imag_integral = integrate.quad(imag_func, a, b, **kwargs)
return (real_integral[0] + 1j * imag_integral[0],
real_integral[1] + imag_integral[1])
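if __name__ == "__main__":
    # Illustration (only runs when this file is executed directly): the
    # integral of exp(1j*z) over [0, pi] equals 2j; the second return value
    # collects the error estimates of both quad calls.
    demo_value, demo_error = complex_quadrature(lambda z: np.exp(1j * z),
                                                0, np.pi)
    print(demo_value)   # approximately 2j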
def dot_product(first, second):
"""
Calculate the inner product of two vectors. Uses numpy.inner but with
complex conjugation of the second argument.
Args:
first (:obj:`numpy.ndarray`): first vector
second (:obj:`numpy.ndarray`): second vector
Return:
inner product
"""
return np.inner(first, np.conj(second))
def dot_product_l2(first, second):
r"""
Calculate the inner product on L2.
Given two functions :math:`\varphi(z)` (*first*) and :math:`\psi(z)`
(*second*) this functions calculates
.. math::
\left< \varphi(z) | \psi(z) \right> =
\int\limits_{\Gamma_0}^{\Gamma_1}
\varphi(\zeta) \bar\psi(\zeta) \,\textup{d}\zeta \:.
Args:
first (:py:class:`.Function`): first function :math:`\varphi(z)`
second (:py:class:`.Function`): second function :math:`\psi(z)`
Return:
inner product
"""
if not isinstance(first, Function) or not isinstance(second, Function):
raise TypeError("Wrong type(s) supplied. both must be a {0}".format(Function))
if not first.domain == second.domain:
raise ValueError("Domains of arguments must be identical, "
"but {} and {} were given".format(first.domain,
second.domain))
nonzero = domain_intersection(first.nonzero, second.nonzero)
areas = domain_intersection(first.domain, nonzero)
if 0:
# TODO let Function Class handle product to gain more speed
if type(first) is type(second):
pass
# standard case
def func(z):
"""
Take the complex conjugate of the second element and multiply it
by the first.
"""
return first(z) * np.conj(second(z))
for area in areas:
test_point = area[0]
if test_point == -np.inf:
test_point = area[1]
if test_point == np.inf:
test_point = 0
first_num = first(test_point)
if np.iscomplexobj(first_num):
warnings.warn(
"The built-in l2 dot product (dot_product_l2) of pyinduct no \n"
"longer takes complex conjugation of the first argument but "
"of the second."
)
result, error = integrate_function(func, areas)
return result
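if __name__ == "__main__":
    # Illustration using the Function class defined above: L2 inner products
    # of sin and cos on [0, pi].
    demo_phi = Function(np.sin, domain=(0, np.pi))
    demo_psi = Function(np.cos, domain=(0, np.pi))
    print(dot_product_l2(demo_phi, demo_phi))   # approximately pi/2
    print(dot_product_l2(demo_phi, demo_psi))   # approximately 0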
def vectorize_scalar_product(first, second, scalar_product):
r"""
Call the given :code:`scalar_product` in a loop for the arguments
    in :code:`first` and :code:`second`.
Given two vectors of functions
.. math::
\boldsymbol{\varphi}(z)
= \left(\varphi_0(z), \dotsc, \varphi_N(z)\right)^T
and
.. math::
        \boldsymbol{\psi}(z) = \left(\psi_0(z), \dotsc, \psi_N(z)\right)^T ,
this function computes
:math:`\left< \boldsymbol{\varphi}(z) | \boldsymbol{\psi}(z) \right>_{L2}`
where
.. math::
\left< \varphi_i(z) | \psi_j(z) \right>_{L2} =
\int\limits_{\Gamma_0}^{\Gamma_1}
\bar\varphi_i(\zeta) \psi_j(\zeta) \,\textup{d}\zeta \:.
Herein, :math:`\bar\varphi_i(\zeta)` denotes the complex conjugate and
:math:`\Gamma_0` as well as :math:`\Gamma_1` are derived by computing the
intersection of the nonzero areas of the involved functions.
Args:
first (callable or :obj:`numpy.ndarray`): (1d array of n) callable(s)
second (callable or :obj:`numpy.ndarray`): (1d array of n) callable(s)
Raises:
ValueError, if the provided arrays are not equally long.
Return:
numpy.ndarray: Array of inner products
"""
# sanitize input
first = np.atleast_1d(first)
second = np.atleast_1d(second)
try:
iter(scalar_product)
except TypeError:
scalar_product = (scalar_product, )
if len(first) != len(second):
raise ValueError("Provided function vectors must be of same length.")
# calculate output size and allocate output
out = np.zeros(first.shape, dtype=complex)
# TODO propagate vectorization into _dot_product_l2 to save this loop
# loop over entries
for idx, (f, s) in enumerate(zip(first, second)):
for m_idx, scal_prod in enumerate(scalar_product):
out[idx] += scal_prod(f.get_member(m_idx), s.get_member(m_idx))
return np.real_if_close(out)
def calculate_scalar_matrix(values_a, values_b):
"""
    Convenience version of :py:func:`calculate_scalar_product_matrix` with :py:func:`numpy.multiply` hardcoded as
*scalar_product_handle*.
Args:
values_a (numbers.Number or numpy.ndarray): (array of) value(s) for rows
values_b (numbers.Number or numpy.ndarray): (array of) value(s) for columns
Return:
numpy.ndarray: Matrix containing the pairwise products of the elements from *values_a* and *values_b*.
"""
return calculate_scalar_product_matrix(sanitize_input(values_a, Number),
sanitize_input(values_b, Number),
np.multiply)
def calculate_scalar_product_matrix(base_a, base_b, scalar_product=None,
optimize=True):
r"""
Calculates a matrix :math:`A` , whose elements are the scalar products of
each element from *base_a* and *base_b*, so that
:math:`a_{ij} = \langle \mathrm{a}_i\,,\: \mathrm{b}_j\rangle`.
Args:
base_a (:py:class:`.ApproximationBase`): Basis a
base_b (:py:class:`.ApproximationBase`): Basis b
scalar_product: (List of) function objects that are passed the members
of the given bases as pairs. Defaults to the scalar product given by
`base_a`
optimize (bool): Switch to turn on the symmetry based speed up.
For development purposes only.
Return:
numpy.ndarray: matrix :math:`A`
"""
if not base_a.is_compatible_to(base_b):
raise TypeError("Bases must be from the same type.")
if scalar_product is None:
scalar_product = base_a.scalar_product_hint()
fractions_a = base_a.fractions
fractions_b = base_b.fractions
if optimize and base_a == base_b:
        # since the scalar_product commutes we can save some operations
dim = fractions_a.shape[0]
        output = np.zeros((dim, dim), dtype=complex)
i, j = np.mgrid[0:dim, 0:dim]
# compute only upper half
upper_idxs = np.triu_indices(dim)
i_upper = i[upper_idxs]
j_upper = j[upper_idxs]
output[upper_idxs] = vectorize_scalar_product(fractions_a[i_upper],
fractions_a[j_upper],
scalar_product)
# reconstruct using symmetry
output += np.conjugate(np.triu(output, 1)).T
return np.real_if_close(output)
else:
i, j = np.mgrid[0:fractions_a.shape[0],
0:fractions_b.shape[0]]
fractions_i = fractions_a[i]
fractions_j = fractions_b[j]
res = vectorize_scalar_product(fractions_i.flatten(),
fractions_j.flatten(),
scalar_product)
return res.reshape(fractions_i.shape)
def project_on_base(state, base):
"""
Projects a *state* on a basis given by *base*.
Args:
state (array_like): List of functions to approximate.
base (:py:class:`.ApproximationBase`): Basis to project onto.
Return:
numpy.ndarray: Weight vector in the given *base*
"""
if not isinstance(base, ApproximationBasis):
raise TypeError("Projection only possible on approximation bases.")
# compute <x(z, t), phi_i(z)> (vector)
projections = calculate_scalar_product_matrix(base.__class__(state),
base)
# compute <phi_i(z), phi_j(z)> for 0 < i, j < n (matrix)
scale_mat = calculate_scalar_product_matrix(base, base)
res = np.linalg.inv(scale_mat) @ projections.T
return np.reshape(res, (scale_mat.shape[0],))
def project_on_bases(states, canonical_equations):
"""
Convenience wrapper for :py:func:`.project_on_base`.
Calculate the state, assuming it will be constituted by the dominant
base of the respective system. The keys from the dictionaries
*canonical_equations* and *states* must be the same.
Args:
states: Dictionary with a list of functions as values.
canonical_equations: List of :py:class:`.CanonicalEquation` instances.
Returns:
numpy.array: Finite dimensional state as 1d-array corresponding to the
concatenated dominant bases from *canonical_equations*.
"""
q0 = np.array([])
for ce in canonical_equations:
lbl = ce.dominant_lbl
q0 = np.hstack(tuple([q0] + [project_on_base(state, get_base(lbl))
for state in states[ce.name]]))
return q0
def back_project_from_base(weights, base):
"""
Build evaluation handle for a distributed variable that was approximated
    as a set of *weights* on a certain *base*.
Args:
weights (numpy.ndarray): Weight vector.
base (:py:class:`.ApproximationBase`): Base to be used for the projection.
Return:
evaluation handle
"""
if isinstance(weights, Number):
weights = np.asarray([weights])
if weights.shape[0] != base.fractions.shape[0]:
raise ValueError("Lengths of weights and initial_initial_functions "
"do not match!")
def eval_handle(z):
res = sum([weights[i] * base.fractions[i](z)
for i in range(weights.shape[0])])
return real(res)
return eval_handle
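if __name__ == "__main__":
    # Illustration: project x(z) = 2*z + 1 onto the basis spanned by {1, z}
    # on [0, 1] and rebuild an evaluation handle from the obtained weights.
    demo_base = Base([ConstantFunction(1, domain=(0, 1)),
                      Function(lambda z: z, domain=(0, 1))])
    demo_state = [Function(lambda z: 2 * z + 1, domain=(0, 1))]
    demo_weights = project_on_base(demo_state, demo_base)
    print(demo_weights)   # approximately [1, 2]
    x_approx = back_project_from_base(demo_weights, demo_base)
    # once the module is fully loaded, x_approx(0.5) evaluates to roughly 2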
def change_projection_base(src_weights, src_base, dst_base):
"""
Converts given weights that form an approximation using *src_base*
to the best possible fit using *dst_base*.
Args:
src_weights (numpy.ndarray): Vector of numbers.
src_base (:py:class:`.ApproximationBase`): The source Basis.
dst_base (:py:class:`.ApproximationBase`): The destination Basis.
Return:
:obj:`numpy.ndarray`: target weights
"""
pro_mat = calculate_base_transformation_matrix(src_base, dst_base)
return project_weights(pro_mat, src_weights)
def project_weights(projection_matrix, src_weights):
"""
Project *src_weights* on new basis using the provided *projection_matrix*.
Args:
projection_matrix (:py:class:`numpy.ndarray`): projection between
the source and the target basis;
dimension (m, n)
src_weights (:py:class:`numpy.ndarray`): weights in the source basis;
dimension (1, m)
Return:
:py:class:`numpy.ndarray`: weights in the target basis;
dimension (1, n)
"""
src_weights = sanitize_input(src_weights, Number)
return np.dot(projection_matrix, src_weights)
class TransformationInfo:
"""
Structure that holds information about transformations between different
bases.
This class serves as an easy to use structure to aggregate information,
describing transformations between different
    :py:class:`.BaseFraction` s. It can be tested for equality to check the
    equivalence of transformations and is hashable,
    which makes it usable as a dictionary key to cache different transformations.
Attributes:
src_lbl(str): label of source basis
        dst_lbl(str): label of destination basis
src_base(:obj:`numpy.ndarray`): source basis in form of an array of
the source Fractions
dst_base(:obj:`numpy.ndarray`): destination basis in form of an
array of the destination Fractions
src_order: available temporal derivative order of source weights
dst_order: needed temporal derivative order for destination weights
"""
def __init__(self):
self.src_lbl = None
self.dst_lbl = None
self.src_base = None
self.dst_base = None
self.src_order = None
self.dst_order = None
def as_tuple(self):
return self.src_lbl, self.dst_lbl, self.src_order, self.dst_order
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
if not isinstance(other, TransformationInfo):
raise TypeError("Unknown type to compare with")
return self.as_tuple() == other.as_tuple()
def mirror(self):
"""
Factory method, that creates a new TransformationInfo object by
mirroring *src* and *dst* terms.
This helps handling requests to different bases.
"""
new_info = TransformationInfo()
new_info.src_lbl = self.dst_lbl
new_info.src_base = self.dst_base
new_info.src_order = self.dst_order
new_info.dst_lbl = self.src_lbl
new_info.dst_base = self.src_base
new_info.dst_order = self.src_order
return new_info
def get_weight_transformation(info):
"""
    Create a handle that will transform weights from *info.src_base* into
    weights for *info.dst_base* while respecting the given derivative
    orders.
This is accomplished by recursively iterating through source and
destination bases and evaluating their :attr:`transformation_hints`.
Args:
info(:py:class:`.TransformationInfo`): information about the requested
transformation.
Return:
callable: transformation function handle
"""
# TODO since this lives in core now, get rid of base labels
# try to get help from the destination base
handle, hint = info.dst_base.transformation_hint(info)
if handle is None:
# try source instead
handle, hint = info.src_base.transformation_hint(info)
if handle is None:
raise TypeError(
("get_weight_transformation(): \n"
"You requested information about how to transform to '{0}'({1}) \n"
"from '{3}'({4}), furthermore the source derivative order is \n"
"{2} and the target one is {5}. No transformation could be \n"
"found, remember to implement your own 'transformation_hint' \n"
"method for non-standard bases.").format(
info.dst_lbl,
info.dst_base.__class__.__name__,
info.dst_order,
info.src_lbl,
info.src_base.__class__.__name__,
info.src_order,
))
# check termination criterion
if hint is None:
# direct transformation possible
return handle
kwargs = {}
new_handle = None
if hasattr(hint, "extras"):
# try to gain transformations that will satisfy the extra terms
for dep_lbl, dep_order in hint.extras.items():
new_info = copy(info)
new_info.dst_lbl = dep_lbl
new_info.dst_base = get_base(dep_lbl)
new_info.dst_order = dep_order
dep_handle = get_weight_transformation(new_info)
kwargs[dep_lbl] = dep_handle
if hint.src_lbl is not None:
# transformation to assistant system required
new_handle = get_weight_transformation(hint)
def last_handle(weights):
if new_handle:
return handle(new_handle(weights), **kwargs)
else:
return handle(weights, **kwargs)
return last_handle
def get_transformation_info(source_label, destination_label,
source_order=0, destination_order=0):
"""
    Assemble the information describing a weight transformation from a
    source base to a destination base.
Args:
source_label (str): Label from the source base.
destination_label (str): Label from the destination base.
source_order: Order from the available time derivative
of the source weights.
destination_order: Order from the desired time derivative
of the destination weights.
Returns:
:py:class:`.TransformationInfo`: Transformation info object.
"""
info = TransformationInfo()
info.src_lbl = source_label
info.src_base = get_base(info.src_lbl)
info.src_order = source_order
info.dst_lbl = destination_label
info.dst_base = get_base(info.dst_lbl)
info.dst_order = destination_order
return info
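# Hedged usage sketch for the two helpers above: build a TransformationInfo for
# two labels and obtain a handle that maps source weights (including their first
# temporal derivative) onto plain destination weights. The labels "src" and
# "dst" are placeholders and assume that matching bases were registered with the
# package's base registry beforehand; the helper is illustrative and never called.
def _example_weight_transformation(src_weights):
    info = get_transformation_info("src", "dst",
                                    source_order=1, destination_order=0)
    handle = get_weight_transformation(info)
    return handle(src_weights)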
def calculate_expanded_base_transformation_matrix(src_base, dst_base,
src_order, dst_order,
use_eye=False):
r"""
Constructs a transformation matrix :math:`\bar V` from basis given by
*src_base* to basis given by *dst_base* that also transforms all temporal
derivatives of the given weights.
See:
:py:func:`.calculate_base_transformation_matrix` for further details.
Args:
dst_base (:py:class:`.ApproximationBase`): New projection base.
src_base (:py:class:`.ApproximationBase`): Current projection base.
src_order: Temporal derivative order available in *src_base*.
dst_order: Temporal derivative order needed in *dst_base*.
use_eye (bool): Use identity as base transformation matrix.
(For easy selection of derivatives in the same base)
Raises:
ValueError: If destination needs a higher derivative order than source
can provide.
Return:
:obj:`numpy.ndarray`: Transformation matrix
"""
if src_order < dst_order:
raise ValueError(("higher 'dst_order'({0}) demanded than "
+ "'src_order'({1}) can provide for this strategy."
"").format(dst_order, src_order))
# build core transformation
if use_eye:
core_transformation = np.eye(src_base.fractions.size)
else:
core_transformation = calculate_base_transformation_matrix(src_base,
dst_base)
# build block matrix
part_transformation = block_diag(*[core_transformation
for i in range(dst_order + 1)])
complete_transformation = np.hstack([part_transformation]
+ [np.zeros((part_transformation.shape[0],
src_base.fractions.size))
for i in range(src_order - dst_order)])
return complete_transformation
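# Hedged shape illustration for the expanded transformation above: with
# use_eye=True the core matrix is an identity of size n = base.fractions.size,
# so for src_order=2 and dst_order=1 the result is the block matrix
# [[I, 0, 0], [0, I, 0]] of shape ((dst_order + 1) * n, (src_order + 1) * n).
# Purely illustrative, not called anywhere in this module.
def _example_expanded_transformation_shape(base):
    mat = calculate_expanded_base_transformation_matrix(
        base, base, src_order=2, dst_order=1, use_eye=True)
    return mat.shape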
def calculate_base_transformation_matrix(src_base, dst_base, scalar_product=None):
"""
    Calculates the transformation matrix :math:`V` , so that a
    set of weights, describing a function in the
    *src_base*, will express the same function in the *dst_base*, while
    minimizing the reprojection error.
    A quadratic error is used as the error norm in this case.
Warning:
This method assumes that all members of the given bases have
the same type and that their
:py:class:`.BaseFraction` s, define compatible scalar products.
Raises:
TypeError: If given bases do not provide an
:py:func:`.scalar_product_hint` method.
Args:
src_base (:py:class:`.ApproximationBase`): Current projection base.
dst_base (:py:class:`.ApproximationBase`): New projection base.
scalar_product (list of callable): Callbacks for product calculation.
Defaults to `scalar_product_hint` from `src_base`.
Return:
:py:class:`numpy.ndarray`: Transformation matrix :math:`V` .
"""
if not src_base.is_compatible_to(dst_base):
raise TypeError("Source and destination base must be from the same "
"type.")
p_mat = calculate_scalar_product_matrix(dst_base, src_base, scalar_product)
q_mat = calculate_scalar_product_matrix(dst_base, dst_base, scalar_product)
# compute V matrix, where V = inv(Q)*P
v_mat = np.dot(np.linalg.inv(q_mat), p_mat)
return v_mat
def normalize_base(b1, b2=None, mode="right"):
r"""
    Takes two :py:class:`.ApproximationBase`'s :math:`\boldsymbol{b}_1` ,
    :math:`\boldsymbol{b}_2` and normalizes them so that
:math:`\langle\boldsymbol{b}_{1i}\,
,\:\boldsymbol{b}_{2i}\rangle = 1`.
If only one base is given, :math:`\boldsymbol{b}_2`
defaults to :math:`\boldsymbol{b}_1`.
Args:
b1 (:py:class:`.ApproximationBase`): :math:`\boldsymbol{b}_1`
b2 (:py:class:`.ApproximationBase`): :math:`\boldsymbol{b}_2`
mode (str): If *mode* is
* *right* (default): b2 will be scaled
* *left*: b1 will be scaled
* *both*: b1 and b2 will be scaled
Raises:
ValueError: If :math:`\boldsymbol{b}_1`
and :math:`\boldsymbol{b}_2` are orthogonal.
Return:
:py:class:`.ApproximationBase` : if *b2* is None,
otherwise: Tuple of 2 :py:class:`.ApproximationBase`'s.
Examples:
Consider the following two bases with only one finite
dimensional vector/fraction
>>> import pyinduct as pi
>>> b1 = pi.Base(pi.ComposedFunctionVector([], [2]))
>>> b2 = pi.Base(pi.ComposedFunctionVector([], [2j]))
depending on the *mode* kwarg the result of the normalization
>>> from pyinduct.core import generic_scalar_product
... def print_normalized_bases(mode):
... b1n, b2n = pi.normalize_base(b1, b2, mode=mode)
... print("b1 normalized: ", b1n[0].get_member(0))
... print("b2 normalized: ", b2n[0].get_member(0))
... print("dot product: ", generic_scalar_product(b1n, b2n))
is different by means of the normalized base *b1n* and *b2n*
but coincides by the value of dot product:
>>> print_normalized_bases("right")
... # b1 normalized: 2
... # b2 normalized: (0.5-0j)
... # dot product: [1.]
>>> print_normalized_bases("left")
... # b1 normalized: (-0+0.5j)
... # b2 normalized: 2j
... # dot product: [1.]
>>> print_normalized_bases("both")
... # b1 normalized: (0.7071067811865476+0.7071067811865476j)
... # b2 normalized: (0.7071067811865476+0.7071067811865476j)
... # dot product: [1.]
"""
res = generic_scalar_product(b1, b2)
if any(np.abs(res) < np.finfo(float).eps):
raise ValueError("Given base fractions are orthogonal, "
"no normalization possible.")
if b2 is None:
scale_factors = np.real_if_close(np.sqrt(1 / res.astype(complex)))
return b1.scale(scale_factors)
# test provided scalar product
factor = 1 + 1j
conj_factor = np.conj(factor)
sc_1c = generic_scalar_product(b1.scale(factor), b2)
sc_2c = generic_scalar_product(b1, b2.scale(factor))
if np.isclose(sc_1c, factor * res).all() \
and np.isclose(sc_2c, conj_factor * res).all():
variant = "second_conjugated"
elif np.isclose(sc_1c, conj_factor * res).all() \
and np.isclose(sc_2c, factor * res).all():
variant = "first_conjugated"
else:
        raise ValueError("Provided bases define an irregular scalar product.")
# compute scaling
scale_factors = 1 / res
if mode == "both":
scale_factors = np.real_if_close(np.sqrt(scale_factors.astype(complex)))
if variant == "first_conjugated":
scale_factors = np.conj(scale_factors)
# scale the bases
if mode == "left":
ret1 = b1.scale(scale_factors)
ret2 = b2
elif mode == "right":
ret1 = b1
ret2 = b2.scale(np.conj(scale_factors))
elif mode == "both":
ret1 = b1.scale(scale_factors)
ret2 = b2.scale(np.conj(scale_factors))
else:
raise ValueError("Unknown mode '{}'".format(mode))
return ret1, ret2
def generic_scalar_product(b1, b2=None, scalar_product=None):
"""
Calculates the pairwise scalar product between the elements
of the :py:class:`.ApproximationBase` *b1* and *b2*.
Args:
b1 (:py:class:`.ApproximationBase`): first basis
b2 (:py:class:`.ApproximationBase`): second basis, if omitted
defaults to *b1*
scalar_product (list of callable): Callbacks for product calculation.
Defaults to `scalar_product_hint` from `b1`.
Note:
If *b2* is omitted, the result can be used to normalize
*b1* in terms of its scalar product.
"""
if b2 is None:
b2 = b1
if type(b1) != type(b2):
raise TypeError("only arguments of same type allowed.")
if scalar_product is None:
scalar_product = b1.scalar_product_hint()
res = vectorize_scalar_product(b1, b2, scalar_product)
return np.real_if_close(res)
def find_roots(function, grid, n_roots=None, rtol=1.e-5, atol=1.e-8,
cmplx=False, sort_mode="norm"):
r"""
Searches *n_roots* roots of the *function* :math:`f(\boldsymbol{x})`
on the given *grid* and checks them for uniqueness with aid of *rtol*.
In Detail :py:func:`scipy.optimize.root` is used to find initial candidates
for roots of :math:`f(\boldsymbol{x})` . If a root satisfies the criteria
given by atol and rtol it is added. If it is already in the list,
a comprehension between the already present entries' error and the
current error is performed. If the newly calculated root comes
with a smaller error it supersedes the present entry.
Raises:
ValueError: If the demanded amount of roots can't be found.
Args:
        function (callable): Function handle for :math:`f(\boldsymbol{x})`
whose roots shall be found.
grid (list): Grid to use as starting point for root detection.
The :math:`i` th element of this list provides sample points
for the :math:`i` th parameter of :math:`\boldsymbol{x}` .
n_roots (int): Number of roots to find. If none is given, return
all roots that could be found in the given area.
rtol: Tolerance to be exceeded for the difference of two roots
to be unique: :math:`f(r1) - f(r2) > \textrm{rtol}` .
atol: Absolute tolerance to zero: :math:`f(x^0) < \textrm{atol}` .
cmplx(bool): Set to True if the given *function* is complex valued.
        sort_mode(str): Specify the order in which the extracted roots shall be
sorted. Default "norm" sorts entries by their :math:`l_2` norm,
while "component" will sort them in increasing order by every
component.
Return:
numpy.ndarray of roots; sorted in the order they are returned by
:math:`f(\boldsymbol{x})` .
"""
if isinstance(grid[0], Number):
grid = [grid]
dim = len(grid)
if cmplx:
assert dim == 2
function = complex_wrapper(function)
roots = []
errors = []
grids = np.meshgrid(*[row for row in grid])
values = np.vstack([arr.flatten() for arr in grids]).T
# iterate over test_values
val = iter(values)
while True:
try:
res = root(function, next(val), tol=atol)
except StopIteration:
break
if not res.success:
continue
calculated_root = np.atleast_1d(res.x)
error = np.linalg.norm(res.fun)
# check for absolute tolerance
if error > atol:
continue
# check if root lies in expected area
abort = False
for rt, ar in zip(calculated_root, grid):
if ar.min() - atol > rt or ar.max() + atol < rt:
abort = True
break
if abort:
continue
if roots:
# check whether root is already present in cache
cmp_arr = np.isclose(calculated_root, roots, atol=rtol)
cmp_vec = [all(elements) for elements in cmp_arr]
if any(cmp_vec):
idx = cmp_vec.index(True)
if errors[idx] > error:
roots[idx] = calculated_root
errors[idx] = error
# TODO check jacobian (if provided)
# to identify roots of higher order
continue
roots.append(calculated_root)
errors.append(error)
if n_roots is None:
n_roots = len(roots)
if n_roots == 0:
# Either no roots have been found or zero roots have been requested
return np.array([])
if len(roots) < n_roots:
raise ValueError("Insufficient number of roots detected. ({0} < {1}) "
"Check provided function (see `visualize_roots`) or "
"try to increase the search area.".format(
len(roots), n_roots))
valid_roots = np.array(roots)
# sort roots
if sort_mode == "norm":
# sort entries by their norm
idx = np.argsort(np.linalg.norm(valid_roots, axis=1))
sorted_roots = valid_roots[idx, :]
elif sort_mode == "component":
# completely sort first column before we start
idx = np.argsort(valid_roots[:, 0])
sorted_roots = valid_roots[idx, :]
for layer in range(valid_roots.shape[1] - 1):
for rt in sorted_roots[:, layer]:
eq_mask = np.isclose(sorted_roots[:, layer], rt, rtol=rtol)
idx = np.argsort(sorted_roots[eq_mask, layer + 1])
sorted_roots[eq_mask] = sorted_roots[eq_mask][idx, :]
else:
raise ValueError("Sort mode: {} not supported.".format(sort_mode))
good_roots = sorted_roots[:n_roots]
if cmplx:
return good_roots[:, 0] + 1j * good_roots[:, 1]
if dim == 1:
return good_roots.flatten()
return good_roots
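# Hedged usage sketch for find_roots: search the first three roots of the sine
# function on [0, 10]; sorted by their norm they should come out near 0, pi and
# 2*pi. Wrapped in an uncalled helper so importing the module has no side effects.
def _example_find_sine_roots():
    grid = [np.linspace(0, 10, 30)]
    return find_roots(np.sin, grid, n_roots=3)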
def complex_wrapper(func):
"""
Wraps complex valued functions into two-dimensional functions.
This enables the root-finding routine to handle it as a
vectorial function.
Args:
func (callable): Callable that returns a complex result.
Return:
        two-dimensional, callable: function handle,
        taking x = (re(x), im(x)) and returning [re(func(x)), im(func(x))].
"""
def wrapper(x):
        val = func(complex(x[0], x[1]))
return np.array([np.real(val),
np.imag(val)])
return wrapper
class Parameters:
"""
Handy class to collect system parameters.
    This class can be instantiated with a dict, whose keys will
    become attributes of the object.
(Bunch approach)
Args:
kwargs: parameters
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
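# Minimal illustration of the bunch-style container above: every keyword
# argument simply becomes an attribute. Illustrative only, not used elsewhere.
def _example_parameters():
    param_set = Parameters(alpha=1.0, n_points=10)
    return param_set.alpha, param_set.n_points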
class Domain(object):
"""
Helper class that manages ranges for data evaluation, containing
parameters.
Args:
bounds (tuple): Interval bounds.
num (int): Number of points in interval.
step (numbers.Number): Distance between points (if homogeneous).
points (array_like): Points themselves.
Note:
If num and step are given, num will take precedence.
"""
def __init__(self, bounds=None, num=None, step=None, points=None):
if points is not None:
# check for correct boundaries
if bounds and not all(bounds == points[[0, -1]]):
raise ValueError("Given 'bounds' don't fit the provided data.")
# check for correct length
if num is not None and len(points) != num:
raise ValueError("Given 'num' doesn't fit the provided data.")
# points are given, easy one
self._values = np.atleast_1d(points)
self._limits = (self._values.min(), self._values.max())
self._num = self._values.size
# check for evenly spaced entries
if self._num > 1:
steps = np.diff(self._values)
equal_steps = np.allclose(steps, steps[0])
if step:
if not equal_steps or step != steps[0]:
raise ValueError("Given 'step' doesn't fit the provided "
"data.")
else:
if equal_steps:
step = steps[0]
else:
step = np.nan
self._step = step
elif bounds and num:
self._limits = bounds
self._num = num
self._values, self._step = np.linspace(bounds[0],
bounds[1],
num,
retstep=True)
if step is not None and not np.isclose(self._step, step):
raise ValueError("could not satisfy both redundant "
"requirements for num and step!")
elif bounds and step:
self._limits = bounds
# calculate number of needed points but save correct step size
self._num = int((bounds[1] - bounds[0]) / step + 1.5)
self._values, self._step = np.linspace(bounds[0],
bounds[1],
self._num,
retstep=True)
if np.abs(step - self._step)/self._step > 1e-1:
warnings.warn("desired step-size {} doesn't fit to given "
"interval, changing to {}".format(step,
self._step))
else:
raise ValueError("not enough arguments provided!")
# mimic some ndarray properties
self.shape = self._values.shape
self.view = self._values.view
def __repr__(self):
return "Domain(bounds={}, step={}, num={})".format(self.bounds,
self._step,
self._num)
def __len__(self):
return len(self._values)
def __getitem__(self, item):
return self._values[item]
@property
def step(self):
return self._step
@property
def bounds(self):
return self._limits
@property
def points(self):
return self._values
@property
def ndim(self):
return self._values.ndim
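# Hedged illustration of the Domain helper: eleven evenly spaced points on
# [0, 1] yield a step size of 0.1, mirroring the linspace call in __init__.
# Illustrative only, not used elsewhere in the module.
def _example_domain():
    dom = Domain(bounds=(0, 1), num=11)
    return dom.points, dom.step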
def real(data):
"""
Check if the imaginary part of :code:`data` vanishes
and return its real part if it does.
Args:
data (numbers.Number or array_like): Possibly complex data to check.
Raises:
ValueError: If provided data can't be converted within
the given tolerance limit.
Return:
numbers.Number or array_like: Real part of :code:`data`.
"""
candidates = np.real_if_close(data, tol=100)
if candidates.dtype == 'complex':
raise ValueError("Imaginary part does not vanish, "
+ "check for implementation errors.")
# TODO make numpy array to common data type (even for scalar values)
if candidates.size == 1:
return float(candidates)
return candidates
class EvalData:
"""
This class helps managing any kind of result data.
The data gained by evaluation of a function is stored together with the
corresponding points of its evaluation. This way all data needed for
plotting or other postprocessing is stored in one place.
Next to the points of the evaluation the names and units of the included
axes can be stored.
After initialization an interpolator is set up, so that one can interpolate
in the result data by using the overloaded :py:meth:`__call__` method.
Args:
input_data: (List of) array(s) holding the axes of a regular grid on
which the evaluation took place.
output_data: The result of the evaluation.
Keyword Args:
input_labels: (List of) labels for the input axes.
input_units: (List of) units for the input axes.
name: Name of the generated data set.
fill_axes: If the dimension of `output_data` is higher than the
length of the given `input_data` list, dummy entries will be
appended until the required dimension is reached.
enable_extrapolation (bool): If True, internal interpolators will allow
            extrapolation. Otherwise, the last given value will be repeated for
1D cases and the result will be padded with zeros for cases > 1D.
fill_value: If invalid data is encountered, it will be replaced with
this value before interpolation is performed.
Examples:
When instantiating 1d EvalData objects, the list can be omitted
>>> axis = Domain((0, 10), 5)
>>> data = np.random.rand(5,)
>>> e_1d = EvalData(axis, data)
For other cases, input_data has to be a list
>>> axis1 = Domain((0, 0.5), 5)
>>> axis2 = Domain((0, 1), 11)
>>> data = np.random.rand(5, 11)
>>> e_2d = EvalData([axis1, axis2], data)
Adding two Instances (if the boundaries fit, the data will be
interpolated on the more coarse grid.) Same goes for subtraction and
multiplication.
>>> e_1 = EvalData(Domain((0, 10), 5), np.random.rand(5,))
>>> e_2 = EvalData(Domain((0, 10), 10), 100*np.random.rand(5,))
>>> e_3 = e_1 + e_2
>>> e_3.output_data.shape
(5,)
Interpolate in the output data by calling the object
        >>> e_4 = EvalData(np.array(range(5)), 2*np.array(range(5)))
>>> e_4.output_data
array([0, 2, 4, 6, 8])
>>> e_5 = e_4([2, 5])
>>> e_5.output_data
array([4, 8])
>>> e_5.output_data.size
2
one may also give a slice
>>> e_6 = e_4(slice(1, 5, 2))
>>> e_6.output_data
array([2., 6.])
>>> e_5.output_data.size
2
For multi-dimensional interpolation a list has to be provided
        >>> e_7 = e_2d([[.1, .5], [.3, .4, .7]])
>>> e_7.output_data.shape
(2, 3)
"""
def __init__(self, input_data, output_data,
input_labels=None, input_units=None,
enable_extrapolation=False,
fill_axes=False, fill_value=None,
name=None):
# check type and dimensions
if isinstance(input_data, np.ndarray) and input_data.ndim == 1:
# accept single array for single dimensional input
input_data = [input_data]
elif isinstance(input_data, Domain) and input_data.points.ndim == 1:
            # same goes for domains
input_data = [input_data]
else:
assert isinstance(input_data, list)
# convert numpy arrays to domains
input_data = [Domain(points=entry)
if isinstance(entry, np.ndarray) else entry
for entry in input_data]
# if a list with names is provided, the dimension must fit
if input_labels is None:
input_labels = ["" for i in range(len(input_data))]
if not isinstance(input_labels, list):
input_labels = [input_labels]
assert len(input_labels) == len(input_data)
# if a list with units is provided, the dimension must fit
if input_units is None:
input_units = ["" for i in range(len(input_data))]
if not isinstance(input_units, list):
input_units = [input_units]
assert len(input_units) == len(input_data)
assert isinstance(output_data, np.ndarray)
if output_data.size == 0:
raise ValueError("No initialisation possible with an empty array!")
if fill_axes:
# add dummy axes to input_data for missing output dimensions
dim_diff = output_data.ndim - len(input_data)
for dim in range(dim_diff):
input_data.append(Domain(points=np.array(
range(output_data.shape[-(dim_diff - dim)]))))
input_labels.append("")
input_units.append("")
# output_data has to contain len(input_data) dimensions
assert len(input_data) == output_data.ndim
for dim in range(len(input_data)):
assert len(input_data[dim]) == output_data.shape[dim]
self.input_data = input_data
self.output_data = output_data
self.min = np.nanmin(output_data)
self.max = np.nanmax(output_data)
if len(input_data) == 1:
if enable_extrapolation:
fill_val = "extrapolate"
else:
fill_val = (output_data[0], output_data[-1])
self._interpolator = interp1d(
input_data[0],
np.ma.fix_invalid(output_data, fill_value=fill_value),
axis=-1,
bounds_error=False,
fill_value=fill_val)
elif len(input_data) == 2 and output_data.ndim == 2:
# pure 2d case
if enable_extrapolation:
raise ValueError("Extrapolation not supported for 2d data. See "
"https://github.com/scipy/scipy/issues/8099"
"for details.")
            if len(input_data[0]) > 3 and len(input_data[1]) > 3 and False:
                # NOTE: this branch is disabled by the trailing 'and False',
                # so the interp2d fallback below is always used
                # special treatment for very common case (faster than interp2d)
# boundary values are used as fill values
self._interpolator = RectBivariateSpline(
*input_data,
np.ma.fix_invalid(output_data, fill_value=fill_value)
)
else:
# this will trigger nearest neighbour interpolation
fill_val = None
# if enable_extrapolation:
# fill_val = None
# else:
# Since the value has to be the same at every border
# fill_val = 0
self._interpolator = interp2d(
input_data[0],
input_data[1],
np.ma.fix_invalid(output_data.T, fill_value=fill_value),
bounds_error=False,
fill_value=fill_val)
else:
if enable_extrapolation:
fill_val = None
else:
# Since the value has to be the same at every border
fill_val = 0
self._interpolator = RegularGridInterpolator(
input_data,
np.ma.fix_invalid(output_data, fill_value=fill_value),
bounds_error=False,
fill_value=fill_val)
# handle names and units
self.input_labels = input_labels
self.input_units = input_units
self.name = name
if self.name is None:
self.name = ""
def adjust_input_vectors(self, other):
"""
        Check the input vectors of `self` and `other` for compatibility
        (equivalence) and harmonize them if they are compatible.
        The compatibility check is performed for every input vector in
        particular and examines whether they share the same boundaries.
        If the amount of discretization steps between the two instances
        differs, the more precise discretization is interpolated down onto
        the less precise one.
Args:
other (:py:class:`.EvalData`): Other EvalData class.
Returns:
tuple:
- (list) - New common input vectors.
- (numpy.ndarray) - Interpolated self output_data array.
- (numpy.ndarray) - Interpolated other output_data array.
"""
assert len(self.input_data) == len(other.input_data)
if self.input_data == other.input_data:
return self.input_data, self.output_data, other.output_data
input_data = []
for idx in range(len(self.input_data)):
# check if axis have the same length
if self.input_data[idx].bounds != other.input_data[idx].bounds:
raise ValueError("Boundaries of input vector {0} don't match."
" {1} (self) != {2} (other)".format(
idx,
self.input_data[idx].bounds,
other.input_data[idx].bounds
))
# check which axis has the worst discretization
if len(self.input_data[idx]) <= len(other.input_data[idx]):
input_data.append(self.input_data[idx])
else:
input_data.append(other.input_data[idx])
# interpolate data
interpolated_self = self.interpolate(input_data)
interpolated_other = other.interpolate(input_data)
return (input_data,
interpolated_self.output_data,
interpolated_other.output_data)
def add(self, other, from_left=True):
"""
Perform the element-wise addition of the output_data arrays from `self`
and `other`
        This method is used to support addition by implementing
        __add__ (from_left=True) and __radd__ (from_left=False).
        If `other` is a :py:class:`.EvalData`, the `input_data` lists of
        `self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The summation operation is performed on the interpolated output_data.
If `other` is a :class:`numbers.Number` it is added according to
numpy's broadcasting rules.
Args:
other (:py:class:`numbers.Number` or :py:class:`.EvalData`): Number
or EvalData object to add to self.
from_left (bool): Perform the addition from left if True or from
right if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of the addition.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data + other
else:
output_data = other + self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} + {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
# add the output arrays
if from_left:
output_data = self_output_data + other_output_data
_name = self.name + " + " + other.name
else:
output_data = other_output_data + self_output_data
_name = other.name + " + " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __radd__(self, other):
return self.add(other, from_left=False)
def __add__(self, other):
return self.add(other)
def sub(self, other, from_left=True):
"""
Perform the element-wise subtraction of the output_data arrays from
`self` and `other` .
This method is used to support subtraction by implementing
        __sub__ (from_left=True) and __rsub__ (from_left=False).
        If `other` is a :py:class:`.EvalData`, the `input_data` lists of
`self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The subtraction operation is performed on the interpolated output_data.
If `other` is a :class:`numbers.Number` it is handled according to
numpy's broadcasting rules.
Args:
other (:py:class:`numbers.Number` or :py:class:`.EvalData`): Number
or EvalData object to subtract.
from_left (boolean): Perform subtraction from left if True or from
right if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of subtraction.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data - other
else:
output_data = other - self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} - {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
# subtract the output arrays
if from_left:
output_data = self_output_data - other_output_data
_name = self.name + " - " + other.name
else:
output_data = other_output_data - self_output_data
_name = other.name + " - " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rsub__(self, other):
return self.sub(other, from_left=False)
def __sub__(self, other):
return self.sub(other)
def mul(self, other, from_left=True):
"""
Perform the element-wise multiplication of the output_data arrays from
`self` and `other` .
This method is used to support multiplication by implementing
        __mul__ (from_left=True) and __rmul__ (from_left=False).
        If `other` is a :py:class:`.EvalData`, the `input_data` lists of
`self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The multiplication operation is performed on the interpolated
output_data. If `other` is a :class:`numbers.Number` it is handled
according to numpy's broadcasting rules.
Args:
other (:class:`numbers.Number` or :py:class:`.EvalData`): Factor
to multiply with.
from_left boolean: Multiplication from left if True or from right
if False.
Returns:
:py:class:`.EvalData` with adapted input_data and output_data as
result of multiplication.
"""
if isinstance(other, numbers.Number):
if from_left:
output_data = self.output_data * other
else:
output_data = other * self.output_data
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
                            name="{} * {}".format(self.name, other))
elif isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
            # multiply the output arrays
output_data = other_output_data * self_output_data
if from_left:
_name = self.name + " * " + other.name
else:
_name = other.name + " * " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rmul__(self, other):
return self.mul(other, from_left=False)
def __mul__(self, other):
return self.mul(other)
def matmul(self, other, from_left=True):
"""
Perform the matrix multiplication of the output_data arrays from
`self` and `other` .
This method is used to support matrix multiplication (@) by implementing
        __matmul__ (from_left=True) and __rmatmul__ (from_left=False).
        If `other` is a :py:class:`.EvalData`, the `input_data` lists of
`self` and `other` are adjusted using :py:meth:`.adjust_input_vectors`.
The matrix multiplication operation is performed on the interpolated
output_data.
If `other` is a :class:`numbers.Number` it is handled according to
numpy's broadcasting rules.
Args:
other (:py:class:`EvalData`): Object to multiply with.
from_left (boolean): Matrix multiplication from left if True or
from right if False.
Returns:
:py:class:`EvalData` with adapted input_data and output_data as
result of matrix multiplication.
"""
if isinstance(other, EvalData):
(input_data, self_output_data, other_output_data
) = self.adjust_input_vectors(other)
if self.output_data.shape != other.output_data.shape:
raise ValueError("Dimension mismatch")
if from_left:
output_data = self_output_data @ other_output_data
_name = self.name + " @ " + other.name
else:
output_data = other_output_data @ self_output_data
_name = other.name + " @ " + self.name
return EvalData(input_data=deepcopy(input_data),
output_data=output_data,
name=_name)
else:
return NotImplemented
def __rmatmul__(self, other):
return self.matmul(other, from_left=False)
def __matmul__(self, other):
return self.matmul(other)
def __pow__(self, power):
"""
        Raise the elements from `self.output_data` element-wise to `power`.
Args:
power (:class:`numbers.Number`): Power to raise to.
Returns:
:py:class:`EvalData` with self.input_data and output_data as results
of the raise operation.
"""
if isinstance(power, numbers.Number):
output_data = self.output_data ** power
return EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="{} ** {}".format(self.name, power))
else:
return NotImplemented
def sqrt(self):
"""
        Take the element-wise square root of the elements from `self.output_data`.
Return:
:py:class:`EvalData` with self.input_data and output_data as result
of root calculation.
"""
output_data = np.sqrt(self.output_data)
ed = EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="sqrt({})".format(self.name))
return ed
def abs(self):
"""
        Get the absolute value of the elements from `self.output_data`.
Return:
:py:class:`EvalData` with self.input_data and output_data as result
of absolute value calculation.
"""
output_data = np.abs(self.output_data)
ed = EvalData(input_data=deepcopy(self.input_data),
output_data=output_data,
name="abs({})".format(self.name))
return ed
def __call__(self, interp_axes, as_eval_data=True):
"""
Interpolation method for output_data.
        Determines whether a one, two or three dimensional interpolation is used.
        The method can handle slice objects in the position lists.
One slice object is allowed per axis list.
Args:
interp_axes (list(list)): Axis positions in the form
- 1D: [axis] with axis=[1,2,3]
- 2D: [axis1, axis2] with axis1=[1,2,3] and axis2=[0,1,2,3,4]
as_eval_data (bool): Return the interpolation result as EvalData
object. If `False`, the output_data array of the results is
returned.
Returns:
            :py:class:`EvalData` with the interpolation positions as input_data
            and the values interpolated at these positions as output_data.
"""
if len(self.input_data) == 1:
# special case for 1d data where the outermost list can be omitted
if isinstance(interp_axes, slice):
interp_axes = [interp_axes]
if isinstance(interp_axes, list) and \
all([isinstance(e, Number) for e in interp_axes]):
interp_axes = [interp_axes]
assert isinstance(interp_axes, list)
dim_err = len(self.input_data) - len(interp_axes)
assert dim_err >= 0
interp_axes += [slice(None) for x in range(dim_err)]
assert len(interp_axes) == len(self.input_data)
_list = []
for i, interp_points in enumerate(interp_axes):
if isinstance(interp_points, slice):
_entry = self.input_data[i][interp_points]
if _entry is None:
raise ValueError("Quantity resulting from slice is empty!")
else:
try:
_entry = list(interp_points)
except TypeError as e:
raise ValueError("Coordinates must be given as iterable!")
_list.append(_entry)
res = self.interpolate(_list)
if as_eval_data:
return res
else:
return res.output_data
def interpolate(self, interp_axis):
"""
Main interpolation method for output_data.
If one of the output dimensions is to be interpolated at one single
point, the dimension of the output will decrease by one.
Args:
interp_axis (list(list)): axis positions in the form
- 1D: axis with axis=[1,2,3]
- 2D: [axis1, axis2] with axis1=[1,2,3] and axis2=[0,1,2,3,4]
Returns:
:py:class:`EvalData` with `interp_axis` as new input_data and
interpolated output_data.
"""
assert isinstance(interp_axis, list)
assert len(interp_axis) == len(self.input_data)
# check if an axis has been degenerated
domains = [Domain(points=axis) for axis in interp_axis if len(axis) > 1]
if len(self.input_data) == 1:
interpolated_output = self._interpolator(interp_axis[0])
elif len(self.input_data) == 2:
interpolated_output = self._interpolator(*interp_axis)
if isinstance(self._interpolator, interp2d):
interpolated_output = interpolated_output.T
else:
dims = tuple(len(a) for a in interp_axis)
coords = np.array(
[a.flatten() for a in np.meshgrid(*interp_axis, indexing="ij")])
interpolated_output = self._interpolator(coords.T).reshape(dims)
out_arr = ma.masked_invalid(interpolated_output).squeeze()
return EvalData(input_data=domains,
output_data=out_arr,
name=self.name)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from kokemomo.plugins.engine.utils.km_model_utils import *
from kokemomo.plugins.engine.model.km_storage.impl.km_rdb_adapter import adapter
__author__ = 'hiroki'
"""
It is the accessor to group table to be used in the KOKEMOMO.
[Table Layouts]
id:Integer
name:String
parent:Integer
create_at:DateTime(Automatic Updates)
update_at:DateTime(Automatic Updates)
[Basic Usage]
You can use each of the methods after acquiring a session with get_session().
-example-----------------------------------------------------------
from lib.kmgrouptable import get_session,add,find,find_all,delete
def search_parameter():
session = get_session()
result = find_all(session)
session.close()
return result
-------------------------------------------------------------------
"""
class KMGroup(adapter.Model):
__tablename__ = 'km_group'
id = adapter.Column(adapter.Integer, autoincrement=True, primary_key=True)
name = adapter.Column(adapter.String(254))
parent_id = adapter.Column(adapter.Integer)
def __init__(self, data=None):
if data is None:
self.name = ''
self.parent_id = -1
else:
self.set_data(data)
def __repr__(self):
return create_repr_str(self)
def get_json(self):
return create_json(self)
def set_data(self, data):
self.error = None
self.name = data.get_request_parameter('name', default='')
self.parent_id = data.get_request_parameter('parent_id', default=-1)
@classmethod
def get(cls, id):
if id is None:
group = KMGroup()
else:
group = super(KMGroup, cls).get(id=id)
return group
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import unittest
try:
import ansible_collections.amazon.aws.plugins.modules.aws_s3 as s3
except ImportError:
pytestmark = pytest.mark.skip("This test requires the s3 Python libraries")
from ansible.module_utils.six.moves.urllib.parse import urlparse
boto3 = pytest.importorskip("boto3")
class TestUrlparse(unittest.TestCase):
def test_urlparse(self):
actual = urlparse("http://test.com/here")
self.assertEqual("http", actual.scheme)
self.assertEqual("test.com", actual.netloc)
self.assertEqual("/here", actual.path)
def test_is_fakes3(self):
actual = s3.is_fakes3("fakes3://bla.blubb")
self.assertEqual(True, actual)
def test_get_s3_connection(self):
aws_connect_kwargs = dict(aws_access_key_id="access_key",
aws_secret_access_key="secret_key")
location = None
rgw = True
s3_url = "http://bla.blubb"
actual = s3.get_s3_connection(None, aws_connect_kwargs, location, rgw, s3_url)
self.assertEqual(bool("bla.blubb" in str(actual._endpoint)), True)
|
import argparse
import sys
import enum
from . import scanners
from .environment import GITHUB_PAT
# Program exit codes
class ExitCode(enum.Enum):
OK = 0
MISSING_ARGUMENTS = 1
LANGUAGE_NOT_SUPPORTED = 2
ERROR_OPENING_DEPENDENCIES = 3
languages = {
# language:ecosystem
"python": "pip",
}
if __name__ == "__main__":
# Command line arguments
parser = argparse.ArgumentParser()
help_text = ""
for lang in languages.keys():
help_text += lang + ";"
parser.add_argument(
"language",
help="Language to use. Supported: {}".format(help_text)
)
parser.add_argument("dependencies", help="Path to dependencies file.")
args = parser.parse_args()
# Check language support
args.language = args.language.lower()
if args.language not in languages.keys():
print("Language not supported.")
        sys.exit(ExitCode.LANGUAGE_NOT_SUPPORTED.value)
if args.language == "python":
scanner = scanners.Pip(args.dependencies, GITHUB_PAT)
if scanner is None:
            sys.exit(ExitCode.ERROR_OPENING_DEPENDENCIES.value)
_ = scanner.get_advisories()
|
"""tsipy includes tools for signal degradation correction and fusion.
Originally, it was built for processing measurements of Total Solar Irradiance (TSI).
However, the package implements tools for degradation correction and sensor fusion
not particular of any measurement quantity.
"""
from .__version__ import version
__version__ = version
|
"""
Contains utilities regarding messages
"""
from math import ceil
class Paginate:
'Chop a string into even chunks of max_length around the given separator'
def __init__(self, string, enclose=('```\n', '```'),
page_size=2000, separator='\n'):
self._string = string
self._prefix = enclose[0]
self._affix = enclose[1]
self._size = page_size - len(self._prefix) - len(self._affix)
self._separator = separator
self._r_seek = len(string)
self._pages_yielded = 0
def __iter__(self):
return self
def __next__(self):
if self._r_seek <= 0:
raise StopIteration()
self._pages_yielded += 1
if self._r_seek <= self._size:
string = self._wrap_string(-self._r_seek)
self._r_seek = 0
return string
split = self._string.rfind(
self._separator, -self._r_seek, self._size - self._r_seek
) + 1
if split:
string = self._wrap_string(-self._r_seek, split)
self._r_seek -= len(string) - len(self._prefix) - len(self._affix)
else:
string = self._wrap_string(
-self._r_seek, self._size - self._r_seek
)
self._r_seek -= self._size
return string
def _wrap_string(self, start, stop=None):
return self._prefix + self._string[start:stop] + self._affix
def prefix_next(self, prefix):
'Return next page prefixed but still smaller than page_size'
old_size = self._size
self._size -= len(prefix)
string = self.__next__()
self._size = old_size
return prefix + string
@property
def pages_yielded(self):
'Return the number of pages yielded by the iterator so far'
return self._pages_yielded
@property
def pages_left(self):
'Return number of remaining pages if the iterator is called normally'
return ceil(self._r_seek / self._size)
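# Hedged usage sketch for Paginate: split an arbitrarily long string into
# code-fenced pages that each stay below the chosen page size. The helper is
# only illustrative and is not called by this module.
def _example_paginate(long_text):
    return [page for page in Paginate(long_text, page_size=500)]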
async def notify_owner(bot, messages):
'Send message to the private channel of the owner'
user = await bot.fetch_user(bot.owner_id)
for message in messages:
await user.send(message)
async def message_input(ctx, prompt, timeout=60):
'Prompt user for input and wait for response or timeout'
message = await ctx.bot.say(prompt)
user_input = await ctx.bot.wait_for_message(
timeout=timeout,
author=ctx.message.author,
channel=ctx.message.channel)
if not user_input:
await ctx.bot.edit_message(
message,
new_content='Timed out, cancelling.')
return user_input
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import common
from common import TestDriver
from common import IntegrationTest
from decorators import ChromeVersionEqualOrAfterM
from decorators import SkipIfForcedBrowserArg
import json
class ClientConfig(IntegrationTest):
# Ensure client config is fetched at the start of the Chrome session, and the
# session ID is correctly set in the chrome-proxy request header.
def testClientConfig(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.SleepUntilHistogramHasEntry(
'DataReductionProxy.ConfigService.FetchResponseCode')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertEqual(2, len(responses))
for response in responses:
chrome_proxy_header = response.request_headers['chrome-proxy']
header_values = [v.strip(' ') for v in chrome_proxy_header.split(',')]
self.assertTrue(any(v[:2] == 's=' for v in header_values))
self.assertFalse(any(v[:3] == 'ps=' for v in header_values))
self.assertFalse(any(v[:4] == 'sid=' for v in header_values))
# Verify that the proxy server honored the session ID.
self.assertHasChromeProxyViaHeader(response)
self.assertEqual(200, response.status)
# Ensure Chrome uses a direct connection when no valid client config is given.
@SkipIfForcedBrowserArg('data-reduction-proxy-config-url')
def testNoClientConfigUseDirect(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
# The test server won't respond with a valid client config.
t.UseNetLog()
t.AddChromeArg('--data-reduction-proxy-config-url='
'https://chromeproxy-test.appspot.com')
t.SleepUntilHistogramHasEntry(
'DataReductionProxy.ConfigService.FetchResponseCode')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertEqual(2, len(responses))
for response in responses:
self.assertNotHasChromeProxyViaHeader(response)
# Ensure client config is fetched at the start of the Chrome session, and the
# variations ID is set in the request.
# Disabled on android because the net log is not copied yet. crbug.com/761507
@ChromeVersionEqualOrAfterM(62)
def testClientConfigVariationsHeader(self):
with TestDriver() as t:
t.UseNetLog()
t.AddChromeArg('--enable-spdy-proxy-auth')
      # Force set the variations ID, so they are sent along with the client
      # config fetch request.
t.AddChromeArg('--force-variation-ids=42')
t.LoadURL('http://check.googlezip.net/test.html')
variation_header_count = 0
# Look for the request made to data saver client config server.
data = t.StopAndGetNetLog()
for i in data["events"]:
dumped_event = json.dumps(i)
if dumped_event.find("datasaver.") !=-1 and\
dumped_event.find(".googleapis.com") !=-1 and\
dumped_event.find("clientConfigs") != -1 and\
dumped_event.find("headers") != -1 and\
dumped_event.find("accept-encoding") != -1 and\
dumped_event.find("x-client-data") !=-1:
variation_header_count = variation_header_count + 1
# Variation IDs are set. x-client-data should be present in the request
# headers.
self.assertLessEqual(1, variation_header_count)
# Ensure client config is fetched at the start of the Chrome session, and the
# variations ID is not set in the request.
# Disabled on android because the net log is not copied yet. crbug.com/761507
@ChromeVersionEqualOrAfterM(62)
def testClientConfigNoVariationsHeader(self):
with TestDriver() as t:
t.UseNetLog()
t.AddChromeArg('--enable-spdy-proxy-auth')
t.LoadURL('http://check.googlezip.net/test.html')
variation_header_count = 0
# Look for the request made to data saver client config server.
data = t.StopAndGetNetLog()
for i in data["events"]:
dumped_event = json.dumps(i)
if dumped_event.find("datasaver.") !=-1 and\
dumped_event.find(".googleapis.com") !=-1 and\
dumped_event.find("clientConfigs") != -1 and\
dumped_event.find("headers") != -1 and\
dumped_event.find("accept-encoding") != -1 and\
dumped_event.find("x-client-data") !=-1:
variation_header_count = variation_header_count + 1
# Variation IDs are not set. x-client-data should not be present in the
# request headers.
self.assertEqual(0, variation_header_count)
if __name__ == '__main__':
IntegrationTest.RunAllTests()
|
b='Hu Xi Shu He Xun Ku Juan Xiao Xi Yan Han Zhuang Jun Di Xie Ji Wu Yan Lu Han Yan Huan Men Ju Dao Bei Fen Lin Kun Hun Tun Xi Cui Wu Hong Chao Fu Wo Jiao Cong Feng Ping Qiong Ruo Xi Qiong Xin Chao Yan Yan Yi Jue Yu Gang Ran Pi Xiong Gang Sheng Chang Shao Xiong Nian Geng Wei Chen He Kui Zhong Duan Xia Hui Feng Lian Xuan Xing Huang Jiao Jian Bi Ying Zhu Wei Tuan Shan Xi Nuan Nuan Chan Yan Jiong Jiong Yu Mei Sha Wei Zha Jin Qiong Rou Mei Huan Xu Zhao Wei Fan Qiu Sui Yang Lie Zhu Jie Zao Gua Bao Hu Yun Nan Shi Liang Bian Gou Tui Tang Chao Shan En Bo Huang Xie Xi Wu Xi Yun He He Xi Yun Xiong Nai Shan Qiong Yao Xun Mi Lian Ying Wu Rong Gong Yan Qiang Liu Xi Bi Biao Cong Lu Jian Shu Yi Lou Peng Sui Yi Teng Jue Zong Yun Hu Yi Zhi Ao Wei Liu Han Ou Re Jiong Man Kun Shang Cuan Zeng Jian Xi Xi Xi Yi Xiao Chi Huang Chan Ye Tan Ran Yan Xun Qiao Jun Deng Dun Shen Jiao Fen Si Liao Yu Lin Tong Shao Fen Fan Yan Xun Lan Mei Tang Yi Jiong Men Jing Jiao Ying Yu Yi Xue Lan Tai Zao Can Sui Xi Que Zong Lian Hui Zhu Xie Ling Wei Yi Xie Zhao Hui Da Nong Lan Ru Xian He Xun Jin Chou Dao Yao'
|
import subprocess
import argparse
import logging
import shutil
import os
import re
logging.basicConfig(format='%(asctime)s %(message)s')
logging.getLogger().setLevel(logging.INFO)
REPO_URL_TEMPLATE = "https://github.com/openshift/{}.git"
valid_commit_regex = '^([A-Z]+-[0-9]+|#[0-9]+|merge|no-issue)'
############################################################################################
# This is used to generate the documentation of assisted-service between different versions.
############################################################################################
parser = argparse.ArgumentParser()
parser.add_argument("--from-version", help="From version to document", type=str, required=True)
parser.add_argument("--to-version", help="To version to document", type=str, required=True)
parser.add_argument("--documentation-dir", help="directory to write the version documentation to", type=str,
default=os.path.join(os.path.dirname(__file__), "../versions_documentation"))
parser.add_argument("--repo", help="repo to document", type=str, default="assisted-service")
args = parser.parse_args()
documentation_path = os.path.join(args.documentation_dir, args.repo)
def main():
version_documentation_list = list()
if not os.path.exists(documentation_path):
os.makedirs(documentation_path)
os.mkdir("temp")
try:
git_logs_line = get_versions_log()
process_logs(git_logs_line, version_documentation_list)
write_documentation_to_file(version_documentation_list)
except Exception as ex:
logging.error('Failed to process versions documentation: {}'.format(ex))
finally:
shutil.rmtree("temp")
def process_logs(git_logs_line, version_documentation_list):
for line in git_logs_line:
line = line.strip().decode("utf-8")
if is_line_metadata(line):
continue
line = line.strip('* ')
if re.match(valid_commit_regex, line):
version_documentation_list.append(line)
def get_versions_log():
repo_url = REPO_URL_TEMPLATE.format(args.repo)
subprocess.check_output("git clone {}".format(repo_url), shell=True, cwd="temp")
raw_log = subprocess.check_output("git log {tagS}...{tagE} ".format(tagS=args.from_version, tagE=args.to_version),
shell=True, cwd="temp/assisted-service")
print("*"*0)
git_logs_line = raw_log.splitlines()
return git_logs_line
def write_documentation_to_file(version_documentation_list):
version_documentation = '\n'.join(version_documentation_list)
file_name = "version_documentation_{}_to_{}".format(args.from_version, args.to_version)
file_path = os.path.join(documentation_path, file_name)
logging.info("Writing version documentation to {}".format(file_path))
logging.info("Version documentation: {}".format(version_documentation))
with open(file_path, 'w') as f:
f.write(version_documentation)
def is_line_metadata(line):
line_starts_with = ['commit', 'Author', 'Date:']
for meta_prefix in line_starts_with:
if line.startswith(meta_prefix):
return True
    if line == '':
        return True
    return False
if __name__ == "__main__":
main()
|
def cons_count(s):
"""
>>> cons_count('hello')
3
>>> cons_count('I\\'m fine')
3
"""
    return sum(1 for ch in s.lower() if ch.isalpha() and ch not in 'aeiou')
|
# Minimum total cost over x rows where each row picks one of three options and
# no two consecutive rows may pick the same option (a classic DP pattern).
x = int(input())
arr = []
for _ in range(x):
    arr.append(list(map(int, input().split())))
# arr[i][j] becomes the cheapest total cost up to row i if row i picks option j
for i in range(1, len(arr)):
    arr[i][0] = min(arr[i-1][1], arr[i-1][2]) + arr[i][0]
    arr[i][1] = min(arr[i-1][0], arr[i-1][2]) + arr[i][1]
    arr[i][2] = min(arr[i-1][0], arr[i-1][1]) + arr[i][2]
print(min(arr[x-1][0], arr[x-1][1], arr[x-1][2]))
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import time
import os
import rosnode
import roslaunch
import subprocess
from roslaunch import main as ros_launch
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
class NodeItem:
NONE = 'None'
RUNNING = 'Running'
    ABORTED = 'Aborted'
SHUTDOWN = 'Shutdown'
NODE_EXCEPTION = ['/rosout']
def __init__(self, node_uri, node_name):
rsc = os.path.join(get_pkg_dir('airbus_plugin_node_manager'),'resources')
self._icon_node_start = QIcon(rsc+'/start.png')
self._icon_node_stop = QIcon(rsc+'/stop.png')
self.uri = QLabel(node_uri)
self.uri.setContentsMargins(0,0,10,0)
self.uri.setMinimumHeight(40)
self.name = QLabel(node_name)
self.name.setContentsMargins(0,0,10,0)
self.name.setMinimumHeight(40)
self.status = QLabel(self.RUNNING)
self.status.setStyleSheet('qproperty-alignment: AlignCenter;')
self.status.setMinimumSize(QSize(100,40))
self.ping = QLabel('...')
self.ping.setStyleSheet("qproperty-alignment: AlignCenter;")
self.button_start_stop = QPushButton()
self.button_start_stop.setIcon(self._icon_node_stop)
self.button_start_stop.setIconSize(QSize(30,30))
self.button_start_stop.setFixedSize(QSize(100,40))
self.button_start_stop_widget = self.setup_start_stop_button(self.button_start_stop)
if node_name not in self.NODE_EXCEPTION:
self.button_start_stop.clicked.connect(self.start_stop_slot)
self.button_start_stop.setEnabled(False)
self.current_status = self.NONE
    def setup_start_stop_button(self, obj_ui):
widget = QWidget()
layout = QHBoxLayout(widget)
layout.setSpacing(6)
layout.setContentsMargins(0, 0, 0, 0)
spacer_left = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
spacer_right = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
layout.addItem(spacer_left)
layout.addWidget(obj_ui)
layout.addItem(spacer_right)
return widget
def start_stop_slot(self):
self.button_start_stop.setEnabled(False)
if self.current_status == self.RUNNING:
self.stop_node()
else:
self.start_node()
def start_node(self):
rospy.loginfo('%s::started()'%self.name.text())
launch_file = self.name.text().replace('/','')
launch_file += '.launch'
subprocess.Popen(['roslaunch',
'node_launchers',
launch_file])
def stop_node(self):
        rospy.loginfo('%s::stopped()'%self.name.text())
rosnode._rosnode_cmd_kill(['fake','fake',self.name.text()])
def refresh(self, status, ping=None):
if ping is not None:
self.ping.setText(str("%.3f"%ping))
else:
self.ping.setText('...')
if status != self.current_status:
self.current_status = status
self.button_start_stop.setEnabled(True)
self.status.setText(self.current_status)
if self.current_status == self.RUNNING:
self.status.setStyleSheet("background:rgb(0,255,0);")
self.button_start_stop.setIcon(self._icon_node_stop)
            elif self.current_status == self.ABORTED:
self.status.setStyleSheet("background:rgb(255,0,0);")
self.button_start_stop.setIcon(self._icon_node_start)
elif self.current_status == self.SHUTDOWN:
self.status.setStyleSheet("background:rgb(255,255,0);")
self.button_start_stop.setIcon(self._icon_node_start)
else:
self.status.setStyleSheet("background:rgb(255,255,255);")
self.status.setText('Unknown')
#End of file
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import random
import abc
__appname__ = ""
__author__ = "Marco Sirabella"
__copyright__ = ""
__credits__ = ["Marco Sirabella"] # Authors and bug reporters
__license__ = "GPL"
__version__ = "1.0"
__maintainers__ = "Marco Sirabella"
__email__ = "msirabel@gmail.com"
__status__ = "Prototype" # "Prototype", "Development" or "Production"
__module__ = ""
"""thanks to this guy for helping teach about backpropogation with complex maths"""
"""https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/"""
import functools
def pLock(func):
@property
@functools.wraps(func)
def wrapper(self):
if not hasattr(self, '_lock'):
self._lock = {}
        elif not isinstance(self._lock, dict):
            raise TypeError("expected the '_lock' attribute to be a dict")
if self._done and self._lock.get(func.__name__, False):
return self._lock[func.__name__]
else:
r = func(self)
self._lock[func.__name__] = r
return r
return wrapper
class Axon():
def __init__(self, iNeuron, oNeuron, weight=None):
self.iNeuron = iNeuron
self.oNeuron = oNeuron
self.bind()
if weight:
self.weight = weight
else:
self.weight = random.random()
@property
def value(self):
return self.weight * self.iNeuron.out
@property
def error(self):
return self.oNeuron.derivative * self.weight
def backprop(self, eta):
delta_error = self.oNeuron.derivative * self.iNeuron.out
self.new_weight = self.weight - eta * delta_error
return self.new_weight
def lock(self):
self.weight = self.new_weight
self.oNeuron._done = False
self.iNeuron._done = False
def unbind(self):
self.iNeuron._oAxon.remove(self)
self.oNeuron._iAxon.remove(self)
def bind(self):
self.iNeuron._oAxon.append(self)
self.oNeuron._iAxon.append(self)
import math
class Neuron():
def __init__(self, operator=sum):
self.op = operator
self._iAxon = []
self._oAxon = []
self._done = False
@pLock
def net(self):
return self.op(i.value for i in self._iAxon)
@pLock
def out(self):
return 1 / (1 + math.exp(-self.net))
@pLock
def net_derivative(self):
return sum(axon.error for axon in self._oAxon)
@pLock
def partial_derivative(self):
return self.out * (1 - self.out)
@pLock
def derivative(self):
return self.partial_derivative * self.net_derivative
def f_connect(self, other, weight=None):
return Axon(self, other, weight)
def back_pass(self, eta=0.5):
for a in self._oAxon:
a.backprop(eta)
def lock(self):
for a in self._oAxon:
a.lock()
class Static(Neuron):
def __init__(self, value):
super().__init__()
self._value = value
@property
def out(self):
return self._value
class Input(Neuron):
def __init__(self):
super().__init__()
self._value = 0
@property
def out(self):
return self._value
@out.setter
def input(self, value):
self._value = value
class Output(Neuron):
def __init__(self):
super().__init__()
self._target = 0
@property
def target(self):
return self._target
@target.setter
def target(self, value):
self._done = False
self._target = value
@pLock
def net_derivative(self):
self._done = True
return -(self.target - self.out)
@property
def error(self):
return 0.5 * (self.target - self.out) ** 2
import itertools
import numpy
class NNet(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __init__(self, eta=0, dataset=None):
self.eta = eta
self.dataset = dataset
self.axons = []
@property
def inputs(self):
return [input.out for input in self._inputs]
@inputs.setter
def inputs(self, values):
for input, value in zip(self._inputs, values):
input.input = value
@property
def outputs(self):
return [output.out for output in self._outputs]
@outputs.setter
def outputs(self, values):
for output, value in zip(self._outputs, values):
output.target = value
@property
def neurons(self):
return self._inputs + sum(self._hiddens, []) + self._outputs
def back_pass(self):
try:
for axon in self.axons:
axon.backprop(self.eta)
finally:
for axon in self.axons:
axon.lock()
def train(self, epoch, dataset=None, verbose=True):
if not dataset:
dataset = self.dataset
age = 0
try:
while age < epoch:
datum = tuple(dataset.items())[
random.randint(0, len(dataset) - 1)]
self.inputs = datum[0]
self.outputs = datum[1]
self.back_pass()
age += 1
if verbose:
print('epoch is {}'.format(age), end='\r')
except KeyboardInterrupt:
self.back_pass()
finally:
if verbose:
print()
def error(self, accuracy, dataset=None):
if not dataset:
dataset = self.dataset
error = 0
for _ in range(accuracy):
#self.train(1, dataset, False)
datum = tuple(dataset.items())[
random.randint(0, len(dataset) - 1)]
self.inputs = datum[0]
self.outputs = datum[1]
error += sum(output.error for output in self._outputs)
return error / accuracy
class DFFNet(NNet):
"""
Deep fried forward neural network
>>> z = DFFNet(2, [2], 1)
"""
def __init__(self, input_neurons, hidden_neurons, output_neurons, eta=1,
dataset=None):
super().__init__(eta, dataset=dataset)
self._inputs = [ Input() for _ in range(input_neurons)]
self._hiddens = [[Neuron() for _ in range(i)] for i in hidden_neurons]
self._outputs = [ Output() for _ in range(output_neurons)]
self._neurons = [self._inputs] + self._hiddens + [self._outputs]
self.weave()
def weave(self):
for one, next in zip(self._neurons, self._neurons[1:]):
for iNeuron, oNeuron in itertools.product(one, next):
self.axons.append(Axon(iNeuron, oNeuron))
@staticmethod
def x_layers(outlayer, inlayer):
for oNeuron in inlayer:
for iNeuron in outlayer:
oNeuron.f_connect(iNeuron)
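# Hedged training sketch (not executed on import): a 2-2-1 network fed the XOR
# truth table in the dataset format expected by NNet.train, i.e. an input tuple
# mapped to a target tuple. Since the network has no bias neurons, convergence
# depends on the random initial weights, so this is illustrative rather than a
# guarantee.
def _example_train_xor():
    xor_data = {(0, 0): (0,), (0, 1): (1,), (1, 0): (1,), (1, 1): (0,)}
    net = DFFNet(2, [2], 1, eta=0.5, dataset=xor_data)
    net.train(5000, verbose=False)
    return net.error(20)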
if __name__ == '__main__':
import doctest
doctest.testmod()
|
from typing import List
from git import CheckoutError
from too_many_repos.models.wrapped_repo import WrappedRepo
def repos_checkout_master(repos: List[WrappedRepo], force_dirty: bool = False, force_unpushed: bool = False) -> None:
for repo in repos:
if repo.is_master:
print(repo.name + ' [Success] : already master')
elif repo.has_changes and not force_dirty:
print(repo.name + ' : [Failed] : repo is dirty')
elif repo.commits_ahead_remote and not force_unpushed:
print(repo.name + ' : [Failed] : unpushed commits')
else:
try:
repo.git.checkout('master')
print(repo.name + ' : [Success]')
except CheckoutError:
print(repo.name + ' : [Failed] : checkout error')
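# Hedged usage sketch: how this helper might be driven from a small script.
# Building the WrappedRepo list is outside this module; `load_repos` below is a
# hypothetical placeholder, not an existing function.
if __name__ == "__main__":
    repos: List[WrappedRepo] = []  # e.g. repos = load_repos("~/src")
    repos_checkout_master(repos, force_dirty=False, force_unpushed=False)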
|
#!/usr/bin/python
import subprocess
import sys
import cgi
import datetime
import re
import requests
validMac = False
ERROR = False
form = cgi.FieldStorage()
user = "READONLY_USER_HERE"
pwd = "PASSWORD"
OUI = form.getvalue('OUI')
host = form.getvalue('HOST')
def formatOUI(OUI):
ot=OUI[0:2]
tf=OUI[2:4]
fs=OUI[5:7]
fmac = ot+":"+tf+":"+fs+":00:00:00"
return fmac
fOUI = formatOUI(OUI)
webCmd = "show ip arp | i {}".format(OUI[0:7])
def printHeader():
print "Content-type: text/html"
print ""
print "<html><head>"
print "<title>OUI Finder</title></head><body>"
print "<br />Time run: " + str(datetime.datetime.now()) + "<br>"
def checkInput():
    # Expect the Cisco-style prefix "xxxx.xx": four hex digits, a literal dot, two hex digits
    pattern = re.compile(r'[a-fA-F0-9]{4}\.[a-fA-F0-9]{2}')
    if re.match(pattern, OUI[0:7]):
        return True
    else:
        return False
def sanitize(outp):
item=[]
outp = outp.split('# STATS ')[0]
outp = outp.split(' * ')
del outp[0]
print "<BR>"
item = []
for i in outp:
entry = []
i = i.replace('changed=False','')
if "Internet" not in i:
entry.append(i.split(' ')[0])
else:
entry.append(i.split(' ')[0])
i = i.split(' Internet ')
del i[0]
for j in i:
j = j.split(' ')
j = [k for k in j if k]
del j[1]
del j[2]
entry.append(j)
item.append(entry)
return item
def displaySanitized(hosts):
totHosts = 0
for i in hosts:
if len(i)>1:
totHosts+=(len(i)-1)
print "<CENTER>"
print "Number of hosts found: " + str(totHosts)
print "<TABLE border='1' cellpadding='10'> "
for item in hosts:
if len(item) == 1:
print "<TR><TH colspan='3'>"
print item[0]
print "</TH></TR>"
print "<TR><TH>IP</TH><TH>MAC</TH><TH>VLAN</TH>"
print "<TR><TD colspan='3'>No hosts found</TD></TR>"
else:
print "<TR><TH colspan='3'>"
print item[0]
print "</TH></TR>"
print "<TR><TH>IP</TH><TH>MAC</TH><TH>VLAN</TH>"
for i in range(1,len(item)):
print "<TR><TD>"
print item[i][0]
print "</TD><TD>"
print item[i][1]
print "</TD><TD>"
print item[i][2]
print "</TD></TR>"
print "</TABLE>"
def executeCmd(host):
cmd = """ansible-playbook /ansible/plays/show_cmd.yml --limit '"""+host+"""' -e 'user="{0}" pass="{1}" cmd="{2}"' | sed 's/\\\\n/\\n/g'""".format(user,pwd,webCmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
outp = str(p.communicate()[0])
if 'Authentication failed.' in outp:
print "<CENTER><H1>***ERROR!***<br>Authentication failed.</H1><h3>Check credentials</h3></CENTER>"
displaySanitized(sanitize(outp))
def lookup(OUI):
MAC_URL = 'http://macvendors.co/api/%s'
r = requests.get(MAC_URL % OUI)
print "<CENTER><h3>Vendor Name: "+(r.json()['result']['company'])+"</h3></CENTER>"
printHeader()
validMac = checkInput()
if validMac == False:
print "<CENTER><h3>{} OUI not formatted correctly, please use xxxx.xx (Cisco format).</h3></CENTER>".format(OUI)
else:
try:
lookup(fOUI)
except:
ERROR = True
print "<CENTER>OUI not found in database!<br>Check and try again</CENTER>"
if ERROR == False:
executeCmd(host)
|
"""Document processor Endpoint."""
from typing import Dict, Optional
from time import time
from fastapi import APIRouter, HTTPException, Query
from lib.db import integrate_phrase_data
from phrase_counter.ingest import ingest_doc
from pydantic import BaseModel
from phrase_api.logger import LoggerSetup
from lib.status_updater import (
status_detector, get_named_entities, get_stop_words_regex
)
from phrase_api.lib.frequent_remover import freq_regex
# ------------------------------ Initialization -------------------------------
router = APIRouter()
logger = LoggerSetup(__name__, "debug").get_minimal()
NE_LIST = get_named_entities()
STOP_PATTERN = get_stop_words_regex()
# Frequents
FREQ_NE, FREQ_STOPS = freq_regex("ne"), freq_regex("stop")
# ---------------------------- function definition ----------------------------
class PhraseDocument(BaseModel):
"""Schema for payload in doc-process endpoint."""
document: str
@router.post(
"/api/doc-process/",
response_model=dict,
tags=["Document Process"],
status_code=201,
)
async def process_document(
doc: PhraseDocument,
doc_type: str = Query("TEXT", enum=["TEXT", "HTML", "URL"]),
ngram_range: str = "1,5",
replace_stop: bool = False,
tag_stop: bool = False,
tag_highlight: bool = False,
sitename: Optional[str] = None,
doc_id: Optional[str] = None,
) -> Dict[str, str]:
"""**Getting document content, processing & saving results in db.**
**Arguments:**
* **doc_type**: Type of the document given. Either `TEXT`, `HTML` or `URL`.
* **replace_stop**: Whether to replace stop words and remove them in process.
* **tag_stop**: Whether to set status for stop phrases as `suggested-stop`.
    Note that if **replace_stop** is set to *True*, setting this argument to *True*
    has no effect.
    * **ngram_range**: Range of n-gram sizes, e.g. "1,6".
* **sitename**: Name of the site while using AASAAM services.
* **doc_id**: Optional document identifier.
**Payload Example**: <br>
```
{
"document" :"<p> hello world </p>
}
```
"""
try:
logger.info("Starting")
s_tot = time()
# ---------------------------------- INGEST ----------------------------------
logger.info("Counting phrases")
s_ingest = time()
ngram_range = list(map(int, ngram_range.split(",")))
phrase_count_res = ingest_doc(
doc=doc.document,
doc_type=doc_type,
remove_stop_regex=FREQ_STOPS,
remove_highlight_regex=FREQ_NE,
ngram_range=ngram_range
)
e_ingest = time()
logger.debug(
"Time taken for ingesting document: %.1f ms", (e_ingest - s_ingest) * 1000
)
# ----------------------------- Status Detector -----------------------------
logger.info("Detecting Statuses")
s_status = time()
phrase_count_res["status"] = [
status_detector(
phrase, STOP_PATTERN, NE_LIST
) for phrase in phrase_count_res["bag"]
]
e_status = time()
logger.debug(
"Time taken for status detection: %.1f ms", (e_status - s_status) * 1000
)
# --------------------------- Integration ---------------------------
logger.info("Integrating nodes")
s_integrate = time()
integrate_phrase_data(phrase_count_res)
e_integrate = time()
logger.debug(
"Time taken for upserting document: %.1f ms",
(e_integrate - s_integrate) * 1000
)
# ---------------------------------------------------------------
res = {"message": "Results integration done."}
logger.info("Results integration done!")
e_tot = time()
logger.debug("Total time: %.3f Seconds", e_tot - s_tot)
return res
    except HTTPException as err:
        logger.error(err)
        raise
except Exception as err:
logger.error(err)
raise HTTPException(status_code=400) from err
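# Hedged usage sketch (not part of the API itself): how a client might call this
# endpoint once the app is served, e.g. with uvicorn. The host, port and the
# `requests` dependency are assumptions for illustration only.
if __name__ == "__main__":
    import requests
    resp = requests.post(
        "http://localhost:8000/api/doc-process/",
        params={"doc_type": "HTML", "ngram_range": "1,3"},
        json={"document": "<p> hello world </p>"},
    )
    print(resp.status_code, resp.json())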
|
import math
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from scratch_ml.deep_learning.optimizers import Adam
from scratch_ml.deep_learning import NeuralNetwork
from scratch_ml.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization
from scratch_ml.utils import to_categorical, train_test_split, CrossEntropy
class Autoencoder():
"""Autoencoder with deep fully-connected neural networks."""
def __init__(self):
pass
def encoder():
pass
def decoder():
pass
if __name__ == '__main__':
ae = Autoencoder()
|
import enum
import typing
from dataclasses import dataclass
class MessageType(enum.Enum):
Action = 1 # data: str representing command
Message = 2 # data: each line of message in a list
UpdateCharacter = 3 # data: updated character
SyncDataResponse = 4 # data: all syncable data
SyncDataRequest = 5 # data: none
ShowRequest = 6 # data: str of id to show
@dataclass(frozen=True)
class Packet:
type: MessageType
receiver: typing.Optional[str]
sender: str
data: typing.Any
    origin_command: typing.Optional[str] = None
def make_character_packet(player, me: str, origin_command: str) -> Packet:
return Packet(MessageType.UpdateCharacter, None, me, player, origin_command)
def make_chat_packet(msg: typing.List[str], me: str, origin_command: str) -> Packet:
    return Packet(MessageType.Message, None, me, msg, origin_command)
def make_sync_request_packet(receiver: str, me: str, origin_command: str) -> Packet:
    return Packet(MessageType.SyncDataRequest, receiver, me, None, origin_command)
def make_sync_response_packet(me: str, origin_command: str, data) -> Packet:
    return Packet(MessageType.SyncDataResponse, None, me, data, origin_command)
def make_show_request_packet(me: str, receiver: str, origin_command: str, data) -> Packet:
    return Packet(MessageType.ShowRequest, receiver, me, data, origin_command)
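# Hedged usage sketch: constructing a chat packet and reading it back.
if __name__ == "__main__":
    packet = make_chat_packet(["hello", "world"], me="alice", origin_command="say")
    assert packet.type is MessageType.Message
    assert packet.receiver is None  # these helper constructors leave the receiver unset
    print(packet)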
|
# Generated by Django 2.0 on 2017-12-23 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lecturer', '0031_auto_20171223_1537'),
]
operations = [
migrations.AddField(
model_name='unit',
name='class_day',
field=models.CharField(blank=True, choices=[('SUNDAY', 'Sun'), ('MONDAY', 'Mon'), ('TUESDAY', 'Tue'), ('WEDNESDAY', 'Wed'), ('THURSDAY', 'Thurs'), ('FRIDAY', 'Fri'), ('SATURDAY', 'Sat')], max_length=50),
),
]
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.java.package import deploy_jar # TODO: Should move to the JVM package.
from pants.backend.java.target_types import DeployJarTarget # TODO: Should move to the JVM package.
from pants.backend.scala.compile import scalac
from pants.backend.scala.dependency_inference import rules as dep_inf_rules
from pants.backend.scala.goals import check, repl, tailor
from pants.backend.scala.target_types import (
ScalacPluginTarget,
ScalaJunitTestsGeneratorTarget,
ScalaJunitTestTarget,
ScalaSourcesGeneratorTarget,
ScalaSourceTarget,
ScalatestTestsGeneratorTarget,
ScalatestTestTarget,
)
from pants.backend.scala.target_types import rules as target_types_rules
from pants.backend.scala.test import scalatest
from pants.jvm import classpath, jdk_rules, resources
from pants.jvm import util_rules as jvm_util_rules
from pants.jvm.goals import lockfile
from pants.jvm.resolve import coursier_fetch, coursier_setup, jvm_tool
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.test import junit
def target_types():
return [
DeployJarTarget,
JvmArtifactTarget,
ScalaJunitTestTarget,
ScalaJunitTestsGeneratorTarget,
ScalaSourceTarget,
ScalaSourcesGeneratorTarget,
ScalacPluginTarget,
ScalatestTestTarget,
ScalatestTestsGeneratorTarget,
]
def rules():
return [
*scalac.rules(),
*scalatest.rules(),
*check.rules(),
*tailor.rules(),
*repl.rules(),
*classpath.rules(),
*junit.rules(),
*deploy_jar.rules(),
*lockfile.rules(),
*coursier_fetch.rules(),
*coursier_setup.rules(),
*jvm_util_rules.rules(),
*jdk_rules.rules(),
*dep_inf_rules.rules(),
*target_types_rules(),
*jvm_tool.rules(),
*resources.rules(),
]
|
#!/home/ssericksen/anaconda2/bin/python2.7
# evaluate F1 and MCC metrics on new targets. Assume 10% hit fractions,
# and predict top 10% of cpds by score as the actives
import numpy as np
import pandas as pd
import informer_functions as inf
import sklearn as sk
import sklearn.metrics  # ensure sk.metrics is available for the metric calls below
import sys
try:
matrix = sys.argv[1] # 1 or 2
targ = sys.argv[2] # pknb, bglf4, or rop18
except IndexError:
    print('')
    print(' eval_rocauc_newtarg.py matrix targ')
    print('')
    print(' 1 or 2 pknb, bglf4, or rop18')
    print('')
    sys.exit(1)
rankings_file = '../output_newtargs/pkis'+matrix+'_'+targ+'_model_rankings_v1.2.csv'
activity_matrix_file = '../data/data_newtargs_pkis'+matrix+'cpds.csv'
df_continuous = pd.read_csv( activity_matrix_file, index_col='molid')
df_binary = inf.get_binary( df_continuous )
df_binary.index = df_binary.index.map(str)
df_rankings = pd.read_csv( rankings_file, index_col='molid' )
df_rankings.index = df_rankings.index.map(str)
df_rankings.replace('informer', -1000.0, inplace=True)
print('model,inf_hits,hits_recovered,tot_hits,F1,MCC')
for model in df_rankings.columns:
if df_rankings[model].count() < 300:
print("model:{} and target:{} missing significant portion of scored cpds, skipping metric eval".format(model,targ))
s_labels = df_binary[targ].rename('labels')
s_rankings = df_rankings[model].astype(float).rename('scores')
df_temp = pd.concat( [s_labels, s_rankings], axis=1, sort=False )
inf_hits = df_temp[ df_temp['scores'] == -1000.0 ]['labels'].sum()
tot_hits = df_temp['labels'].sum()
hits_recovered = np.nan
f1 = np.nan
mcc = np.nan
else:
s_labels = df_binary[targ].rename('labels')
s_rankings = df_rankings[model].astype(float).rename('scores')
df_temp = pd.concat( [s_labels, s_rankings], axis=1, sort=False )
# do not count negative informers as false positives
df_temp = df_temp[ ~((df_temp['scores'] == -1000.0) & (df_temp['labels'] == False)) ]
df_temp = df_temp.dropna( how='any')
# predict the top 10% ranking cpds as "active"
df_temp['binary_predictions'] = df_temp['scores'] <= df_temp['scores'].quantile(0.10)
predictions_arr = df_temp['binary_predictions'].values
labels_arr = df_temp['labels'].values
tot_hits = labels_arr.sum()
inf_hits = df_temp[ df_temp['scores'] == -1000.0 ]['labels'].sum()
f1 = sk.metrics.f1_score( labels_arr, predictions_arr )
mcc = sk.metrics.matthews_corrcoef( labels_arr, predictions_arr )
# so with truncated dataset (with negative informers removed), how many cpds in 10% of dataset?
N = int( round( len(df_temp) * 0.10 ) )
hits_recovered = df_temp.sort_values('scores').head( N )['labels'].sum()
    print('{},{},{},{},{},{}'.format(model, inf_hits, hits_recovered, tot_hits, f1, mcc))
|
import subprocess
workingDir = 'tale-linear-optimization-part1'
subprocess.call(['pandoc','-t','revealjs','-s',
'-o','content.html','content.md','--slide-level=2',
'-V','revealjs-url=../../reveal.js','--metadata', 'pagetitle="Uzdevumi"',
'--mathjax=https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML',
'-V','theme=white'], cwd=workingDir)
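# For reference, the subprocess call above is roughly equivalent to running the
# following command from inside the `tale-linear-optimization-part1` directory:
#   pandoc -t revealjs -s -o content.html content.md --slide-level=2 \
#     -V revealjs-url=../../reveal.js --metadata pagetitle="Uzdevumi" \
#     --mathjax=https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML \
#     -V theme=white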
|
import os
import unittest
import logging
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from slicer.util import NodeModify, toBool, VTKObservationMixin
from Resources import QReadsResources
#
# QReads
#
class QReads(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "QReads" # TODO: make this more human readable by adding spaces
self.parent.categories = ["SlicerQReads"] # TODO: set categories (folders where the module shows up in the module selector)
self.parent.dependencies = ["DICOM"] # TODO: add here list of module names that this module requires
self.parent.contributors = ["John Doe (AnyWare Corp.)"] # TODO: replace with "Firstname Lastname (Organization)"
# TODO: update with short description of the module and a link to online module documentation
self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
See more information in <a href="https://github.com/organization/projectname#QReads">module documentation</a>.
"""
# TODO: replace with organization, grant and thanks
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc., Andras Lasso, PerkLab,
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
"""
# Additional initialization step after application startup is complete
#slicer.app.connect("startupCompleted()", registerSampleData)
#
# QReadsWidget
#
class QReadsWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
BRIGHTNESS_STEP = 100.0
CONTRAST_STEP = 100.0
ZOOM_ACTIONS = ["100%", "200%", "400%", "1:1", "Fit to window"]
class CloseApplicationEventFilter(qt.QWidget):
def eventFilter(self, object, event):
if event.type() == qt.QEvent.Close:
slicer.util.mainWindow().writeSettings()
event.accept()
return True
return False
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._updatingGUIFromParameterNode = False
self.helpDialog = None
self.slabModeButtonGroup = None
self._closeApplicationEventFilter = QReadsWidget.CloseApplicationEventFilter()
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath('UI/QReads.ui'))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
self.slabModeButtonGroup = qt.QButtonGroup()
self.slabModeButtonGroup.addButton(self.ui.SlabModeMaxRadioButton, vtk.VTK_IMAGE_SLAB_MAX)
self.slabModeButtonGroup.addButton(self.ui.SlabModeMeanRadioButton, vtk.VTK_IMAGE_SLAB_MEAN)
self.slabModeButtonGroup.addButton(self.ui.SlabModeMinRadioButton, vtk.VTK_IMAGE_SLAB_MIN)
self.ui.ZoomComboBox.addItems(self.ZOOM_ACTIONS)
# Resize dock widget based on toolbar width
panelDockWidget = slicer.util.findChild(slicer.util.mainWindow(), "PanelDockWidget")
panelDockWidget.maximumWidth = self.ui.QReads.width
# Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
# "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's.
# "setMRMLScene(vtkMRMLScene*)" slot.
#uiWidget.setMRMLScene(slicer.mrmlScene)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.logic = QReadsLogic()
# Connections
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.NodeAddedEvent, self.onNodeAdded)
# These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene
# (in the selected parameter node).
self.ui.ShowReferenceMarkersButton.connect("clicked()", self.updateParameterNodeFromGUI)
self.ui.ResetReferenceMarkersButton.connect("clicked()", QReadsLogic.resetReferenceMarkers)
self.ui.SlabButton.connect("clicked()", self.updateParameterNodeFromGUI)
self.slabModeButtonGroup.connect("buttonClicked(int)", self.updateParameterNodeFromGUI)
self.ui.SlabThicknessSliderWidget.connect("valueChanged(double)", self.updateParameterNodeFromGUI)
self.ui.InverseGrayButton.connect("clicked(bool)", self.updateParameterNodeFromGUI)
self.ui.EnableWLButton.connect("clicked(bool)", self.updateParameterNodeFromGUI)
self.ui.ResetWLButton.connect("clicked()", QReadsLogic.resetWindowLevel)
# Install event filters
slicer.util.mainWindow().installEventFilter(self._closeApplicationEventFilter)
# Increasing the level will make the image darker, whereas decreasing the level value will make the image brighter
self.ui.BrightnessUpButton.connect("clicked()", lambda step=-self.BRIGHTNESS_STEP: QReadsLogic.updateWindowLevel(levelStep=step))
self.ui.BrightnessDownButton.connect("clicked()", lambda step=self.BRIGHTNESS_STEP: QReadsLogic.updateWindowLevel(levelStep=step))
    # Increasing the window will reduce display contrast, whereas decreasing the window increases the contrast
self.ui.ContrastUpButton.connect("clicked()", lambda step=-self.CONTRAST_STEP: QReadsLogic.updateWindowLevel(windowStep=step))
self.ui.ContrastDownButton.connect("clicked()", lambda step=self.CONTRAST_STEP: QReadsLogic.updateWindowLevel(windowStep=step))
self.ui.CTBodySoftTissueWLPresetButton.connect("clicked()", lambda presetName="CT-BodySoftTissue": self.logic.setWindowLevelPreset(presetName))
self.ui.CTBoneWLPresetButton.connect("clicked()", lambda presetName="CT-Bone": self.logic.setWindowLevelPreset(presetName))
self.ui.CTBrainWLPresetButton.connect("clicked()", lambda presetName="CT-Head": self.logic.setWindowLevelPreset(presetName))
self.ui.CTLungWLPresetButton.connect("clicked()", lambda presetName="CT-Lung": self.logic.setWindowLevelPreset(presetName))
self.ui.ZoomComboBox.connect("currentTextChanged(QString)", self.updateParameterNodeFromGUI)
self.ui.DistanceMeasurementButton.connect("clicked()", self.createDistanceMeasurement)
self.ui.SwitchOrientationMarkerTypeButton.connect("clicked()", self.switchViewOrientationMarkerType)
self.ui.RulerVisibleButton.connect("clicked()", self.updateParameterNodeFromGUI)
self.ui.HelpButton.connect("clicked()", self.showHelp)
self.ui.CloseApplicationPushButton.connect("clicked()", slicer.util.quit)
# Make sure parameter node is initialized (needed for module reload)
self.initializeParameterNode()
# Hide main window components
slicer.util.setApplicationLogoVisible(False)
slicer.util.setMenuBarsVisible(False)
slicer.util.setModuleHelpSectionVisible(False)
slicer.util.setModulePanelTitleVisible(False)
slicer.util.setToolbarsVisible(False)
# Layout
slicer.app.layoutManager().setLayout(self.logic.registerCustomLayout())
for viewName, viewColor in QReadsLogic.SLICEVIEW_BACKGROUND_COLORS.items():
sliceWidget = slicer.app.layoutManager().sliceWidget(viewName)
sliceWidget.sliceController().pinButton().visible = False
slicer.util.findChild(sliceWidget, "frame").styleSheet = "border: 4px solid %s" % viewColor
sliceWidget.sliceView().setBackgroundColor(qt.QColor(qt.Qt.black))
sliceNode = sliceWidget.mrmlSliceNode()
sliceNode.SetSliceVisible(True);
# Set text color of SliceOffsetSlider spinbox by updating palette
# because the background color is already customized by updating
# the palette in "qMRMLSliceControllerWidgetPrivate::setColor()"
sliceBarWidget = slicer.util.findChild(sliceWidget, "BarWidget")
sliceOffsetSpinBox = slicer.util.findChild(sliceBarWidget, "SpinBox")
palette = sliceOffsetSpinBox.palette
palette.setColor(qt.QPalette.Text, qt.QColor("White"))
sliceOffsetSpinBox.palette = palette
# Move slice view controller bar to the bottom
sliceWidget.layout().addWidget(sliceWidget.sliceController())
for viewName, viewColor in QReadsLogic.THREEDVIEW_BACKGROUND_COLORS.items():
with NodeModify(slicer.util.getNode("vtkMRMLViewNode%s" % viewName)) as viewNode:
viewNode.SetBackgroundColor(0., 0., 0.)
viewNode.SetBackgroundColor2(0., 0., 0.)
viewNode.SetBoxVisible(False)
viewNode.SetAxisLabelsVisible(False)
viewNode.SetOrientationMarkerType(slicer.vtkMRMLAbstractViewNode.OrientationMarkerTypeAxes)
# Move 3D view controller bar to the bottom
threeDWidget = slicer.app.layoutManager().viewWidget(viewNode)
threeDWidget.layout().addWidget(threeDWidget.threeDController())
# ... and reconfigure behavior of the popup to appear above the controller bar
popupWidget = threeDWidget.findChild(ctk.ctkPopupWidget)
popupWidget.alignment = qt.Qt.AlignLeft | qt.Qt.AlignTop
popupWidget.horizontalDirection = qt.Qt.LeftToRight
popupWidget.verticalDirection = ctk.ctkBasePopupWidget.BottomToTop
def cleanup(self):
"""
Called when the application closes and the module widget is destroyed.
"""
self.removeObservers()
def enter(self):
"""
Called each time the user opens this module.
"""
# Make sure parameter node exists and observed
self.initializeParameterNode()
def exit(self):
"""
Called each time the user opens a different module.
"""
    # Do not react to parameter node changes (GUI will be updated when the user enters into the module)
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
"""
Called just before the scene is closed.
"""
# Parameter node will be reset, do not use it anymore
self.setParameterNode(None)
def onSceneEndClose(self, caller, event):
"""
Called just after the scene is closed.
"""
# If this module is shown while the scene is closed then recreate a new parameter node immediately
if self.parent.isEntered:
self.initializeParameterNode()
@vtk.calldata_type(vtk.VTK_OBJECT)
def onNodeAdded(self, caller, event, calldata):
if slicer.mrmlScene.IsBatchProcessing():
return
node = calldata
if not isinstance(node, slicer.vtkMRMLScalarVolumeNode):
return
def _update():
slicer.app.processEvents()
slicer.app.layoutManager().resetThreeDViews()
self.updateParameterNodeFromVolumeNode(node)
QReadsLogic.setZoom(self._parameterNode.GetParameter("Zoom"))
# Dictionary of name and values
values = {QReadsLogic.DICOM_TAGS[tag]: value for tag, value in QReadsLogic.dicomTagValues(node).items()}
# Update window title
slicer.util.mainWindow().windowTitle = \
"CMRN: {PatientID} Patient Name: {PatientName} Exam: {StudyDescription} Series: {SeriesDescription}".format(**values)
# Delay update to ensure images are rendered
qt.QTimer.singleShot(750, _update)
def initializeParameterNode(self):
"""
Ensure parameter node exists and observed.
"""
# Parameter node stores all user choices in parameter values, node selections, etc.
# so that when the scene is saved and reloaded, these settings are restored.
self.setParameterNode(self.logic.getParameterNode())
def setParameterNode(self, inputParameterNode):
"""
Set and observe parameter node.
Observation is needed because when the parameter node is changed then the GUI must be updated immediately.
"""
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
# Unobserve previously selected parameter node and add an observer to the newly selected.
# Changes of parameter node are observed so that whenever parameters are changed by a script or any other module
# those are reflected immediately in the GUI.
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def showHelp(self):
"""
Display the help website of the application using a non-modal dialog.
"""
if self.helpDialog is None:
dialog = qt.QDialog(slicer.util.mainWindow())
layout = qt.QVBoxLayout()
dialog.setLayout(layout)
webWidget = slicer.qSlicerWebWidget()
layout.addWidget(webWidget)
webWidget.url = qt.QUrl("https://github.com/KitwareMedical/SlicerQReads#readme")
# Set initial size
dialog.size = slicer.util.mainWindow().size * 0.70
self.helpDialog = dialog
self.helpDialog.show()
def createDistanceMeasurement(self):
lineNode = slicer.mrmlScene.CreateNodeByClass("vtkMRMLMarkupsLineNode")
lineNode.SetName("Line")
slicer.mrmlScene.AddNode(lineNode)
lineNode.UnRegister(slicer.mrmlScene)
slicer.modules.markups.logic().AddNewDisplayNodeForMarkupsNode(lineNode)
# Setup placement
slicer.modules.markups.logic().SetActiveListID(lineNode)
slicer.app.applicationLogic().GetInteractionNode().SwitchToSinglePlaceMode()
def switchViewOrientationMarkerType(self):
"""Switch orientation marker type the next one based on the order defined in
vtkMRMLAbstractViewNode::OrientationMarkerTypeType enum.
"""
currentOrientationMarkerType = int(self._parameterNode.GetParameter("OrientationMarkerType"))
nextOrientationMarkerType = (currentOrientationMarkerType + 1) % slicer.vtkMRMLAbstractViewNode.OrientationMarkerType_Last
self._parameterNode.SetParameter("OrientationMarkerType", str(nextOrientationMarkerType))
def updateGUIFromParameterNode(self, caller=None, event=None):
"""
This method is called whenever parameter node is changed.
The module GUI is updated to show the current state of the parameter node.
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
# Toggle reference markers button
referenceMarkersVisible = toBool(self._parameterNode.GetParameter("ReferenceMarkersVisible"))
self.ui.ShowReferenceMarkersButton.checked = referenceMarkersVisible
# Enable/disable slab buttons and slider
slabEnabled = toBool(self._parameterNode.GetParameter("SlabEnabled"))
self.ui.SlabModeMaxRadioButton.enabled = slabEnabled
self.ui.SlabModeMeanRadioButton.enabled = slabEnabled
self.ui.SlabModeMinRadioButton.enabled = slabEnabled
self.ui.SlabThicknessSliderWidget.enabled = slabEnabled
# Update slab mode buttons
slabModeStr = self._parameterNode.GetParameter("SlabMode") if slabEnabled else QReadsLogic.DEFAULT_SLAB_MODE
getattr(self.ui, "SlabMode%sRadioButton" % slabModeStr).checked = True
volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
# Update slab slider
spacingInMm = max(volumeNode.GetSpacing()) if volumeNode is not None else 0.0
if slabEnabled:
slabThicknessInMm = float(self._parameterNode.GetParameter("SlabThicknessInMm"))
else:
slabThicknessInMm = spacingInMm
self.ui.SlabThicknessSliderWidget.minimum = spacingInMm
self.ui.SlabThicknessSliderWidget.value = slabThicknessInMm
# Update InverseGray button
inverseGray = toBool(self._parameterNode.GetParameter("InverseGray"))
self.ui.InverseGrayButton.checked = inverseGray
# Update WindowLevel button
windowLevel = toBool(self._parameterNode.GetParameter("WindowLevelEnabled"))
self.ui.EnableWLButton.checked = windowLevel
interactionMode = slicer.vtkMRMLInteractionNode.AdjustWindowLevel if windowLevel else slicer.vtkMRMLInteractionNode.ViewTransform
slicer.app.applicationLogic().GetInteractionNode().SetCurrentInteractionMode(interactionMode)
# Update ZoomComboBox
zoom = self._parameterNode.GetParameter("Zoom")
self.ui.ZoomComboBox.currentText = zoom
# Update OrientationMarkerType button
orientationMarkerType = int(self._parameterNode.GetParameter("OrientationMarkerType"))
# Update RulerVisible button
rulerVisible = toBool(self._parameterNode.GetParameter("RulerVisible"))
self.ui.RulerVisibleButton.checked = rulerVisible
# Update viewers
QReadsLogic.setReferenceMarkersVisible(referenceMarkersVisible)
QReadsLogic.setSlab(
QReadsLogic.slabModeFromString(slabModeStr),
QReadsLogic.slabThicknessInMmToNumberOfSlices(volumeNode, slabThicknessInMm))
QReadsLogic.setInverseGrayEnabled(inverseGray)
QReadsLogic.setZoom(zoom)
QReadsLogic.setOrientationMarkerType(orientationMarkerType)
QReadsLogic.setRulerVisible(rulerVisible)
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
"""
This method is called when the user makes any change in the GUI.
The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).
"""
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
self._parameterNode.SetParameter("ReferenceMarkersVisible", "true" if self.ui.ShowReferenceMarkersButton.checked else "false")
slabEnabled = self.ui.SlabButton.checked
self._parameterNode.SetParameter("SlabEnabled", "true" if slabEnabled else "false")
self._parameterNode.SetParameter("SlabMode", QReadsLogic.slabModeToString(self.slabModeButtonGroup.checkedId()))
self._parameterNode.SetParameter("SlabThicknessInMm", str(self.ui.SlabThicknessSliderWidget.value))
self._parameterNode.SetParameter("InverseGray", "true" if self.ui.InverseGrayButton.checked else "false")
self._parameterNode.SetParameter("WindowLevelEnabled", "true" if self.ui.EnableWLButton.checked else "false")
self._parameterNode.SetParameter("Zoom", self.ui.ZoomComboBox.currentText)
self._parameterNode.SetParameter("OrientationMarkerType",
str(slicer.util.getNodesByClass('vtkMRMLAbstractViewNode')[0].GetOrientationMarkerType()))
self._parameterNode.SetParameter("RulerVisible", "true" if self.ui.RulerVisibleButton.checked else "false")
self._parameterNode.EndModify(wasModified)
def updateParameterNodeFromVolumeNode(self, volumeNode):
self._parameterNode.SetParameter("SlabThicknessInMm", str(max(volumeNode.GetSpacing())))
#
# QReadsLogic
#
class QReadsLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
SLICEVIEW_BACKGROUND_COLORS = {
"Red": "#000099",
"Yellow": "#990000",
"Green": "#009900"
}
THREEDVIEW_BACKGROUND_COLORS = {
"QReads1": "#000000",
}
WINDOW_LEVEL_PRESETS = {
'CT-BodySoftTissue': (400, 40),
'CT-Bone': (2500, 300),
'CT-Head': (100, 40),
'CT-Lung': (1600, -600)
}
"""Windows level presets specified as (windows, level)"""
DEFAULT_SLAB_MODE = "Max"
SLAB_MODES = {
vtk.VTK_IMAGE_SLAB_MAX: "Max",
vtk.VTK_IMAGE_SLAB_MEAN: "Mean",
vtk.VTK_IMAGE_SLAB_MIN: "Min",
vtk.VTK_IMAGE_SLAB_SUM: "Sum"
}
"""Slab modes supported by vtkImageReslice"""
DICOM_TAGS = {
"0010,0010": "PatientName",
"0010,0020": "PatientID",
"0008,1030": "StudyDescription",
"0008,103e": "SeriesDescription"
}
"""Tags cached when loading data using QReadsLogic.loadDICOMDataDirectory()"""
DICOM_TAG_VALUES = {}
"""Map of the first instance UID of each loaded volume to tag values specified in QReadsLogic.DICOM_TAGS"""
def __init__(self):
"""
Called when the logic class is instantiated. Can be used for initializing member variables.
"""
ScriptedLoadableModuleLogic.__init__(self)
def setDefaultParameters(self, parameterNode):
"""
Initialize parameter node with default settings.
"""
if not parameterNode.GetParameter("ReferenceMarkersVisible"):
parameterNode.SetParameter("ReferenceMarkersVisible", "true")
if not parameterNode.GetParameter("SlabEnabled"):
parameterNode.SetParameter("SlabEnabled", "false")
if not parameterNode.GetParameter("SlabMode"):
parameterNode.SetParameter("SlabMode", QReadsLogic.DEFAULT_SLAB_MODE)
if not parameterNode.GetParameter("SlabThicknessInMm"):
parameterNode.SetParameter("SlabThicknessInMm", "1.0")
if not parameterNode.GetParameter("InverseGray"):
parameterNode.SetParameter("InverseGray", "false")
if not parameterNode.GetParameter("WindowLevelEnabled"):
parameterNode.SetParameter("WindowLevelEnabled", "false")
if not parameterNode.GetParameter("Zoom"):
parameterNode.SetParameter("Zoom", "Fit to window")
if not parameterNode.GetParameter("OrientationMarkerType"):
parameterNode.SetParameter("OrientationMarkerType", str(slicer.vtkMRMLAbstractViewNode.OrientationMarkerTypeAxes))
if not parameterNode.GetParameter("RulerVisible"):
parameterNode.SetParameter("RulerVisible", "false")
@staticmethod
def registerCustomLayout():
customLayout = (
"<layout type=\"vertical\">"
" <item>"
" <layout type=\"horizontal\">"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Red\">"
" <property name=\"orientation\" action=\"default\">Axial</property>"
" <property name=\"viewlabel\" action=\"relayout\">B</property>"
" <property name=\"viewcolor\" action=\"relayout\">{Red}</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Yellow\">"
" <property name=\"orientation\" action=\"default\">Sagittal</property>"
" <property name=\"viewlabel\" action=\"relayout\">R</property>"
" <property name=\"viewcolor\" action=\"relayout\">{Yellow}</property>"
" </view>"
" </item>"
" </layout>"
" </item>"
" <item>"
" <layout type=\"horizontal\">"
" <item>"
" <view class=\"vtkMRMLSliceNode\" singletontag=\"Green\">"
" <property name=\"orientation\" action=\"default\">Coronal</property>"
" <property name=\"viewlabel\" action=\"relayout\">G</property>"
" <property name=\"viewcolor\" action=\"relayout\">{Green}</property>"
" </view>"
" </item>"
" <item>"
" <view class=\"vtkMRMLViewNode\" singletontag=\"QReads1\">"
" <property name=\"viewlabel\" action=\"default\">1</property>"
" <property name=\"viewcolor\" action=\"default\">{QReads1}</property>"
" </view>"
" </item>"
" </layout>"
" </item>"
"</layout>").format(**QReadsLogic.SLICEVIEW_BACKGROUND_COLORS, **QReadsLogic.THREEDVIEW_BACKGROUND_COLORS)
customLayoutId = 503
layoutLogic = slicer.app.layoutManager().layoutLogic()
layoutLogic.GetLayoutNode().AddLayoutDescription(customLayoutId, customLayout)
return customLayoutId
@staticmethod
def setInverseGrayEnabled(enabled):
for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
if volumeNode.GetDisplayNode() is None:
continue
if enabled:
colorNodeID = "vtkMRMLColorTableNodeInvertedGrey"
else:
colorNodeID = "vtkMRMLColorTableNodeGrey"
volumeNode.GetDisplayNode().SetAndObserveColorNodeID(slicer.util.getNode(colorNodeID).GetID())
def setWindowLevelPreset(self, presetName):
for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
volumeDisplayNode = volumeNode.GetDisplayNode()
with NodeModify(volumeDisplayNode):
volumeDisplayNode.SetAutoWindowLevel(0)
volumeDisplayNode.SetWindowLevel(*self.WINDOW_LEVEL_PRESETS[presetName])
@staticmethod
def updateWindowLevel(windowStep=None, levelStep=None):
for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
volumeDisplayNode = volumeNode.GetDisplayNode()
with NodeModify(volumeDisplayNode):
window = volumeDisplayNode.GetWindow()
if windowStep is not None:
window = window + windowStep
level = volumeDisplayNode.GetLevel()
if levelStep is not None:
level = level + levelStep
volumeDisplayNode.SetAutoWindowLevel(0)
volumeDisplayNode.SetWindowLevel(window, level)
@staticmethod
def resetWindowLevel():
for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
volumeNode.GetDisplayNode().AutoWindowLevelOn()
@staticmethod
def slabModeToString(slabMode):
return QReadsLogic.SLAB_MODES[slabMode]
@staticmethod
def slabModeFromString(slabModeStr):
return {v: k for k, v in QReadsLogic.SLAB_MODES.items()}[slabModeStr]
  @staticmethod
  def slabThicknessInMmToNumberOfSlices(volumeNode, thicknessInMm):
    if volumeNode is None:
      return 1
    assert thicknessInMm > 0
    return int(thicknessInMm / max(volumeNode.GetSpacing()))
@staticmethod
def setSlab(mode, numberOfSlices):
assert numberOfSlices > 0
assert mode in QReadsLogic.SLAB_MODES.keys()
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
reslice = sliceLogic.GetBackgroundLayer().GetReslice()
reslice.SetSlabMode(mode)
reslice.SetSlabNumberOfSlices(numberOfSlices)
sliceLogic.GetBackgroundLayer().Modified()
@staticmethod
def setReferenceMarkersVisible(visible):
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
sliceLogic.GetSliceCompositeNode().SetSliceIntersectionVisibility(visible)
sliceLogic.GetSliceNode().SetWidgetVisible(visible)
@staticmethod
def resetReferenceMarkers():
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
sliceLogic.GetSliceNode().SetOrientationToDefault()
# Commenting because it causes errors
# sliceLogic.RotateSliceToLowestVolumeAxes()
sliceLogic.FitSliceToAll()
@staticmethod
def setOrientationMarkerType(orientationMarkerType):
for viewNode in slicer.util.getNodesByClass('vtkMRMLAbstractViewNode'):
viewNode.SetOrientationMarkerType(orientationMarkerType)
@staticmethod
def setRulerVisible(visible):
for viewNode in slicer.util.getNodesByClass('vtkMRMLAbstractViewNode'):
rulerType = slicer.vtkMRMLAbstractViewNode.RulerTypeThin if visible else slicer.vtkMRMLAbstractViewNode.RulerTypeNone
viewNode.SetRulerType(rulerType)
viewNode.SetRulerColor(slicer.vtkMRMLAbstractViewNode.RulerColorYellow)
@staticmethod
def setZoom(zoom):
volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
if zoom == "Fit to window" or zoom == "100%":
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
sliceLogic.FitSliceToAll()
elif zoom == "200%":
QReadsLogic.setSlicesZoom(0.5)
elif zoom == "400%":
QReadsLogic.setSlicesZoom(0.25)
elif zoom == "1:1":
QReadsLogic.setSlicesZoomOneToOne(volumeNode)
@staticmethod
def setSlicesZoom(factor):
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
sliceNode = sliceLogic.GetSliceNode()
with NodeModify(sliceNode):
sliceLogic.FitSliceToAll()
fov = sliceNode.GetFieldOfView()
sliceNode.SetFieldOfView(fov[0] * factor, fov[1] * factor, fov[2])
@staticmethod
def setSlicesZoomOneToOne(volumeNode):
"""1:1 means image 1 image pixel to 1 screen pixel.
This means 512 by 512 image occupies 512 by 512 screen pixels.
"""
for sliceLogic in slicer.app.applicationLogic().GetSliceLogics():
sliceNode = sliceLogic.GetSliceNode()
with NodeModify(sliceNode):
QReadsLogic.setSliceZoomOneToOne(sliceLogic, volumeNode)
QReadsLogic.centerSlice(sliceLogic, volumeNode)
sliceLogic.SnapSliceOffsetToIJK()
@staticmethod
def setSliceZoomOneToOne(sliceLogic, volumeNode):
"""1:1 means image 1 image pixel to 1 screen pixel.
This means 512 by 512 image occupies 512 by 512 screen pixels.
"""
sliceNode = sliceLogic.GetSliceNode()
dimensions = sliceNode.GetDimensions()
spacing = volumeNode.GetSpacing()
fovX = dimensions[0] * spacing[0]
fovY = dimensions[1] * spacing[1]
fovZ = sliceLogic.GetVolumeSliceSpacing(volumeNode)[2] * dimensions[2]
sliceNode.SetFieldOfView(fovX, fovY, fovZ)
@staticmethod
def centerSlice(sliceLogic, volumeNode):
"""Set the origin to be the center of the volume in RAS.
Copied from vtkMRMLSliceLogic::FitSliceToVolume
"""
rasDimensions = [0.0, 0.0, 0.0]
rasCenter = [0.0, 0.0, 0.0]
    sliceLogic.GetVolumeRASBox(volumeNode, rasDimensions, rasCenter)
sliceNode = sliceLogic.GetSliceNode()
sliceToRAS = vtk.vtkMatrix4x4()
sliceToRAS.DeepCopy(sliceNode.GetSliceToRAS())
sliceToRAS.SetElement(0, 3, rasCenter[0])
sliceToRAS.SetElement(1, 3, rasCenter[1])
sliceToRAS.SetElement(2, 3, rasCenter[2])
sliceNode.GetSliceToRAS().DeepCopy(sliceToRAS)
sliceNode.SetSliceOrigin(0, 0, 0)
@staticmethod
def loadDICOMDataDirectory(dicomDataDir):
from DICOMLib import DICOMUtils
loadedNodeIDs = [] # this list will contain the list of all loaded node IDs
with DICOMUtils.TemporaryDICOMDatabase() as db:
DICOMUtils.importDicom(dicomDataDir, db)
patientUIDs = db.patients()
for patientUID in patientUIDs:
for nodeID in DICOMUtils.loadPatientByUID(patientUID):
# Retrieve tag values associated with first instance UID
node = slicer.mrmlScene.GetNodeByID(nodeID)
instanceUID = node.GetAttribute('DICOM.instanceUIDs').split()[0]
filename = db.fileForInstance(instanceUID)
QReadsLogic.DICOM_TAG_VALUES[instanceUID] = {
tag: db.fileValue(filename, tag) for tag in QReadsLogic.DICOM_TAGS
}
loadedNodeIDs.append(nodeID)
return loadedNodeIDs
@staticmethod
def dicomTagValues(volumeNode):
"""Return a dictionary of DICOM tags and values associated with first instance UID associated with volumeNode.
See QReadsLogic.DICOM_TAGS
"""
instanceUIDs = volumeNode.GetAttribute('DICOM.instanceUIDs').split()
return QReadsLogic.DICOM_TAG_VALUES[instanceUIDs[0]]
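# Hedged usage sketch (comments only, since this requires a running Slicer
# session): loading a DICOM directory through the logic class from the Slicer
# Python console. The directory path below is a placeholder.
#   nodeIDs = QReadsLogic.loadDICOMDataDirectory("/path/to/dicom/series")
#   volumeNode = slicer.mrmlScene.GetNodeByID(nodeIDs[0])
#   print(QReadsLogic.dicomTagValues(volumeNode))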
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'MyWorkbookManagedIdentityResponse',
'MyWorkbookUserAssignedIdentitiesResponse',
'WorkbookManagedIdentityResponse',
'WorkbookUserAssignedIdentitiesResponse',
]
@pulumi.output_type
class MyWorkbookManagedIdentityResponse(dict):
"""
Customer Managed Identity
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userAssignedIdentities":
suggest = "user_assigned_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MyWorkbookManagedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MyWorkbookManagedIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MyWorkbookManagedIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: Optional[str] = None,
user_assigned_identities: Optional['outputs.MyWorkbookUserAssignedIdentitiesResponse'] = None):
"""
Customer Managed Identity
:param str type: The identity type.
:param 'MyWorkbookUserAssignedIdentitiesResponse' user_assigned_identities: Customer Managed Identity
"""
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional['outputs.MyWorkbookUserAssignedIdentitiesResponse']:
"""
Customer Managed Identity
"""
return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class MyWorkbookUserAssignedIdentitiesResponse(dict):
"""
Customer Managed Identity
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MyWorkbookUserAssignedIdentitiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MyWorkbookUserAssignedIdentitiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MyWorkbookUserAssignedIdentitiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
principal_id: str,
tenant_id: str):
"""
Customer Managed Identity
:param str principal_id: The principal ID of resource identity.
:param str tenant_id: The tenant ID of resource.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID of resource identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant ID of resource.
"""
return pulumi.get(self, "tenant_id")
@pulumi.output_type
class WorkbookManagedIdentityResponse(dict):
"""
Customer Managed Identity
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "userAssignedIdentities":
suggest = "user_assigned_identities"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WorkbookManagedIdentityResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WorkbookManagedIdentityResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WorkbookManagedIdentityResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: Optional[str] = None,
user_assigned_identities: Optional['outputs.WorkbookUserAssignedIdentitiesResponse'] = None):
"""
Customer Managed Identity
:param str type: The identity type.
:param 'WorkbookUserAssignedIdentitiesResponse' user_assigned_identities: Customer Managed Identity
"""
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional['outputs.WorkbookUserAssignedIdentitiesResponse']:
"""
Customer Managed Identity
"""
return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class WorkbookUserAssignedIdentitiesResponse(dict):
"""
Customer Managed Identity
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientId":
suggest = "client_id"
elif key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WorkbookUserAssignedIdentitiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WorkbookUserAssignedIdentitiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WorkbookUserAssignedIdentitiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_id: str,
principal_id: str,
tenant_id: str):
"""
Customer Managed Identity
:param str client_id: The client ID of resource.
:param str principal_id: The principal ID of resource identity.
:param str tenant_id: The tenant ID of resource.
"""
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
The client ID of resource.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID of resource identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant ID of resource.
"""
return pulumi.get(self, "tenant_id")
|
from sprinter.lib.dependencytree import DependencyTree, DependencyTreeException
LEGAL_TREE = {
'a': ['b', 'c', 'd'],
'd': [],
'c': [],
'b': ['d'],
'e': []
}
MISSING_ENTRY_TREE = {
'a': ['b', 'c', 'd'],
'b': []
}
CYCLIC_TREE = {
'a': ['b', 'c', 'd'],
'b': [],
'c': ['b'],
'd': ['a']
}
LEGAL_ORDER = []
class TestDependencyTree(object):
def test_proper_tree(self):
""" Test whether a proper dependency tree generated the correct output. """
dt = DependencyTree(LEGAL_TREE)
order = dt.order
added_elements = []
for el in order:
for dependency in LEGAL_TREE[el]:
assert dependency in added_elements, \
"Element %s depends on %s and not added yet with order of %s!" % \
(el, dependency, added_elements)
added_elements.append(el)
def test_missing_entry_tree(self):
""" Test if dependencytree catches a missing tree """
try:
DependencyTree(MISSING_ENTRY_TREE)
except DependencyTreeException:
return
raise("Missing entry tree did not raise an error!")
def test_cyclic_tree(self):
""" Test if dependencytree catches a cycle """
try:
DependencyTree(CYCLIC_TREE)
except DependencyTreeException:
return
raise("Cyclic tree did not raise an error!")
|
"""Adapted from Optimus:
https://github.com/xuqifan897/Optimus/blob/main/summa/mpu/layers.py
"""
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from cubework.distributed import ParallelManager as pm
from cubework.distributed import broadcast
from cubework.distributed.collective import all_reduce
from cubework.global_vars import env
from cubework.utils import get_current_device, seed
from torch import Tensor
from torch.nn import Parameter
from .. import init
from ..utils import set_tensor_parallel_attribute_by_partition, to_2tuple
from ._operation import summa_AB, summa_ABT, add_bias_2d, classifier_2d, layernorm_2d
from ._utils import (
all_gather_tensor_2d,
assert_summa_initialization,
get_summa_dim_from_env,
reduce_scatter_tensor_2d,
split_batch_2d,
)
class Linear2D(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = None,
skip_bias_add: bool = False,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.skip_bias_add = skip_bias_add
# parallel settings
assert_summa_initialization()
self.row_rank = pm.PARALLEL_2D_COL.local_rank
self.col_rank = pm.PARALLEL_2D_ROW.local_rank
self.summa_dim = get_summa_dim_from_env()
# partitioning dimension
self.input_size_per_partition = self.in_features // self.summa_dim
self.hidden_size_per_partition = self.out_features // self.summa_dim
# create weight, shape: [k/q, h/q]
factory_kwargs = {"device": get_current_device(), "dtype": dtype}
self.weight = Parameter(
torch.empty(self.input_size_per_partition, self.hidden_size_per_partition, **factory_kwargs)
)
# create bias, shape: [h/q]
if bias:
self.bias = Parameter(torch.empty(self.out_features // self.summa_dim**2, **factory_kwargs))
else:
self.register_parameter("bias", None)
# initialize parameters
with seed(pm.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
if self.bias is not None:
set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2)
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.out_features
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
def forward(self, x: Tensor) -> Tensor:
# input: [m/q, n/q, k/q]
# output: [m/q, n/q, h/q]
out_shape = x.shape[:-1] + (self.hidden_size_per_partition,)
output = summa_AB(
x,
self.weight,
self.summa_dim,
out_shape,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
)
if self.bias is not None:
if self.skip_bias_add:
bias = add_bias_2d(
None,
self.bias,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
True,
)
return output, bias
else:
output = add_bias_2d(
output,
self.bias,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
False,
)
return output
else:
return output
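# Worked partitioning example (illustrative numbers, not taken from the source):
# with a 2x2 SUMMA grid (summa_dim == 2), in_features=1024 and out_features=4096,
# each rank holds a weight shard of shape [1024 // 2, 4096 // 2] = [512, 2048]
# and, if bias is enabled, a bias shard of shape [4096 // 2**2] = [1024].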
class LayerNorm2D(nn.Module):
def __init__(self, normalized_shape: int, eps: float = 1e-05, dtype=None):
super().__init__()
# layer norm config
self.normalized_shape = normalized_shape
self.variance_epsilon = eps
# parallel setting
assert_summa_initialization()
self.row_rank = pm.PARALLEL_2D_COL.local_rank
self.col_rank = pm.PARALLEL_2D_ROW.local_rank
self.summa_dim = get_summa_dim_from_env()
# partitioning dimension
self.partitioned_partition = normalized_shape // self.summa_dim**2
# create parameters
factory_kwargs = {"device": get_current_device(), "dtype": dtype}
self.weight = Parameter(torch.ones(self.partitioned_partition, **factory_kwargs))
self.bias = Parameter(torch.zeros(self.partitioned_partition, **factory_kwargs))
self._set_tensor_parallel_attributes()
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2)
def forward(self, x: Tensor) -> Tensor:
with torch.no_grad():
E_x = torch.sum(x, dim=-1, keepdim=True) # [b/q, s, 1]
E_x = all_reduce(E_x, pm.PARALLEL_2D_ROW) / self.normalized_shape
# Var_x in the block below is the sum of input^2
Var_x = torch.sum(x * x, dim=-1, keepdim=True) # [b/q, s, 1]
Var_x = all_reduce(Var_x, pm.PARALLEL_2D_ROW) / self.normalized_shape
Var_x = Var_x - E_x * E_x # variance of x [b/q, s, 1]
# this time 1/sqrt(Var_x + epsilon)
Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon)
output = layernorm_2d(x, E_x, Var_x, self.normalized_shape, pm.PARALLEL_2D_ROW, pm.PARALLEL_2D_COL)
bias = add_bias_2d(
None,
self.bias,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
True,
)
scale = add_bias_2d(
None,
self.weight,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
True,
)
output = torch.addcmul(bias, scale, output)
return output
class PatchEmbedding2D(nn.Module):
def __init__(
self,
img_size: int,
patch_size: int,
in_chans: int,
embed_size: int,
flatten: bool = True,
dtype: torch.dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
position_embed_initializer: Callable = init.zeros_(),
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
assert_summa_initialization()
self.summa_dim = get_summa_dim_from_env()
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.embed_size = embed_size
self.embed_size_per_partition = embed_size // (self.summa_dim**2)
self.weight = Parameter(
torch.empty(
(self.embed_size_per_partition, in_chans, *self.patch_size), device=get_current_device(), dtype=dtype
)
)
self.bias = Parameter(torch.empty(self.embed_size_per_partition, device=get_current_device(), dtype=dtype))
self.cls_token = Parameter(
torch.zeros((1, 1, self.embed_size_per_partition), device=get_current_device(), dtype=dtype)
)
self.pos_embed = Parameter(
torch.zeros(
(1, self.num_patches + 1, self.embed_size_per_partition), device=get_current_device(), dtype=dtype
)
)
self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer)
self._set_tensor_parallel_attribute()
def _set_tensor_parallel_attribute(self):
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2)
set_tensor_parallel_attribute_by_partition(self.cls_token, self.summa_dim**2)
set_tensor_parallel_attribute_by_partition(self.pos_embed, self.summa_dim**2)
def reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer):
with seed(pm.TENSOR):
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
fan_out = self.embed_size
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
bias_initializer(self.bias, fan_in=fan_in)
position_embed_initializer(self.pos_embed)
def forward(self, input_: Tensor) -> Tensor:
input_ = split_batch_2d(input_)
B, C, H, W = input_.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
weight = all_gather_tensor_2d(self.weight, 0, pm.PARALLEL_2D_COL)
bias = all_gather_tensor_2d(self.bias, 0, pm.PARALLEL_2D_COL)
output = F.conv2d(input_, weight, bias, stride=self.patch_size)
if self.flatten:
output = output.flatten(2).transpose(1, 2) # BCHW -> BNC
cls_token = all_gather_tensor_2d(self.cls_token, -1, pm.PARALLEL_2D_COL)
pos_embed = all_gather_tensor_2d(self.pos_embed, -1, pm.PARALLEL_2D_COL)
cls_token = cls_token.expand(output.shape[0], -1, -1)
output = torch.cat((cls_token, output), dim=1)
output = output + pos_embed
return output
class Embedding2D(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int = None,
dtype: torch.dtype = None,
weight_initializer: Callable = init.normal_(),
*args,
**kwargs,
):
super().__init__()
assert_summa_initialization()
self.summa_dim = get_summa_dim_from_env()
self.num_embeddings = num_embeddings
self.embed_dim = embedding_dim
embed_dim_per_partition = embedding_dim // self.summa_dim**2
self.padding_idx = padding_idx
self.embed_args = args
self.embed_kwargs = kwargs
self.weight = Parameter(
torch.empty((num_embeddings, embed_dim_per_partition), device=get_current_device(), dtype=dtype)
)
self.reset_parameters(weight_initializer)
self._set_tensor_parallel_attributes()
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
def reset_parameters(self, weight_initializer) -> None:
with seed(pm.TENSOR):
fan_in, fan_out = self.num_embeddings, self.embed_dim
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input_: Tensor) -> Tensor:
input_ = split_batch_2d(input_)
weight = all_gather_tensor_2d(self.weight, -1, pm.PARALLEL_2D_COL)
output = F.embedding(input_, weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
return output
class VocabParallelEmbedding2D(nn.Module):
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int = None,
dtype: torch.dtype = None,
weight_initializer: Callable = init.normal_(),
*args,
**kwargs,
):
super().__init__()
self.num_embeddings = num_embeddings
self.embed_dim = embedding_dim
self.padding_idx = padding_idx
self.embed_args = args
self.embed_kwargs = kwargs
assert_summa_initialization()
self.summa_dim = get_summa_dim_from_env()
self.num_embeddings_per_partition = self.num_embeddings // self.summa_dim
self.embed_dim_per_partition = self.embed_dim // self.summa_dim
tensor_parallel_rank = pm.PARALLEL_2D_COL.local_rank
self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition
self.weight = Parameter(
torch.empty(
(self.num_embeddings_per_partition, self.embed_dim_per_partition),
device=get_current_device(),
dtype=dtype,
)
)
self.reset_parameters(weight_initializer)
self._set_tensor_parallel_attributes()
env.vocab_parallel = True
def _set_tensor_parallel_attributes(self):
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
def reset_parameters(self, weight_initializer) -> None:
with seed(pm.TENSOR):
fan_in, fan_out = self.num_embeddings, self.embed_dim
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
self._fill_padding_idx_with_zero()
def _fill_padding_idx_with_zero(self) -> None:
if (
self.padding_idx is not None
and self.padding_idx >= self.vocab_start_index
and self.padding_idx < self.vocab_end_index
):
with torch.no_grad():
self.weight[self.padding_idx - self.vocab_start_index].fill_(0)
def forward(self, input_: Tensor) -> Tensor:
input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
output_parallel = F.embedding(
masked_input, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs
)
output_parallel[input_mask, :] = 0.0
output = reduce_scatter_tensor_2d(output_parallel, 0, pm.PARALLEL_2D_COL)
return output
class Classifier2D(nn.Module):
def __init__(
self,
in_features: int,
num_classes: int,
weight: Parameter = None,
bias: bool = True,
dtype: torch.dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
):
super().__init__()
self.in_features = in_features
self.num_classes = num_classes
assert_summa_initialization()
self.row_rank = pm.PARALLEL_2D_COL.local_rank
self.col_rank = pm.PARALLEL_2D_ROW.local_rank
self.summa_dim = get_summa_dim_from_env()
# partitioning dimension
self.input_size_per_partition = self.in_features // self.summa_dim**2
if weight is not None:
self.weight = weight
self.has_weight = False
else:
self.weight = Parameter(
torch.empty(self.num_classes, self.input_size_per_partition, device=get_current_device(), dtype=dtype)
)
self.has_weight = True
if bias:
self.bias = Parameter(torch.zeros(self.num_classes, device=get_current_device(), dtype=dtype))
else:
self.bias = None
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
def _set_tensor_parallel_attributes(self):
if self.has_weight:
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
with seed(pm.TENSOR):
fan_in, fan_out = self.in_features, self.num_classes
col_src_rank = pm.PARALLEL_2D_COL.rank_by_idx(0)
row_src_rank = pm.PARALLEL_2D_ROW.rank_by_idx(0)
if self.has_weight:
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
broadcast(self.bias, col_src_rank, pm.PARALLEL_2D_COL)
broadcast(self.bias, row_src_rank, pm.PARALLEL_2D_ROW)
def forward(self, input_: Tensor) -> Tensor:
out_shape = input_.shape[:-1] + (self.num_classes,)
return classifier_2d(
input_,
self.weight,
self.bias,
self.summa_dim,
out_shape,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
)
class VocabParallelClassifier2D(nn.Module):
def __init__(
self,
in_features: int,
num_classes: int,
weight: Parameter = None,
bias: bool = True,
dtype: torch.dtype = None,
weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
):
super().__init__()
self.in_features = in_features
self.num_classes = num_classes
# parallel setting
assert_summa_initialization()
self.row_rank = pm.PARALLEL_2D_COL.local_rank
self.col_rank = pm.PARALLEL_2D_ROW.local_rank
self.summa_dim = get_summa_dim_from_env()
# partitioning dimension
self.input_size_per_partition = in_features // self.summa_dim
self.output_size_per_partition = num_classes // self.summa_dim
# create weight, shape: [k/q, h/q]
factory_kwargs = {"device": get_current_device(), "dtype": dtype}
if weight is not None:
self.weight = weight
self.has_weight = False
else:
self.weight = Parameter(
torch.empty(self.output_size_per_partition, self.input_size_per_partition, **factory_kwargs)
)
self.has_weight = True
# create bias, shape: [h/q]
if bias:
self.bias = Parameter(torch.empty(self.num_classes // self.summa_dim**2, **factory_kwargs))
else:
self.bias = None
# initialize parameters
with seed(pm.TENSOR):
self.reset_parameters(weight_initializer, bias_initializer)
self._set_tensor_parallel_attributes()
env.vocab_parallel = True
def _set_tensor_parallel_attributes(self):
if self.has_weight:
set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2)
if self.bias is not None:
set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2)
def reset_parameters(self, weight_initializer, bias_initializer) -> None:
fan_in, fan_out = self.in_features, self.num_classes
if self.has_weight:
weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
if self.bias is not None:
bias_initializer(self.bias, fan_in=fan_in)
def forward(self, x: Tensor) -> Tensor:
# input: [m/q, n/q, k/q]
# output: [m/q, n/q, h/q]
out_shape = x.shape[:-1] + (self.output_size_per_partition,)
output = summa_ABT(
x,
self.weight,
self.summa_dim,
out_shape,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
)
if self.bias is not None:
output = add_bias_2d(
output,
self.bias,
pm.PARALLEL_2D_ROW,
pm.PARALLEL_2D_COL,
False,
)
return output
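# Hedged usage sketch (not part of the original source): these layers assume an
# already-initialized SUMMA 2D tensor-parallel environment (e.g. summa_dim=2 across
# 4 ranks) before they are constructed, roughly along the lines of:
#   norm = LayerNorm2D(normalized_shape=768)
#   embed = PatchEmbedding2D(img_size=224, patch_size=16, in_chans=3, embed_size=768)
#   head = Classifier2D(in_features=768, num_classes=1000)
# Each module partitions its parameters across the summa_dim**2 ranks.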
|
#MenuTitle: Report Area in Square Units
# -*- coding: utf-8 -*-
__doc__="""
Calculates the area of each selected glyph, and outputs it in square units. Increase precision by raising the value of PRECISION near the top of the script (the script will slow down).
"""
import GlyphsApp
PRECISION = 2 # higher numbers = more precision, but slower
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
GLYPHSAPPVERSION = NSBundle.bundleForClass_(GSMenu).infoDictionary().objectForKey_("CFBundleShortVersionString")
if GLYPHSAPPVERSION.startswith("1."):
measurementTool = NSClassFromString("GlyphsToolMeasurement").alloc().init()
else:
measurementTool = NSClassFromString("GSGuideLine")
def sliceIntersections( thisLayer, startPoint, endPoint ):
if GLYPHSAPPVERSION.startswith("2.0."):
return thisLayer.calculateIntersectionsStartPoint_endPoint_( startPoint, endPoint )
else:
return measurementTool.calculateIntersectionsForLayer_startPoint_endPoint_( thisLayer, startPoint, endPoint )
def sizeOfSlice( thisLayer, y ):
theseBounds = thisLayer.bounds
startPointX = theseBounds.origin.x - 10
endPointX = startPointX + theseBounds.size.width + 20
startPoint = NSPoint( startPointX, y )
endPoint = NSPoint( endPointX, y )
listOfIntersections = sliceIntersections( thisLayer, startPoint, endPoint )
totalLength = 0.0
if listOfIntersections and len(listOfIntersections) >= 4:
listOfIntersections.pop(0)
listOfIntersections.pop(-1)
for thisPairIndex in range(len(listOfIntersections)/2):
firstNode = listOfIntersections[ thisPairIndex*2 ].pointValue()
secondNode = listOfIntersections[ thisPairIndex*2+1 ].pointValue()
totalLength += abs( secondNode.x - firstNode.x )
return totalLength
def areaForLayer( thisLayer, precision = 2 ):
cleanLayer = thisLayer.copyDecomposedLayer()
cleanLayer.removeOverlap()
cleanLayer.addExtremePointsForce_(True)
cleanBounds = cleanLayer.bounds
lowerY = int( cleanBounds.origin.y )
upperY = lowerY + int( cleanBounds.size.height + 2 )
area = 0.0
for thisY in range(lowerY,upperY):
for thisRound in range(precision):
measurementHeight = float(thisY) + ( float(thisRound) / float(precision) )
area += sizeOfSlice( cleanLayer, measurementHeight )
return area / precision
def process( thisLayer ):
area = areaForLayer( thisLayer, PRECISION )
print "%.1f square units" % ( area )
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
# calculates areas for selected glyphs:
for thisLayer in listOfSelectedLayers:
thisGlyph = thisLayer.parent
print "Area of %s:" % (thisGlyph.name),
process( thisLayer )
|
from shared import *
from helper_functions import *
from app import app
from flask_restful import Api, Resource, reqparse, fields, marshal_with, marshal
#Wraps the flask 'app' with the Api class from flask-RESTful. Assigns this to the 'api' variable.
api = Api(app)
#---------------MAIN API------------------
#Fields for marshaling output in JSON using the 'marshal_with' decorator/the 'marshal' function of flask-RESTful.
#Only the fields listed here will be output as the response in JSON format during the specified HTTP request.
voucher_post_fields = {
'ref' : fields.String,
'serial' : fields.String,
}
voucher_get_fields = {
'ref' : fields.String,
'serial' : fields.String,
'expiryDate' : fields.String,
'originalBalance' : fields.String,
'clientCardRef' : fields.String,
'archived' : fields.String,
'remainingBalance' : fields.String,
'issueDate' : fields.String
}
#Classes that subclass the "Resource" class from flask-RESTful.
#These are the resources for the API and contain all HTTP C.R.U.D methods.
class VoucherListAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('issueDate', type=str, default="{:%Y-%m-%d}".format(datetime.now()), location = 'json')
self.reqparse.add_argument('expiryDate', type=str, default='{:%Y-%m-%d}'.format(datetime.now() + timedelta(days=365)), location='json')
self.reqparse.add_argument('originalBalance', type=str, default='50', location='json')
self.reqparse.add_argument('clientCardRef', type=str, default='', location='json')
self.reqparse.add_argument('creatingBranchRef', type=str, default='urn:x-memento:Branch:'+branch_id, location='json')
self.reqparse.add_argument('archived', type=str, default='false', location='json')
self.reqparse.add_argument('start', type=str, default='0')
self.reqparse.add_argument('max', type=str, default='50')
#self.reqparse.add_argument('remainingBalance', type=str, default='', location='json')
super(VoucherListAPI, self).__init__()
@marshal_with(voucher_post_fields)
def post(self):
print('we go to post!')
args = self.reqparse.parse_args()
voucher = Voucher()
setattr(voucher, 'serial', get_voucher_serial())
for key, value in args.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
#setattr(voucher, 'remainingBalance', getattr(voucher, 'originalBalance'))
#exl = ['remainingBalance', 'ref']
xml_voucher = to_xml(voucher, 'voucher', 'remainingBalance')
print(xml_voucher)
headers = { 'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.post(voucher_uri, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 201:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
            setattr(voucher, 'ref', root.find('./identity').attrib['id']) #set the newly acquired id/uri for the voucher
return voucher, 201
def get(self):
args = self.reqparse.parse_args()
params = { 'start' : int(args['start']), 'max' : int(args['max']) }
req = requests.get( voucher_uri, auth=(username, password), params=params, verify=False)
#print(req.content)
#print(req.headers)
#print(req.status_code)
voucher_list = []
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
#voucher = Voucher()
#setattr(voucher, 'ref', ref)
for child in root:
if child.tag == 'voucher':
voucher = Voucher()
filled_voucher = xml_to_object(child, voucher)
voucher_list.append(marshal(filled_voucher, voucher_get_fields))
return { 'voucher_list' : voucher_list }, 200 #to be fixed
class VoucherAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('issueDate', type=str, default="{:%Y-%m-%d}".format(datetime.now()), location = 'json')
self.reqparse.add_argument('expiryDate', type=str, default='{:%Y-%m-%d}'.format(datetime.now() + timedelta(days=365)), location='json')
self.reqparse.add_argument('originalBalance', type=str, default='50', location='json')
self.reqparse.add_argument('clientCardRef', type=str, default='', location='json')
self.reqparse.add_argument('creatingBranchRef', type=str, default='urn:x-memento:Branch:'+branch_id, location='json')
self.reqparse.add_argument('archived', type=str, default='false', location='json')
#self.reqparse.add_argument('remainingBalance', type=str, default='', location='json')
super(VoucherAPI, self).__init__()
#api.add_resource(VoucherAPI, '/api/voucher', endpoint='vouchers')
    #get a voucher by its unique reference (id)
@marshal_with(voucher_get_fields)
def get(self, ref):
req = requests.get( voucher_uri +'/'+ ref, auth=(username, password), verify=False)
print(req.content)
print(req.headers)
print(req.status_code)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
voucher = Voucher()
setattr(voucher, 'ref', ref)
for child in root:
if hasattr(voucher, child.tag):
setattr(voucher, child.tag, root.find('./'+child.tag).text)
print(voucher.__dict__)
return voucher, 200 #to be fixed
def put(self, ref):
original = get_voucher(ref)
args = self.reqparse.parse_args()
voucher = Voucher()
for key, value in original.__dict__.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
for key, value in args.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
#setattr(voucher, 'originalBalance', '1000')
xml_voucher = to_xml(voucher, 'voucher', 'serial')
headers = {'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.put(post_uri + '/' + ref, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
print(original.__dict__.items())
print(voucher.__dict__.items())
print(xml_voucher)
print(args.items())
return { 'result' : True }, 200
#def archive_voucher():
#pass
'''def put(self, ref):
original = get_voucher(ref)
#print(original)
voucher = Voucher()
for child in original:
if hasattr(voucher, child.tag):
setattr(voucher, child.tag, original.find('./'+child.tag).text)
for key, value in dict.items():
if hasattr(voucher, key) and dict[key] != False:
setattr(voucher, key, value)
xml_voucher = to_xml(voucher, 'voucher')
headers = {'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.put(voucher_uri + ref, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
return req.status_code'''
api.add_resource(VoucherListAPI, '/api/vouchers', endpoint='vouchers')
api.add_resource(VoucherAPI, '/api/vouchers/<ref>', endpoint='voucher')
#def update_voucher(ref):
#req = requests.get('localhost:5000/api/vouchers/'+ref)
#original = req.content
#print(req)
#------------Clients---------------
client_get_fields = {
'firstName' : fields.String,
'lastName' : fields.String,
'mobile' : fields.String,
'email' : fields.String,
'ref' : fields.String,
'vouchers' : fields.String
}
class ClientAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('firstName', type=str, default='', location = 'json')
self.reqparse.add_argument('lastName', type=str, default='', location= 'json')
self.reqparse.add_argument('mobile', type=str, location= 'json')
super(ClientAPI, self).__init__()
@marshal_with(client_get_fields)
def get(self, ref):
req = requests.get(client_uri + '/' + ref, auth=(username, password), verify=False)
#print(req.status_code)
#print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
client = Client()
setattr(client, 'ref', ref)
for child in root:
if child.tag == 'link' and child.attrib['rel'] == 'vouchers':
setattr(client, 'vouchers', child.attrib['href'])
if hasattr(client, child.tag):
setattr(client, child.tag, root.find('./'+child.tag).text)
return client, 200 #to be fixed
api.add_resource(ClientAPI, '/api/client/<ref>', endpoint='client')
def create_client():
xml = "<clientCard><firstName>Joe</firstName><lastName>Test</lastName><mobile>0833128991</mobile><archived>false</archived></clientCard>"
headers = {'content-type' : 'application/vnd.memento.ClientCard+xml' }
test_uri = 'https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client'
req = requests.post(client_uri, headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print('Headers:', req.headers)
print(req.content)
def update_client():
xml = "<clientCard><firstName>Joe</firstName><lastName>Test</lastName><mobile>083317777</mobile></clientCard>"
headers = {'content-type' : 'application/vnd.memento.ClientCard+xml' }
test_uri = 'https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client/9mkguGy0b6xpUgaBF65CIA'
req = requests.put(client_uri + '/' + 'XAzAN9Hwffcqp0cx_v7qJg', headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print('Headers:', req.headers)
with open('client_update.xml', 'w') as f:
f.write(str(req.content))
f.close()
def get_client(ref):
req = requests.get(client_uri + '/' + ref, auth=(username, password), verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
client = Client()
setattr(client, 'ref', ref)
for child in root:
if hasattr(client, child.tag):
setattr(client, child.tag, root.find('./'+child.tag).text)
return client, 200 #to be fixed
'''def get_client_vouchers(client_vouchers_uri):
test_uri = 'https://lbh1.eu.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client/sH40eB0ICVBgK5KFMrokfA/voucher'
req = requests.get(test_uri, auth=(username, password), verify=False)
print(req.status_code)
with open('client_vouchers.xml', 'w') as f:
f.write(str(req.content))
f.close'''
def get_clients():
#test_uri = "https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/voucher"
req = requests.get(client_uri + '/' + 'Xbus3AT3eqOEJXsfXr6L_w', auth=(username, password), verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
'''print(len(root))
count = 0
for child in root:
if child.tag == 'voucher':
count += 1
print (count)
#print(child.tag)'''
'''else:
with open('client_list.xml', 'w') as f:
f.write(str(req.content))
f.close'''
def vouch_trans():
trans_uri = "https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/voucher/8tpbJWlBGIB5Z4CC00npvw/transactions"
xml = '<voucherTransaction><date>2016-02-17</date><transactionAmount>-100.00</transactionAmount><voucherRef>urn:x-memento:Voucher:8tpbJWlBGIB5Z4CC00npvw</voucherRef><branchRef>urn:x-memento:Branch:nPpLa0UY4UO5dn68TpPsiA</branchRef><creatingUserRef>urn:x-memento:User:ISLX8fGtdKIB8CMLSIlc7g</creatingUserRef><voucherUpdateType>MANUALLY_ADDED</voucherUpdateType><relatedTransactionDeleted>false</relatedTransactionDeleted><compensatingTransaction>false</compensatingTransaction></voucherTransaction>'
    headers = {'content-type' : 'application/vnd.memento.VoucherTransaction+xml' }
req = requests.post(trans_uri, headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print(req.content)
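# Hedged usage sketch (assumes the wrapped Flask app runs locally; refs are placeholders):
#   GET  /api/vouchers        -> list vouchers (marshalled with voucher_get_fields)
#   POST /api/vouchers        -> create a voucher (marshalled with voucher_post_fields)
#   GET  /api/vouchers/<ref>  -> fetch a single voucher by its reference
#   PUT  /api/vouchers/<ref>  -> update a voucher
#   GET  /api/client/<ref>    -> fetch a client card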
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Length, Email
from app.models import User
class EditProfileForm(FlaskForm):
username = StringField('Usuário', validators=[DataRequired()])
email = StringField('E-mail', validators=[DataRequired(), Email()])
about_me = TextAreaField('Sobre mim', validators=[Length(min=0, max=140)])
submit = SubmitField('Gravar')
def __init__(self, original_username, original_email, *args, **kwargs):
super(EditProfileForm, self).__init__(*args, **kwargs)
self.original_username = original_username
self.original_email = original_email
def validate_username(self, username):
if username.data != self.original_username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError('Por favor, use um nome diferente.')
def validate_email(self, email):
if email.data != self.original_email:
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Por favor use um e-mail diferente.')
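# Hedged usage sketch (inside a typical Flask-Login view; names are illustrative):
# form = EditProfileForm(current_user.username, current_user.email)
# if form.validate_on_submit():
#     current_user.username = form.username.data
#     ...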
|
import requests
import json
import datetime
import src.heat as oh
url_base = "http://localhost"
### Keystone API
def get_token(id,passwd):
data = \
{"auth":
{
"identity":
{"password":
{"user":
{"domain":
{"name": "Default"},
"password": passwd,
"name": id
}
},
"methods": ["password"]
},
"scope":
{"system": {
"all": True
}
}
}
}
    # fixed header
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# TODO get project id
res = requests.post(url_base + '/identity/v3/auth/tokens', headers=headers, data=json.dumps(data), verify=True)
try:
token = res.headers['X-Subject-Token']
return token
except Exception as e:
print(e)
return None
def get_other_token(id, passwd, projectID):
data = \
{"auth":
{
"identity":
{"password":
{"user":
{"domain":
{"name": "Default"},
"password": passwd,
"name": id
}
},
"methods": ["password"]
}
}
}
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
data['auth']['scope'] = {
"project":
{"id": projectID }
}
res = requests.post(url_base + '/identity/v3/auth/tokens', headers=headers, data=json.dumps(data), verify=True)
token = res.headers['X-Subject-Token']
return token
def get_token_by_token(token,project_id):
data = \
{
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": token
}
},
"scope": {
"project": {
"id": project_id
}
}
}
}
    # fixed header
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', "X-Auth-Token":token}
# TODO get project id
res = requests.post(url_base + '/identity/v3/auth/tokens', headers=headers, data=json.dumps(data), verify=True)
try:
token = res.headers['X-Subject-Token']
return token
except Exception as e:
print(e)
return None
### Project API
def get_projectID(token):
url = url_base + "/identity/v3/auth/projects"
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
res = requests.get(url, headers=headers)
body = res.json()
projects_name = [x['name'] for x in body['projects'] if not x['name'] == "invisible_to_admin"]
projects_uuid = [ x['id'] for x in body['projects'] if not x['name'] == "invisible_to_admin"]
return projects_name, projects_uuid
### Instance API
# for ask what kinds of instances admin control on dashboard
def get_server_list(token):
server_uuid = []
server_names = []
url = url_base + "/compute/v2.1/servers"
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
res = requests.get(url, headers=headers)
body = res.json()
try:
server_uuid = [ x['id'] for x in body['servers']]
server_names = [ x['name'] for x in body['servers']]
except:
pass
return server_names, server_uuid
def get_server_id(token, server_name):
server_names, server_uuid = get_server_list(token)
index = -1
for i in range(len(server_names)):
if(server_name == server_names[i]):
index = i
break
if(index != -1):
return server_uuid[index]
else:
return -1
def get_server_info(token,server_uuid):
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/compute/v2.1/servers/%s'%server_uuid
res = requests.get(url=url, headers=headers)
return res.json()
def get_resource_list(token, server_uuid):
url = url_base + "/metric/v1/resource/generic/%s"%(server_uuid)
headers = {'Content-Type': 'application/json, */*', 'X-Auth-Token':token}
res = requests.get( url, headers = headers)
body = res.json()
return body
def get_resource_size(token, server_uuid):
body = get_resource_list(token,server_uuid)
headers = {'Content-Type': 'application/json, */*', 'X-Auth-Token': token}
PARAMS = {'start': None, 'granularity': None, 'resample': None, 'stop': None, 'aggregation': None, 'refresh': False}
try:
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['vcpus'])
res = requests.get(url=url, headers=headers, params=PARAMS)
cpu = res.json()[-1][2]
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['disk.root.size'])
res = requests.get(url=url, headers=headers, params=PARAMS)
disk = res.json()[-1][2]
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['memory'])
res = requests.get(url=url, headers=headers, params=PARAMS)
memory = res.json()[-1][2]/(1024)
return cpu, memory, disk
except Exception as e:
print(e)
return None
def get_mesuare_list(token, body):
now = datetime.datetime.now()
five_mins = datetime.timedelta(minutes=5)
five_mins_ago = now - five_mins
headers = {'Content-Type': 'application/json, */*', 'X-Auth-Token': token}
PARAMS = {'start': None, 'granularity': None, 'resample': None, 'stop': None, 'aggregation': None, 'refresh': False}
url = url_base + '/metric/v1/metric/%s/measures'%(body['metrics']['cpu_util'])
res = requests.get(url = url, headers = headers, params= PARAMS )
cpu = res.json()[-1][2]
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['memory.usage'])
res = requests.get(url=url, headers=headers, params=PARAMS)
memory = res.json()[-1][2]/(1024)
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['memory'])
res = requests.get(url=url, headers=headers, params=PARAMS)
memory /= res.json()[-1][2]/(1024)
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['disk.usage'])
res = requests.get(url=url, headers=headers, params=PARAMS)
disk = res.json()[-1][2]/(8*1024*1024*1024)
url = url_base + '/metric/v1/metric/%s/measures' % (body['metrics']['disk.root.size'])
res = requests.get(url=url, headers=headers, params=PARAMS)
disk /= res.json()[-1][2]
return cpu, memory, disk
# def get_stack_resource_list(token, project_id):
# url = url_base + "/heat-api/v1/%s/stacks" %(project_id)
# headers = {'Content-Type': 'application/json, */*', 'X-Auth-Token':token}
# res = requests.get( url, headers = headers )
# # print(res.json())
# body = res.json()['stacks']
# for elem in body:
# url = url_base + "/heat-api/v1/%s/stacks/%s/%s/resources?type=OS::Nova::Server"%(project_id,elem['stack_name'],elem['id'])
# headers = {'Content-Type': 'application/json, */*', 'X-Auth-Token':token, "id": elem['id']}
# res = requests.get( url, headers = headers ).json()["resources"]
# for i in res:
# resource_uuid = i["physical_resource_id"]
# resource_list = get_resource_list(token,resource_uuid)
# return body
### Flavor API
def get_flavor_id(token, server_uuid):
flavor_id = get_server_info(token, server_uuid)['server']['flavor']['id']
return flavor_id
def get_flavor_name(token, server_uuid):
flavor_id = get_server_info(token, server_uuid)['server']['flavor']['id']
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/compute/v2.1/flavors/%s' % flavor_id
res = requests.get(url=url, headers=headers)
return res.json()['flavor']['name']
def remove_flavor(token, flavor_id):
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/compute/v2.1/flavors/%s' % flavor_id
res = requests.delete(url=url, headers=headers)
return res
def create_flavor(token, flavor_name, vcpus, memory, storage):
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/compute/v2.1/flavors'
print(vcpus,memory,storage)
req = {
"flavor": {
"name": flavor_name,
"ram": memory,
"vcpus": vcpus,
"disk": storage
}
}
res = requests.post(url, headers=headers, data=json.dumps(req))
return res.json()
def get_flavor_list(token):
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/compute/v2.1/flavors'
res = requests.get(url, headers=headers)
return res.json()
# def get_flavor_info(token, flavorID):
# headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
# url = url_base + '/compute/v2.1/os-simple-tenant-usage/%s' % flavorID
# res = requests.get(url=url, headers=headers)
# return res.json()
### Image API
def get_image_list(token):
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
url = url_base + '/image/v2/images'
res = requests.get(url=url, headers=headers)
return res.json()
def cpuAlarm(token, server_uuid, cpu):
alter_url = 'http://localhost:5000/alarmAlter?uuid='+server_uuid
data = {
'alarm_actions': [alter_url],
'ok_actions': ['https://localhost:5000/ok'],
'name': 'cpu_hi',
'gnocchi_resources_threshold_rule': {
'evaluation_periods': 1,
'metric': 'cpu_util',
'resource_id': server_uuid,
'aggregation_method': 'mean',
'granularity': '300',
'threshold': cpu,
'comparison_operator': 'gt',
'resource_type': 'instance'
},
'insufficient_data_actions': ['https://localhost:5000/nodata'],
'type': 'gnocchi_resources_threshold',
'description': 'CPU High Average'
}
headers = {
'X-Auth-Token': token,
"Content-Type": "application/json"}
res = requests.post(url_base+":8042/v2/alarms", headers=headers, data=json.dumps(data))
# res = requests.post(URL, headers=headers, data = data)
s = res.content
u = str(s)
print(u)
def memoryAlarm(token, server_uuid, memory):
alter_url = 'http://localhost:5000/alarmAlter'#?uuid='+server_uuid'
data = {
'alarm_actions': [alter_url],
'ok_actions': ['https://localhost:5000/ok'],
'name': 'memory_hi',
'gnocchi_resources_threshold_rule': {
'evaluation_periods': 1,
'metric': 'memory.usage',
'resource_id': server_uuid,
'aggregation_method': 'mean',
'granularity': '300',
'threshold': memory,
'comparison_operator': 'gt',
'resource_type': 'instance'
},
'insufficient_data_actions': ['https://localhost:5000/nodata'],
'type': 'gnocchi_resources_threshold',
        'description': 'Memory High Average'
}
headers = {
'X-Auth-Token': token,
"Content-Type": "application/json"}
res = requests.post(url_base+":8042/v2/alarms", headers=headers, data=json.dumps(data))
# res = requests.post(URL, headers=headers, data = data)
s = res.content
u = str(s)
print(u)
def diskAlarm(token, server_uuid, disk):
alter_url = 'http://localhost:5000/alarmAlter?uuid='+server_uuid
data = {
'alarm_actions': [alter_url],
'ok_actions': ['https://localhost:5000/ok'],
'name': 'disk_hi',
'gnocchi_resources_threshold_rule': {
'evaluation_periods': 1,
'metric': 'disk.usage',
'resource_id': server_uuid,
'aggregation_method': 'mean',
'granularity': '300',
'threshold': disk,
'comparison_operator': 'gt',
'resource_type': 'instance'
},
'insufficient_data_actions': ['https://localhost:5000/nodata'],
'type': 'gnocchi_resources_threshold',
        'description': 'Disk High Average'
}
headers = {
'X-Auth-Token': token,
"Content-Type": "application/json"}
res = requests.post(url_base+":8042/v2/alarms", headers=headers, data=json.dumps(data))
# res = requests.post(URL, headers=headers, data = data)
s = res.content
u = str(s)
print(u)
### COMPOSITE ALARM DOESN'T WORK, NEED TO FIX IT
def createAlarm(token, server_uuid, cpu, memory, disk):
resource_cpu , resource_memory, resource_disk = get_resource_size(token, server_uuid)
print(resource_cpu , resource_memory, resource_disk )
memory = (int(memory)*resource_memory*1024)/100.0
disk = (int(disk)*resource_disk*1024)/100.0
alarmName = server_uuid+"Alarm"
data = {
        'alarm_actions': [f'http://localhost:5000/alarmAlter?uuid={server_uuid}'],
'ok_actions': ['https://localhost:5000/ok'],
'insufficient_data_actions': ['https://localhost:5000/nodata'],
'name': alarmName,
'type': 'composite',
'composite_rule': {
"or": [
{
"threshold": cpu if cpu!= None else 10000,
"metric": "cpu_util",
"type": "gnocchi_resources_threshold",
"resource_id": server_uuid,
"resource_type": "instance",
"aggregation_method": "last",
"granularity": "300",
'comparison_operator': 'gt'
},
{
"threshold": memory if memory!= None else 10000,
"metric": "memory.usage",
"type": "gnocchi_resources_threshold",
"resource_id": server_uuid,
"resource_type": "instance",
"aggregation_method": "last",
"granularity": "300"
},
{
"threshold": disk if disk!= None else 10000,
"metric": "disk.usage",
"type": "gnocchi_resources_threshold",
"resource_id": server_uuid,
"resource_type": "instance",
"aggregation_method": "last",
"granularity": "300",
}
]
}
}
headers = {
'X-Auth-Token': token,
"Content-Type": "application/json"}
res = requests.post(url_base+":8042/v2/alarms", headers=headers, data=json.dumps(data))
s = res.content
u = str(s)
print(u)
return
def stackUpdate(token, project_id, server_id, server_name, pred_cpu, pred_memory, pred_storage, rating):
if( pred_cpu != 1 or pred_memory != 1 or pred_storage != 1 ):
print("Need to Change")
cpu, memory, storage = get_resource_size(token,server_id)
cpu *= pred_cpu.round()
memory *= pred_memory.round(1)
storage *= pred_storage
memory = memory.round(1)*1024
storage = storage.round(1)
# flavor_prevID = get_flavor_id(token,server_id)
flavor_name = server_name + str(datetime.datetime.now())
try:
create_flavor(token, flavor_name, int(cpu), int(memory), int(storage))
try:
print( oh.resizeTemplate(project_id, server_name, server_id, flavor_name, token) )
except Exception as e:
print(e)
pass
# flavor remove
# oa.remove_flavor(token, flavor_prevID)
except Exception as e:
print(e)
pass
# resize here
else:
if(rating <= 20):
print("Need to copy and move")
oh.copyTemplate(project_id, server_name, server_id, token)
res={'result': 'alternative'}
return res
else:
print("Don't need change")
jsonResult = {
'pred_cpu': pred_cpu,
'pred_memory': pred_memory,
'pred_disk': pred_storage
}
resJson = json.dumps(str(jsonResult))
print("/stackUpdate -> ")
print(resJson)
res = {'result': True}
return res
def get_server_id_by_alarm(alarm_id, token):
url = url_base+":8042/v2/alarms/"+alarm_id
headers = {'Content-Type': 'application/json', 'X-Auth-Token': token}
res = requests.get(url, headers=headers)
body = res.json()
resource_id = body['gnocchi_resources_threshold_rule']['resource_id']
return resource_id
if __name__ == '__main__':
#flavor_id = get_flavor_id('gAAAAABdqLq6saLrqjBVA4p_e2qPTavL16-1Mqv7AP-kYb4b_NP0i1pFkrieZxMbYUrChAy-745b5FYHMl2KBUljb7znwU4DiEej7sKrxcHNFXo5RC2tPRBWRXP2PxjxWi_P9zTd7MvITx2dyZVUkBqOcOl2ykUrZPW6CVR3G55peqFeT3y5zCg', 'e6a04013-38aa-4ba6-a30f-88eb20d976ea')
    # get_server_id_by_alarm('412fa434-5e59-496d-84a7-f5d59127bd46', token)  # needs a scoped token as its second argument
    pass
# print(flavor_id)
# print(get_flavor_name('gAAAABdqLq6saLrqjBVA4p_e2qPTavL16-1Mqv7AP-kYb4b_NP0i1pFkrieZxMbYUrChAy-745b5FYHMl2KBUljb7znwU4DiEej7sKrxcHNFXo5RC2tPRBWRXP2PxjxWi_P9zTd7MvITx2dyZVUkBqOcOl2ykUrZPW6CVR3G55peqFeT3y5zCg', flavor_id))
# flavor = get_server_info('gAAAAABdpRhwm8DR6Yd4clbmRXquEsLhJ_sD53walnGxgVra4G7BnapscMRdvWe8R3nguVxOmL3lz1GIEKEL1bl_TVeGKoSj9Q2796tLu5QwJxiF442T0mkbEeYB9ncpXTWtAXML5Gonl_zXuysfHPA0xhfy3Cs904ahIPuz2Gr3yJKhiW-DGqQ', '7e07034a-caf0-421c-a0af-333936e6a15c')['server']['flavor']
# print(get_flavor_info('gAAAAABdpRhwm8DR6Yd4clbmRXquEsLhJ_sD53walnGxgVra4G7BnapscMRdvWe8R3nguVxOmL3lz1GIEKEL1bl_TVeGKoSj9Q2796tLu5QwJxiF442T0mkbEeYB9ncpXTWtAXML5Gonl_zXuysfHPA0xhfy3Cs904ahIPuz2Gr3yJKhiW-DGqQ', flavor['id'])
# token = get_other_token('admin','devstack','admin')
# token = get_token('admin','devstack')
# token = get_other_token('admin','devstack','0462be6630d042d086302acac64bead0')
# print(get_stack_resource_list('gAAAAABdqH2dpRKewingV81VRJL4CeCQod69F3bpzcmxsinNcjTN3oxoA8CNen5RaP_dEE5jSGGqpfd4ET-TQcq5qFHYnru6xxjmKbEcBMHcGAZJJF5m8sPXATRA5gZeSmoG3A2bNFmHpS65UIVcgG3c9aNm7yTGkt2Vs7V84YgPOuAVb-CBWI4', 'admin'))
# get_projectID(token)
# get_server_list(token)
# createAlarm(token, '7e07034a-caf0-421c-a0af-333936e6a15c', 0,0,0)
# get_server_info(token, '7e07034a-caf0-421c-a0af-333936e6a15c')
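    # Hedged usage sketch (commented out; credentials and IDs are placeholders):
    # token = get_token('admin', 'devstack')
    # names, uuids = get_server_list(token)
    # for name, uuid in zip(names, uuids):
    #     print(name, get_resource_size(token, uuid))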
|
from greynoise import GreyNoise
from sw_greynoise import GreynoiseBaseClass
PLUGIN_VERSION = "v1.1.0"
class SwMain(GreynoiseBaseClass):
def __init__(self, context):
super(SwMain, self).__init__(context)
self.ip_address = context.inputs["ip_address"]
self.api_key = context.asset["api_key"]
self.session = GreyNoise(
api_key=self.api_key,
integration_name="greynoise-community-swimlane-" + PLUGIN_VERSION,
offering="community",
)
def execute(self):
output = []
response = self.session.ip(self.ip_address)
output.append(response)
return output
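# Hedged usage sketch (the real context object is supplied by the Swimlane platform;
# the attributes below are purely illustrative):
# class FakeContext:
#     inputs = {"ip_address": "8.8.8.8"}
#     asset = {"api_key": "YOUR_GREYNOISE_COMMUNITY_KEY"}
# print(SwMain(FakeContext()).execute())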
|
from .events import TLNetworkEvent
TransferEventType = "Transfer"
ApprovalEventType = "Approval"
class TokenEvent(TLNetworkEvent):
def __init__(self, web3_event, current_blocknumber, timestamp, user=None):
super().__init__(
web3_event, current_blocknumber, timestamp, from_to_types, user
)
self.token_address = web3_event.get("address")
class ValueEvent(TokenEvent):
@property
def value(self):
return self._web3_event.get("args").get("_value")
class TransferEvent(ValueEvent):
pass
class ApprovalEvent(ValueEvent):
pass
event_builders = {TransferEventType: TransferEvent, ApprovalEventType: ApprovalEvent}
from_to_types = {
TransferEventType: ["_from", "_to"],
ApprovalEventType: ["_owner", "_spender"],
}
standard_event_types = [TransferEventType, ApprovalEventType]
all_event_types = list(event_builders.keys())
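# Hedged usage sketch (illustrative only): look up the builder class and the
# address fields for a given event type.
# builder = event_builders[TransferEventType]   # -> TransferEvent
# fields = from_to_types[TransferEventType]     # -> ["_from", "_to"]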
|
from seahub.tags.models import Tags
from seahub.test_utils import BaseTestCase
class TagsManagerTest(BaseTestCase):
def setUp(self):
pass
def test_create_tag(self):
assert 'a' == Tags.objects.get_or_create_tag('a').name
|
import random
first = list()
first.append(" traditional ")
first.append(" poor ")
first.append(" southern ")
first.append(" areful ")
first.append(" united ")
first.append(" different ")
first.append(" tiny ")
first.append(" former ")
first.append(" exciting ")
first.append(" important ")
first.append(" every ")
first.append(" consistent ")
first.append(" useful ")
first.append(" aggressive ")
first.append(" massive ")
first.append(" administrative ")
first.append(" automatic ")
first.append(" successfully ")
first.append(" tall ")
first.append(" inner ")
first.append(" well-off ")
first.append(" secretive ")
first.append(" frightening ")
first.append(" empty ")
first.append(" pleasant ")
first.append(" frightening ")
first.append(" woozy ")
first.append(" laughable ")
first.append(" ossified ")
first.append(" wandering ")
first.append(" brainy ")
first.append(" yummy ")
first.append(" elfin ")
first.append(" long-term ")
first.append(" toothsome ")
first.append(" miscreant ")
first.append(" homely ")
first.append(" abaft ")
first.append(" halting ")
first.append(" chivalrous ")
first.append(" substantial ")
first.append(" psychotic ")
first.append(" momentous ")
first.append(" juicy ")
first.append(" exultant ")
first.append(" rampant ")
first.append(" mundane ")
first.append(" doubtful ")
first.append(" fascinated ")
first.append(" salty ")
first.append(" previous ")
first.append(" pointless ")
first.append(" whispering ")
first.append(" halting ")
first.append(" lame ")
first.append(" nonstop ")
def get_adjective(list):
# sort_list(second)
return random.choice(list)
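# Minimal usage example (assumed, not part of the original file):
if __name__ == "__main__":
    # each entry already carries surrounding spaces, so simple concatenation works
    print("a" + get_adjective(first) + "idea")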
|
from python.qrcode import *
qr = QRCode()
qr.setTypeNumber(4)
qr.setErrorCorrectLevel(ErrorCorrectLevel.M)
qr.addData('ssswhere comes qr!')
qr.make()
|
from construct import Struct, Byte
from constructutils import _global_struct_io_patch
def test_main():
s = Struct('a' / Byte)
# original behavior
_global_struct_io_patch.unpatch()
try:
value = s.parse(b'\x01')
assert '_io' in value
finally:
_global_struct_io_patch.patch()
# patched
value = s.parse(b'\x01')
assert '_io' not in value
def test_duplicate():
f = Struct._parse
_global_struct_io_patch.patch()
# make sure function isn't patched twice
assert f == Struct._parse
|
#Write an import statement for the Python random module
import random
class Die:
sides = 6
die = random.randint(1,sides)
def __init__(self):
'''
        Define a constructor method with one attribute, sides, with a value of 6.
'''
self.sides = 6
    def roll(self):
        '''
        Define a roll method that virtually rolls a die and returns the value.
        '''
        self.die = random.randint(1, self.sides)
        return self.die
def get_roll(self):
return self.die
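# Minimal usage example (assumed, not part of the original exercise):
if __name__ == "__main__":
    d = Die()
    d.roll()
    print("You rolled a", d.get_roll())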
|
import voluptuous as vol
from esphome.components import i2c, sensor
import esphome.config_validation as cv
from esphome.const import CONF_HUMIDITY, CONF_ID, CONF_NAME, CONF_TEMPERATURE, \
CONF_UPDATE_INTERVAL
from esphome.cpp_generator import Pvariable
from esphome.cpp_helpers import setup_component
from esphome.cpp_types import App, Application, PollingComponent
DEPENDENCIES = ['i2c']
MakeHTU21DSensor = Application.struct('MakeHTU21DSensor')
HTU21DComponent = sensor.sensor_ns.class_('HTU21DComponent', PollingComponent, i2c.I2CDevice)
HTU21DTemperatureSensor = sensor.sensor_ns.class_('HTU21DTemperatureSensor',
sensor.EmptyPollingParentSensor)
HTU21DHumiditySensor = sensor.sensor_ns.class_('HTU21DHumiditySensor',
sensor.EmptyPollingParentSensor)
PLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(HTU21DComponent),
vol.Required(CONF_TEMPERATURE): cv.nameable(sensor.SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(HTU21DTemperatureSensor),
})),
vol.Required(CONF_HUMIDITY): cv.nameable(sensor.SENSOR_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(HTU21DHumiditySensor),
})),
vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,
}).extend(cv.COMPONENT_SCHEMA.schema)
def to_code(config):
rhs = App.make_htu21d_sensor(config[CONF_TEMPERATURE][CONF_NAME],
config[CONF_HUMIDITY][CONF_NAME],
config.get(CONF_UPDATE_INTERVAL))
htu21d = Pvariable(config[CONF_ID], rhs)
sensor.setup_sensor(htu21d.Pget_temperature_sensor(), config[CONF_TEMPERATURE])
sensor.setup_sensor(htu21d.Pget_humidity_sensor(), config[CONF_HUMIDITY])
setup_component(htu21d, config)
BUILD_FLAGS = '-DUSE_HTU21D_SENSOR'
def to_hass_config(data, config):
return [sensor.core_to_hass_config(data, config[CONF_TEMPERATURE]),
sensor.core_to_hass_config(data, config[CONF_HUMIDITY])]
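# Hedged example of the matching YAML configuration (illustrative only):
# sensor:
#   - platform: htu21d
#     temperature:
#       name: "Living Room Temperature"
#     humidity:
#       name: "Living Room Humidity"
#     update_interval: 60s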
|
from django.shortcuts import render, redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from .forms import UserRegistrationForm, ProfileUpdateForm, UserUpdateForm
from django.contrib.auth.decorators import login_required
# Create your views here.
## UserCreationForm returns the user
## request.POST is a kind of dictionary
# If the request carries POST data (i.e. the form was filled in and submitted), the content is checked with is_valid;
# otherwise a blank form (labels and inputs) is sent to the page under the name 'form'.
def register(request):
if request.method == "POST":
form =UserRegistrationForm(request.POST)
if form.is_valid():
ak=form.save()
#ak return user name
username=form.cleaned_data.get('username')
messages.success(request, f" Your account has been {username} sucesfully created please register")
return redirect('login')
else:
form=UserRegistrationForm()
return render(request, 'users/register.html', {"form":form})
# request.POST is a dictionary
@login_required
def profile(request):
if request.method=='POST':
u_form=UserUpdateForm(request.POST,instance=request.user)
p_form=ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
#username=form.cleaned_data.get('username')
messages.success(request, f" Your account has been sucesfully updated")
return redirect('profile')
else:
u_form=UserUpdateForm(instance=request.user)
p_form=ProfileUpdateForm(instance=request.user.profile)
context={
'u_form':u_form,
'p_form':p_form
}
return render (request, 'users/profile.html', context)
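# Hedged wiring example (hypothetical urls.py; module paths are illustrative):
# from django.urls import path
# from users import views as user_views
# urlpatterns = [
#     path('register/', user_views.register, name='register'),
#     path('profile/', user_views.profile, name='profile'),
# ]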
|
#!/usr/bin/env python3
"""Create custom invitations from a guest list with flowery decorations."""
import os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def create_card(name):
"""Creates a personalised invitation card with the provided name on it."""
card = Image.new('RGBA', (360, 288), 'white')
flower = Image.open('flower.png')
card.paste(flower, (10, 40), flower)
cut_guide = Image.new('RGBA', (364, 292), 'black')
cut_guide.paste(card, (2, 2))
draw_obj = ImageDraw.Draw(cut_guide)
    fonts_folder = '/usr/share/fonts/TTF'
custom_font = ImageFont.truetype(os.path.join(fonts_folder,
'DejaVuSerif.ttf'), 72)
draw_obj.text((120, 100), name, fill='blue', font=custom_font)
cut_guide.save('{}-invite.png'.format(name))
with open('guests.txt') as f:
guests = f.readlines()
for guest in guests:
    create_card(guest.strip())  # strip the trailing newline left by readlines()
print('All invitations personalised and saved to the CWD - enjoy the dinner.')
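# Example guests.txt contents (assumed format, one guest name per line):
# Alice
# Bob
# Carol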
|
import cachetools, cachetools.func, time, threading, traceback
hashkey = cachetools.keys.hashkey
tm = time.monotonic
Lock = threading.Lock
# buffer that refreshes in the bkgnd
class StaleBuffer:
# f returns what we want to serve
def __init__(self, f, ttr=5, ttl=10): # time to refresh / time to live
self.a = None
self.ts = tm()
self.l = Lock()
self.state = 'empty'
self.f = f
self.ttr = ttr
self.ttl = ttl
assert ttl>ttr
def dispatch_refresh(self):
def wrapper():
try:
r = self.f()
except Exception as e:
traceback.print_exc()
self.l.acquire()
self.state = 'nodispatch'
self.l.release()
else:
self.l.acquire()
self.state = 'nodispatch'
self.a = r
self.ts = tm()
self.l.release()
t = threading.Thread(target=wrapper, daemon=True)
t.start()
def get(self):
ttl = self.ttl
ttr = self.ttr
f = self.f
now = tm()
        # we can't afford expensive locking every time, so take a lock-free fast path when possible
if self.state=='nodispatch' and now - self.ts < ttr:
return self.a
if self.state=='dispatching':
return self.a
self.l.acquire()
try:
# cache is empty
if self.state == 'empty':
self.a = f()
self.ts = now
self.state = 'nodispatch'
# cache is not empty, no dispatch on the way
elif self.state == 'nodispatch':
                # is the cached value still fresh?
if now - self.ts>ttl:
# too old.
self.a = f()
self.ts = now
elif now - self.ts>ttr:
# kinda old
self.dispatch_refresh()
self.state = 'dispatching'
# use the stale version
else:
# data is fresh
pass
# cache is not empty, dispatch on the way
elif self.state == 'dispatching':
# return the stale version until dispatch finishes
pass
except Exception as e:
self.l.release()
raise e
else:
r = self.a
self.l.release()
return r
if __name__ == '__main__':
j = 1
def k():
global j
j+=1
time.sleep(2)
return j
sb = StaleBuffer(k, ttr=3, ttl=6)
for i in range(20):
print(sb.get(), sb.state)
time.sleep(0.5)
def stale_cache(ttr=3, ttl=6, maxsize=128):
def wrapper(f):
@cachetools.func.lru_cache(maxsize=maxsize)
def get_stale_buffer(*a, **kw):
def sbw():
return f(*a, **kw)
sb = StaleBuffer(sbw, ttr=ttr, ttl=ttl)
return sb
def inner(*a, **kw):
sb = get_stale_buffer(*a, **kw)
return sb.get()
return inner
return wrapper
if __name__ == '__main__':
j = 1
k = 1
@stale_cache()
def a(i):
global j
j+=1
time.sleep(1)
return i*j
@stale_cache()
def b(n):
global k
k+=1
time.sleep(1.5)
return k*n
for i in range(20):
print(a(3.5), b(6))
time.sleep(0.4)
|
#! /usr/bin/python3
#
# Copyright (c) 2018 Warren J. Jasper <wjasper@ncsu.edu>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from usb_1208LS import *
import time
import sys
def toContinue():
answer = input('Continue [yY]? ')
if (answer == 'y' or answer == 'Y'):
return True
else:
return False
def main():
miniLAB = usb_miniLAB()
while True:
print("\nminiLAB 1008 Testing")
print("----------------")
print("Hit 'b' to blink.")
print("Hit 's' to set user id.")
print("Hit 'g' to get user id.")
print("Hit 'f' to get serial number.")
print("Hit 'j' for information.")
print("Hit 'c' to test counter. ")
print("Hit 'd' to test digital I/O.")
print("Hit 't' to test digital bit I/O.")
print("Hit 'o' to test analog output.")
print("Hit 'i' to test analog input.")
print("Hin 'n' to test analog input scan.")
print("Hit 'e' to exit.")
ch = input('\n')
if ch == 'b':
miniLAB.Blink()
elif ch == 'e':
miniLAB.h.close()
exit(0)
elif ch == 'r':
miniLAB.Reset()
exit(0)
elif ch == 'c':
miniLAB.CInit() # initialize the counter
print('Connect DIO0 to CTR')
miniLAB.DConfig(miniLAB.DIO_AUXPORT,miniLAB.DIO_DIR_OUT)
for i in range(20):
miniLAB.DOut(miniLAB.DIO_AUXPORT, 1)
miniLAB.DOut(miniLAB.DIO_AUXPORT, 0)
print('Counter =', miniLAB.CIn()) # read the current count
elif ch == 'f':
print("Serial No: %s" % miniLAB.h.get_serial_number_string())
elif ch == 'g':
print('User ID =', miniLAB.GetID())
elif ch == 's':
id = int(input('Enter a user id (0-255): '))
miniLAB.SetID(id)
elif ch == 'j':
print("Manufacturer: %s" % miniLAB.h.get_manufacturer_string())
print("Product: %s" % miniLAB.h.get_product_string())
print("Serial No: %s" % miniLAB.h.get_serial_number_string())
elif ch == 'd':
print('Testing Digital I/O ...')
print('Connect pins 21 through 28 <--> 32 through 39 (Port A to Port B)')
miniLAB.DConfig(miniLAB.DIO_PORTA, 0x0) # Port A output
miniLAB.DConfig(miniLAB.DIO_PORTB, 0xff) # Port B input
miniLAB.DOut(miniLAB.DIO_PORTA, 0x0)
while (True):
try:
num = int(input('Enter a byte number [0x0-0xff]: '),16)
miniLAB.DOut(miniLAB.DIO_PORTA, num)
value = miniLAB.DIn(miniLAB.DIO_PORTB)
print('PortB: The number you entered =', hex(value))
for i in range(8):
value = miniLAB.DBitIn(miniLAB.DIO_PORTB, i)
print('Port B Bit',i,' =', hex(value))
except:
pass
miniLAB.DConfig(miniLAB.DIO_AUXPORT, 0xf) # AUXPORT all input
value = miniLAB.DIn(miniLAB.DIO_AUXPORT)
                print('Auxiliary Port: value =', hex(value))
if (toContinue() != True):
break
elif ch == 't':
print('Testing Digital Bit I/O ...')
print('Connect pins 21 through 28 <--> 32 through 39 (Port A to Port B)')
while (True):
try:
miniLAB.DOut(miniLAB.DIO_PORTA, 0x0) # reset the pin values
bit = int(input('Enter a bit value for output (0 | 1): '),16)
pin = int(input('Select a pin in port A [0-7]: '),16)
miniLAB.DBitOut(miniLAB.DIO_PORTA, pin, bit)
value = miniLAB.DIn(miniLAB.DIO_PORTB)
print('The number you entered 2^',pin,'= ',hex(value))
except:
pass
if (toContinue() != True):
break
elif ch == 'i':
print('Test Differential Analog Input')
chan = int(input('Select channel [0-3]: '))
if (chan < 0 or chan > 3):
break
print("\t\t1. +/- 20.V")
print("\t\t2. +/- 10.V")
print("\t\t3. +/- 5.V")
print("\t\t4. +/- 4.V")
print("\t\t5. +/- 2.5V")
print("\t\t6. +/- 2.0V")
print("\t\t7. +/- 1.25V")
print("\t\t8. +/- 1.0V")
gain = int(input("Select gain: [1-8] "))
if (gain == 1):
gain = miniLAB.BP_20_00V
elif (gain == 2):
gain = miniLAB.BP_10_00V
elif (gain == 3):
gain = miniLAB.BP_5_00V
elif (gain == 4):
gain = miniLAB.BP_4_00V
elif (gain == 5):
gain = miniLAB.BP_2_50V
elif (gain == 6):
gain = miniLAB.BP_2_00V
elif (gain == 7):
gain = miniLAB.BP_1_25V
elif (gain == 8):
gain = miniLAB.BP_1_00V
else:
break
while True:
value = miniLAB.AIn(chan, gain)
print('Channel:', chan,' value = ', hex(value), format(miniLAB.volts(gain,value),'.2f'))
time.sleep(1)
if (toContinue() != True):
break
elif ch == 'n':
print('Test Analog Input Scan')
nQueue = 4 # depth of the queue: must be 1, 2, 4 or 8
chanQueue = [0, 1, 2, 3, 0, 1, 2, 3]
gain = miniLAB.BP_10_00V
gainQueue = [gain, gain, gain, gain, gain, gain, gain, gain]
frequency = 150 # 150 Hz
count = 96 # must be an even number
options = miniLAB.AIN_EXECUTION | miniLAB.AIN_BURST_MODE
value = miniLAB.AInScan(count, frequency, nQueue, chanQueue, gainQueue, options)
print('Total number of samples = ', len(value))
for i in range(int(count/4)):
print('scan ',i, end=' ')
for j in range(4):
print(format(miniLAB.volts(gain,value[4*i+j]),'.2f'),end=' ')
print()
elif ch == 'o':
print('Testing the analog output.')
chan = int(input('Enter channel [0-1]: '))
value = int(input('Enter value [0-0x3ff]: '),16)
print('Output voltage =',format(value*5.0/1023.,'.3f'))
miniLAB.AOut(chan,value)
if __name__ == "__main__":
main()
|
# This file contains the WSGI configuration required to serve up your
# web application at http://<your-username>.pythonanywhere.com/
# It works by setting the variable 'application' to a WSGI handler of some
# description.
#
# The below has been auto-generated for your Flask project
import sys
import os
# add your project directory to the sys.path
project_home = u'/home/frenzymadness/devconf-coding-challenge-webform'
if project_home not in sys.path:
sys.path = [project_home] + sys.path
os.environ['CODING_CHALLENGE_ADMIN_PASSWORD'] = 'password'
# import flask app but need to call it "application" for WSGI to work
from form import app as application
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///form.db'
application.config['UPLOAD_FOLDER'] = '/home/frenzymadness/devconf-coding-challenge-webform/files'
|
# coding=utf-8
# Copyright (c) DLUP Contributors
import json
from pathlib import Path
from typing import Tuple, Union
import h5py
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from dlup_lightning_mil.utils.model import get_backbone
from torch import nn
import numpy as np
import torchmetrics
class TileSupervision(LightningModule):
"""
Standard supervision on labelled tiles. Works with disk_filelist_dataset and dlup_wsi_dataset
Args
backbone: str
Description of backbone model. For now: "resnet18" or "shufflenet_v2_x1_0"
load_weights: str
Which weights to load. If none given, random initialization of model. Can be either "imagenet" or
the absolute path to saved model weights by VISSL
lr: float
learning rate for ADAM optimizer
"""
def __init__(self, backbone: str, load_weights: str, lr: float, weight_decay: float, num_classes: int, metric_level: str = 'slide'):
super().__init__()
self.lr = lr
self.weight_decay = weight_decay
backbone_dict = get_backbone(backbone=backbone, load_weights=load_weights)
num_features = backbone_dict['num_features']
self.feature_extractor = backbone_dict['model']
self.classifier = nn.Linear(in_features=num_features, out_features=num_classes)
self.save_hyperparameters()
# Initialize validation output
self.validation_output = self._reset_output()
self.test_output = self._reset_output()
# Initialize metrics
self.metric_level = metric_level
self.auroc = torchmetrics.AUROC()
self.f1 = torchmetrics.F1()
self.pr_curve = torchmetrics.PrecisionRecallCurve()
def _reset_output(self):
        # TODO: Add paths and/or metadata so that saved predictions can be traced
        # back to the images they were made on.
return {'slide_id': [], 'patient_id': [], 'loss': [], 'target': [], 'prediction': [],
'paths': [], 'meta': {}, 'root_dir': []}
def forward(self, x):
x = self.feature_extractor(x)
x = self.classifier(x)
return x
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
return optimizer
def training_step(self, batch, batch_idx):
x, y = batch["x"], batch["y"]
y_hat = self(x)
loss = F.cross_entropy(y_hat, y.long())
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch["x"], batch["y"]
y_hat = self(x)
loss = F.cross_entropy(y_hat, y.long())
self.track_values(x, y, y_hat, loss, batch, self.validation_output)
self.log("val_loss", loss)
return loss
def test_step(self, batch, batch_idx):
x, y = batch["x"], batch["y"]
y_hat = self(x)
loss = F.cross_entropy(y_hat, y.long())
self.track_values(x, y, y_hat, loss, batch, self.test_output)
return loss
def validation_epoch_end(self, validation_step_outputs) -> None:
self.log_metrics(prefix='val', output=self.validation_output)
self.validation_output = self._reset_output()
def test_epoch_end(self, test_step_outputs) -> None:
self.log_metrics(prefix='test', output=self.test_output)
self.test_output = self._reset_output()
def track_values(self, x, y, y_hat, loss, batch, output_reference):
output_reference["target"] += y.tolist() # Bathc size values
output_reference["prediction"] += torch.nn.functional.softmax(y_hat.cpu(), dim=1)[:,
1].tolist() # batch size values
output_reference["loss"].append(loss.tolist()) # 1 value
output_reference["patient_id"] += batch["patient_id"] # Batch size values
output_reference["slide_id"] += batch["slide_id"] # batch size values
output_reference["paths"] += batch["paths"]
output_reference["root_dir"] += batch["root_dir"]
# For the DLUPSlideImage dataset...
if 'meta' in batch.keys():
for key in batch["meta"].keys():
if key not in output_reference["meta"].keys():
output_reference["meta"][key] = []
output_reference["meta"][key] += batch["meta"][key].tolist()
def log_metrics(self, prefix, output):
target = torch.ShortTensor(output["target"])
prediction = torch.Tensor(output["prediction"])
# Do something to do slide-level or patient-level AUC
# torch default collate returns strings as [[('string',)], [('string',)]]
slide_ids = np.array(output["slide_id"])
patient_ids = np.array(output["patient_id"])
# Do it slide-level for now
if self.metric_level == "patient":
ids = patient_ids
elif self.metric_level == "slide":
ids = slide_ids
else:
            raise ValueError(f"Unknown metric_level: {self.metric_level}")
unique_ids = np.unique(ids)
id_level_targets = torch.ShortTensor([target[ids == i].max() for i in unique_ids])
id_level_predictions = torch.Tensor([prediction[ids == i].mean() for i in unique_ids])
auroc_score = self.auroc(preds=id_level_predictions, target=id_level_targets)
f1_score = self.f1(preds=id_level_predictions, target=id_level_targets)
precision, recall, thresholds = self.pr_curve(preds=id_level_predictions, target=id_level_targets)
self.log(f"{prefix}_auc", auroc_score, prog_bar=True, logger=True)
self.log(f"{prefix}_f1", f1_score, prog_bar=True, logger=True)
if self.trainer.save_validation_output_to_disk:
#---- Save metrics
if not (Path(self.trainer.log_dir) / f'output/{prefix}').is_dir():
Path.mkdir(Path(self.trainer.log_dir) / f'output/{prefix}', parents=True)
metrics_to_save = {'auc': float(auroc_score),
'f1': float(f1_score),
'prcurve': {
'precision': precision.tolist(),
'recall': recall.tolist(),
'thresholds': thresholds.tolist()}
}
with open(Path(self.trainer.log_dir) / f'output/{prefix}/metrics.json', 'w') as f:
f.write(json.dumps(metrics_to_save))
#---- Save output
self.save_output(prefix)
def save_output(self, fold):
if fold == 'val':
output = self.validation_output
elif fold == 'test':
output = self.test_output
else:
raise NotImplementedError
unique_slide_ids = set(output['slide_id'])
for slide_id in unique_slide_ids:
Path.mkdir(Path(self.trainer.log_dir) / f'output/{fold}', parents=True, exist_ok=True)
hf = h5py.File(f'{self.trainer.log_dir}/output/{fold}/{slide_id}_output.h5', "a")
current_slide_indices = np.array(output['slide_id']) == slide_id
# It's a bit bloated, but np.array() allows for nice [True, False] indexing
# But h5 doesn't like the np.array string encoding, so we make them back into lists
slide_id = np.array(output['slide_id'])[current_slide_indices].tolist()
if len(set(slide_id)) == 1:
slide_id = list(set(slide_id)) # always the case, really
hf['slide_id'] = slide_id
else:
raise ValueError
patient_id = np.array(output['patient_id'])[current_slide_indices].tolist()
if len(set(patient_id)) == 1:
patient_id = list(set(patient_id))
hf['patient_id'] = patient_id
else:
raise ValueError
root_dir = np.array(output['root_dir'])[current_slide_indices].tolist()
if len(set(root_dir)) == 1:
root_dir = list(set(root_dir))
hf['root_dir'] = root_dir
else:
raise ValueError
hf['tile_prediction'] = np.array(output['prediction'])[current_slide_indices].tolist()
paths = np.array(output['paths'])[current_slide_indices].tolist()
if len(set(paths)) == 1:
                paths = list(set(paths))  # This is the case for the DLUP SlideImage dataset, where all tiles refer to the same .svs; not for the disk filelist dataset
hf['paths'] = paths
target = np.array(output['target'])[current_slide_indices].tolist()
if len(set(target)) == 1:
target = list(set(target))
hf['target'] = target
else:
raise ValueError
if 'meta' in output.keys():
# Only DLUP SlideImage dataset gives keys and values in 'meta'
for key in output['meta'].keys():
hf[f'meta/{key}'] = np.array(output['meta'][key])[current_slide_indices].tolist()
hf.close()
|
#
# @lc app=leetcode.cn id=80 lang=python3
#
# [80] Remove Duplicates from Sorted Array II
#
# https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array-ii/description/
#
# algorithms
# Medium (57.54%)
# Likes: 452
# Dislikes: 0
# Total Accepted: 105.9K
# Total Submissions: 176.2K
# Testcase Example: '[1,1,1,2,2,3]'
#
# Given a sorted array nums, remove the duplicates in-place so that each element
# appears at most twice, and return the new length of the array.
#
# Do not allocate extra space for another array; you must do this by modifying the
# input array in-place with O(1) extra memory.
#
# Clarification:
#
# Why is the returned value an integer while the printed answer is an array?
#
# Note that the input array is passed by "reference", so any modification made to it
# inside the function is visible to the caller.
#
# You can think of the internal process as follows:
#
#
# // nums is passed by "reference", i.e. no copy of the argument is made.
# int len = removeDuplicates(nums);
#
# // Modifications to the input array inside the function are visible to the caller.
# // Using the length returned by your function, it prints all elements of the array
# // within that length.
# for (int i = 0; i < len; i++) {
#     print(nums[i]);
# }
#
#
# Example 1:
#
#
# Input: nums = [1,1,1,2,2,3]
# Output: 5, nums = [1,1,2,2,3]
# Explanation: The function should return the new length 5, and the first five
# elements of nums should be 1, 1, 2, 2 and 3. Elements beyond the returned length
# do not matter.
#
#
# Example 2:
#
#
# Input: nums = [0,0,1,1,1,1,2,3,3]
# Output: 7, nums = [0,0,1,1,2,3,3]
# Explanation: The function should return the new length 7, and the first seven
# elements of nums should be 0, 0, 1, 1, 2, 3 and 3. Elements beyond the returned
# length do not matter.
#
#
# Constraints:
#
#
# 1 <= nums.length <= 3 * 10^4
# -10^4 <= nums[i] <= 10^4
# nums is sorted in ascending order.
#
#
#
# @lc code=start
from typing import List


class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
p, count = 1,1
for i in range(1,len(nums)):
if nums[i] == nums[i-1]:
count += 1
else:
count = 1
if count <= 2:
nums[p] = nums[i]
p += 1
return p
# @lc code=end
# def removeDuplicates(self, nums: List[int]) -> int:
# j,count = 1,1
# for i in range(1,len(nums)):
# if nums[i] == nums[i-1]:
# count += 1
# else:
# count = 1
# if count <= 2:
# nums[j] = nums[i]
# j += 1
# return j
# def removeDuplicates(self, nums: List[int]) -> int:
# start = 0
# for p in range(len(nums)):
# if start < 2 or nums[p] != nums[start-2]:
# nums[start] = nums[p]
# start += 1
# return start
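# Hypothetical local driver (not part of the LeetCode submission) to sanity-check the
# solution against the two examples from the problem statement above.
if __name__ == "__main__":
    cases = [
        ([1, 1, 1, 2, 2, 3], [1, 1, 2, 2, 3]),
        ([0, 0, 1, 1, 1, 1, 2, 3, 3], [0, 0, 1, 1, 2, 3, 3]),
    ]
    for nums, expected in cases:
        k = Solution().removeDuplicates(nums)
        assert nums[:k] == expected, (nums[:k], expected)
    print("all examples passed")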
|
avtobots = {"Оптімус Прайм": "Грузовик Peterbilt 379",
"Бамблбі": "Chevrolet Camaro", "Джаз": "Porsche 935 Turbo"}
avtobots.update({"Сентінел Прайм": "Пожежна машина"})
print(avtobots)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 02 19:15:18 2016
@author: Florian Wolf
__________________________________________________
### MacPyver.postgres ###
### The Swissknife like Python-Package for ###
### work in general and with GIS ###
__________________________________________________
"""
import psycopg2 as pg
import pandas as pd
from sqlalchemy import create_engine
##############################################################################
'''functions to create sql-command/ load tables to postgres
--> create_pg_Table_sql_command
--> create_pg_Table
--> create_pg_Table_load_to_pg
'''
def create_pg_Table_sql_command(full_path, tablename, sep=';', header=0, write= False):
'''
create_pg_Table_sql_command:
returns the executable SQL command for postgres
>>> create_pg_Table_sql_command(fullpath, tablename, sep=';', header=0)
fullPath --> full path plus the filename
tablename --> table name for the new table
sep --> default sep is ;
header --> default header is the first line in the csv
'''
#read csv with pandas
data = pd.read_csv(full_path, sep=sep,header=header)
    #get the header from the data that was read
colL = list(data.columns)
    #convert the column names to lower case and replace spaces with underscores
colL = [x.lower().replace(' ','_') for x in colL]
#update the columnnames
data.columns = colL
colType = list(data.dtypes)
#create empty list, output
op_list = []
#append the columnname and the datatype /varchar length
for x in range(len(colL)):
if str(colType[x]) == 'int64':
op_list.append(colL[x] + ' integer')
#print colL[x] + ' integer, '
elif str(colType[x]) == 'float64' or str(colType[x]) == 'float':
op_list.append(colL[x] + ' float')
#print colL[x] + ' float, '
elif str(colType[x]) == 'object':
#get max length of the cells in the column
leng = data[colL[x]].map(lambda y: len(str(y))).max()
op_list.append(str(colL[x]) + ' varchar(' + str(leng) +')')
#print str(colL[x]) + ' varchar(' + str(leng) +'), '
else:
            print colL[x] + ' has an unsupported dtype and was skipped'
#return sql command and the data; this is used in the function create_pg_Table_load_to_pg
if write == True:
sql = "CREATE TABLE IF NOT EXISTS %s (index integer, %s)" % (tablename , (", ").join(op_list))
return sql, data
    #return only the sql command; this is used by create_pg_Table
else:
sql = "CREATE TABLE IF NOT EXISTS %s (%s)" % (tablename , (", ").join(op_list))
return sql
def create_pg_Table(full_path, tablename, host, user, password, dbname, schema='public', port=5432, sep=';', header=0 ):
'''
create_pg_Table:
creates a table in the postgres database
>>> create_pg_Table(full_path, tablename, host,
user, password, dbname,
schema='public', port=5432,
sep=';', header=0):
fullPath --> full path plus the filename
tablename --> table name for the new table
host --> database host
user --> your database user name
password --> your database password
dbname --> database you want to create the table
schema --> schema you want to create the table, default is public
port --> port of your database, default is 5432
sep --> default sep is ;
header --> default header is the first line in the csv
'''
#get the sql command
sql = create_pg_Table_sql_command(full_path, tablename, sep, header, write= False)
#create the connection string for the database
pg_conn_str = "host=%s port=%d user=%s password=%s dbname=%s" % (host, port, user, password, dbname)
#connect to the database
pg_conn = pg.connect(pg_conn_str)
#create a cursor on the database
pg_cur = pg_conn.cursor()
try:
#execute the sql command
pg_cur.execute(sql)
#commit the sql command
pg_conn.commit()
print 'table created'
except:
print 'table exists already'
pg_conn.close()
def create_pg_Table_load_to_pg(full_path, tablename, host, user, password, dbname, schema='public', port=5432, sep=';', header=0, ):
'''
create_pg_Table_load_to_pg:
creates a table in the postgres database and loads the data to the
database
>>> create_pg_Table_load_to_pg(full_path, tablename, host,
user, password, dbname,
schema='public', port=5432,
sep=';', header=0):
fullPath --> full path plus the filename
tablename --> table name for the new table
host --> database host
user --> your database user name
password --> your database password
dbname --> database you want to create the table and load the data
schema --> schema you want to create the table, default is public
port --> port of your database, default is 5432
sep --> default sep is ;
header --> default header is the first line in the csv
'''
#get the sql command and the data from the given table
sql, data = create_pg_Table_sql_command(full_path, tablename, sep=sep, header=header, write= True)
#create the connection string for the database
pg_conn_str = "host=%s port=%d user=%s password=%s dbname=%s" % (host, port, user, password, dbname)
#connect to the database
pg_conn = pg.connect(pg_conn_str)
#create a cursor on the database
pg_cur = pg_conn.cursor()
try:
#execute the sql command
pg_cur.execute(sql)
#commit the sql command
pg_conn.commit()
print 'table created'
except:
print 'table exists already'
#create a engine connection to the database
engine = create_engine(r'postgresql://%s:%s@%s:%d/%s' % (user, password,host,port,dbname))
#append the data to the created table
data.to_sql(tablename,engine,if_exists='append')
pg_conn.close()
###############################################################################
###############################################################################
'''
communicate with postgres and execute sql queries
'''
class pg_communicate():
    '''class to connect to postgres and execute queries
    and get the return of the sql query
    - init will create the connection
    - refresh_cur for refreshing the cursor on the database
    - fetch to execute the query and get the return
    - close to close the connection
    '''
def __init__(self, host, user, pw, db, port = 5432):
"""create all variables for the connection to the database
and create the connection to the database"""
self.host = host
self.user = user
self.pw = pw
self.db = db
self.port = port
        #create the postgres connection string
self.pg_conn_str = "host=%s port=%d user=%s password=%s dbname=%s" % (self.host, self.port, self.user, self.pw, self.db)
#connect to the database
self.pg_conn = pg.connect(self.pg_conn_str)
#create a cursor on the database
self.pg_cur = self.pg_conn.cursor()
def refresh_cur(self):
"""refresh pg cursor"""
self.pg_conn = pg.connect(self.pg_conn_str)
self.pg_cur = self.pg_conn.cursor()
def fetch(self, sql_query):
"""execute sql and fetch the return of the query"""
try:
#execute the sql command
self.pg_cur.execute(sql_query)
#fetch return
records = self.pg_cur.fetchall()
if not records:
print('Error: no line to fetch')
return False
return records
except pg.Error, e:
#print errormessages
print( e.diag.severity)
error_message = e.diag.message_primary
print( error_message)
if error_message =='current transaction is aborted, commands ignored until end of transaction block':
print('WARNING: refreshed cursor')
self.refresh_cur()
return False
def commit(self, sql_command):
"""commit some statement to the database, eg. creating a new table"""
try:
self.pg_cur.execute(sql_command)
self.pg_conn.commit()
except pg.Error, e:
print(e.diag.severity)
error_message = e.diag.message_primary
print( error_message)
if error_message =='current transaction is aborted, commands ignored until end of transaction block':
print('WARNING: refreshed cursor')
self.refresh_cur()
def close(self):
"""close connection to the postgres db"""
self.pg_conn.close()
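# Minimal usage sketch (assumed, not part of the original module): connect, run a
# query and close the connection again. Host, credentials, database name and the
# query are placeholders.
if __name__ == '__main__':
    db = pg_communicate('localhost', 'postgres', 'secret', 'gisdb')
    rows = db.fetch('SELECT version();')
    if rows:
        print(rows[0][0])
    db.close()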
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 13:03:34 2020
@author: admin
"""
from tkinter import filedialog
from tkinter import messagebox
import os
def get_location():
    loc = filedialog.askdirectory(initialdir=os.getcwd(), title='Select a save folder.')
    if loc:  # askdirectory returns '' when the dialog is cancelled
        os.chdir(loc)
    return loc
def alert(msg):
print(msg)
    messagebox.showerror('Error', msg)
def tips(msg):
print(msg)
messagebox.showinfo('Quick Tips', msg)
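# Minimal usage sketch (assumed, not part of the original helpers): create a hidden
# Tk root so the dialogs have a parent window, then prompt for a save folder.
if __name__ == '__main__':
    import tkinter as tk

    root = tk.Tk()
    root.withdraw()  # hide the empty main window; only the dialogs are shown
    folder = get_location()
    tips('Files will be saved to: {}'.format(folder))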
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import tensorflow as tf
a = tf.constant(2)
b = tf.constant(3)
x = tf.add(a, b)
writer = tf.summary.FileWriter('./graphs',tf.get_default_graph())
with tf.Session() as ses:
result = ses.run(x)
print(result)
writer.close()
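# To inspect the saved graph, one can run TensorBoard (installed alongside TensorFlow):
#   tensorboard --logdir ./graphs
# and open the URL it prints in a browser.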
|
import os
def filter_str(text, in_filter, out_filter):
    '''
    Return False if text is in out_filter.
    If in_filter is not None, return True if text is in in_filter and False otherwise.
    If in_filter is None, return True.
    '''
if in_filter is not None:
assert (isinstance(in_filter, list) or isinstance(in_filter, tuple))
if out_filter is not None:
assert (isinstance(out_filter, list) or isinstance(out_filter, tuple))
if out_filter is not None and text in out_filter:
return False
if in_filter is not None:
if text in in_filter:
return True
else:
return False
else:
return True
def list_all_files(rootdir, ext=None, non_ext=None):
    '''
    Purpose: collect all files under a directory, recursing into subdirectories.
    Input:
        rootdir   directory to walk
        non_ext   extensions to exclude, e.g. ['.json', '.txt']; takes priority over ext; must be a list or tuple
        ext       extensions to keep, e.g. ['.png', '.jpg']; if None, no filtering is applied
                  (only extensions not listed in non_ext are returned); must be a list or tuple
    Output:
        list of file paths
    '''
_files = []
    entries = os.listdir(rootdir)
    for i in range(0, len(entries)):
        path = os.path.join(rootdir, entries[i])
if os.path.isdir(path):
_files.extend(list_all_files(path, ext, non_ext))
if os.path.isfile(path):
if filter_str(os.path.splitext(path)[-1], ext, non_ext):
_files.append(path)
return _files
if __name__ == "__main__":
dir = '../chapter'
files = list_all_files(dir, ('.cpp', ))
cmakeFIle = dir + "/CMakeLists.txt"
    print('total files: {}, file: {}'.format(len(files), cmakeFIle))
with open(cmakeFIle, 'w') as f:
for name in files:
# os.rename(f, f.replace("main-", "chapter_"))
target = os.path.splitext(os.path.basename(name))[0]
print(target)
f.write('add_executable({} {}.cpp)\n'.format(target, target))
f.write('target_include_directories({} PRIVATE {})\n'.format(
target, '${CV_INCLUDE_DIRS}'))
f.write('target_link_directories({} PRIVATE {})\n'.format(
target, '${CV_LIBRARY_DIRS}'))
f.write('target_link_libraries({} PRIVATE {})\n\n'.format(
target, '${CV_LIBRARIES}'))
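        # For a hypothetical chapter_01.cpp, the generated CMakeLists.txt entry would
        # look roughly like this (the CV_* variables are expected to be defined elsewhere):
        #   add_executable(chapter_01 chapter_01.cpp)
        #   target_include_directories(chapter_01 PRIVATE ${CV_INCLUDE_DIRS})
        #   target_link_directories(chapter_01 PRIVATE ${CV_LIBRARY_DIRS})
        #   target_link_libraries(chapter_01 PRIVATE ${CV_LIBRARIES})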
|
from collections import deque
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if (tr := str(func(i))) == r[0]:
print(f"test {test_counter} passed")
else:
print(f"your result: {tr}")
print(f"test answer: {r[0]}")
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
_, root = self.parse_programs(data)
return root
def part2(self, data):
programs, root = self.parse_programs(data)
self.get_weights(programs, root)
q = deque([root])
target_weight = -1
while q:
node = q.popleft()
if len(programs[node][1]) > 0:
child = {}
for c in programs[node][1]:
w = programs[c][0] + (programs[c][2] if programs[c][2] != -1 else 0)
child[c] = w
if len(set(child.values())) == 1:
return target_weight - sum(child.values())
else:
ws = [*set(child.values())]
for w in ws:
if [*child.values()].count(w) == 1:
target = [c[0] for c in child.items() if c[1] == w][0]
q.append(target)
target_weight = [i for i in ws if i != w][0]
def parse_programs(self, data):
programs = {}
for line in data:
if " -> " in line:
parent, children = line.split(" -> ")
children = children.split(", ")
parent, weight = parent.split()
programs[parent] = [int(weight[1:-1]), children, 0]
else:
parent, weight = line.split()
programs[parent] = [int(weight[1:-1]), [], -1]
nodes = programs.keys()
children = []
for node in nodes:
children.extend(programs[node][1])
for node in nodes:
if node not in children:
return programs, node
def get_weights(self, programs, node):
if programs[node][2] == -1:
return programs[node][0]
else:
w = sum(self.get_weights(programs, child) for child in programs[node][1])
programs[node][2] = w
return programs[node][0] + w
|
#!/bin/python3
import os
#
# Complete the 'contacts' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts 2D_STRING_ARRAY queries as parameter.
#
def contacts(queries):
# Write your code here
from collections import Counter
counter = Counter()
for operation, word in queries:
if operation == 'add':
for i in range(1, len(word) + 1):
counter.update([word[0:i]])
elif operation == 'find':
yield counter[word]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
queries_rows = int(input().strip())
queries = []
for _ in range(queries_rows):
queries.append(input().rstrip().split())
result = contacts(queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
from typing import List, Optional
from fastapi.encoders import jsonable_encoder
from .models import Policy, PolicyCreate, PolicyUpdate
from .dsl import build_parser
def get(*, db_session, policy_id: int) -> Optional[Policy]:
"""Gets a policy by id."""
return db_session.query(Policy).filter(Policy.id == policy_id).first()
def get_by_name(*, db_session, name: str) -> Optional[Policy]:
"""Gets a policy by name."""
return db_session.query(Policy).filter(Policy.name == name).first()
def get_by_expression(*, db_session, expression: str) -> Optional[Policy]:
"""Gets a policy by expression."""
return db_session.query(Policy).filter(Policy.expression == expression).one_or_none()
def get_all(*, db_session):
"""Gets all policies."""
return db_session.query(Policy)
def create(*, db_session, policy_in: PolicyCreate) -> Policy:
"""Creates a new policy."""
policy = Policy(**policy_in.dict())
db_session.add(policy)
db_session.commit()
return policy
def create_all(*, db_session, policies_in: List[PolicyCreate]) -> List[Policy]:
"""Creates all policies."""
policies = [Policy(name=p.name) for p in policies_in]
    db_session.add_all(policies)
    db_session.commit()
return policies
def update(*, db_session, policy: Policy, policy_in: PolicyUpdate) -> Policy:
"""Updates a policy."""
policy_data = jsonable_encoder(policy)
update_data = policy_in.dict(skip_defaults=True)
for field in policy_data:
if field in update_data:
setattr(policy, field, update_data[field])
db_session.add(policy)
db_session.commit()
return policy
def create_or_update(*, db_session, policy_in: PolicyCreate) -> Policy:
"""Creates or updates a policy."""
update_data = policy_in.dict(skip_defaults=True)
q = db_session.query(Policy)
for attr, value in update_data.items():
q = q.filter(getattr(Policy, attr) == value)
instance = q.first()
if instance:
return update(db_session=db_session, policy=instance, policy_in=policy_in)
return create(db_session=db_session, policy_in=policy_in)
def parse(policy: str) -> dict:
"""Parse a policy."""
query = build_parser()
return query.parseString(policy, parseAll=True)
def delete(*, db_session, policy_id: int):
"""Delets a policy."""
policy = db_session.query(Policy).filter(Policy.id == policy_id).first()
db_session.delete(policy)
db_session.commit()
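# Minimal usage sketch (assumed, not part of this module): with a configured SQLAlchemy
# session and the Policy schemas above, a policy can be upserted and fetched back.
# `SessionLocal` and the field values are placeholders for whatever the project defines.
#
#   db_session = SessionLocal()
#   policy_in = PolicyCreate(name="example-policy", expression="priority > 3")
#   policy = create_or_update(db_session=db_session, policy_in=policy_in)
#   assert get_by_name(db_session=db_session, name="example-policy") is not None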
|
# coding: utf-8
import pytest
mysql = pytest.importorskip("mysql.connector")
from snlocest.areadb import AreaDataManager
def pytest_funcarg__areadb(request):
return AreaDataManager('geo', 'area_feature')
#return AreaDataManager('test20160513_geo', 'test20160513_area')
# Test that the data has been properly loaded into the area DB
def test_areadb(areadb):
sql = 'select count(code) from {}'.format(areadb.table_name)
areadb.execute(sql)
res = areadb.cursor.fetchall()
assert res[0][0] == 1901
def test_contains(areadb):
res = areadb.contains([137.408818, 34.701704])
assert res[0] == 23201
assert res[3] == '愛知県豊橋市 '
|
import uuid
from collections import Counter
from functools import partial
from typing import (Callable,
Dict,
List)
import numpy as np
import pandas as pd
from sqlalchemy import func
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.session import Session
from alcor.models import eliminations
from alcor.models.star import Star
from . import filters
from . import (luminosity_function,
velocities_vs_magnitude,
velocity_clouds,
heatmaps,
toomre_diagram,
ugriz_diagrams)
ASTRONOMICAL_UNIT = 4.74  # km/s corresponding to 1 AU/yr; converts proper motion [arcsec/yr] * distance [pc] to velocity [km/s]
def draw(*,
group_id: uuid.UUID,
filtration_method: str,
nullify_radial_velocity: bool,
with_luminosity_function: bool,
with_velocities_vs_magnitude: bool,
with_velocity_clouds: bool,
lepine_criterion: bool,
heatmaps_axes: str,
with_toomre_diagram: bool,
with_ugriz_diagrams: bool,
desired_stars_count: int,
session: Session) -> None:
entities = star_query_entities(
filtration_method=filtration_method,
nullify_radial_velocity=nullify_radial_velocity,
lepine_criterion=lepine_criterion,
with_luminosity_function=with_luminosity_function,
with_velocities_vs_magnitude=with_velocities_vs_magnitude,
with_velocity_clouds=with_velocity_clouds,
heatmaps_axes=heatmaps_axes,
with_toomre_diagram=with_toomre_diagram,
with_ugriz_diagrams=with_ugriz_diagrams)
if not entities:
raise ValueError('No plotting options were chosen')
query = (session.query(Star)
.filter(Star.group_id == group_id))
if desired_stars_count:
query = (query.order_by(func.random())
.limit(desired_stars_count))
query = query.with_entities(*entities)
statement = query.statement
stars = pd.read_sql_query(sql=statement,
con=session.get_bind(),
index_col='id')
filtration_functions = stars_filtration_functions(method=filtration_method)
eliminations_counter = stars_eliminations_counter(
stars,
filtration_functions=filtration_functions,
group_id=group_id)
session.add(eliminations_counter)
session.commit()
stars = filtered_stars(stars,
filtration_functions=filtration_functions)
if nullify_radial_velocity:
set_radial_velocity_to_zero(stars)
if with_luminosity_function:
luminosity_function.plot(stars=stars)
if with_velocities_vs_magnitude:
if lepine_criterion:
velocities_vs_magnitude.plot_lepine_case(stars=stars)
else:
velocities_vs_magnitude.plot(stars=stars)
if with_velocity_clouds:
if lepine_criterion:
velocity_clouds.plot_lepine_case(stars=stars)
else:
velocity_clouds.plot(stars=stars)
if heatmaps_axes:
heatmaps.plot(stars=stars,
axes=heatmaps_axes)
if with_toomre_diagram:
toomre_diagram.plot(stars=stars)
if with_ugriz_diagrams:
ugriz_diagrams.plot(stars=stars)
def set_radial_velocity_to_zero(stars: pd.DataFrame) -> None:
distances_in_pc = stars['distance'] * 1e3
a1 = (-ASTRONOMICAL_UNIT * np.cos(stars['galactic_latitude'])
* np.sin(stars['galactic_longitude']))
b1 = (-ASTRONOMICAL_UNIT * np.sin(stars['galactic_latitude'])
* np.cos(stars['galactic_longitude']))
stars['u_velocity'] = ((a1 * stars['proper_motion_component_l']
+ b1 * stars['proper_motion_component_b'])
* distances_in_pc)
a2 = (ASTRONOMICAL_UNIT * np.cos(stars['galactic_latitude'])
* np.cos(stars['galactic_longitude']))
b2 = (-ASTRONOMICAL_UNIT * np.sin(stars['galactic_latitude'])
* np.sin(stars['galactic_longitude']))
stars['v_velocity'] = ((a2 * stars['proper_motion_component_l']
+ b2 * stars['proper_motion_component_b'])
* distances_in_pc)
b3 = ASTRONOMICAL_UNIT * np.cos(stars['galactic_latitude'])
stars['w_velocity'] = (b3 * stars['proper_motion_component_b']
* distances_in_pc)
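    # Assuming proper motions in arcsec/yr and distances stored in kpc (hence the 1e3
    # factor above), each component reduces to the tangential-velocity relation
    # v = 4.74 * mu * d [km/s], projected onto the Galactic U, V, W axes by the
    # latitude/longitude factors a1, b1, a2, b2 and b3.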
def star_query_entities(*,
filtration_method: str,
nullify_radial_velocity: bool,
lepine_criterion: bool,
with_luminosity_function: bool,
with_velocities_vs_magnitude: bool,
with_velocity_clouds: bool,
heatmaps_axes: str,
with_toomre_diagram: bool,
with_ugriz_diagrams: bool
) -> List[InstrumentedAttribute]:
entities = []
if filtration_method != 'raw':
entities += [Star.distance,
Star.declination,
Star.u_velocity,
Star.v_velocity,
Star.w_velocity]
if filtration_method == 'restricted':
entities += [Star.b_abs_magnitude,
Star.v_abs_magnitude,
Star.r_abs_magnitude,
Star.i_abs_magnitude,
Star.proper_motion]
if nullify_radial_velocity:
entities += [Star.galactic_longitude,
Star.galactic_latitude,
Star.proper_motion_component_l,
Star.proper_motion_component_b,
Star.distance]
if lepine_criterion:
entities += [Star.right_ascension,
Star.declination,
Star.distance]
if with_luminosity_function:
entities += [Star.luminosity]
if with_velocities_vs_magnitude:
entities += [Star.luminosity,
Star.u_velocity,
Star.v_velocity,
Star.w_velocity]
if heatmaps_axes or with_velocity_clouds:
entities += [Star.u_velocity,
Star.v_velocity,
Star.w_velocity]
if with_toomre_diagram:
entities += [Star.u_velocity,
Star.v_velocity,
Star.w_velocity,
Star.spectral_type]
if with_ugriz_diagrams:
entities += [Star.b_abs_magnitude,
Star.v_abs_magnitude,
Star.r_abs_magnitude,
Star.i_abs_magnitude,
Star.spectral_type]
if entities:
entities += [Star.id]
return entities
def stars_filtration_functions(*,
method: str,
min_parallax: float = 0.025,
min_declination: float = 0.,
max_velocity: float = 500.,
min_proper_motion: float = 0.04,
max_v_apparent_magnitude: float = 19.
) -> Dict[str, Callable]:
result = {}
# TODO: fix geometry of a simulated region so that we don't need to use
# the 'full' filtration method
if method != 'raw':
result['by_parallax'] = partial(filters.by_parallax,
min_parallax=min_parallax)
result['by_declination'] = partial(filters.by_declination,
min_declination=min_declination)
result['by_velocity'] = partial(filters.by_velocity,
max_velocity=max_velocity)
if method == 'restricted':
result['by_proper_motion'] = partial(
filters.by_proper_motion,
min_proper_motion=min_proper_motion)
result['by_reduced_proper_motion'] = (
filters.by_reduced_proper_motion)
result['by_apparent_magnitude'] = partial(
filters.by_apparent_magnitude,
max_v_apparent_magnitude=max_v_apparent_magnitude)
return result
def stars_eliminations_counter(stars: pd.DataFrame,
filtration_functions: Dict[str, Callable],
group_id: uuid.UUID
) -> eliminations.StarsCounter:
eliminations_counter = Counter()
eliminations_counter['raw'] = stars.shape[0]
for criterion, filtration_function in filtration_functions.items():
stars_count_before_filtration = stars.shape[0]
stars = filtration_function(stars)
eliminations_counter[criterion] = (stars_count_before_filtration
- stars.shape[0])
return eliminations.StarsCounter(group_id=group_id,
**eliminations_counter)
def filtered_stars(stars: pd.DataFrame,
filtration_functions: Dict[str, Callable]) -> pd.DataFrame:
for criterion, filtration_function in filtration_functions.items():
stars = filtration_function(stars)
return stars
|