import re
def remove_escapes(msg) -> str:
"""
Returns a filtered string with the literal escape sequence '\r' removed
(the two-character text '\r', not actual carriage returns)
"""
filtered = msg.replace(r'\r', '')
return filtered
def pretty_print(msg) -> str:
"""
Returns the message with ANSI escape sequences stripped via a regex
"""
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
modified_text = ansi_escape.sub('', msg)
return modified_text
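# Illustrative usage (a minimal sketch; the sample string below is hypothetical):
#   raw = '\x1b[31mERROR\x1b[0m: disk full\\r'
#   print(pretty_print(remove_escapes(raw)))  # -> 'ERROR: disk full'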
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import itertools, operator, random, math
from scipy.sparse.linalg import spsolve_triangular
from sklearn import linear_model
import pandas as pd
def random_sampling(data, proportion):
sampled_data = np.empty(data.shape)
sampled_data[:] = np.nan
n = data.shape[1]
for i in range(data.shape[0]):
sample_idx = random.sample(range(n), int(n*proportion))
sampled_data[i][sample_idx] = data[i][sample_idx]
return sampled_data
def funkSVD(rating_mat, latent_features, learning_rate, iters):
n_s, n_t = rating_mat.shape[0], rating_mat.shape[1]
s_matrix, t_matrix = np.random.rand(n_s, latent_features), np.random.rand(latent_features, n_t)
# s_matrix, t_matrix = 0.5*np.ones((n_s, latent_features)), 0.5*np.ones((latent_features, n_t))
sse_initial = 0
for p in range(iters):
old_sse = sse_initial
sse_initial = 0
for i in range(n_s):
for j in range(n_t):
if not math.isnan(rating_mat[i][j]):
diff = rating_mat[i][j] - s_matrix[i,:].dot(t_matrix[:,j])
sse_initial += diff**2
for k in range(latent_features):
s_matrix[i][k] += learning_rate*(2*diff*t_matrix[k][j])
t_matrix[k][j] += learning_rate*(2*diff*s_matrix[i][k])
est_mat = s_matrix.dot(t_matrix)
return est_mat
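# Illustrative usage (a sketch; the matrix size and hyperparameters are assumptions):
#   full = np.random.rand(10, 50)                    # 10 series x 50 time points
#   partial = random_sampling(full, proportion=0.6)  # keep ~60% of entries, rest NaN
#   est = funkSVD(partial, latent_features=4, learning_rate=0.005, iters=100)
#   est.shape                                        # (10, 50); NaNs filled by the low-rank fit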
def ft_data(pop, tspan, dt):
"""
FFT-threshold denoising of each row of pop (e.g. the est_mat returned by funkSVD)
"""
n = len(tspan)
y_ft = []
for i in range(pop.shape[0]):
fhat = np.fft.fft(pop[i], n)
PSD = np.real(fhat*np.conj(fhat)/n)  # power spectral density (real part; avoids complex comparison below)
freq = (1/(dt*n))*np.arange(n)
L = np.arange(1, np.floor(n/2), dtype= 'int')
indices = PSD > 5
PSDclean = PSD * indices
fhat = indices*fhat
ffilt = np.fft.ifft(fhat)
y_ft.append(ffilt)
return np.array(y_ft)
def funkSVD_ft(ft_matrix, rating_mat, latent_features, learning_rate, iters):
u,s,v = np.linalg.svd(ft_matrix, full_matrices=False)
n_s, n_t = rating_mat.shape[0], rating_mat.shape[1]
s_matrix, t_matrix = u, v
# s_matrix, t_matrix = 0.5*np.ones((n_s, latent_features)), 0.5*np.ones((latent_features, n_t))
sse_initial = 0
for p in range(iters):
old_sse = sse_initial
sse_initial = 0
for i in range(n_s):
for j in range(n_t):
if not math.isnan(rating_mat[i][j]):
diff = rating_mat[i][j] - s_matrix[i,:].dot(t_matrix[:,j])
sse_initial += diff**2
for k in range(latent_features):
s_matrix[i][k] += learning_rate*(2*diff*t_matrix[k][j])
t_matrix[k][j] += learning_rate*(2*diff*s_matrix[i][k])
est_mat = s_matrix.dot(t_matrix)
return est_mat
def power_(d,order):
# d is the number of variables; order of polynomials
powers = []
for p in range(1,order+1):
size = d + p - 1
for indices in itertools.combinations(range(size), d-1): ##combinations
starts = [0] + [index+1 for index in indices]
stops = indices + (size,)
powers.append(tuple(map(operator.sub, stops, starts)))
return powers
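# Example (illustrative): for d=2 variables and order=2, power_ returns the exponent
# tuples of every monomial up to total degree 2 (the constant term is added separately
# by lib_terms):
#   power_(2, 2)  ->  [(0, 1), (1, 0), (0, 2), (1, 1), (2, 0)]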
def lib_terms(data,order,description):
#description is a list of variable names, like ['R', 'M', 'S']
#descr holds a text description of each library term
descr = []
#data is the input data, like R,M,S; order is the total order of polynomials
d,t = data.shape # d is the number of variables; t is the number of time points
theta = np.ones((t,1), dtype=np.float64) # the first column of lib is '1'
P = power_(d,order)
descr = ["1"]
for i in range(len(P)):
new_col = np.zeros((t,1),dtype=np.float64)
for j in range(t):
new_col[j] = np.prod(np.power(list(data[:,j]),list(P[i])))
theta = np.hstack([theta, new_col.reshape(t,1)])
descr.append("{0} {1}".format(str(P[i]), str(description)))
# print((str(P[i]), str(description)))
return theta, descr
def sparsifyDynamics(Theta, dx, Lambda):
#theta.shape = 248*10 (time points*functions); dx.shape = 248*3 (time points*variables)
#need to ensure sizes/dimensions are consistent !!!
# dx = dx.T
m,n = dx.shape #(248*3)
Xi = np.dot(np.linalg.pinv(Theta), dx) #Xi.shape = 10*3
# Lambda is the sparsification knob
for k in range(20): # fixed number of thresholding iterations
small_idx = (abs(Xi) < Lambda)
big_idx = (abs(Xi) >= Lambda)
Xi[small_idx] = 0
for i in range(n):
big_curr, = np.where(big_idx[:,i])
Xi[big_curr, i] = np.dot(np.linalg.pinv(Theta[:,big_curr]), dx[:,i])
return Xi
def sparseGalerkin(t, pop, Xi, polyorder):
theta, descr = lib_terms(np.array([pop]).T,polyorder,[])
dpop = theta.dot(Xi)
return dpop[0]
def time_different(dt, pop):
"""
pop has shape (species x time), e.g. (6 x 6000)
centered first-order derivative in the interior, one-sided stencils at the ends
"""
x = np.full_like(pop, fill_value = np.nan)
x[:, 1:-1] = (pop[:, 2:] - pop[:, :-2]) / (2*dt)
x[:,0] = (-11/6 *pop[:,0] + 3* pop[:,1] - 3/2*pop[:,2] + pop[:,3]/3) /dt
x[:,-1] = (11/6* pop[:,-1] -3* pop[:,-2] + 3/2* pop[:,-3] -pop[:,-4]/3)/dt
return x
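# Illustrative SINDy-style pipeline tying the helpers above together (a sketch; the
# data, dt and Lambda values below are assumptions):
#   dt = 0.01
#   pop = np.random.rand(3, 200)                  # 3 variables x 200 time points
#   dx = time_different(dt, pop).T                # derivatives, (time points x variables)
#   theta, descr = lib_terms(pop, 2, ['R', 'M', 'S'])
#   Xi = sparsifyDynamics(theta, dx, Lambda=0.1)  # sparse coefficients, (functions x variables)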
def visual_param(Xi, descr):
small_idx = abs(Xi) < 1e-4
Xi[small_idx] = 0
new_set = [x.replace('(', '').replace(']', '') for x in descr]
name_s = descr
label = []
for str_ in new_set[1:]:
idx_ = [int(x) for x in str_.split(') [')[0].split(',')]
lab = ""
for idx, i in enumerate(idx_):
j = i
while j > 0:
lab += name_s[idx]
j -= 1
label.append(lab)
term_label = ['1'] + label
df_term = pd.DataFrame(Xi.T, index=term_label, columns=name_s)
return df_term
def bulid_prior(label, theta, descr, prior_dic):
df_prior = visual_param(np.zeros((len(label), theta.shape[1])), descr)
drop_index = []
for term in label:
idx_prev = df_prior.index
x_new = set()
for i, s in enumerate(prior_dic[term]):
lst_idx = [p.find(s) for p in idx_prev]
x, = np.where(np.array(lst_idx) == -1)
if i == 0:
x_new = set(x)
else:
x_new = x_new.intersection(x)
drop_index.append(list(x_new))
df_prior[term].iloc[list(x_new)] = 1
return df_prior, drop_index
|
# django libs
from django.http import FileResponse
from django.core.serializers import serialize
from django.utils import translation
from django.utils.translation import gettext
# deepeye setting & models & form
from main.models import Project
from .models import ClassificationModel, Dataset, Result, Weight
from .models import TestResult, Pred, PredProbe, TrainLog
from .model.train import main as classification_train
from .model.test import main as classification_test
from .model import DA
from .dataset_util import *
# common libs
from channels.generic.websocket import WebsocketConsumer
import glob
import json
import logging
import numpy as np
import os
import shutil
import sys
from threading import Thread
import time
import urllib
from main.project_type import ProjectType
from main import file_action
from main.log import get_logger
logger = get_logger(__name__)
class Classification(WebsocketConsumer):
def connect(self):
self.accept()
def disconnect(self, close_code):
pass
def websocket_receive(self, data):
logger.debug("[websocket_receive] data: {}".format(data))
data = json.loads(data['text'])
self.status = data['status']
logger.info(f"Data received from frontend with status of '{self.status}'")
if 'project_type' in data:
project_type = data['project_type']
if self.status == "lang-setting":
translation.activate(data["user-lang"])
# starting training
elif self.status == 'train':
thread = Thread(target = self.train, args = (data,))
thread.start()
# self.train(data)
elif self.status == 'stop':
pass
elif self.status == 'training-ended':
pass
# testing( upload_data / self_testdata / saved_dataset )
elif self.status == 'test':
self.predict(data)
# memo update
"""
elif self.status == 'memo_update':
if data["target_type"] == "dataset":
dataset = Dataset.objects.get(project=self.selected_project, name=data["selectedDatasetId"])
dataset.memo = data["memo"]
dataset.save()
elif data["target_type"] == "model":
model = ClassificationModel.objects.get(project=self.selected_project, name=data["selectedModelId"])
model.memo = data["memo"]
model.save()
logger.debug("memo update finish")
"""
def save_trainlog(self, data):
training_model = ClassificationModel.objects.get(id=data['model_id'])
train_log_record = TrainLog(
epoch = data['epoch'],
train_loss = data['train_loss'],
train_acc = data['train_acc'],
val_loss = data['val_loss'],
val_acc = data['val_acc'],
model = training_model,
)
train_log_record.save()
def train(self, data):
self.train_log = {}
# get Dataset param from DB
training_model = ClassificationModel.objects.get(id=data['model_id'])
project = Project.objects.get(name=data['project_name'])
dataset = Dataset.objects.get(name=data['dataset_name'],project=project)
base_dataset_path = dataset.dataset_path
default_test_ratio = dataset.default_test_ratio
default_val_ratio = dataset.default_val_ratio
# get training parameters from form
model_name = data['model_name']
architecture = data['architecture'].lower()
epochs = int(data['epoch'])
batch_size = int(data['batch'])
learning_rate = float(data['learning_rate'])
optimizer = data['optimizer'].lower()
fine_tuning = data['fine_tuning']
use_default_ratio = data['use_default_ratio']
val_ratio = int(data['val_ratio'])
test_ratio = int(data['test_ratio'])
memo = data['memo']
weights_path = data['weights_path']
weights_file_path = data['weights_file_path']
image_list_unique_id = data['image_list_unique_id']
logger.debug(f"image_list_unique_id: {image_list_unique_id}")
# make path & dir
model_root = file_action.get_model_path_by_model_name(model_name, project)
weights_path = file_action.get_weights_directory_by_model_name(model_name, project)
dataset_path = os.path.join(model_root, "dataset")
if os.path.exists(weights_path):
shutil.rmtree(weights_path)
if os.path.exists(dataset_path):
shutil.rmtree(dataset_path)
os.makedirs(weights_path, exist_ok=True)
os.makedirs(dataset_path, exist_ok=True)
# copy files
class_list = load_class_list(dataset)
num_classes = len(class_list)
# make fine-tuning weight path
if fine_tuning:
transfer_path = training_model.baseweight_set.get().weight.path
else:
transfer_path = 'none'
# get Augmentation flags
augmentation_flags = {
'horizontal_flip': data["horizontal_flip"],
'vertical_flip': data["vertical_flip"],
'rotate_30': data["rotate_30"],
'rotate_45': data["rotate_45"],
'rotate_90': data["rotate_90"],
'gaussian_noise': data["gaussian_noise"],
'blur': data["blur"],
'contrast': data["contrast"]
}
image_type = data['image_type'].lower()
if use_default_ratio:
train_list, _ = get_dataset_list(project, dataset, DatasetDataType.Train)
val_list, _ = get_dataset_list(project, dataset, DatasetDataType.Validation)
else:
train_list, _ = get_dataset_list(project, dataset, DatasetDataType.Train, image_list_unique_id)
val_list, _ = get_dataset_list(project, dataset, DatasetDataType.Validation, image_list_unique_id)
# Running training scripts
try:
classification_train(self,
data['model_id'],
model_root,
num_classes,
image_type,
train_list,
val_list,
augmentation_flags,
architecture,
epochs,
batch_size,
optimizer,
learning_rate,
transfer_path,
weights_path,
weights_file_path,
int(data["n_iter"]),
self.save_trainlog)
except Exception as e:
logger.debug(e)
logger.debug('The program is exiting...')
trans_message = gettext('training failed please check terminal')
self.send(text_data=json.dumps({'status': 'reload',
'message_type':'error',
'message':trans_message}))
finally:
logger.debug("Saving Model to dataset")
logger.debug(f"epoch: {self.train_log.get('epoch', '---')}")
logger.debug(f"status: {self.train_log.get('status', '---')}")
logger.debug(f"train_loss: {self.train_log.get('train_loss', '---')}")
logger.debug(f"train_acc: {self.train_log.get('train_acc', '---')}")
logger.debug(f"val_loss: {self.train_log.get('val_loss', '---')}")
logger.debug(f"val_acc: {self.train_log.get('val_acc', '---')}")
logger.debug(f"best_train_loss: {self.train_log.get('best_train_loss', '---')}")
logger.debug(f"best_val_loss: {self.train_log.get('best_val_loss', '---')}")
logger.debug(f"best_train_epoch: {self.train_log.get('best_train_epoch', '---')}")
logger.debug(f"best_val_epoch: {self.train_log.get('best_val_epoch', '---')}")
try:
training_model.epochs_runned = self.train_log['epoch']
training_model.train_status = self.train_log['status']
training_model.train_loss = self.train_log['train_loss']
training_model.train_acc = self.train_log['train_acc']
training_model.val_loss = self.train_log['val_loss']
training_model.val_acc = self.train_log['val_acc']
training_model.best_train_loss = self.train_log['best_train_loss']
training_model.best_val_loss = self.train_log['best_val_loss']
training_model.best_train_epoch = self.train_log['best_train_epoch']
training_model.best_val_epoch = self.train_log['best_val_epoch']
training_model.save()
except Exception:
logger.info("fail: training model save")
if not use_default_ratio:
new_dataset_data = DatasetData.objects.filter(unique_id=image_list_unique_id)
for data in new_dataset_data:
data.model = training_model
data.save()
trans_message = gettext('training : {} training ended')
cancel = self.train_log.get('cancel', '')
if cancel == '':
training_model.delete()
self.send(text_data=json.dumps({'status': 'reload',
'message_type':'success',
'message': trans_message.format(model_name),
'cancel': cancel,
'project_id': project.id}))
sys.exit(0)
def predict(self, data):
# get common params from form
project = Project.objects.get(pk=data["project_id"])
project_type = ProjectType(project.project_type)
model = ClassificationModel.objects.get(id=data["model_id"], project=project)
weight = Weight.objects.get(model=model)
predict_type = data['predict_type'] # self_dataset / save_dataset / upload_dataset
if predict_type == "self_dataset":
train_flag = data['train_flag']
val_flag = data["val_flag"]
test_flag = data["test_flag"]
elif predict_type == "save_dataset":
dataset = Dataset.objects.get(pk=data['database_id'])
train_flag = data['train_flag']
val_flag = data["val_flag"]
test_flag = data["test_flag"]
elif predict_type == "upload_dataset":
train_flag = None
val_flag = None
test_flag = None
# get model params from DB
architecture = model.architecture_type
num_classes = model.dataset.classes
image_type = model.image_type
model_root = file_action.get_model_path(model)
training_class_list = load_class_list(model.dataset)
#
if predict_type == "self_dataset":
dataset = model.dataset
dataset_data_types = []
if train_flag:
dataset_data_types.append(DatasetDataType.Train)
if val_flag:
dataset_data_types.append(DatasetDataType.Validation)
if test_flag:
dataset_data_types.append(DatasetDataType.Test)
predict_list, dataset_data_list = get_dataset_list(project, dataset, dataset_data_types)
elif predict_type == "save_dataset":
dataset_data_types = []
if train_flag:
dataset_data_types.append(DatasetDataType.Train)
if val_flag:
dataset_data_types.append(DatasetDataType.Validation)
if test_flag:
dataset_data_types.append(DatasetDataType.Test)
predict_list, dataset_data_list = get_dataset_list(project, dataset, dataset_data_types)
else:
pass
# run predict
logger.debug("model.train_status: {}".format(model.train_status))
if model.train_status == 'finished' or model.train_status == 'stopped':
try:
logger.debug(architecture)
self.result = Result
preds, pred_probs, labels = classification_test(
self,
model_root,
project,
model,
num_classes,
image_type,
architecture,
predict_list,
training_class_list,
weight.path)
# delete test result database
all_test_result = TestResult.objects.all()
all_test_result.delete()
# create database
new_test_result = TestResult(model=model)
new_test_result.save()
for pred, pred_prob, label, dataset_data in zip(preds, pred_probs, labels, dataset_data_list):
new_pred = Pred(
test_result=new_test_result,
pred=pred,
model=model,
image_data=dataset_data.image_data
)
new_pred.save()
for p in pred_prob:
new_pred_prob = PredProbe(pred=new_pred, value=p)
new_pred_prob.save()
self.send(text_data=json.dumps({
'status': 'test-complete',
'dataset_id': dataset.id,
'test_result_id': new_test_result.id,
}))
except Exception as e:
logger.debug('Testing exiting on error...')
logger.debug(e)
self.send(text_data=json.dumps({'status': 'error',
'text': str(e)}))
finally:
if predict_type == "self_dataset":
pass
elif predict_type == "save_dataset":
pass
elif predict_type == "upload_dataset":
logger.debug("Deleting upload files")
shutil.rmtree(tmp_dir, ignore_errors=True)
else:
trans_message = gettext('Chosen model training not completed')
self.send(text_data=json.dumps({'status': 'error',
'text': trans_message}))
|
from rest_framework import generics, viewsets
from django.http import HttpResponse
import requests
from .models import Game
from .serializers import GameSerializer
API_KEY = 'f88969b6f429963a6b586bd5966c7b80'
# class ListGame(generics.ListAPIView):
# queryset = Game.objects.all()
# serializer_class = GameSerializer
# class DetailGame(generics.RetrieveAPIView):
# queryset = Game.objects.all()
# serializer_class = GameSerializer
class GameViewSet(viewsets.ModelViewSet):
queryset = Game.objects.all()
serializer_class = GameSerializer
def search_view(request, query):
url = 'https://api-v3.igdb.com/games/'
headers = {'user-key': API_KEY}
data = 'fields *; search "{}"; limit 50;'.format(query)
response = requests.get(url, headers=headers, data=data)
return HttpResponse(response)
def artwork_view(request, game_id):
url = 'https://api-v3.igdb.com/covers'
headers = {
'user-key': API_KEY,
}
data = 'fields alpha_channel,animated,game,height,image_id,url,width; where game = {};'.format(game_id)
response = requests.get(url, headers=headers, data=data)
return HttpResponse(response)
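# A hypothetical urls.py wiring for these views (illustrative only; the actual project
# routing may differ):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('search/<str:query>/', views.search_view),
#       path('artwork/<int:game_id>/', views.artwork_view),
#   ]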
|
#This class is here to maintain the occurrence of two words together and marks this as a phrase
class chains:
def __init__(self, text):
self.library = {}
self.text = text
def addChain(self,index, wordLevel = 3):
compound = ""
for i in range(wordLevel):
if ((i+index) <= len(self.text)-1):
compound += self.text[index+i].strip()+" "
#print compound
if i == 0:
continue
curr = self.library.get(compound.strip())
if curr == None:
self.library[compound.strip()] = 1
else:
self.library[compound.strip()] += 1
#print self.library
def compileChains(self):
significantChains = []
#print self.library
for key in self.library:
#print key
if self.library[key] > 3 and (len(key.replace(" ", "").strip()) > 5):
#print "found sig" + str(self.library[key])+ " : "+ str(key)
significantChains.append(key.strip())
for i in key.split(" "):
significantChains.append(i)
return significantChains
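# Illustrative usage (a sketch; the sample text is made up):
#   words = ("the quick brown fox " * 4).split()
#   c = chains(words)
#   for i in range(len(words)):
#       c.addChain(i)
#   c.compileChains()  # phrases seen more than 3 times, e.g. "the quick brown"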
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""abloop.py: Skeleton operations of the abductive loop."""
__author__ = "Brian J. Goode"
import pandas as pd
import numpy as np
from scipy.stats import chi2
from sklearn.metrics import mutual_info_score
import seaborn as sns
from matplotlib import pyplot as plt
class Abloop:
def __init__(self):
# keep per-instance dicts so hypotheses/treatments are not shared across instances
self.hypotheses = {}
self.treatments = {}
def import_csv(self,csv,idx):
"""import_csv: import experimental data into the abductive loop"""
self.data = pd.read_csv(csv, index_col=idx)
return
def print_features(self):
cols = self.data.columns
print("Available Features:")
print(" " + "\n ".join(cols))
return
def set_prior(self):
"""set_prior: set prior beliefs in the abductive loop"""
## For future addition - have only for objective priors now...
pass
def add_hypothesis(self, inFeatures, depFeatures, model, treatment = None, label = None):
if not label:
label = len(self.hypotheses)
hypothesis = {
'x': inFeatures,
'y': depFeatures,
'model': model,
'treatment': treatment,
}
self.hypotheses[label] = hypothesis
return
def add_treatment(self, session_list, label = None):
"""set_treatment: set treatments for experiment"""
if not label:
label = len(self.treatments)
# Need to enforce criteria on session_list input...
# For now, it should literally be a list of sessions; future should be
# a dict of some design.
self.treatments[label] = session_list
return
def plot_treatment_distribution(self, h, xList, zList):
fig, ax = plt.subplots(figsize=(10,5))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
i = 0
legs = []
for t2 in zList:
means = []
stds = []
for t1 in xList:
x,y = self.prepareData(h,[t1],t2)
means.append(y.mean())
stds.append(y.std()*2.)
plt.errorbar(range(len(means)),means,yerr=stds, capsize = 10,marker='o', markersize=10, elinewidth=4, capthick=4, linewidth=4, label=t2)
legs.append(plt.Rectangle((0, 0), 1, 1, fc=sns.color_palette()[i]))
i += 1
plt.xticks(range(len(xList)),xList, fontsize=12)
plt.legend(legs, zList, frameon=False, fontsize = 12)
return
def prepareData(self, h, treatment_list=None, *args):
# Add in feature for train/test delineation...
data = self.data
if treatment_list:
session_list = []
for t in treatment_list:
session_list.extend(self.treatments[t])
data = self.data.loc[session_list]
for a in args:
session_list = self.treatments[a]
data = data[data.index.isin(session_list)]
D = data[h['x'] + h['y']].dropna()
x = D[h['x']].values
y = D[h['y']].values
return x,y
def estimate(self):
for label, h in self.hypotheses.items():
x,y = self.prepareData(h)
_ = h['model'].train(x,y)
return
def estimate_predicted_effect_size(self, x0, h0, x1, h1):
""" Estimate effect size between h0, h1. """
# Standard deviation calculator between outputs h1, h2 for treatments...
# Plot below...
y0 = h0['model'].sample(x0)
m0 = y0.mean()
sd0 = y0.std()
y1 = h1['model'].sample(x1)
m1 = y1.mean()
sd1 = y1.std()
def effect_size(m0,m1,sd0,sd1):
num = m1-m0
den = ((sd0**2 + sd1**2)*0.5)**0.5
return num/den
return effect_size(m0,m1,sd0,sd1)
def estimate_treatment_effect_size(self, h, t0, t1):
""" Estimate effect size between h1, h2. """
# Standard deviation calculator between outputs h1, h2 for treatments...
# Plot below...
x,y0 = self.prepareData(h,[t0])
x,y1 = self.prepareData(h,[t1])
m0 = y0.mean()
m1 = y1.mean()
sd0 = y0.std()
sd1 = y1.std()
def effect_size(m0,m1,sd0,sd1):
num = m1-m0
den = ((sd0**2 + sd1**2)*0.5)**0.5
return num/den
return effect_size(m0,m1,sd0,sd1)
def print_estimates(self, hypothesis = None):
# FIX: Make sure prints right (handle features)
hypotheses = self.hypotheses
if hypothesis is not None:
hypotheses = {hypothesis: self.hypotheses[hypothesis]}
for label, h in hypotheses.items():
parameters = h['model'].getParameterEstimate()
print('Hypothesis {}'.format(label))
for pk,pv in parameters.items():
print('\n'.join(['{:40}{}'.format(*x) for x in zip(h['x'],pv)]))
return
def plot_estimates(self, hypothesis = None):
# NOT DONE YET!
# Plot estimates here.
"""for bs, title in zip(bsamples.T,titles):
plt.figure()
_ = plt.hist(bs, 1000, normed = True)
plt.title(title)
plt.xlabel(r'$\beta$')
plt.ylabel(r'$P(\beta|y)$')"""
return
def calcMseErrDist(self, y_hat, y):
err_samples = y_hat.T - y
err_bar = (err_samples**2).mean(axis = 0)
dof = len(err_samples)
loc, scale = chi2.fit_loc_scale(err_bar, dof)
err_dist = chi2(dof, loc=loc, scale=scale)
return err_dist
def validate(self, hypothesis = None):
"""validate: validate model on out-of-sample data"""
hypotheses = self.hypotheses
if hypothesis is not None:
hypotheses = {hypothesis: self.hypotheses[hypothesis]}
for label, h in hypotheses.items():
x,y = self.prepareData(h)
y_hat = h['model'].sample(x)
h['err_dist'] = self.calcMseErrDist(y_hat,y)
return
def abduce_hypotheses(self, xaxis = None):
if not xaxis:
xaxis = np.logspace(0,19,1000,base=2)
hypotheses = self.hypotheses
P_he = []
for label, h in hypotheses.items():
P_he.append(h['err_dist'].pdf(xaxis))
P_he_marginal = np.array(P_he)
P_he = np.array(P_he)
den = P_he.sum(axis=0)
P_he = P_he/den
self.xaxis = xaxis
self.P_he = P_he
self.P_he_marginal = P_he_marginal
return
def plot_abduce_hypotheses(self):
xaxis = self.xaxis
for phe, label in zip(self.P_he, self.hypotheses):
plt.plot(xaxis,phe, label=label)
plt.semilogx(basex = 10)
plt.xlabel('MSE', fontsize = 16)
_ = plt.ylabel(r'$P(h|\tilde e)$', fontsize = 16)
plt.legend()
return
def plot_abduce_hypotheses_marginal(self):
xaxis = self.xaxis
for phe, label in zip(self.P_he_marginal, self.hypotheses):
plt.plot(xaxis,phe, label=label)
plt.semilogx(basex = 10)
plt.xlabel('MSE', fontsize = 16)
_ = plt.ylabel(r'$P(h,\tilde e|H)$', fontsize = 16)
plt.legend()
return
def set_input_output(self, invar, outvar):
self.invar = invar
self.outvar = outvar
return
def abduce_results(self, depVarName):
features = self.data.columns
n_features = len(features)
data = self.data
MI = np.empty(shape = (n_features,n_features))
for i in range(n_features):
for j in range(n_features):
Cxy = np.histogram2d(data[features[i]].replace(np.nan,0),data[features[j]].replace(np.nan,0))[0]
MI[i,j] = mutual_info_score(None,None,contingency=Cxy)
MI = pd.DataFrame(MI,columns = data.columns, index = data.columns)
results = MI[depVarName].loc[self.invar].sort_values(ascending=False)
return results
def load(self):
pass
def save(self):
pass
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 6 23:20:56 2018
@author: Gaurav
"""
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import os
############################CLASS SPACE########################################
class Boundary:
def __init__(self,boundary_type,boundary_value):
self.DefineBoundary(boundary_type,boundary_value)
def DefineBoundary(self,boundary_type,boundary_value):
self.type=boundary_type
self.value=boundary_value
class Space:
def __init__(self):
pass
def CreateMesh(self,rowpts,colpts):
self.rowpts=rowpts
self.colpts=colpts
self.u=np.zeros((self.rowpts+2,self.colpts+2))
self.v=np.zeros((self.rowpts+2,self.colpts+2))
self.p=np.zeros((self.rowpts+2,self.colpts+2))
self.p_c=np.zeros((self.rowpts,self.colpts))
self.u_c=np.zeros((self.rowpts,self.colpts))
self.v_c=np.zeros((self.rowpts,self.colpts))
self.SetSourceTerm()
def SetDeltas(self,breadth,length):
self.dx=length/(self.colpts-1)
self.dy=breadth/(self.rowpts-1)
def SetInitialU(self,U):
self.u=U*self.u
def SetInitialV(self,V):
self.v=V*self.v
def SetInitialP(self,P):
self.p=P*self.p
def SetSourceTerm(self,S_x=0,S_y=0):
self.S_x=S_x
self.S_y=S_y
class Fluid:
def __init__(self,rho,mu):
self.SetFluidProperties(rho,mu)
def SetFluidProperties(self,rho,mu):
self.rho=rho
self.mu=mu
##########################BOUNDARY SPACE#######################################
def SetUBoundary(space,left,right,top,bottom):
if(left.type=="D"):
space.u[:,0]=left.value
elif(left.type=="N"):
space.u[:,0]=-left.value*space.dx+space.u[:,1]
if(right.type=="D"):
space.u[:,-1]=right.value
elif(right.type=="N"):
space.u[:,-1]=right.value*space.dx+space.u[:,-2]
if(top.type=="D"):
space.u[-1,:]=2*top.value-space.u[-2,:]
elif(top.type=="N"):
space.u[-1,:]=-top.value*space.dy+space.u[-2,:]
if(bottom.type=="D"):
space.u[0,:]=2*bottom.value-space.u[1,:]
elif(bottom.type=="N"):
space.u[0,:]=bottom.value*space.dy+space.u[1,:]
def SetVBoundary(space,left,right,top,bottom):
if(left.type=="D"):
space.v[:,0]=2*left.value-space.v[:,1]
elif(left.type=="N"):
space.v[:,0]=-left.value*space.dx+space.v[:,1]
if(right.type=="D"):
space.v[:,-1]=2*right.value-space.v[:,-2]
elif(right.type=="N"):
space.v[:,-1]=right.value*space.dx+space.v[:,-2]
if(top.type=="D"):
space.v[-1,:]=top.value
elif(top.type=="N"):
space.v[-1,:]=-top.value*space.dy+space.v[-2,:]
if(bottom.type=="D"):
space.v[0,:]=bottom.value
elif(bottom.type=="N"):
space.v[0,:]=bottom.value*space.dy+space.v[1,:]
def SetPBoundary(space,left,right,top,bottom):
if(left.type=="D"):
space.p[:,0]=left.value
elif(left.type=="N"):
space.p[:,0]=-left.value*space.dx+space.p[:,1]
if(right.type=="D"):
space.p[:,-1]=right.value
elif(right.type=="N"):
space.p[:,-1]=right.value*space.dx+space.p[:,-2]
if(top.type=="D"):
space.p[-1,:]=top.value
elif(top.type=="N"):
space.p[-1,:]=-top.value*space.dy+space.p[-2,:]
if(bottom.type=="D"):
space.p[0,:]=bottom.value
elif(bottom.type=="N"):
space.p[0,:]=bottom.value*space.dy+space.p[1,:]
########################FUNCTION SPACE#########################################
def SetTimeStep(CFL,space,fluid):
with np.errstate(divide='ignore'):
dt=CFL/np.sum([np.amax(space.u)/space.dx,np.amax(space.v)/space.dy])
#Escape condition if dt is infinity due to zero velocity initially
if np.isinf(dt):
dt=CFL*(space.dx+space.dy)
space.dt=dt
def GetStarredVelocities(space,fluid):
#Save object attributes as local variables with explicit typing for improved readability
rows=int(space.rowpts)
cols=int(space.colpts)
u=space.u.astype(float,copy=False)
v=space.v.astype(float,copy=False)
dx=float(space.dx)
dy=float(space.dy)
dt=float(space.dt)
S_x=float(space.S_x)
S_y=float(space.S_y)
rho=float(fluid.rho)
mu=float(fluid.mu)
u_star=u.copy()
v_star=v.copy()
u1_y=(u[2:,1:cols+1]-u[0:rows,1:cols+1])/(2*dy)
u1_x=(u[1:rows+1,2:]-u[1:rows+1,0:cols])/(2*dx)
u2_y=(u[2:,1:cols+1]-2*u[1:rows+1,1:cols+1]+u[0:rows,1:cols+1])/(dy**2)
u2_x=(u[1:rows+1,2:]-2*u[1:rows+1,1:cols+1]+u[1:rows+1,0:cols])/(dx**2)
v_face=(v[1:rows+1,1:cols+1]+v[1:rows+1,0:cols]+v[2:,1:cols+1]+v[2:,0:cols])/4
u_star[1:rows+1,1:cols+1]=u[1:rows+1,1:cols+1]-dt*(u[1:rows+1,1:cols+1]*u1_x+v_face*u1_y)+(dt*(mu/rho)*(u2_x+u2_y))+(dt*S_x)
v1_y=(v[2:,1:cols+1]-v[0:rows,1:cols+1])/(2*dy)
v1_x=(v[1:rows+1,2:]-v[1:rows+1,0:cols])/(2*dx)
v2_y=(v[2:,1:cols+1]-2*v[1:rows+1,1:cols+1]+v[0:rows,1:cols+1])/(dy**2)
v2_x=(v[1:rows+1,2:]-2*v[1:rows+1,1:cols+1]+v[1:rows+1,0:cols])/(dx**2)
u_face=(u[1:rows+1,1:cols+1]+u[1:rows+1,2:]+u[0:rows,1:cols+1]+u[0:rows,2:])/4
v_star[1:rows+1,1:cols+1]=v[1:rows+1,1:cols+1]-dt*(u_face*v1_x+v[1:rows+1,1:cols+1]*v1_y)+(dt*(mu/rho)*(v2_x+v2_y))+(dt*S_y)
space.u_star=u_star.copy()
space.v_star=v_star.copy()
#@nb.jit
def SolvePressurePoisson(space,fluid,left,right,top,bottom):
#Save object attributes as local variables with explicit typing for improved readability
rows=int(space.rowpts)
cols=int(space.colpts)
u_star=space.u_star.astype(float,copy=False)
v_star=space.v_star.astype(float,copy=False)
p=space.p.astype(float,copy=False)
dx=float(space.dx)
dy=float(space.dy)
dt=float(space.dt)
rho=float(fluid.rho)
factor=1/(2/dx**2+2/dy**2)
error=1
tol=1e-3
ustar1_x=(u_star[1:rows+1,2:]-u_star[1:rows+1,0:cols])/(2*dx)
vstar1_y=(v_star[2:,1:cols+1]-v_star[0:rows,1:cols+1])/(2*dy)
i=0
while(error>tol):
i+=1
p_old=p.astype(float,copy=True)
p2_xy=(p_old[2:,1:cols+1]+p_old[0:rows,1:cols+1])/dy**2+(p_old[1:rows+1,2:]+p_old[1:rows+1,0:cols])/dx**2
p[1:rows+1,1:cols+1]=(p2_xy)*factor-(rho*factor/dt)*(ustar1_x+vstar1_y)
error=np.amax(abs(p-p_old))
#Apply Boundary Conditions
SetPBoundary(space,left,right,top,bottom)
if(i>500):
tol*=10
#@nb.jit
def SolveMomentumEquation(space,fluid):
#Save object attributes as local variables with explicit typing for improved readability
rows=int(space.rowpts)
cols=int(space.colpts)
u_star=space.u_star.astype(float)
v_star=space.v_star.astype(float)
p=space.p.astype(float,copy=False)
dx=float(space.dx)
dy=float(space.dy)
dt=float(space.dt)
rho=float(fluid.rho)
u=space.u.astype(float,copy=False)
v=space.v.astype(float,copy=False)
p1_x=(p[1:rows+1,2:]-p[1:rows+1,0:cols])/(2*dx)
u[1:rows+1,1:cols+1]=u_star[1:rows+1,1:cols+1]-(dt/rho)*p1_x
p1_y=(p[2:,1:cols+1]-p[0:rows,1:cols+1])/(2*dy)
v[1:rows+1,1:cols+1]=v_star[1:rows+1,1:cols+1]-(dt/rho)*p1_y
def SetCentrePUV(space):
space.p_c=space.p[1:-1,1:-1]
space.u_c=space.u[1:-1,1:-1]
space.v_c=space.v[1:-1,1:-1]
def MakeResultDirectory(wipe=False):
cwdir=os.getcwd()
dir_path=os.path.join(cwdir,"Result")
if not os.path.isdir(dir_path):
os.makedirs(dir_path,exist_ok=True)
else:
if wipe:
os.chdir(dir_path)
filelist=os.listdir()
for file in filelist:
os.remove(file)
os.chdir(cwdir)
def WriteToFile(space,iteration,interval):
if(iteration%interval==0):
dir_path=os.path.join(os.getcwd(),"Result")
filename="PUV{0}.txt".format(iteration)
path=os.path.join(dir_path,filename)
with open(path,"w") as f:
for i in range(space.rowpts):
for j in range(space.colpts):
f.write("{}\t{}\t{}\n".format(space.p_c[i,j],space.u_c[i,j],space.v_c[i,j]))
#################################END OF FILE###################################
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of embedding layer with shared weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
import model_utils
class EmbeddingSharedWeights(tf.layers.Layer):
"""Calculates input embeddings and pre-softmax linear with shared weights."""
def __init__(self, vocab_size, hidden_size, method="gather"):
"""Specify characteristic parameters of embedding layer.
Args:
vocab_size: Number of tokens in the embedding. (Typically ~32,000)
hidden_size: Dimensionality of the embedding. (Typically 512 or 1024)
method: Strategy for performing embedding lookup. "gather" uses tf.gather
which performs well on CPUs and GPUs, but very poorly on TPUs. "matmul"
one-hot encodes the indices and formulates the embedding as a sparse
matrix multiplication. The matmul formulation is wasteful as it does
extra work, however matrix multiplication is very fast on TPUs which
makes "matmul" considerably faster than "gather" on TPUs.
"""
super(EmbeddingSharedWeights, self).__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
if method not in ("gather", "matmul"):
raise ValueError("method {} must be 'gather' or 'matmul'".format(method))
self.method = method
def build(self, _):
with tf.variable_scope("embedding_and_softmax", reuse=tf.AUTO_REUSE):
# Create and initialize weights. The random normal initializer was chosen
# randomly, and works well.
self.shared_weights = tf.get_variable(
"weights", [self.vocab_size, self.hidden_size],
initializer=tf.random_normal_initializer(
0., self.hidden_size ** -0.5))
self.built = True
def call(self, x):
"""Get token embeddings of x.
Args:
x: An int64 tensor with shape [batch_size, length]
Returns:
embeddings: float32 tensor with shape [batch_size, length, embedding_size]
padding: float32 tensor with shape [batch_size, length] indicating the
locations of the padding tokens in x.
"""
with tf.name_scope("embedding"):
if True:
# Create binary mask of size [batch_size, length]
mask = tf.to_float(tf.not_equal(x, 0))
# if self.method == "gather":
embeddings = tf.gather(self.shared_weights, x)
embeddings *= tf.expand_dims(mask, -1)
# else: # matmul
# embeddings = tpu_utils.embedding_matmul(
# embedding_table=self.shared_weights,
# values=tf.cast(x, dtype=tf.int32),
# mask=mask
# )
# embedding_matmul already zeros out masked positions, so
# `embeddings *= tf.expand_dims(mask, -1)` is unnecessary.
# Scale embedding by the sqrt of the hidden size
embeddings *= self.hidden_size ** 0.5
else:
embeddings = tf.nn.embedding_lookup(self.shared_weights, x)
return embeddings
def linear(self, x):
"""Computes logits by running x through a linear layer.
Args:
x: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
with tf.name_scope("presoftmax_linear"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
x = tf.reshape(x, [-1, self.hidden_size])
logits = tf.matmul(x, self.shared_weights, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
|
'''
This module provides a clingo.Theory class for an LPX theory.
'''
from clingo.theory import Theory
from ._clingolpx import lib as _lib, ffi as _ffi
__all__ = ['ClingoLPXTheory']
class ClingoLPXTheory(Theory):
'''
The LPX theory.
'''
def __init__(self):
super().__init__("clingolpx", _lib, _ffi)
|
from distutils.core import setup, Extension
import glob
cppstring = Extension(
'cppstring',
define_macros=[],
include_dirs=['include', '/usr/local/include'],
libraries=[],
library_dirs=['/usr/local/lib'],
sources=glob.glob('src/*.cpp'),
extra_compile_args=['-MMD', '-MP', '-g', '-std=c++11'])
setup(
name='cppstring',
version='1.0',
description='Example package',
scripts=['scripts/pysplit', 'scripts/pyjoin'],
packages=['pystring'],
ext_modules=[cppstring])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySocialGiftOrderRefundModel(object):
def __init__(self):
self._mid = None
self._order_id = None
self._refund_price = None
self._refund_type = None
@property
def mid(self):
return self._mid
@mid.setter
def mid(self, value):
self._mid = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def refund_price(self):
return self._refund_price
@refund_price.setter
def refund_price(self, value):
self._refund_price = value
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
def to_alipay_dict(self):
params = dict()
if self.mid:
if hasattr(self.mid, 'to_alipay_dict'):
params['mid'] = self.mid.to_alipay_dict()
else:
params['mid'] = self.mid
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.refund_price:
if hasattr(self.refund_price, 'to_alipay_dict'):
params['refund_price'] = self.refund_price.to_alipay_dict()
else:
params['refund_price'] = self.refund_price
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipaySocialGiftOrderRefundModel()
if 'mid' in d:
o.mid = d['mid']
if 'order_id' in d:
o.order_id = d['order_id']
if 'refund_price' in d:
o.refund_price = d['refund_price']
if 'refund_type' in d:
o.refund_type = d['refund_type']
return o
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_net import BaseNet
from .pointseg_modules import Fire, FireDeconv, SELayer, ASPP
class PSEncoder(BaseNet):
def __init__(self, input_shape, cfg, bn_d = 0.1):
super(PSEncoder, self).__init__()
bn_d = bn_d
self.bypass = cfg['bypass']
self.input_shape = input_shape
c, h, w = self.input_shape
### Encoder part
self.conv1a = nn.Sequential(nn.Conv2d(c, 64, kernel_size=(3, 5), stride=(1, 2), padding=(1, 2)),
nn.BatchNorm2d(64, momentum=bn_d),
nn.ReLU(inplace=True))
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=(1, 2), padding=1) # 1/4
# First block
self.fire_blk1 = nn.Sequential(
Fire(64, 16, 64, 64, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(128, 16, 64, 64, bn=True, bn_d=bn_d, bypass=self.bypass),
SELayer(128, reduction=2),
nn.MaxPool2d(kernel_size=3, stride=(1, 2), padding=1)) # 1/8
# second block
self.fire_blk2 = nn.Sequential(
Fire(128, 32, 128, 128, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(256, 32, 128, 128, bn=True, bn_d=bn_d, bypass=self.bypass),
SELayer(256, reduction=2),
nn.MaxPool2d(kernel_size=3, stride=(1, 2), padding=1)) # 1/16
self.fire_blk3 = nn.Sequential(
Fire(256, 48, 192, 192, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(384, 48, 192, 192, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(384, 64, 256, 256, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(512, 64, 256, 256, bn=True, bn_d=bn_d, bypass=self.bypass),
SELayer(512, reduction=2),
nn.MaxPool2d(kernel_size=3, stride=(2, 2), padding=1)) # 1/16
# third block
self.fire_blk4 = nn.Sequential(
Fire(512, 64, 256, 256, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(512, 64, 256, 256, bn=True, bn_d=bn_d, bypass=self.bypass),
SELayer(512, reduction=2),
nn.MaxPool2d(kernel_size=3, stride=(2, 2), padding=1)) # 1/32
# third block
self.fire_blk5 = nn.Sequential(
Fire(512, 80, 384, 384, bn=True, bn_d=bn_d, bypass=self.bypass),
Fire(768, 80, 384, 384, bn=True, bn_d=bn_d, bypass=False))
#SELayer(768, reduction=2),
#nn.MaxPool2d(kernel_size=3, stride=(2, 2), padding=1)) # 1/32
self.output_shapes = self.calc_output_shape()
def forward(self, x):
x_1a = self.conv1a(x) # (H, W/2)
x_p1 = self.pool1(x_1a)
### Encoder forward
x = self.fire_blk1(x_p1)
x = self.fire_blk2(x)
x = self.fire_blk3(x)
x = self.fire_blk4(x)
x = self.fire_blk5(x)
return x
def calc_output_shape(self):
c, h, w = self.input_shape
input = torch.rand((1, c, h, w))
self.eval()
with torch.no_grad():
x_se3 = self.forward(input)
return x_se3.shape
def get_output_shape(self):
return self.output_shapes
class PSDecoder(BaseNet):
def __init__(self, input_shape, cfg):
super(PSDecoder, self).__init__()
bn_d = 0.1
num_classes = len(cfg['classes'])
self.input_shapes = input_shape
self.p = cfg['dropout']
self.fdeconv_el = FireDeconv(128, 32, 128, 128, bn=True, bn_d=bn_d)
self.fdeconv_1 = FireDeconv(512, 64, 128, 128, bn=True, bn_d=bn_d)
self.fdeconv_2 = FireDeconv(512, 64, 64, 64, bn=True, bn_d=bn_d)
self.fdeconv_3 = FireDeconv(128, 16, 32, 32, bn=True, bn_d=bn_d)
self.fdeconv_4 = FireDeconv(64, 16, 32, 32, bn=True, bn_d=bn_d)
self.drop = nn.Dropout2d(p=self.p)
self.conv2 = nn.Sequential(nn.Conv2d(64, num_classes, kernel_size=3, stride=1, padding=1))
self.output_shape = self.calc_output_shape()
def forward(self, x):
x_1a, x_1b, x_se1, x_se2, x_se3, x_el = x
x_el = self.fdeconv_el(x_el)
### Decoder forward
x_fd1 = self.fdeconv_1(x_se3) # (H, W/8)
x_fd1_fused = torch.add(x_fd1, x_se2)
x_fd1_fused = torch.cat((x_fd1_fused, x_el), dim=1)
x_fd2 = self.fdeconv_2(x_fd1_fused) # (H, W/4)
x_fd2_fused = torch.add(x_fd2, x_se1)
x_fd3 = self.fdeconv_3(x_fd2_fused) # (H, W/2)
x_fd3_fused = torch.add(x_fd3, x_1a)
x_fd4 = self.fdeconv_4(x_fd3_fused) # (H, W/2)
x_fd4_fused = torch.add(x_fd4, x_1b)
x_d = self.drop(x_fd4_fused)
x = self.conv2(x_d)
return x
def calc_output_shape(self):
input = [torch.rand(in_shape) for in_shape in self.input_shapes]
self.eval()
with torch.no_grad():
out = self.forward(input)
return out.shape
def get_output_shape(self):
return self.output_shape
|
import pandas as pd
import itertools
############################
# Display
############################
def pd_print_all(df):
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print(df)
############################
# Search
############################
def pd_first_row(df):
for idx, row in df.iterrows():
return idx, row
def pd_is_one_row(df):
nRows = df.shape[0]
if nRows == 0:
return None, None
elif nRows > 1:
raise ValueError("Expected 1 match, got", nRows)
return pd_first_row(df)
def pd_rows_colval(df, colname, val):
return df[df[colname] == val]
# Get rows for which several columns have some exact values
# FIXME: Does not work with complex datatypes like tuple
# TODO: Implement partial matches
# TODO: Implement inequalities
def pd_query(df, queryDict, dropQuery=False):
assert isinstance(queryDict, dict)
if len(queryDict) == 0:
return df
elif len(df) == 0:
return df
else:
# If a non-existing column is requested, return empty
for k in queryDict.keys():
if k not in df.columns:
return pd.DataFrame(columns=df.columns)
# Query likes strings to be wrapped in quotation marks for later evaluation
strwrap = lambda val: '"' + val + '"' if isinstance(val, str) else str(val)
query = ' and '.join([colname+'=='+strwrap(val) for colname, val in queryDict.items()])
rez = df.query(query)
if dropQuery:
return rez.drop(columns=list(queryDict.keys()))
else:
return rez
# Return all rows for which values match the list exactly
def pd_query_exact(df, lst):
return pd_query(df, dict(zip(df.columns, lst)))
# Check if there is at least 1 row that matches the list exactly
def pd_row_exists(df, lst):
return len(pd_query_exact(df, lst)) > 0
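# Illustrative usage (a sketch with made-up data):
#   df = pd.DataFrame({'animal': ['cat', 'dog', 'dog'], 'legs': [4, 4, 3]})
#   pd_query(df, {'animal': 'dog', 'legs': 4})   # -> the single dog/4 row
#   pd_row_exists(df, ['cat', 4])                # -> True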
##############################
# Merging and Stacking
##############################
# Add new row to dataframe, unless such a row is already present
def pd_append_row(df, lst, skip_repeat=False):
if skip_repeat:
if pd_row_exists(df, lst):
print("Skipping existing row", lst)
return df
else:
newRow = pd.DataFrame([lst], columns=df.columns)
return pd.concat([df, newRow], ignore_index=True)
# Appends all dataframes in a list
# A new column is added with values unique to the rows of original dataframes
def pd_vstack_df(dfLst, colName, colVals):
rez = pd.DataFrame()
for df, val in zip(dfLst, colVals):
df1 = df.copy()
df1[colName] = val
rez = pd.concat([rez, df1])
return rez.reset_index()
# Merge several dataframes with exactly the same structure by adding columns that have different values
# TODO: Test that dataframes are indeed equivalent
# TODO: Test that dataframe values are exactly the same except of split cols
def pd_merge_equivalent_df(dfLst, splitColNames, dfNames):
dfRez = dfLst[0].copy()
dfRez = dfRez.drop(splitColNames, axis=1)
for df, dfName in zip(dfLst, dfNames):
for colName in splitColNames:
dfRez[colName + '_' + dfName] = df[colName]
return dfRez
def pd_merge_multiple(dfNamesLst, dfLst, categoricalCols):
for i in range(len(dfNamesLst)):
if i == 0:
dfJoint = dfLst[i].copy()
else:
suffixes = ('', '_' + dfNamesLst[i])
dfJoint = pd.merge(dfJoint, dfLst[i], how="inner", on=categoricalCols, suffixes=suffixes)
# Problem is that columns of the very first df will lack suffixes, so need to add them manually
extraCols = set(dfLst[0].columns) - set(categoricalCols)
for extraCol in extraCols:
dfJoint[extraCol + '_' + dfNamesLst[0]] = dfJoint[extraCol]
dfJoint.drop(extraCol, axis=1, inplace=True)
return dfJoint
def merge_df_from_dict(dfDict, columnNames):
'''
:param dfDict: keys are extra column values as tuple. Values are dataframes. All dataframes must have same columns
:param columnNames: names of the extra columns
:return: a single dataframe that merges other dataframes using extra columns
'''
rezDFList = []
for k, v in dfDict.items():
if isinstance(k, str):
k = [k]
dfCopy = v.copy()
# Iterate in reverse order because we will be inserting each column at the beginning
for colname, colval in zip(columnNames[::-1], k[::-1]):
dfCopy.insert(0, colname, colval)
rezDFList += [dfCopy]
return pd.concat(rezDFList, sort=False).reset_index(drop=True)
##############################
# Delete
##############################
def drop_rows_byquery(df, queryLst):
dfRez = df.copy()
for queryDict in queryLst:
rows = pd_query(df, queryDict)
dfRez = dfRez.drop(index=rows.index)
return dfRez
##############################
# Constructors
##############################
# Get a dictionary where keys are column names and values are possible values for that column
# Construct a dataframe where rows are all combinations of provided column values
def outer_product_df(d):
rowsLst = list(itertools.product(*d.values()))
return pd.DataFrame(rowsLst, columns = list(d.keys()))
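# Example (illustrative): outer_product_df({'a': [1, 2], 'b': ['x', 'y']}) returns a
# 4-row dataframe with all combinations (1, 'x'), (1, 'y'), (2, 'x'), (2, 'y').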
##############################
# Manipulation
##############################
# Move some of the columns in front in that order, the rest stay in the same order at the end
def pd_move_cols_front(df, colsMove):
colsNew = list(df.keys())
for col in colsMove[::-1]:
colsNew.insert(0, colsNew.pop(colsNew.index(col)))
return df.loc[:, colsNew]
def pd_category_to_column(df, catName, rezName):
'''
:param df: Pandas Dataframe
:param catName: Name of the column containing categorical data
:param rezName: Name of the column containing non-categorical data (e.g. float)
:return: Pandas Dataframe
Input a dataframe that has all columns categorical except one.
Also input the name of one of the categorical columns.
Drop that categorical column column. Instead, create a column for each value of categorical data,
with values of the non-categorical column
'''
categories = set(df[catName])
rez = pd.DataFrame()
for catVal in categories:
dfThis = df[df[catName] == catVal]
dfThis.rename(columns={rezName: catVal}, inplace=True)
rez = pd.concat([rez, dfThis])
return rez.reset_index()
# Convert list of combinations of two categorical variables into 2D indexed dataframe
def pd_pivot(df, xLabel, yLabel, valLabel, xVals=None, yVals=None):
# Construct 2D pivot
dfPivot = df.pivot(index=xLabel, columns=yLabel, values=valLabel)
# Change order of dimensions
xVals = xVals if xVals is not None else sorted(set(df[xLabel]))
yVals = yVals if yVals is not None else sorted(set(df[yLabel]))
print(xVals, yVals)
return dfPivot[yVals].loc[xVals]#.reindex(yVals)
|
import os
from flask import request, redirect, Flask, send_file
from pathlib import Path
app = Flask(__name__)
root = Path.cwd() / "__files__"
root.mkdir(exist_ok=True)
css = """
form {
display: flex;
flex-direction: column;
gap: 1rem;
width: max-content;
}
"""
@app.route("/upload/<path:name>", methods=["POST"])
def upload_file(name):
if "main.py" in name:
return "Failure"
with open(root / name, "wb+") as fout:
for line in request.files["file"].stream:
fout.write(line)
return "Success"
@app.route("/file", methods=["POST"])
def file():
if "filename" in request.files:
file = request.files["filename"]
with open(root / file.filename, "wb+") as fout:
fout.write(file.stream.read())
return redirect("/")
@app.route("/files/<path:name>", methods=["GET"])
def files(name):
return send_file(root / name)
@app.route("/", methods=["GET"])
def index():
return f"""
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
{css}
</style>
</head>
<body>
<h1>Hello world</h1>
<h2>Submit a file</h2>
<form action="/file" method="post" enctype="multipart/form-data">
<input type="file" id="filename" name="filename" />
<input type="submit" value="Upload" name="submitted" />
</form>
<h2>Files</h2>
<ol>
{get_available_files_html()}
</ol>
</body>
"""
def get_available_files_html():
return ''.join(
f"<li><a href='/files/{file}'>{file}</a></li>"
for file in os.listdir(str(root))
)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=8080)
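# Example interaction (illustrative; host and port assume the defaults used above):
#   curl -F "filename=@report.pdf" http://localhost:8080/file
#   curl -F "file=@report.pdf" http://localhost:8080/upload/report.pdf
#   curl http://localhost:8080/files/report.pdf -o report.pdf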
|
import numpy
from src.dcf import (combine_cash_flow_simulations, compute_dcf,
get_random_numbers, simulate_cash_flow_values,
simulate_dcf)
def test_compute_dcf():
cash_flow_values = [1000000, 1000000, 4000000, 4000000, 6000000]
discount_rate = .05
expected = 13306728
assert round(compute_dcf(cash_flow_values, discount_rate)) == expected
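# For reference, a minimal sketch of a DCF computation consistent with the test above
# (illustrative only; the implementation under test lives in src.dcf and may differ):
def _example_compute_dcf(cash_flow_values, discount_rate):
    # Discount each year's cash flow to present value: CF_t / (1 + r)**t, t starting at 1.
    return sum(cf / (1 + discount_rate) ** t
               for t, cf in enumerate(cash_flow_values, start=1))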
def test_get_random_numbers():
# check that we get the right number of elements
assert len(get_random_numbers(size=0)) == 0
assert len(get_random_numbers(size=1)) == 1
assert len(get_random_numbers(size=4)) == 4
def test_simulate_cash_flow_values():
cash_flow_data = [[100, 20], [-500, 10]]
size = 3
simulations = simulate_cash_flow_values(cash_flow_data, size)
number_of_years = len(cash_flow_data)
assert len(simulations) == number_of_years
assert len(simulations[0]) == size
def test_combine_cash_flow_simulations():
# empty list
simulations = []
expected = []
assert combine_cash_flow_simulations(simulations) == expected
# one-element list
simulations = [numpy.array([100])]
expected = [[100]]
assert combine_cash_flow_simulations(simulations) == expected
# general case
simulations = [numpy.array([1, 2, 3, 4]), numpy.array(
[11, 22, 33, 44]), numpy.array([111, 222, 333, 444])]
expected = [[1, 11, 111], [2, 22, 222], [3, 33, 333], [4, 44, 444]]
assert combine_cash_flow_simulations(simulations) == expected
def test_simulate_dcf():
# check that we get the right number of elements
cash_flow_data = [[100, 20], [-500, 10]]
discount_rate = .05
assert len(simulate_dcf(cash_flow_data, discount_rate, 0)) == 0
assert len(simulate_dcf(cash_flow_data, discount_rate, 1)) == 1
assert len(simulate_dcf(cash_flow_data, discount_rate, 4)) == 4
|
#!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: na_santricity_discover
short_description: NetApp E-Series discover E-Series storage systems
description: Module searches a subnet range and returns any available E-Series storage systems.
author: Nathan Swartz (@ndswartz)
options:
subnet_mask:
description:
- This is the IPv4 search range for discovering E-Series storage arrays.
- IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
- Be sure to include all management paths in the search range.
type: str
required: true
ports:
description:
- This option specifies which ports to be tested during the discovery process.
- The first usable port will be used in the returned API url.
type: list
default: [8443]
required: false
proxy_url:
description:
- Web Services Proxy REST API URL. Example https://192.168.1.100:8443/devmgr/v2/
type: str
required: false
proxy_username:
description:
- Web Service Proxy username
type: str
required: false
proxy_password:
description:
- Web Service Proxy user password
type: str
required: false
proxy_validate_certs:
description:
- Whether to validate Web Service Proxy SSL certificate
type: bool
default: true
required: false
prefer_embedded:
description:
- Give preference to Web Services Embedded when an option exists for both Web Services Proxy and Embedded.
- Web Services Proxy will be utilized when available by default.
type: bool
default: false
required: false
notes:
- Only available for platforms E2800 or later (SANtricity Web Services Embedded REST API must be available).
- All E-Series storage systems with SANtricity version 11.62 or later will be discovered.
- Only E-Series storage systems without a set admin password running SANtricity versions prior to 11.62 will be discovered.
- Use SANtricity Web Services Proxy to discover all systems regardless of SANtricity version or password.
requirements:
- ipaddress
"""
EXAMPLES = """
- name: Discover all E-Series storage systems on the network.
na_santricity_discover:
subnet_mask: 192.168.1.0/24
"""
RETURN = """
systems_found:
description: Success message
returned: on success
type: dict
sample: {"012341234123": {
"addresses": ["192.168.1.184", "192.168.1.185"],
"api_urls": ["https://192.168.1.184:8443/devmgr/v2/", "https://192.168.1.185:8443/devmgr/v2/"],
"label": "ExampleArray01",
"proxy_ssid: "",
"proxy_required": false},
"012341234567": {
"addresses": ["192.168.1.23", "192.168.1.24"],
"api_urls": ["https://192.168.1.100:8443/devmgr/v2/"],
"label": "ExampleArray02",
"proxy_ssid": "array_ssid",
"proxy_required": true}}
"""
import json
import multiprocessing
import threading
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import request
from ansible.module_utils._text import to_native
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
import ipaddress
except ImportError:
HAS_IPADDRESS = False
else:
HAS_IPADDRESS = True
class NetAppESeriesDiscover:
"""Discover E-Series storage systems."""
MAX_THREAD_POOL_SIZE = 256
CPU_THREAD_MULTIPLE = 32
SEARCH_TIMEOUT = 30
DEFAULT_CONNECTION_TIMEOUT_SEC = 30
DEFAULT_DISCOVERY_TIMEOUT_SEC = 300
def __init__(self):
ansible_options = dict(subnet_mask=dict(type="str", required=True),
ports=dict(type="list", required=False, default=[8443]),
proxy_url=dict(type="str", required=False),
proxy_username=dict(type="str", required=False),
proxy_password=dict(type="str", required=False, no_log=True),
proxy_validate_certs=dict(type="bool", default=True, required=False),
prefer_embedded=dict(type="bool", default=False, required=False))
required_together = [["proxy_url", "proxy_username", "proxy_password"]]
self.module = AnsibleModule(argument_spec=ansible_options, required_together=required_together)
args = self.module.params
self.subnet_mask = args["subnet_mask"]
self.prefer_embedded = args["prefer_embedded"]
self.ports = []
self.proxy_url = args["proxy_url"]
if args["proxy_url"]:
parsed_url = list(urlparse.urlparse(args["proxy_url"]))
parsed_url[2] = "/devmgr/utils/about"
self.proxy_about_url = urlparse.urlunparse(parsed_url)
parsed_url[2] = "/devmgr/v2/"
self.proxy_url = urlparse.urlunparse(parsed_url)
self.proxy_username = args["proxy_username"]
self.proxy_password = args["proxy_password"]
self.proxy_validate_certs = args["proxy_validate_certs"]
for port in args["ports"]:
if str(port).isdigit() and 0 < int(port) < 2 ** 16:
self.ports.append(str(port))
else:
self.module.fail_json(msg="Invalid port! Ports must be positive numbers between 0 and 65536.")
self.systems_found = {}
def check_ip_address(self, systems_found, address):
"""Determine where an E-Series storage system is available at a specific ip address."""
for port in self.ports:
if port == "8080":
url = "http://%s:%s/devmgr/v2/storage-systems/1/" % (address, port)
else:
url = "https://%s:%s/devmgr/v2/storage-systems/1/" % (address, port)
try:
rc, sa_data = request(url + "symbol/getSAData", validate_certs=False, force_basic_auth=False, ignore_errors=True)
if rc == 401: # Unauthorized
self.module.warn("Fail over and discover any storage system without a set admin password. This will discover systems without a set password"
" such as newly deployed storage systems. Address [%s]." % address)
# Fail over and discover any storage system without a set admin password. This will cover newly deployed systems.
rc, graph = request(url + "graph", validate_certs=False, url_username="admin", url_password="", timeout=self.SEARCH_TIMEOUT)
sa_data = graph["sa"]["saData"]
if sa_data["chassisSerialNumber"] in systems_found:
systems_found[sa_data["chassisSerialNumber"]]["api_urls"].append(url)
else:
systems_found.update({sa_data["chassisSerialNumber"]: {"api_urls": [url], "label": sa_data["storageArrayLabel"],
"addresses": [], "proxy_ssid": "", "proxy_required": False}})
break
except Exception as error:
pass
def no_proxy_discover(self):
"""Discover E-Series storage systems using embedded web services."""
thread_pool_size = min(multiprocessing.cpu_count() * self.CPU_THREAD_MULTIPLE, self.MAX_THREAD_POOL_SIZE)
subnet = list(ipaddress.ip_network(u"%s" % self.subnet_mask))
thread_pool = []
search_count = len(subnet)
for start in range(0, search_count, thread_pool_size):
end = search_count if (search_count - start) < thread_pool_size else start + thread_pool_size
for address in subnet[start:end]:
thread = threading.Thread(target=self.check_ip_address, args=(self.systems_found, address))
thread_pool.append(thread)
thread.start()
for thread in thread_pool:
thread.join()
def verify_proxy_service(self):
"""Verify proxy url points to a web services proxy."""
try:
rc, about = request(self.proxy_about_url, validate_certs=self.proxy_validate_certs)
if not about["runningAsProxy"]:
self.module.fail_json(msg="Web Services is not running as a proxy!")
except Exception as error:
self.module.fail_json(msg="Proxy is not available! Check proxy_url. Error [%s]." % to_native(error))
def test_systems_found(self, systems_found, serial, label, addresses):
"""Verify and build api urls."""
api_urls = []
for address in addresses:
for port in self.ports:
if port == "8080":
url = "http://%s:%s/devmgr/" % (address, port)
else:
url = "https://%s:%s/devmgr/" % (address, port)
try:
rc, response = request(url + "utils/about", validate_certs=False, timeout=self.SEARCH_TIMEOUT)
api_urls.append(url + "v2/")
break
except Exception as error:
pass
systems_found.update({serial: {"api_urls": api_urls,
"label": label,
"addresses": addresses,
"proxy_ssid": "",
"proxy_required": False}})
def proxy_discover(self):
"""Search for array using it's chassis serial from web services proxy."""
self.verify_proxy_service()
subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)
try:
rc, request_id = request(self.proxy_url + "discovery", method="POST", validate_certs=self.proxy_validate_certs,
force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password,
data=json.dumps({"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
"connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC}))
# Wait for discover to complete
try:
for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
rc, discovered_systems = request(self.proxy_url + "discovery?requestId=%s" % request_id["requestId"],
validate_certs=self.proxy_validate_certs,
force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
if not discovered_systems["discoverProcessRunning"]:
thread_pool = []
for discovered_system in discovered_systems["storageSystems"]:
addresses = []
for controller in discovered_system["controllers"]:
addresses.extend(controller["ipAddresses"])
# Storage systems with embedded web services.
if "https" in discovered_system["supportedManagementPorts"] and self.prefer_embedded:
thread = threading.Thread(target=self.test_systems_found,
args=(self.systems_found, discovered_system["serialNumber"], discovered_system["label"], addresses))
thread_pool.append(thread)
thread.start()
# Storage systems without embedded web services.
else:
self.systems_found.update({discovered_system["serialNumber"]: {"api_urls": [self.proxy_url],
"label": discovered_system["label"],
"addresses": addresses,
"proxy_ssid": "",
"proxy_required": True}})
for thread in thread_pool:
thread.join()
break
sleep(1)
else:
self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
except Exception as error:
self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))
except Exception as error:
self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
def update_proxy_with_proxy_ssid(self):
"""Determine the current proxy ssid for all discovered-proxy_required storage systems."""
# Discover all added storage systems to the proxy.
systems = []
try:
rc, systems = request(self.proxy_url + "storage-systems", validate_certs=self.proxy_validate_certs,
force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
except Exception as error:
self.module.fail_json(msg="Failed to ascertain storage systems added to Web Services Proxy.")
for system_key, system_info in self.systems_found.items():
if self.systems_found[system_key]["proxy_required"]:
for system in systems:
if system_key == system["chassisSerialNumber"]:
self.systems_found[system_key]["proxy_ssid"] = system["id"]
def discover(self):
"""Discover E-Series storage systems."""
if self.proxy_url:
self.proxy_discover()
self.update_proxy_with_proxy_ssid()
else:
self.no_proxy_discover()
self.module.exit_json(msg="Discover process complete.", systems_found=self.systems_found, changed=False)
def main():
discover = NetAppESeriesDiscover()
discover.discover()
if __name__ == "__main__":
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'PrivateEndpointServiceConnectionStatus',
'PublicNetworkAccessType',
]
class PrivateEndpointServiceConnectionStatus(str, Enum):
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
class PublicNetworkAccessType(str, Enum):
"""
Controls whether traffic from the public network is allowed to access the Attestation Provider APIs.
"""
ENABLED = "Enabled"
"""Enables public network connectivity to the Attestation Provider REST APIs."""
DISABLED = "Disabled"
"""Disables public network connectivity to the Attestation Provider REST APIs."""
|
import pytest
import numpy as np
import random
import cv2
from proyecto2.io import IO
from proyecto2.image import Image
class TestIO:
TEST1 = [[[124, 177, 144, 255], [ 54, 104, 72, 255], [151, 192, 165, 255], [200, 236, 214, 255], [103, 129, 116, 255]],
[[ 96, 133, 99, 255], [ 7, 37, 12, 255], [113, 128, 120, 255], [133, 138, 137, 255], [ 50, 50, 50, 255]],
[[145, 152, 117, 255], [125, 118, 103, 255], [118, 77, 105, 255], [ 66, 10, 53, 255], [175, 120, 147, 255]],
[[157, 131, 125, 255], [219, 176, 191, 255], [134, 59, 115, 255], [102, 21, 76, 255], [173, 118, 127, 255]],
[[176, 108, 163, 255], [153, 79, 139, 255], [142, 53, 126, 255], [ 92, 29, 61, 255], [125, 122, 67, 255]]]
def test_read(self):
image = IO.read("./test/images/test_1.jpg")
assert (image.pixels == np.array(self.TEST1)).all()
def test_read_non_existent(self):
with pytest.raises(IOError):
IO.read("./test/images/fake.jpg")
def test_write(self, tmpdir):
matrix = np.array(self.TEST1)
image = Image(matrix)
IO.write(image, "./test/images_test/test.png")
new_image = IO.read("./test/images_test/test.png")
assert (new_image.pixels == matrix).all()
|
""" Copyright © - 2020 - UMONS
CONQUESTO of University of Mons - Jonathan Joertz, Dorian Labeeuw, and Gaëtan Staquet - is free software : you can redistribute it and/or modify it under the terms of the BSD-3 Clause license. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 Clause License for more details.
You should have received a copy of the BSD-3 Clause License along with this program.
Each use of this software must be attributed to University of Mons (Jonathan Joertz, Dorian Labeeuw, and Gaëtan Staquet).
"""
from __future__ import annotations
from typing import Optional, List, Tuple, Dict, Set
from abc import ABC, abstractmethod
import copy
import query.attack_graph as attack_graph
import query.query as query
def next_subtree_head_name(head_name : str) -> str:
'''
Generate the head name to use in the next tree.
:param head_name: of shape alpha_1
:return: of shape alpha_2
'''
parts = head_name.split(sep = "_")
parts[1] = str(int(parts[1]) + 1)
return parts[0] + "_" + parts[1]
def next_node_head_name(head_name : str) -> str:
'''
Generate the next head name to use (without changing the tree).
:param head_name: of shape alpha_1_1
:return: of shape alpha_1_2
'''
parts = head_name.split(sep = "_")
parts[2] = str(int(parts[2]) + 1)
return "_".join(parts)
class Node(ABC):
def __init__(self, children: List[Node]):
self.children = children
def set_children(self, children: List[Node]):
self.children = children
@abstractmethod
def apply_negation(self) -> Node:
'''
Applies the negation on the current node and returns the new node (with the negation applied).
'''
pass
@abstractmethod
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
'''
Writes this node in an ASP program in a rule of the given name
:param head_name: The name of the current rule we are writing
:param free_variables: The list of free variables in the current rule
:param for_safe_variables: The list of atoms that can be used to ensure that every free variable is safe
:param unsafe_variables: The set of unsafe variables
:return: The string giving the ASP program
'''
pass
@abstractmethod
def _get_primary_keys(self) -> List[query.Variable]:
pass
def _apply_negation_on_children(self) -> List[Node]:
return [child.apply_negation() for child in self.children]
def _add_atoms_for_unsafe_variables(self, for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
'''
Creates a string with every needed atom in order to make every variable safe.
:param for_safe_variables: The atoms that can be used to make variables safe.
:param unsafe_variables: The unsafe variables.
:return: The part of an ASP body with atoms to make sure every variable is safe.
'''
s = ""
# We need to first iterate over the atoms to be able to remove the variables from the set
for for_safe_atom in for_safe_variables:
contains_var = False
# We check if the current atom is useful
for unsafe_var in unsafe_variables:
contains_var = len(list(filter(lambda v: v.name == unsafe_var.name, for_safe_atom.pure_variables))) != 0
if contains_var:
break
# If it is useful, we can use it to make some variables safe
if contains_var:
# In order to properly make the variables safe, we create new atoms
# Each atom is a copy of the current useful atom where all the variables' names are changed except for one of the unsafe variables
# For instance, if the atom R[X, Y, Z] can be used to make the variables [X, Z] safe, we create two new atoms
# R[X, YYY_0_1, ZZZ_0_2]
# R[XXX_1_0, YYY_1_1, Z]
for i, variable in enumerate(for_safe_atom.primary_variables_as_non_primary() + for_safe_atom.secondary_variables):
# If the current variable is already safe, we skip it
if variable not in unsafe_variables:
continue
variables: List[query.Variable] = []
unsafe_variables.remove(variable) # We mark the variable as being safe
for j, var in enumerate(for_safe_atom.primary_variables_as_non_primary() + for_safe_atom.secondary_variables):
if i == j:
# We are at the variable we want to make safe
variables += [variable]
elif isinstance(var, query.Constant):
# We do not change constants
variables += [var]
else:
# We change the variable's name
new_var = copy.copy(var)
new_var.name = (3 * var.name) + "_{}_{}".format(i, j)
variables += [new_var]
# Now that we have all the variables' names, we can create the atom and convert it to ASP
new_atom = query.Atom(for_safe_atom.name, variables)
s += new_atom.to_ASP() + ", "
return s
class NotNode(Node):
def apply_negation(self) -> Node:
return NotNode(self._apply_negation_on_children())
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
next_head_name = next_node_head_name(head_name)
primary_keys = self.children[0]._get_primary_keys()
new_unsafe_variables = set(primary_keys + free_variables)
new_free_variables = list(new_unsafe_variables)
if len(new_free_variables) != 0:
in_parenthesis = "(" + ", ".join(map(lambda var: var.to_ASP(), new_free_variables)) + ")"
s = "not " + next_head_name + in_parenthesis + ".\n"
s += next_head_name + in_parenthesis
else:
s = "not " + next_head_name + ".\n"
s += next_head_name
s += " :- " + self.children[0].to_ASP(next_head_name, new_free_variables, for_safe_variables, new_unsafe_variables)
return s
def _get_primary_keys(self) -> List[query.Variable]:
return []
def __str__(self) -> str:
return "Not (" + str(self.children[0]) + ")"
class ExistsNode(Node):
def apply_negation(self) -> Node:
raise NotImplementedError("Impossible to negate an Exists node in this context.")
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
return self.children[0].to_ASP(head_name, free_variables, for_safe_variables, unsafe_variables)
def _get_primary_keys(self) -> List[query.Variable]:
return self.children[0]._get_primary_keys()
def __str__(self) -> str:
return "Exists (" + str(self.children[0]) + ")"
def descend_into_OR(self) -> OrNode:
if not isinstance(self.children[0], OrNode):
raise RuntimeError("Your construction is not correct")
return OrNode([ExistsNode([x]) for x in self.children[0].children])
class AndNode(Node):
def apply_negation(self) -> OrNode:
return OrNode(self._apply_negation_on_children())
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
return ", ".join(map(lambda child: child.to_ASP(head_name, free_variables, for_safe_variables, unsafe_variables), self.children))
def _get_primary_keys(self) -> List[query.Variable]:
# The query atom must always be in the left part of the conjunction
return self.children[0]._get_primary_keys()
def __str__(self) -> str:
return "(" + ") AND (".join(map(str, self.children)) + ")"
def distribute_over_or(self) -> OrNode:
if not isinstance(self.children[1], OrNode):
raise RuntimeError("Your construction is incorrect")
left = self.children[0]
and_nodes : List[Node] = []
for right in self.children[1].children:
and_nodes.append(AndNode([left, right]))
return OrNode(and_nodes)
class OrNode(Node):
def apply_negation(self) -> AndNode:
return AndNode(self._apply_negation_on_children())
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
in_parenthesis = "(" + ", ".join(map(lambda x: x.to_ASP(), free_variables)) + ")"
new_unsafe_variables = unsafe_variables.union(free_variables)
s = ""
# We know, by construction, that the parent of this node is a NotNode
# Thus, we also know that the previous body is already closed.
# So, we can just create the new rules
s += self.children[0].to_ASP(head_name, free_variables, for_safe_variables, copy.deepcopy(new_unsafe_variables))
for child in self.children[1:]:
s += head_name + in_parenthesis + " :- " + child.to_ASP(head_name, free_variables, for_safe_variables, copy.deepcopy(new_unsafe_variables))
return s
def _get_primary_keys(self) -> List[query.Variable]:
return self.children[0]._get_primary_keys()
def __str__(self) -> str:
return "(" + ") OR (".join(map(str, self.children)) + ")"
class ImpliesNode(Node):
def apply_negation(self) -> AndNode:
return AndNode([self.children[0], self.children[1].apply_negation()])
def _get_primary_keys(self) -> List[query.Variable]:
raise NotImplementedError("An Implies node should not be present")
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
raise NotImplementedError("An Implies node should not be present")
def __str__(self) -> str:
return "(" + ") IMPLIES (".join(map(str, self.children)) + ")"
class Leaf(Node):
def __init__(self, negation : bool = False):
self.negation = negation
class AtomNode(Leaf):
def __init__(self, atom : query.Atom, negation : bool = False):
super().__init__(negation)
self.atom = atom
def apply_negation(self) -> AtomNode:
return AtomNode(self.atom, not self.negation)
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
# We update the set of unsafe variables to remove variables that are safe thanks to this atom
for var in self.atom.pure_variables:
if var in unsafe_variables:
unsafe_variables.remove(var)
# We immediately make every variable safe
# This is fine since we modify the set without copying it
# So, if another node makes unsafe variables safe, the variables that are made safe now won't be made safe again
# That is, we do not add useless atoms in the body of the rules
s = self._add_atoms_for_unsafe_variables(for_safe_variables, unsafe_variables)
return s + self.atom.to_ASP()
def _get_primary_keys(self) -> List[query.Variable]:
return list(self.atom.primary_variables)
def __str__(self):
return ("NOT " if self.negation else "") + str(self.atom)
class SubtreeNode(Leaf):
def __init__(self, subtree : Tree, atom_node : AtomNode, changes_in_names : Dict[query.Variable, query.Variable], negation : bool = False):
super().__init__(negation)
self.subtree = subtree
self.atom_node = atom_node
self.changes_in_names = changes_in_names
def apply_negation(self) -> SubtreeNode:
return SubtreeNode(self.subtree, self.atom_node, self.changes_in_names, not self.negation)
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
next_head_name = next_subtree_head_name(head_name[:-2])
free_var_for_subtree = self.subtree.free_variables
in_parenthesis = "(" + ", ".join(map(lambda x: x.to_ASP(), free_var_for_subtree)) + ")"
s = self._add_atoms_for_unsafe_variables(for_safe_variables, unsafe_variables)
s += "not " if self.negation else ""
s += next_head_name + "_1"
if len(free_var_for_subtree) != 0:
s += in_parenthesis
s += ".\n"
s += self.subtree.to_ASP(next_head_name)
return s
def _get_primary_keys(self) -> List[query.Variable]:
raise NotImplementedError("A Subtree node does not have primary keys")
def __str__(self):
return ("NOT " if self.negation else "") + "SUBTREE"
class EqualityNode(Leaf):
def __init__(self, right : query.Variable, left : query.Variable, original_atom : query.Atom, negation : bool = False):
super().__init__(negation)
self.right = right
self.left = left
self.original_atom = original_atom
def apply_negation(self) -> EqualityNode:
return EqualityNode(self.right, self.left, self.original_atom, not self.negation)
def to_ASP(self, head_name : str, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], unsafe_variables : Set[query.Variable]) -> str:
s = self._add_atoms_for_unsafe_variables(for_safe_variables, unsafe_variables)
comp = "!=" if self.negation else "="
s += self.left.to_ASP() + comp + self.right.to_ASP() + ".\n"
return s
def _get_primary_keys(self) -> List[query.Variable]:
raise NotImplementedError("An Equality node does not have primary keys")
def __str__(self):
return str(self.left) + ("!=" if self.negation else "=") + str(self.right)
class Tree:
def __init__(self, q : query.Query, atom : query.Atom, free_variables : List[query.Variable], for_safe_variables : List[query.Atom], subtree : Tree = None):
'''
Creation of the logical tree.
For details, see ASPpip.pdf.
The atom can not contain a variable 'zeta'!
:param q: the query
:param atom: the atom to use for the rewriting
:param free_variables: the list of free variables in the query
:param for_safe_variables: the atoms that can be used to make every free variable safe
:param subtree: the subformula, if the current formula needs one
'''
self.free_variables = free_variables
self.for_save_variables = for_safe_variables
primary_variables = atom.primary_variables
secondary_variables = atom.secondary_variables
if len(secondary_variables) == 0 and subtree is None:
# Only primary variables and no subtree means that we have nothing in the right part of the implication
self.root = ExistsNode([AtomNode(atom)])
self.close_body = True
else:
# At least one non-primary variable
self.close_body = False
# We create the tree
# First, the part that checks if the block exists
exists_blocks = ExistsNode([AtomNode(atom)])
# Then, we create a new atom with new, unused variables and without constants
# This is used in the right part of the implication
# Note that the first occurrence is not modified
seen_names : Set[str] = set()
different_secondary_variables : List[query.Variable] = []
for i, var in enumerate(secondary_variables):
in_primary = len(list(filter(lambda prim: prim.name == var.name, primary_variables))) != 0
in_free_variables = len(list(filter(lambda free: free.name == var.name, free_variables))) != 0
# If the variable appears in the primary keys or is a constant or is a free variable, we must always replace it in the secondary variables
if in_primary or in_free_variables or isinstance(var, query.Constant) or var.name in seen_names:
new_var = query.Variable("zeta_{}".format(i))
different_secondary_variables.append(new_var)
else:
different_secondary_variables.append(var)
seen_names.add(var.name)
all_variables : List[query.Variable] = list(primary_variables) + different_secondary_variables
atom_with_different_variables = AtomNode(query.Atom(atom.name, all_variables))
# We memorize the changes in names we just performed
changes_in_names : Dict[query.Variable, query.Variable] = {}
for i, var in enumerate(secondary_variables):
if var not in changes_in_names:
changes_in_names[var] = different_secondary_variables[i]
# We create the conjunction on the equality constraints
# Note that we do not create X = X constraints (for obvious reasons)
# Also, we iterate in reverse order to be able to check if the variable is used multiple times (and to reduce the number of iterations needed for that check)
and_children : List[Node] = []
for i, var in reversed(list(enumerate(secondary_variables))):
if var.name != different_secondary_variables[i].name:
# The variable is used in the primary keys
prim_keys_with_same_name = list(filter(lambda prim: prim.name == var.name, primary_variables))
if len(prim_keys_with_same_name) > 0:
and_children.append(EqualityNode(prim_keys_with_same_name[0], different_secondary_variables[i], atom))
# The variable is in the free variables
in_free_var = False
free_variables_with_same_name = list(filter(lambda free_var: free_var.name == var.name, free_variables))
if len(free_variables_with_same_name) > 0:
and_children.append(EqualityNode(free_variables_with_same_name[0], different_secondary_variables[i], atom))
in_free_var = True
# The variable appears multiple times in the secondary variables
# AND does NOT appear in the free variables
# Indeed, it is useless to have ZETA_0 = X, ZETA_1 = X and ZETA_0 = ZETA_1, since ZETA_0 = X and ZETA_1 = X is enough
if not in_free_var:
for j, secondary_var in enumerate(secondary_variables[:i]):
if secondary_var.name == var.name:
if different_secondary_variables[i].name != var.name:
and_children.append(EqualityNode(different_secondary_variables[i], different_secondary_variables[j], atom))
break
# The variable is a constant
if isinstance(var, query.Constant):
and_children.append(EqualityNode(var, different_secondary_variables[i], atom))
# If we have a subformula, we use it
if subtree is not None:
and_children.append(SubtreeNode(subtree, atom_with_different_variables, changes_in_names))
# The implication, the for all and the AND after the first Exists
if len(and_children) == 0:
# No equality constraint nor subtree
self.root = ExistsNode([exists_blocks])
self.close_body = True
elif len(and_children) == 1:
implication_node = ImpliesNode([atom_with_different_variables, and_children[0]])
not_implication_node = implication_node.apply_negation()
# We do not need to distribute the AND (since we do not have an OR)
not_for_all_node = ExistsNode([not_implication_node])
big_and_node = AndNode([exists_blocks, NotNode([not_for_all_node])])
self.root = ExistsNode([big_and_node])
else:
implication_node = ImpliesNode([atom_with_different_variables, AndNode(and_children)])
not_implication_node = implication_node.apply_negation()
distributed_or = not_implication_node.distribute_over_or()
not_for_all_node = ExistsNode([distributed_or])
or_node = not_for_all_node.descend_into_OR()
not_node = NotNode([or_node])
big_and_node = AndNode([exists_blocks, not_node])
self.root = ExistsNode([big_and_node])
def to_ASP(self, head_name : str) -> str:
'''
Writes the tree as an ASP program.
:param head_name: The name to use for the rules of this tree. It must be of shape 'alpha_1'.
:return: A string giving the ASP program
'''
if len(self.free_variables) == 0:
s = head_name + "_1"
else:
in_parenthesis = "(" + ", ".join(map(lambda var: var.to_ASP(), self.free_variables)) + ")"
s = head_name + "_1" + in_parenthesis
s += " :- " + self.root.to_ASP(head_name + "_1", self.free_variables, self.for_save_variables, set(self.free_variables))
if self.close_body:
# In some cases, the tree is not as usual.
# That is, the rightmost node is not an EqualityNode nor a SubtreeNode.
# So, we need to manually close the body
return s + ".\n"
else:
return s
def __str__(self) -> str:
return "TREE[" + str(self.root) + "]"
def fo_rewriting(q : query.Query, removed_atoms : List[query.Atom] = []) -> Optional[Tree]:
'''
Rewrites the query in FO.
It returns a tree representing the formula in propositional logic.
The tree can then easily be used to construct an ASP program.
See ASPpip.pdf for details and explanations on the logic used.
:param q: The query to rewrite
:return: The tree describing the rewritten formula
'''
graph = attack_graph.AttackGraph(q)
if not graph.is_acyclic:
return None
if graph.number_of_atoms > 1:
for i, R in enumerate(graph.unattacked_atoms):
# It may happen that we remove an atom we shouldn't
try:
q_reduced = q.remove_atom(R)
except query.QueryCreationException:
continue
tree_for_q_reduced = fo_rewriting(q_reduced, removed_atoms=removed_atoms + [R])
if tree_for_q_reduced is None:
return None
tree_for_q = Tree(q, R, q.free_variables, removed_atoms + [R], subtree=tree_for_q_reduced)
return tree_for_q
return None
else:
R = graph.unattacked_atoms[0]
tree_for_q = Tree(q, R, q.free_variables, removed_atoms + [R])
if tree_for_q is None:
return None
return tree_for_q
|
######################################
# Import and initialize the libraries #
######################################
from code.pygame_objects import *
#################
# Setup logging #
#################
filename = os.path.basename(__file__).split('.')[0]
logger = log.get_logger(filename)
logger.info('Loading up {}...'.format(filename))
selection_screen = screen(
name = 'selection',
surfaceParameters = {
'frame': coord(w=1024, h=768),
'bgColour': None,
'isAlpha': True,
'scroll': False
},
objectsParameters = {
'background': {
'type': 'object',
'isAlpha': True,
'frame': {
'box': coord(w=1024, h=768),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='go_back')
},
'bubble': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=243, w=689, h=59),
'image': coord(w=1024, h=768),
'text': coord(x=281, y=248, w=646, h=49),
},
'runclass': runclass(action='Bubble sort')
},
'insertion': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=300, w=689, h=59),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='Insertion sort')
},
'merge': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=358, w=689, h=59),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='Merge sort')
},
'quick': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=415, w=689, h=59),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='Quick sort')
},
'radix': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=473, w=689, h=59),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='Radix sort')
},
'bogo': {
'type': 'button',
'isAlpha': True,
'frame': {
'box': coord(x=267, y=531, w=689, h=59),
'image': coord(w=1024, h=768)
},
'runclass': runclass(action='Bogo sort')
},
},
keyboardParameters = {
'back': {
'keys': {27},
'runclass': runclass(action='go_back')
}
}
)
# Load items to screen
selection_screen.surface.load()
class selection:
@staticmethod
def run(screen, itemName):
# Display selection screen
selection_screen.surface.display()
screen.objects[itemName].display(directToScreen=True)
while True:
# Check for interaction with the screen
action_result = selection_screen.event.action(directToScreen=True)
# No action
if action_result is None: continue
# When program is set to close
if action_result.contains('outcome','__quit__'): return '__quit__'
# Going back
if action_result.contains('outcome', 'go_back'): return '__back__'
# Change text to corresponding sort types
if action_result.didAction('click'):
screen.objects[itemName].data.setText(text=action_result.click.outcome, withDisplay=False)
return '__back__'
|
from .wrapper import Querier
__all__ = ["Querier"]
|
import board
import busio
import time
import math
import adafruit_hcsr04
sonar = adafruit_hcsr04.HCSR04(trigger_pin=board.D11, echo_pin=board.D10)
from sphero_rvr import RVRDrive
rvr = RVRDrive(uart = busio.UART(board.TX, board.RX, baudrate=115200))
# ************************************************************************************************************
time.sleep(0.5)
rvr.set_all_leds(255,0,0) #set leds to red
time.sleep(0.1)
rvr.set_all_leds(0,255,0) #set leds to green
time.sleep(0.1)
rvr.set_all_leds(0,0,255) #set leds to blue
time.sleep(0.1)
rvr.set_all_leds(255,255,255) #set leds to white
rvr.sensor_start()
print("starting up")
rvr.update_sensors()
# ************************************************************************************************************************
setpoint = 2
MAX_SPEED = 100
k = 10
error = 0
Tolerance = 3
# Drive toward the setpoint for up to five seconds using proportional control
start_time = time.monotonic()
elapsed_time = 0.0
while(elapsed_time < 5.0):
    elapsed_time = time.monotonic() - start_time
    # RVRDrive.drive_to_position_si(angle, x, y, speed)
    rvr.drive_to_position_si(0, 0, 2, 50)
    x_coordinate = rvr.get_x()
    error = setpoint - x_coordinate
    output = k*error + Tolerance
    if(output > 50):
        output = 50
    if(output < -50):
        output = -50
    time.sleep(0.5)
# ********************************************************************************************************
'''
try:
sensor_distance = sonar.distance
# Add your proportional control code here.
error = sensor_distance - setpoint
if(error > 0):
output = 80
elif(error < 0):
output = -80
rvr.setMotors(output, output) #set the power of the motors for both the left and right track
# Read the Sphero RVR library file to find the rvr.setMotors(left,right) command.
# Use this command in the next line to send the output of your proportional
# control to both the left and right motors.
except RuntimeError:
print("Retrying!")
pass
time.sleep(0.2)
# Drive for two seconds at a heading of 90 degrees
rvr.drive(30,90)
time.sleep(2.0)
rvr.stop()
# Drive back to the starting point
rvr.drive_to_position_si(0,0,0,0.4)
time.sleep(3.0)
'''
|
import json
import unittest
import pyyoutube.models as models
class ChannelSectionModelTest(unittest.TestCase):
BASE_PATH = "testdata/modeldata/channel_sections/"
with open(BASE_PATH + "channel_section_info.json", "rb") as f:
CHANNEL_SECTION_INFO = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "channel_section_response.json", "rb") as f:
CHANNEL_SECTION_RESPONSE = json.loads(f.read().decode("utf-8"))
def testChannelSection(self) -> None:
m = models.ChannelSection.from_dict(self.CHANNEL_SECTION_INFO)
self.assertEqual(m.id, "UC_x5XG1OV2P6uZZ5FSM9Ttw.e-Fk7vMPqLE")
self.assertEqual(m.snippet.type, "multipleChannels")
self.assertEqual(len(m.contentDetails.channels), 16)
def testChannelSectionResponse(self) -> None:
m = models.ChannelSectionResponse.from_dict(self.CHANNEL_SECTION_RESPONSE)
self.assertEqual(m.kind, "youtube#channelSectionListResponse")
self.assertEqual(len(m.items), 10)
|
import importlib
from ptoken.cache.backend import Backend
class Frontend:
"""
Frontend of cache
"""
__backend_ = None # type: Backend
def __init__(self, backend, **backend_kwargs):
"""
:param backend: name of a backend class defined in ptoken.cache.backend
:type backend: str
"""
module = importlib.import_module("ptoken.cache.backend")
self.__backend_ = getattr(module, backend)(**backend_kwargs)
def get(self, key, default=None):
return self.__backend_.get(key, default)
def set(self, key, value, **kwargs):
return self.__backend_.set(key, value, **kwargs)
def has(self, key):
return self.__backend_.has(key)
def remove(self, *key):
return self.__backend_.remove(*key)
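# Illustrative usage sketch: "Memory" is a hypothetical backend class name; any class defined in
# ptoken.cache.backend can be passed by name, and keyword arguments (here a hypothetical ttl) are
# forwarded to that backend's constructor.
#   cache = Frontend("Memory", ttl=60)
#   cache.set("token", "abc123")
#   cache.get("token")   # -> "abc123"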
|
# Copyright 2018-2021 Faculty Science Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow import ValidationError
from faculty.clients.secret import DatasetsSecrets, DatasetsSecretsSchema
TEST_SECRETS = DatasetsSecrets(
bucket="test-bucket",
access_key="test-access-key",
secret_key="test-secret-key",
region="test-region",
verified=True,
)
TEST_SECRETS_BODY = {
"bucket": TEST_SECRETS.bucket,
"access_key": TEST_SECRETS.access_key,
"secret_key": TEST_SECRETS.secret_key,
"region": TEST_SECRETS.region,
"verified": TEST_SECRETS.verified,
}
def test_datasets_secrets_schema():
assert DatasetsSecretsSchema().load(TEST_SECRETS_BODY) == TEST_SECRETS
def test_datasets_secrets_invalid():
with pytest.raises(ValidationError):
DatasetsSecretsSchema().load({})
|
import cupy
from cupy import core
def flip(a, axis):
"""Reverse the order of elements in an array along the given axis.
Note that the ``flip`` function was introduced in NumPy v1.12.
The contents of this document are the same as the original one.
Args:
a (~cupy.ndarray): Input array.
axis (int): Axis in array, which entries are reversed.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.flip`
"""
a_ndim = a.ndim
if a_ndim < 1:
raise ValueError('Input must be >= 1-d')
axis = int(axis)
if not -a_ndim <= axis < a_ndim:
raise ValueError(
'axis must be >= %d and < %d' % (-a_ndim, a_ndim))
return _flip(a, axis)
def fliplr(a):
"""Flip array in the left/right direction.
Flip the entries in each row in the left/right direction. Columns
are preserved, but appear in a different order than before.
Args:
a (~cupy.ndarray): Input array.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.fliplr`
"""
if a.ndim < 2:
raise ValueError('Input must be >= 2-d')
return a[::, ::-1]
def flipud(a):
"""Flip array in the up/down direction.
Flip the entries in each column in the up/down direction. Rows are
preserved, but appear in a different order than before.
Args:
a (~cupy.ndarray): Input array.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.flipud`
"""
if a.ndim < 1:
raise ValueError('Input must be >= 1-d')
return a[::-1]
def roll(a, shift, axis=None):
"""Roll array elements along a given axis.
Args:
a (~cupy.ndarray): Array to be rolled.
shift (int): The number of places by which elements are shifted.
axis (int or None): The axis along which elements are shifted.
If ``axis`` is ``None``, the array is flattened before shifting,
and after that it is reshaped to the original shape.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.roll`
"""
if axis is None:
if a.size == 0:
return a
size = a.size
ra = a.ravel()
shift %= size
res = cupy.empty((size,), a.dtype)
res[:shift] = ra[size - shift:]
res[shift:] = ra[:size - shift]
return res.reshape(a.shape)
else:
axis = int(axis)
if axis < 0:
axis += a.ndim
if not 0 <= axis < a.ndim:
raise core.core._AxisError(
'axis must be >= %d and < %d' % (-a.ndim, a.ndim))
size = a.shape[axis]
if size == 0:
return a
shift %= size
prev = (slice(None),) * axis
rest = (slice(None),) * (a.ndim - axis - 1)
# Roll only the dimension at the given axis
# ind1 is [:, ..., size-shift:, ..., :]
# ind2 is [:, ..., :size-shift, ..., :]
ind1 = prev + (slice(size - shift, None, None),) + rest
ind2 = prev + (slice(None, size - shift, None),) + rest
r_ind1 = prev + (slice(None, shift, None),) + rest
r_ind2 = prev + (slice(shift, None, None),) + rest
res = cupy.empty_like(a)
res[r_ind1] = a[ind1]
res[r_ind2] = a[ind2]
return res
def rot90(a, k=1, axes=(0, 1)):
"""Rotate an array by 90 degrees in the plane specified by axes.
Note that the ``axes`` argument was introduced in NumPy v1.12.
The contents of this document are the same as the original one.
Args:
a (~cupy.ndarray): Array of two or more dimensions.
k (int): Number of times the array is rotated by 90 degrees.
axes (tuple of ints): The array is rotated in the plane defined by
the axes. Axes must be different.
Returns:
~cupy.ndarray: Output array.
.. seealso:: :func:`numpy.rot90`
"""
a_ndim = a.ndim
if a_ndim < 2:
raise ValueError('Input must be >= 2-d')
axes = tuple(axes)
if len(axes) != 2:
raise ValueError('len(axes) must be 2')
if axes[0] == axes[1] or abs(axes[0] - axes[1]) == a_ndim:
raise ValueError('axes must be different')
if not (-a_ndim <= axes[0] < a_ndim and -a_ndim <= axes[1] < a_ndim):
raise ValueError('axes must be >= %d and < %d' % (-a_ndim, a_ndim))
k = k % 4
if k == 0:
return a[:]
if k == 2:
return _flip(_flip(a, axes[0]), axes[1])
axes_t = list(range(0, a_ndim))
axes_t[axes[0]], axes_t[axes[1]] = axes_t[axes[1]], axes_t[axes[0]]
if k == 1:
return cupy.transpose(_flip(a, axes[1]), axes_t)
else:
return _flip(cupy.transpose(a, axes_t), axes[1])
def _flip(a, axis):
# This function flips array without checking args.
indexer = [slice(None)] * a.ndim
indexer[axis] = slice(None, None, -1)
return a[tuple(indexer)]
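# Illustrative behaviour of the public helpers above (assuming a CUDA device is available;
# results mirror the corresponding NumPy functions):
#   a = cupy.arange(6).reshape(2, 3)   # [[0, 1, 2], [3, 4, 5]]
#   flip(a, 1)                          # [[2, 1, 0], [5, 4, 3]]
#   roll(a, 1, axis=1)                  # [[2, 0, 1], [5, 3, 4]]
#   rot90(a)                            # [[2, 5], [1, 4], [0, 3]]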
|
"""
Contains application configuration for django
"""
from django.apps import AppConfig
class PermissionsConfig(AppConfig):
"""Configuration of the application"""
name = 'permissions'
|
"""fifth migration
Revision ID: 7edeef2ab637
Revises: 5595ad14bcf5
Create Date: 2020-05-13 05:14:43.748249
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7edeef2ab637'
down_revision = '5595ad14bcf5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_blogposts_quote', table_name='blogposts')
op.drop_index('ix_blogposts_user_id', table_name='blogposts')
op.drop_index('ix_profile_photos_user_id', table_name='profile_photos')
op.drop_index('ix_reviews_user_id', table_name='reviews')
op.create_foreign_key(None, 'reviews', 'blogposts', ['quote_id'], ['id'])
op.drop_column('reviews', 'quote')
op.drop_column('reviews', 'author')
op.drop_index('ix_users_role_id', table_name='users')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_users_role_id', 'users', ['role_id'], unique=False)
op.add_column('reviews', sa.Column('author', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('reviews', sa.Column('quote', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'reviews', type_='foreignkey')
op.create_index('ix_reviews_user_id', 'reviews', ['user_id'], unique=False)
op.create_index('ix_profile_photos_user_id', 'profile_photos', ['user_id'], unique=False)
op.create_index('ix_blogposts_user_id', 'blogposts', ['user_id'], unique=False)
op.create_index('ix_blogposts_quote', 'blogposts', ['quote'], unique=False)
# ### end Alembic commands ###
|
#!/usr/bin/python3
# -*-coding:utf-8-*-
from urllib.parse import urlencode
param = {"birthday":"生日快乐", "mingxing":"huangbo"}
result = urlencode(param)
print(result)
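# Expected output (Python 3.7+ preserves dict insertion order; the Chinese value is percent-encoded as UTF-8):
# birthday=%E7%94%9F%E6%97%A5%E5%BF%AB%E4%B9%90&mingxing=huangbo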
|
from __future__ import absolute_import
from django.conf.urls import url
from talks.contributors.views import (contributors_home, contributors_events,
contributors_eventgroups, contributors_persons)
urlpatterns = [
url(r'^$', contributors_home, name='contributors-home'),
url(r'^talks$', contributors_events, name='contributors-events'),
url(r'^series', contributors_eventgroups, name='contributors-eventgroups'),
url(r'^persons$', contributors_persons, name='contributors-persons'),
]
|
"""
Clean up Ravel and Mininet.
"""
import os
import mininet.clean
from ravel.log import logger
def clean():
"Try to kill Pox controller and clean Mininet"
logger.info("killing Pox controller instance")
os.system("pkill -9 -f pox.py")
logger.info("cleaning Mininet")
mininet.clean.cleanup()
|
import data
import math
import random
import time
import torch
import torch.nn as nn
from datetime import timedelta
from evaluate import compute_many2one_acc, compute_v_measure
class Control(nn.Module):
def __init__(self, model, model_path, batch_size, device, logger):
super(Control, self).__init__()
self.model = model
self.model_path = model_path
self.batch_size = batch_size
self.device = device
self.logger = logger
def train(self, data, lr, epochs):
self.log_data(data)
self.logger.log('[TRAINING]')
self.logger.log(' num_labels: %d' % self.model.num_labels)
self.logger.log(' dim: %d' % self.model.wemb.embedding_dim)
self.logger.log(' batch_size: %d' % self.batch_size)
self.logger.log(' lr: %g' % lr)
self.logger.log(' epochs: %d' % epochs)
self.logger.log('')
optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
best_acc = float('-inf')
start_time = time.time()
try:
for epoch in range(1, epochs + 1):
avg_loss, acc, vm, epoch_time = self.do_epoch(data, optimizer)
bits = (- avg_loss) * math.log2(math.e)
self.logger.log('| epoch {:3d} | loss {:6.2f} | {:6.2f} bits | '
'acc {:6.2f} | vm {:6.2f} | time {:10s}'.format(
epoch, avg_loss, bits, acc, vm,
str(timedelta(seconds=int(epoch_time)))))
if best_acc < acc:
best_acc = acc
with open(self.model_path, 'wb') as f:
torch.save(self.model, f)
except KeyboardInterrupt:
self.logger.log('-' * 89)
self.logger.log('Exiting from training early')
self.logger.log('\nTraining time {:10s}'.format(
str(timedelta(seconds=int(time.time() - start_time)))))
self.load_model()
acc, vm, zseqs, clustering = self.evaluate(data)
self.logger.log('=' * 89)
self.logger.log('| Best | acc {:5.2f} | vm {:5.2f}'.format(acc, vm))
self.logger.log('=' * 89)
return acc, vm, zseqs, clustering
def do_epoch(self, data, optimizer):
self.model.train()
avg_loss = 0
epoch_start_time = time.time()
batches = data.get_batches(self.batch_size)
for batch in batches:
self.model.zero_grad()
X, Y1, Y2, lengths = data.tensorize_batch(batch, self.device,
self.model.width)
loss = self.model(X, Y1, Y2, lengths, is_training=True)
avg_loss += loss.item() / len(batches)
loss.backward()
optimizer.step()
acc, vm, _, _ = self.evaluate(data)
epoch_time = time.time() - epoch_start_time
return avg_loss, acc, vm, epoch_time
def evaluate(self, data):
self.model.eval()
batches = data.get_batches(self.batch_size)
zseqs = [[False for w in sent] for sent in data.sents]
clustering = [{} for z in range(self.model.num_labels)]
with torch.no_grad():
for batch in batches:
X, Y1, Y2, lengths = data.tensorize_batch(batch, self.device,
self.model.width)
future_probs, future_indices = self.model(X, Y1, Y2, lengths,
is_training=False)
for k, (i, j) in enumerate(batch):
z = future_indices[k].item()
zseqs[i][j] = z
clustering[z][data.sents[i][j]] = True
acc = compute_many2one_acc(data.golds, zseqs)
vm = compute_v_measure(data.golds, zseqs)
return acc, vm, zseqs, clustering
def load_model(self):
with open(self.model_path, 'rb') as f:
self.model = torch.load(f)
self.model.future.lstm.flatten_parameters()
def log_data(self, data):
self.logger.log('-' * 89)
self.logger.log('[DATA]')
self.logger.log(' data: %s' % data.data_path)
self.logger.log(' # word types: %d' % len(data.w2i))
self.logger.log(' # char types: %d' % len(data.c2i))
self.logger.log(' # words: %d' % sum(len(sent) for sent in
data.sents))
self.logger.log(' # tag types: %d' % len(data.label_counter))
self.logger.log('-' * 89)
|
import pytest
import os
import shutil
from gtmcore.dataset import Manifest
from lmsrvlabbook.tests.fixtures import fixture_single_dataset
from gtmcore.fixtures.datasets import helper_append_file
class TestDatasetOverviewQueries(object):
def test_num_files(self, fixture_single_dataset):
"""Test getting the a Dataset's file count"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
numFiles
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['numFiles'] == 5
m = Manifest(ds, 'default')
current_revision_dir = m.cache_mgr.current_revision_dir
shutil.rmtree(current_revision_dir)
os.makedirs(current_revision_dir)
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['numFiles'] == 0
def test_total_bytes(self, fixture_single_dataset):
"""Test getting the a Dataset's total_bytes"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
totalBytes
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['totalBytes'] == '35'
m = Manifest(ds, 'default')
current_revision_dir = m.cache_mgr.current_revision_dir
shutil.rmtree(current_revision_dir)
os.makedirs(current_revision_dir)
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['totalBytes'] == '35'
# Update manifest after all files have been deleted
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['totalBytes'] == '0'
def test_local_bytes(self, fixture_single_dataset):
"""Test getting the a Dataset's local_bytes"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
localBytes
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['localBytes'] == '35'
# Delete all files
m = Manifest(ds, 'default')
current_revision_dir = m.cache_mgr.current_revision_dir
shutil.rmtree(current_revision_dir)
os.makedirs(current_revision_dir)
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['localBytes'] == '0'
# Update manifest after all files have been deleted, should still be 0
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['localBytes'] == '0'
def test_file_distribution(self, fixture_single_dataset):
"""Test getting the a Dataset's local_bytes"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
fileTypeDistribution
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert len(result['data']['dataset']['overview']['fileTypeDistribution']) == 1
assert result['data']['dataset']['overview']['fileTypeDistribution'] == ['1.00|.txt']
# Add additional files with different extensions
m = Manifest(ds, 'default')
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "test55.csv", "22222")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "df.csv", "33333")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, ".hidden", "33333")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "noextension", "33333")
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert len(result['data']['dataset']['overview']['fileTypeDistribution']) == 2
assert result['data']['dataset']['overview']['fileTypeDistribution'][0] == '0.71|.txt'
assert result['data']['dataset']['overview']['fileTypeDistribution'][1] == '0.29|.csv'
def test_file_info_combined(self, fixture_single_dataset):
"""Test getting the a Dataset's file info"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
fileTypeDistribution
localBytes
totalBytes
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['fileTypeDistribution'] == ['1.00|.txt']
assert result['data']['dataset']['overview']['localBytes'] == '35'
assert result['data']['dataset']['overview']['totalBytes'] == '35'
# Delete all files
m = Manifest(ds, 'default')
current_revision_dir = m.cache_mgr.current_revision_dir
shutil.rmtree(current_revision_dir)
os.makedirs(current_revision_dir)
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['fileTypeDistribution'] == ['1.00|.txt']
assert result['data']['dataset']['overview']['localBytes'] == '0'
assert result['data']['dataset']['overview']['totalBytes'] == '35'
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['fileTypeDistribution'] == []
assert result['data']['dataset']['overview']['localBytes'] == '0'
assert result['data']['dataset']['overview']['totalBytes'] == '0'
def test_file_distribution_hidden(self, fixture_single_dataset):
""""""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
fileTypeDistribution
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['fileTypeDistribution'] == ['1.00|.txt']
# Add files, including hidden files and directories
m = Manifest(ds, 'default')
os.makedirs(os.path.join(m.cache_mgr.cache_root, m.dataset_revision, ".hiddendir"))
os.makedirs(os.path.join(m.cache_mgr.cache_root, m.dataset_revision, ".hiddendir", "subdir"))
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "test55.csv", "22222")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "df.csv", "11")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, ".hidden", "343")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, "noextension", "6t4")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, ".hiddendir/tester.png", "8544")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, ".hiddendir/subdir/blah.jpeg", "8544")
helper_append_file(m.cache_mgr.cache_root, m.dataset_revision, ".hiddendir/subdir/.hiddenfile", "jhg")
m.update()
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert len(result['data']['dataset']['overview']['fileTypeDistribution']) == 4
assert result['data']['dataset']['overview']['fileTypeDistribution'][0] == '0.56|.txt'
assert result['data']['dataset']['overview']['fileTypeDistribution'][1] == '0.22|.csv'
assert result['data']['dataset']['overview']['fileTypeDistribution'][2] == '0.11|.jpeg'
assert result['data']['dataset']['overview']['fileTypeDistribution'][3] == '0.11|.png'
def test_readme(self, fixture_single_dataset):
"""Test getting a datasets's readme document"""
ds = fixture_single_dataset[3]
query = """
{
dataset(owner: "default", name: "test-dataset") {
overview {
readme
}
}
}
"""
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['readme'] is None
ds.write_readme("##Summary\nThis is my readme!!")
result = fixture_single_dataset[2].execute(query)
assert 'errors' not in result
assert result['data']['dataset']['overview']['readme'] == "##Summary\nThis is my readme!!"
|
"""Training a face recognizer with TensorFlow based on the FaceNet paper
FaceNet: A Unified Embedding for Face Recognition and Clustering: http://arxiv.org/abs/1503.03832
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import sys
import tensorflow as tf
import numpy as np
import importlib
import argparse
import support
from scipy import misc
from tensorflow.python.ops import data_flow_ops
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def main(args):
network = importlib.import_module(args.model_def)
subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
if not os.path.isdir(log_dir): # Create the log directory if it doesn't exist
os.makedirs(log_dir)
model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
if not os.path.isdir(model_dir): # Create the model directory if it doesn't exist
os.makedirs(model_dir)
# Write arguments to a text file
support.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
support.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
np.random.seed(seed=args.seed)
train_set = support.get_dataset(args.data_dir)
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
global_step = tf.Variable(0, trainable=False)
# Placeholder for the learning rate
learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
image_paths_placeholder = tf.placeholder(tf.string, shape=(None,3), name='image_paths')
labels_placeholder = tf.placeholder(tf.int64, shape=(None,3), name='labels')
input_queue = data_flow_ops.FIFOQueue(capacity=100000,
dtypes=[tf.string, tf.int64],
shapes=[(3,), (3,)],
shared_name=None, name=None)
enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder])
# TODO modality begin
train_set_modality = support.get_dataset(args.modality_data_dir)
# TODO modality end
nrof_preprocess_threads = 4
images_and_labels = []
for _ in range(nrof_preprocess_threads):
filenames, label = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.read_file(filename)
#TODO FIFOQueue error 20180629
image = tf.image.decode_image(file_contents, channels=3)
if args.random_crop:
image = tf.random_crop(image, [args.image_size, args.image_size, 3])
else:
image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
if args.random_flip:
image = tf.image.random_flip_left_right(image)
#pylint: disable=no-member
image.set_shape((args.image_size, args.image_size, 3))
images.append(tf.image.per_image_standardization(image))
images_and_labels.append([images, label])
image_batch, labels_batch = tf.train.batch_join(
images_and_labels, batch_size=batch_size_placeholder,
shapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * args.batch_size,
allow_smaller_final_batch=True)
image_batch = tf.identity(image_batch, 'image_batch')
image_batch = tf.identity(image_batch, 'input')
labels_batch = tf.identity(labels_batch, 'label_batch')
# Build the inference graph
prelogits, prelogits_modality, orthogonality_loss, orthConvNum = network.inference(image_batch, args.keep_probability,
phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
weight_decay=args.weight_decay)
embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
# Split embeddings into anchor, positive and negative and calculate triplet loss
anchor, positive, negative = tf.unstack(tf.reshape(embeddings, [-1,3,args.embedding_size]), 3, 1)
triplet_loss = support.triplet_loss(anchor, positive, negative, args.alpha)
learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=10)
# TODO save best acc
# Create a saver
best_saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=10)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Initialize variables
sess.run(tf.global_variables_initializer(), feed_dict={phase_train_placeholder:True})
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
with sess.as_default():
if args.pretrained_model:
print('Restoring pretrained model: %s' % args.pretrained_model)
support.load_model(args.pretrained_model)
img_list = []
image_path='./VIS_sample.png'
img = misc.imread(os.path.expanduser(image_path), mode='RGB')
aligned = misc.imresize(img, (args.image_size, args.image_size), interp='bilinear')
prewhitened = support.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
feed_dict = { images_placeholder: images, phase_train_placeholder:False }
feas = sess.run(embeddings, feed_dict=feed_dict)
print(image_path)
print(feas)
return model_dir
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.',
default='./logs/DSVNs_result')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.',
default='./models/DSVNs_model_saver')
parser.add_argument('--best_models_base_dir', type=str,
                        help='Directory where to write the best trained models and checkpoints.',
default='./models/DSVNs_model_saver_best_models')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.8)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.',
default='./')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='./')
parser.add_argument('--modality_data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='./')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module models/containing the definition of the inference graph.',
default='DSVNs_Architecture')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=3000)
parser.add_argument('--train_epoch_distribution', type=int,
help='one epoch for modality, and k epoch for identity', default=4)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--people_per_batch', type=int,
help='Number of people per batch.', default=60)
parser.add_argument('--images_per_person', type=int,
help='Number of images per person.', default=9)
parser.add_argument('--people_per_batch_modality', type=int,
help='Number of people per batch.', default=2)
parser.add_argument('--images_per_person_modality', type=int,
help='Number of images per person.', default=45)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=300) # ori 1000
parser.add_argument('--alpha', type=float,
help='Positive to negative triplet distance margin.', default=0.2)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--embedding_size_modality', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed',
action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=0.8)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=5e-4)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.001)
parser.add_argument('--learning_rate_modality', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.0001)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=2)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=0.9)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to to -1.',
default='./data/learning_rate_schedule.txt')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
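# Example invocation (assumption: the file name and flag values below are
# illustrative, not a documented command line):
#   python DSVNs_train.py --model_def DSVNs_Architecture \
#       --pretrained_model ./models/DSVNs_model_saver/<checkpoint_dir> \
#       --image_size 160 --embedding_size 128
# With a restorable checkpoint the script embeds ./VIS_sample.png and prints
# its 128-d feature vector.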
|
# Global configuration information used across all the
# translations of documentation.
#
# Import the base theme configuration
from cakephpsphinx.config.all import *
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '4.x'
# The search index version.
search_version = 'debugkit-4'
# The marketing display name for the book.
version_name = ''
# Project name shown in the black header bar
project = 'CakePHP DebugKit'
# Other versions that display in the version picker menu.
version_list = [
{'name': '3.x', 'number': 'debugkit/3.x', 'title': '3.x'},
{'name': '4.x', 'number': 'debugkit/4.x', 'title': '4.x', 'current': True},
]
# Languages available.
languages = ['en', 'fr', 'ja', 'pt']
# The GitHub branch name for this version of the docs
# for edit links to point at.
branch = '4.x'
# Current version being built
version = '4.x'
# Language in use for this directory.
language = 'en'
show_root_link = True
repository = 'cakephp/debug_kit'
source_path = 'docs/'
hide_page_contents = ('search', '404', 'contents')
# DebugKit docs use mp4 videos to show the UI
extensions.append('sphinxcontrib.video')
|
# -*- coding: utf-8 -*-
"""Tests for schools.views."""
import unittest.mock
import django.test
from schools import models
from schools import views
import users.models
class ViewsTestCase(django.test.TestCase):
def setUp(self):
self.request_factory = django.test.RequestFactory()
self.student = users.models.User.objects.create(
username='test_student',
email='student@lksh.ru',
password='student_secret',
is_staff=False)
self.teacher = users.models.User.objects.create(
username='test_teacher',
email='teacher@lksh.ru',
password='teacher_secret',
is_staff=True)
self.school = models.School.objects.create(name='ЛКШ 100500',
year='100500',
short_name='sis-100500')
@unittest.mock.patch('schools.views.user')
def test_index_for_student(self, user_view_mock):
"""Index returns correct page for student"""
request = self.request_factory.get('/sis-100500/')
request.user = self.student
request.school = self.school
views.index(request)
user_view_mock.assert_called_once_with(request)
@unittest.mock.patch('django.shortcuts.redirect')
def test_staff(self, redirect_mock):
"""Staff view makes correct redirect"""
request = self.request_factory.get('/sis-100500/')
request.user = self.teacher
request.school = self.school
views.staff(request)
redirect_mock.assert_called_once_with('school:entrance:enrolling',
school_name='sis-100500')
# TODO(Artem Tabolin): test the case with some blocks
@unittest.mock.patch('django.shortcuts.render')
def test_user_no_blocks(self, render_mock):
"""User view renders correct template with correct arguments"""
request = self.request_factory.get('/sis-100500/')
request.user = self.student
request.school = self.school
views.user(request)
render_mock.assert_called_once_with(
request,
'home/user.html',
{'school': self.school, 'blocks': []})
|
#!/usr/bin/python
#medallion,hack_license,vendor_id,rate_code,store_and_fwd_flag,pickup_datetime,dropoff_datetime,passenger_count,trip_time_in_secs
#89D227B655E5C82AECF13C3F540D4CF4,BA96DE419E711691B9445D6A6307C170,CMT,1,N,2013-01-01 15:11:48,2013-01-01 15:18:10,4,382,1.00,-73.978165,40.757977,-73.989838,40.751171
from mrjob.job import MRJob
from mrjob.job import MRStep
import re
import json
class MRHackLicences(MRJob):
def mapper_init(self):
self.cksum_number_map = json.load(open('rainbow.json'))
def mapper(self, _, line):
data = line.split(',')
hack_no = self.cksum_number_map.get(data[1], 'UNKNOWN')
yield(str(hack_no), 1)
def combiner(self, key, value):
yield(key, sum(value))
def reducer(self, key, value):
yield(key, sum(value))
if __name__ == '__main__':
MRHackLicences.run()
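# Usage sketch (assumption: not part of the original job). 'rainbow.json' is
# expected to map hack_license digests to driver numbers, e.g.
#   {"BA96DE419E711691B9445D6A6307C170": 42}
# so the job yields one trip count per driver. A typical local run could be:
#   python mr_hack_licences.py --file rainbow.json trip_data_1.csv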
|
from collections import OrderedDict
from cnamedtuple._namedtuple import namedtuple, _register_asdict
__all__ = [
'namedtuple'
]
__version__ = '0.1.6'
# Register `OrderedDict` as the constructor to use when calling `_asdict`.
# This step exists because at one point there was work being done to move
# this project into Python 3.5, and this works to solve a circular dependency
# between 'cnamedtuple/_namedtuple.c' ('Modules/_collectionsmodule.c'
# in cpython) and 'Lib/collections.py'.
#
# However, after discussion with the CPython folks, it was determined that
# this project will not be moved in after all, and will remain as a
# third-party project.
_register_asdict(OrderedDict)
# Clean up the namespace for this module, the only public api should be
# `namedtuple`.
del _register_asdict
del OrderedDict
|
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
import sys, logging
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData()
def upgrade(migrate_engine):
db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
metadata.bind = migrate_engine
User_table = Table( "galaxy_user", metadata, autoload=True )
HistoryDatasetAssociation_table = Table( "history_dataset_association", metadata, autoload=True )
def boolean_false():
if migrate_engine.name == 'postgresql' or migrate_engine.name == 'mysql':
return False
elif migrate_engine.name == 'sqlite':
return 0
else:
raise Exception( 'Unable to convert data for unknown database type: %s' % migrate_engine.name)
# Load existing tables
metadata.reflect()
# Add 2 indexes to the galaxy_user table
i = Index( 'ix_galaxy_user_deleted', User_table.c.deleted )
try:
i.create()
except Exception, e:
log.debug( "Adding index 'ix_galaxy_user_deleted' to galaxy_user table failed: %s" % ( str( e ) ) )
i = Index( 'ix_galaxy_user_purged', User_table.c.purged )
try:
i.create()
except Exception, e:
log.debug( "Adding index 'ix_galaxy_user_purged' to galaxy_user table failed: %s" % ( str( e ) ) )
# Set the default data in the galaxy_user table, but only for null values
cmd = "UPDATE galaxy_user SET deleted = %s WHERE deleted is null"
cmd = cmd % boolean_false()
try:
db_session.execute( cmd )
except Exception, e:
log.debug( "Setting default data for galaxy_user.deleted column failed: %s" % ( str( e ) ) )
cmd = "UPDATE galaxy_user SET purged = %s WHERE purged is null"
cmd = cmd % boolean_false()
try:
db_session.execute( cmd )
except Exception, e:
log.debug( "Setting default data for galaxy_user.purged column failed: %s" % ( str( e ) ) )
# Add 1 index to the history_dataset_association table
i = Index( 'ix_hda_copied_from_library_dataset_dataset_association_id', HistoryDatasetAssociation_table.c.copied_from_library_dataset_dataset_association_id )
try:
i.create()
except Exception, e:
log.debug( "Adding index 'ix_hda_copied_from_library_dataset_dataset_association_id' to history_dataset_association table failed: %s" % ( str( e ) ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
pass
|
import sys, os, cv2, time, heapq, argparse
from PIL import Image, ImageFont, ImageDraw
from vidgear.gears import NetGear
import numpy as np, math
try:
from armv7l.openvino.inference_engine import IENetwork, IEPlugin
except:
from openvino.inference_engine import IENetwork, IEPlugin
import multiprocessing as mp
from time import sleep
import threading
import paho.mqtt.client as mqtt
import json
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("/var/log/plant_disease_flask.log")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
MQTT_HOST = "192.168.0.11"
MQTT_PORT = 1883
netgear_addr = "127.0.0.1"
#detect_data = {"serialNumber": "SN-0002", "video_url": "http://192.168.0.10:15000/video_feed"}
detect_data = {"serialNumber": "SN-0002"}
send_time = 0
#activate multiserver_mode
options = {'multiserver_mode': False}
#change following IP address '192.168.1.xxx' with Client's IP address and assign unique port address(for e.g 5566).
server = NetGear(address = netgear_addr, port = '5566', protocol = 'tcp', pattern = 1, receive_mode = False, logging=False, **options) # and keep rest of settings similar to Client
yolo_scale_13 = 13
yolo_scale_26 = 26
yolo_scale_52 = 52
classes = 19
coords = 4
num = 3
anchors = [10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326]
LABELS = ("apple_black_rot", "apple_cedar_rust", "apple_healthy", "apple_scab", "cherry_healthy",
"cherry_sour_powdery_mildew", "grape_black_rot", "grape_blight", "grape_esca", "grape_healthy",
"peach_bacterial_spot", "peach_healthy", "pepper_bacterial_spot", "pepper_healthy", "potato_eb",
"potato_healthy", "potato_lb", "strawberry_Leaf_scorch", "strawberry_healthy")
label_text_color = (255, 255, 0)
label_background_color = (125, 175, 75)
box_color = (255, 128, 0)
box_thickness = 1
processes = []
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
lastresults = None
cur_path = sys.path[0]
model_xml = cur_path + "/../lrmodels/YoloV3_plant/FP16/yolov3_plant_model.xml"
model_bin = cur_path + "/../lrmodels/YoloV3_plant/FP16/yolov3_plant_model.bin"
'''
DESCRIPTION = {"strawberry_healthy": "这株草莓很健康。",
"strawberry_Leaf_scorch": "这株草莓得了叶焦病,草莓叶焦(Leaf Scorch)由双翘龙真菌感染\n"
"引起感染之初会在叶片顶部出现紫色小斑点,随着时间的流逝,斑\n"
"点将继续变大、变暗。严重的情况下,黑点甚至可能覆盖草莓植物\n"
"叶片的整个部分,这可能导致整个叶片完全干燥并从植物上掉下来。\n"
"防治方法:保持通风、清洁卫生、避免土壤过涝",
"cherry_healthy": "这株樱桃很健康。",
"cherry_sour_powdery_mildew": "这株樱桃得了白粉病(Powdery Mildew),这是一种农作物常见\n"
"的病害,感染后会在叶片出现一些白色状的粉状霉层,之后会蔓延\n"
"到果实,果面会出现白色粉状霉层,同时果实会出现表皮枯死、硬化、\n"
"龟裂等症状,从而使樱桃出现早衰的现象,降低产量。\n"
"防治方法:\n"
" 1、发病期喷洒0.3°Be石硫合剂或25%粉锈宁3000倍液、\n"
"70%甲基硫菌灵可湿性粉剂1500倍液1-2次。\n"
" 2、秋后清理果园,扫除落叶,集中烧毁。\n"
}
'''
DESCRIPTION = {"strawberry_healthy": "这株草莓很健康。",
"strawberry_Leaf_scorch": "草莓叶焦(Leaf Scorch)\n"
"病因:双翘龙真菌感染 \n"
"防治方法:保持通风、清洁卫生、\n"
"避免土壤过涝",
"cherry_healthy": "这株樱桃很健康。",
"cherry_sour_powdery_mildew": "樱桃白粉病(Powdery Mildew)\n"
"病因:三支叉丝单囊壳菌感染"
"防治方法:\n"
" 1、发病期喷洒0.3°Be石硫合剂或25%\n"
"粉锈宁3000倍液、70%甲基硫菌灵可湿性\n"
"粉剂1500倍液1-2次。\n"
" 2、秋后清理果园,扫除落叶,集中烧毁。\n"
}
RECONNECT_DELAY_SECS = 2
def on_connect(client, userdata, flags, rc):
print("Connected with result code %s" % rc)
def on_disconnect(client, userdata, rc):
print("Disconnected from MQTT server with code: %s" % rc)
while rc != 0:
sleep(RECONNECT_DELAY_SECS)
print("Reconnecting...")
rc = client.reconnect()
client = mqtt.Client()
client.connect_async(MQTT_HOST, MQTT_PORT, 600)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.loop_start()
def send_mqtt(data):
if not data:
return
logger.info("==========send mqtt data: %s" % json.dumps(data))
res = client.publish("/sensor/data", json.dumps(data), 0)
logger.info("==========send mqtt successful!===========")
def paint_chinese_opencv(im,chinese,position,fontsize,color): # draw Chinese text onto an OpenCV image
    img_PIL = Image.fromarray(cv2.cvtColor(im,cv2.COLOR_BGR2RGB)) # convert the image from OpenCV (BGR) to PIL (RGB) format
    font = ImageFont.truetype('simhei.ttf',fontsize,encoding="utf-8")
    #color = (255,0,0) # font color
    #position = (100,100) # text position
    draw = ImageDraw.Draw(img_PIL)
    draw.text(position,chinese,font=font,fill=color) # draw the text on the PIL image (args: position, text, font, fill color)
    img = cv2.cvtColor(np.asarray(img_PIL),cv2.COLOR_RGB2BGR) # convert the PIL image back to an OpenCV (BGR) image
return img
def EntryIndex(side, lcoords, lclasses, location, entry):
n = int(location / (side * side))
loc = location % (side * side)
return int(n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc)
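# Worked example (illustrative): with side=13, coords=4, classes=19 each anchor
# occupies 4+1+19 = 24 channels of side*side = 169 values in the flattened
# blob. For anchor n=0 and cell i=0 the objectness score sits at
#   EntryIndex(13, 4, 19, 0*169 + 0, 4) == 4*169 == 676
# i.e. channel 4 (right after the 4 box coordinates) of the first anchor block.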
class DetectionObject():
xmin = 0
ymin = 0
xmax = 0
ymax = 0
class_id = 0
confidence = 0.0
def __init__(self, x, y, h, w, class_id, confidence, h_scale, w_scale):
self.xmin = int((x - w / 2) * w_scale)
self.ymin = int((y - h / 2) * h_scale)
self.xmax = int(self.xmin + w * w_scale)
self.ymax = int(self.ymin + h * h_scale)
self.class_id = class_id
self.confidence = confidence
def IntersectionOverUnion(box_1, box_2):
width_of_overlap_area = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
height_of_overlap_area = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
area_of_overlap = 0.0
if (width_of_overlap_area < 0.0 or height_of_overlap_area < 0.0):
area_of_overlap = 0.0
else:
area_of_overlap = width_of_overlap_area * height_of_overlap_area
box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
area_of_union = box_1_area + box_2_area - area_of_overlap
retval = 0.0
if area_of_union <= 0.0:
retval = 0.0
else:
retval = (area_of_overlap / area_of_union)
return retval
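# Quick sanity check (illustrative): for boxes A=(xmin 0, ymin 0, xmax 10,
# ymax 10) and B=(5, 5, 15, 15) the overlap is 5*5 = 25 and the union is
# 100 + 100 - 25 = 175, so IntersectionOverUnion(A, B) is roughly 0.143.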
def ParseYOLOV3Output(blob, resized_im_h, resized_im_w, original_im_h, original_im_w, threshold, objects):
out_blob_h = blob.shape[2]
out_blob_w = blob.shape[3]
side = out_blob_h
anchor_offset = 0
if side == yolo_scale_13:
anchor_offset = 2 * 6
elif side == yolo_scale_26:
anchor_offset = 2 * 3
elif side == yolo_scale_52:
anchor_offset = 2 * 0
side_square = side * side
output_blob = blob.flatten()
for i in range(side_square):
row = int(i / side)
col = int(i % side)
for n in range(num):
obj_index = EntryIndex(side, coords, classes, n * side * side + i, coords)
box_index = EntryIndex(side, coords, classes, n * side * side + i, 0)
scale = output_blob[obj_index]
if (scale < threshold):
continue
x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w
y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h
height = math.exp(output_blob[box_index + 3 * side_square]) * anchors[anchor_offset + 2 * n + 1]
width = math.exp(output_blob[box_index + 2 * side_square]) * anchors[anchor_offset + 2 * n]
for j in range(classes):
class_index = EntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j)
prob = scale * output_blob[class_index]
if prob < threshold:
continue
obj = DetectionObject(x, y, height, width, j, prob, (original_im_h / resized_im_h), (original_im_w / resized_im_w))
objects.append(obj)
return objects
def camThread(LABELS, results, frameBuffer, camera_width, camera_height, vidfps):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global send_time
    global cam
    global window_name
    global server  # reassigned in the reconnect branch below, so it must be declared global
cam = cv2.VideoCapture(0)
if cam.isOpened() != True:
logger.info("USB Camera Open Error!!!")
print("USB Camera Open Error!!!")
sys.exit(0)
cam.set(cv2.CAP_PROP_FPS, vidfps)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
window_name = "USB Camera"
wait_key_time = 1
#cam = cv2.VideoCapture("data/input/testvideo4.mp4")
#camera_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
#camera_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
#frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
#window_name = "Movie File"
#wait_key_time = int(1000 / vidfps)
#cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
# USB Camera Stream Read
s, color_image = cam.read()
if not s:
continue
if frameBuffer.full():
frameBuffer.get()
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
if not results.empty():
objects = results.get(False)
detectframecount += 1
for obj in objects:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax-10, obj.ymax-10), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.4, label_text_color, 1)
if DESCRIPTION.get(LABELS[label], None):
if "healthy" not in LABELS[label] and time.time() - send_time > 3:
detect_data['plant_disease_v'] = 1
send_mqtt(detect_data)
send_time = time.time()
print("send mqtt ===========")
color_image = paint_chinese_opencv(color_image, DESCRIPTION[LABELS[label]], (obj.xmin, obj.ymin + 50), 15, (255,255,0))
lastresults = objects
else:
if not isinstance(lastresults, type(None)):
for obj in lastresults:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax-10, obj.ymax-10), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.4, label_text_color, 1)
if DESCRIPTION.get(LABELS[label], None):
if "healthy" not in LABELS[label] and time.time() - send_time > 3:
detect_data['plant_disease_v'] = 1
send_mqtt(detect_data)
send_time = time.time()
color_image = paint_chinese_opencv(color_image, DESCRIPTION[LABELS[label]], (obj.xmin, obj.ymin + 50), 15, (255,255,0))
cv2.putText(color_image, fps, (width-140,15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(color_image, detectfps, (width-140,30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (38,0,255), 1, cv2.LINE_AA)
#cv2.imshow(window_name, cv2.resize(color_image, (width, height)))
#if cv2.waitKey(wait_key_time)&0xFF == ord('q'):
# sys.exit(0)
try:
server.send(color_image)
except:
logger.info("==========netgear server send error!=====")
server = NetGear(address = netgear_addr, port = '5566', protocol = 'tcp', pattern = 1, receive_mode = False, logging=False, **options) # and keep rest of settings similar to Client
logger.info("==========netgear server send error!=====")
continue
## Print FPS
framecount += 1
if framecount >= 15:
fps = "(Capture) {:.1f} FPS".format(time1/15)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
def async_infer(ncsworker):
ncsworker.skip_frame_measurement()
while True:
ncsworker.predict_async()
class NcsWorker(object):
def __init__(self, devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps):
self.devid = devid
self.frameBuffer = frameBuffer
self.model_xml = model_xml
self.model_bin = model_bin
self.camera_width = camera_width
self.camera_height = camera_height
self.m_input_size = 416
self.threshould = 0.7
self.num_requests = 4
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.plugin = IEPlugin(device="MYRIAD")
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
self.results = results
self.number_of_ncs = number_of_ncs
self.predict_async_time = 800
self.skip_frame = 0
self.roop_frame = 0
self.vidfps = vidfps
self.new_w = int(camera_width * self.m_input_size/camera_width)
self.new_h = int(camera_height * self.m_input_size/camera_height)
def image_preprocessing(self, color_image):
resized_image = cv2.resize(color_image, (self.new_w, self.new_h), interpolation = cv2.INTER_CUBIC)
canvas = np.full((self.m_input_size, self.m_input_size, 3), 128)
canvas[(self.m_input_size-self.new_h)//2:(self.m_input_size-self.new_h)//2 + self.new_h,(self.m_input_size-self.new_w)//2:(self.m_input_size-self.new_w)//2 + self.new_w, :] = resized_image
prepimg = canvas
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def skip_frame_measurement(self):
surplustime_per_second = (1000 - self.predict_async_time)
if surplustime_per_second > 0.0:
frame_per_millisecond = (1000 / self.vidfps)
total_skip_frame = surplustime_per_second / frame_per_millisecond
self.skip_frame = int(total_skip_frame / self.num_requests)
else:
self.skip_frame = 0
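    # Worked example (illustrative): with predict_async_time=800 ms, vidfps=10
    # and num_requests=4 this gives surplus = 200 ms and 100 ms per frame, so
    # total_skip_frame = 2 and skip_frame = int(2 / 4) = 0, i.e. no frames are
    # skipped between inferences at these settings.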
def predict_async(self):
try:
if self.frameBuffer.empty():
return
self.roop_frame += 1
if self.roop_frame <= self.skip_frame:
self.frameBuffer.get()
return
self.roop_frame = 0
prepimg = self.image_preprocessing(self.frameBuffer.get())
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
cnt, dev = heapq.heappop(self.heap_request)
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
objects = []
outputs = self.exec_net.requests[dev].outputs
for output in outputs.values():
objects = ParseYOLOV3Output(output, self.new_h, self.new_w, self.camera_height, self.camera_width, self.threshould, objects)
objlen = len(objects)
for i in range(objlen):
if (objects[i].confidence == 0.0):
continue
for j in range(i + 1, objlen):
if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
objects[j].confidence = 0
self.results.put(objects)
self.inferred_request[dev] = 0
else:
heapq.heappush(self.heap_request, (cnt, dev))
except:
import traceback
            traceback.print_exc()
def inferencer(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps),))
thworker.start()
threads.append(thworker)
for th in threads:
th.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
args = parser.parse_args()
number_of_ncs = args.number_of_ncs
camera_width = 320
camera_height = 240
vidfps = 10
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
results = mp.Queue()
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer, args=(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
sleep(number_of_ncs * 7)
# Start streaming
p = mp.Process(target=camThread, args=(LABELS, results, frameBuffer, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a ListNode
def deleteDuplicates(self, head):
if head == None: return None
temp = []
while head != None:
temp.append(head.val)
head = head.next
temp = sorted(list(set(temp)))
temp = [ListNode(i) for i in temp]
temp.append(None)
temp = temp[::-1]
for i in range(1,len(temp)):
temp[i].next = temp[i-1]
return temp[-1]
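# Minimal local check (assumption: the LeetCode harness normally supplies
# ListNode, so this stub exists only to make the file runnable on its own).
if __name__ == '__main__':
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    head = ListNode(1)
    head.next = ListNode(1)
    head.next.next = ListNode(2)
    node = Solution().deleteDuplicates(head)
    while node:
        print(node.val)  # expected output: 1 then 2
        node = node.next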
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/23 12:02
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description : Lowest Common Ancestor of a Binary Tree
    Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
    According to the definition on Baidu Baike: "For two nodes p and q of a rooted tree T, the
    lowest common ancestor is a node x such that x is an ancestor of both p and q and the depth
    of x is as large as possible (a node may also be an ancestor of itself)."
    For example, given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
            _______3______
           /              \
        ___5__          ___1__
       /      \        /      \
       6      _2       0       8
             /  \
             7   4
    Example 1:
        Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
        Output: 3
        Explanation: The lowest common ancestor of nodes 5 and 1 is node 3.
    Example 2:
        Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
        Output: 5
        Explanation: The lowest common ancestor of nodes 5 and 4 is node 5, since by definition
        a node can be an ancestor of itself.
    Notes:
        All node values are unique.
        p and q are distinct nodes and both exist in the given binary tree.
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# code block starts below
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
def search_road(stack, x):
"""
:type stack: list[TreeNode]
            :type x: int (the value of the node to search for)
            :rtype: (list, bool)
"""
t = stack[-1]
if t.val == x:
return stack, True
if t.left:
stack.append(t.left)
stack_left, res_left = search_road(stack, x)
if res_left:
return stack_left, res_left
if t.right:
stack.append(t.right)
stack_right, res_right = search_road(stack, x)
if res_right:
return stack_right, res_right
stack.pop()
return stack, False
pk = list(map(lambda x: x.val, search_road([root], p.val)[0]))
qk = list(map(lambda x: x.val, search_road([root], q.val)[0]))
if len(pk) < len(qk):
pk, qk = qk, pk
for i in range(len(pk)):
if pk[i] not in qk:
return pk[i - 1]
root = TreeNode(3)
root.right = TreeNode(1)
root.right.right = TreeNode(8)
root.right.left = TreeNode(0)
root.left = TreeNode(5)
root.left.right = TreeNode(2)
root.left.right.right = TreeNode(4)
root.left.right.left = TreeNode(7)
root.left.left = TreeNode(6)
p = TreeNode(5)
q = TreeNode(4)
res = Solution().lowestCommonAncestor(root, p, q)
print(res)
# code block ends above
end = time.time()
print('Running time: %s Seconds' % (end - start))
|
from tensorflow.keras.layers import (Conv2D, Dense, Flatten, MaxPooling2D,
TimeDistributed)
def VGG16(inputs):
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(inputs)
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block2_pool')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block3_pool')(x)
    # fourth convolutional block
# 14,14,512
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block4_pool')(x)
    # fifth convolutional block
# 7,7,512
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv3')(x)
return x
def vgg_classifier_layers(x):
# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
x = TimeDistributed(Flatten(name='flatten'))(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc1')(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc2')(x)
return x
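# Minimal usage sketch (assumption: illustrative only, not part of the original
# project). It wires the backbone to a Keras Input to inspect the shared
# feature map; the stride-16, 512-channel output is what an RPN / RoI pooling
# stage would consume, while vgg_classifier_layers expects RoI-pooled 5D input.
if __name__ == '__main__':
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    img_input = Input(shape=(None, None, 3))
    backbone = Model(img_input, VGG16(img_input))
    backbone.summary()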
|
##
## Author Michel F. Sanner Jan 2009
##
import types, weakref
from Scenario2.actions import Actions
from Scenario2.keyframes import KF, Interval
from Scenario2.datatypes import DataType
from Scenario2.interpolators import Interpolator, BehaviorList
class Actor:
"""
An Actor is an object that will modify an attribute of a Python Object
over the course of time.
Actions performed by the actor are represented by keyframes (representing a
value at a given time), and intervals that can interpolate values between
keyframes.
Actors are stored in MultipleActorActions objects.
An Actor is created with:
- a name
- a given Python object
optionally:
- an initial value
- a datatype used to validate value for keyframes
    - a default value generator function which defines how the value will be modified
      by default in Intervals
When an actor is created its Actions object is empty.
Actors can be made active/inactive for playback.
Actors that know how to retrieve the value from the Python object
can be made active/inactive for recording keyframes.
"""
_easeInOutDict = {'none':(0,0),
'ease in':(1,0),
'ease out':(0,1),
'ease in and out':(1,1)}
def __init__(self, name, object, initialValue=None, datatype=None,
interp=None):
self.printValuesWhenSetting = False # set to True to debug
self.object = object
self.name = name # actor name, has to be unique in Director.actors
self.interp = interp
self.initialValue = initialValue
        self.hasGetFunction = False # set to True if the actor knows how to get the
# value from the object
self.recording = False # true if this actor will record keyframes
self.playing = True #true if the actor will set the values
self._maa = None # becomes a weakref to MAA when added to MAA
self.easeIn = False
self.easeOut = False
if datatype is not None:
assert issubclass(datatype, DataType)
self.datatype = datatype()
else:
self.datatype = None
self.actions = Actions()
self.actions.actor = weakref.ref(self)
self.nbvar = None # number of variable that are interpolated
self.varnames = []
self.activeVars = [] # list of booleans allowing to turn particular
# variables on or off
self.interpClass = None
args = (initialValue, initialValue)
kw = {}
interpolator = None
if interp is not None:
try:
interpClass, args, kw = interp
assert issubclass(interpClass, Interpolator)
assert isinstance(args, tuple)
assert isinstance(kw, dict)
except TypeError:
interpClass = interp
assert issubclass(interpClass, Interpolator)
interpolator = interpClass( *args, **kw)
self.interpClass = interpClass
self.nbvar = interpClass.nbvar
self.varnames = interpClass.varnames
if self.nbvar:
self.activeVars = [1]*self.nbvar
self.behaviorList = bl = BehaviorList( *args, **{'actor':self})
if interpolator:
bl.addBehavior(interpolator)
bl.configure(active = False)
if initialValue is not None:
kf0 = KF(0, initialValue)
self.actions.addKeyframe(kf0)
# FIXME .. check variable below to see if needed
self.preStep_cb = None
self.postStep_cb = None
# GUI variable
self.visible = True
self.displayFunction = False
self.graphHeight = 40
self.valueFormat = None
self.scenarioname = None
def addIntervals(self, intervals, generator=None, check=True):
"""
add a list of intervals to the actions. Intervals can be Interval
Objects or pairs of KFs that can be specified as KF objects or
(position, value) pairs
"""
maxPos = 0
actions = self.actions
for inter in intervals:
if isinstance(inter, Interval):
if inter.valGen.generator is None:
inter.setValGenGenerator(self.behaviorList.clone())
if inter.kf2.pos > maxPos:
maxPos = inter.kf2.pos
else:
kf1, kf2 = inter
if not isinstance(kf1, KF):
kf1 = KF( *kf1 )
if not isinstance(kf2, KF):
kf2 = KF( *kf2 )
if kf2.pos > maxPos:
maxPos = kf2.pos
if generator is None:
generator = self.behaviorList.clone()
inter = Interval(kf1, kf2, generator=generator)
val = actions.addInterval( inter )
if val is False:
return False
if self._maa:
if self._maa().getLastFrame() <= maxPos:
self._maa().updateEndFrame( maxPos )
return True
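    # Usage sketch (illustrative): intervals may be passed either as Interval
    # objects or as keyframe pairs, where each keyframe is a KF or a
    # (position, value) tuple, e.g.
    #   actor.addIntervals([((0, 0.0), (30, 1.0))])
    # builds one interval from frame 0 to frame 30 driven by the actor's
    # default value generator.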
def addKeyframe(self, kf):
"""
None <- addKeyframe(kf)
Adds the keyframe 'kf' to the actor's actions object
"""
self.actions.addKeyframe(kf)
if self._maa:
            if self._maa().getLastFrame() <= kf.pos:
                self._maa().updateEndFrame( kf.pos )
def addActionsAt(self, srcActions, position, check=True):
"""
Add the actions srcActions at the given position to the actor's
        actions object. This method will add the Actor's default value
generator to any interval that does not have one.
None <- actor.addActionsAt(actions, position, check=True)
Input:
        srcActions: Actions object
position: an integer specifying at which frame the srcAction will
be added
        check: boolean
        Method used to add actions to an actor. For every interval
that has no value generator, this method will set it to the actor's
default VG, and then call self.actions.addActions(actions, check) to
add the keyframes and intervals in actions to the actor's actions.
"""
for i in srcActions.intervals:
if i.valGen.generator is None:
i.setValGenGenerator(self.behaviorList.clone())
else:
pass # FIXME check is valgen is ok for this actor
self.actions.addActionsAt(srcActions, position, check=check)
def getLastKeyFrame(self):
"""
KF <- actor.getLastKeyFrame()
        Return the last keyframe in the actor's actions object
"""
if len(self.actions.keyframes):
return self.actions.keyframes[-1]
else:
return None
def onAddToDirector(self):
pass
def setValueAt(self, frame, off=0):
"""
None <- setValueAt(frame, off)
        arguments: frame - position in time;
off - offset value.
Set the value at specified frame to the object's animated attribute.
"""
# is called at each time step if the actor's readMode is active
from SimPy.Simulation import now
print 'setting ', self.name, 'for', self.name, 'at:', frame
def setValue(self, value):
"""
None <- setValue(value)
set the value on the object
"""
# is called at each time step if the actor's readMode is active
from SimPy.Simulation import now
print 'setting ', self.name, 'for', self.name, 'at:', now(), 'to', value
def setEaseIn(self, val):
"""
None <- setEaseIn(val)
        Set the easeIn attribute of all value generators in the actions
        object to the specified value (val).
        val can be 0, 1, True or False
"""
assert val in [0, 1, True, False]
self.behaviorList.easeIn = val
for inter in self.actions.intervals:
inter.valGen.generator.easeIn = val
def setEaseOut(self, val):
"""
None <- setEaseOut(val)
        Set the easeOut attribute of all value generators in the actions
        object to the specified value (val).
        val can be 0, 1, True or False
"""
assert val in [0, 1, True, False]
self.behaviorList.easeOut = val
for inter in self.actions.intervals:
inter.valGen.generator.easeOut = val
def setEaseInOut(self, val):
"""
None <- setEaseInOut(val)
        Set the easeIn and easeOut attributes of all value generators in the actions
object.
val can be :
'none' set easeIn and easeOut to 0,
'ease in' easeIn = True, easeOut = False,
'ease out' easeIn = False, easeOut = True,
'ease in and out' easeIn = True , easeOut = True
"""
easeIn, easeOut = self._easeInOutDict[val]
#print 'setting ease for actor', self.name, easeIn, easeOut
self.setEaseIn(easeIn)
self.setEaseOut(easeOut)
def clone(self):
"""newActor <- clone()
Return a copy of the actor (self).
The method calls self.__class__ method. It then clones self.behaviorList and
        assigns the cloned copy to newActor.behaviorList.
"""
if self.datatype is not None:
dt = self.datatype.__class__
else:
dt = None
newActor = self.__class__(self.name, self.object, initialValue=self.initialValue,
datatype=dt, interp=self.interp)
newActor.behaviorList = self.behaviorList.clone()
return newActor
class CustomActor(Actor):
"""
    Custom actors are actors for which a setFunction or setMethod (used to set
    the value of the attribute in the Python object driven by the actor) and a
    getFunction (used to read that value back) can be provided.
"""
def __init__(self, name, object, initialValue=None, datatype=None,
interp=None, setFunction=None, setMethod=None,
getFunction=None):
"""
Constructor of the Actor object,
arguments:
object: Python object on which to operate
name: Name of this Actor, This name has to be unique
                     among the list of ActorDescriptor in a Director
        setFunction: function to be called at each time step.
                     The function will be called using func(*(actor,value))
        setMethod: method of the object to be called at each time step.
The function will be called using obj.method(value)
getFunction=None: [optional] function that can be called to get the
current value of the attribute managed by this actor
The function and its arguments have to be specified as a
3-tuple (func, *args, **kw). It will be called using
func(*(object,)+args), **kw) if it is a function
or func(*args, **kw) if it is a method
        interp interpolator class
initialValue initial value of the attribute
dataType type of the attribute value
"""
self.getFuncTuple = None
self.hasGetFunction = False
if setFunction:
assert callable(setFunction)
self.setFunction = setFunction
if setMethod:
method = getattr(object, setMethod)
assert callable(method)
else:
method = None
self.setMethod = method
if getFunction:
self.getFuncTuple = self.checkFunction(getFunction)
self.object = object
if initialValue is None:
if self.getFuncTuple:
initialValue = self.getValueFromObject()
Actor.__init__(self, name, object, datatype=datatype,
initialValue=initialValue, interp=interp)
if self.getFuncTuple:
self.hasGetFunction = True
def clone(self):
"""newActor <- clone()
Return a copy of the actor (self).
The method calls self.__class__ method. It then clones self.behaviorList and
        assigns the cloned copy to newActor.behaviorList.
"""
if self.setMethod is not None:
setMethod = self.setMethod.__name__
else:
setMethod = None
newActor = self.__class__(self.name, self.object, initialValue=self.initialValue,
datatype=self.datatype.__class__, interp=self.interp,
setFunction=self.setFunction, setMethod=setMethod,
getFunction=None)
newActor.getFuncTuple = self.getFuncTuple
if self.getFuncTuple:
newActor.hasGetFunction = True
newActor.behaviorList = self.behaviorList.clone()
return newActor
def checkFunction(self, function):
"""
        Check if the specified 'function' is either:
1.callable function.
Returned value will be a tuple: (function, (), {}).
2.tuple: (function, (args), {kw}),
where args is the function's arguments (tuple) ,
kw - keywords dictionary.
Returned value : (function, (args), {kw}).
"""
# check that functionTuple is of form (func, (), {})
try:
f, args, kw = function
assert callable(f)
assert isinstance(args, tuple)
assert isinstance(kw, dict)
except TypeError:
assert callable(function)
f, args, kw = function, (), {}
return f, args, kw
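    # Accepted forms (illustrative):
    #   actor.checkFunction(len)               -> (len, (), {})
    #   actor.checkFunction((max, (1, 2), {})) -> (max, (1, 2), {})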
def getValueFromObject(self):
"""
value <- getValueFromObject()
Get the current value of the animated attribute from the object
and return it.
"""
if not self.getFuncTuple:
return None
f, args, kw = self.getFuncTuple
if type(f) == types.FunctionType or type(f) == types.BuiltinFunctionType:
# add object as first argument to functions
return f(*(self.object,)+args, **kw)
elif type(f) == types.MethodType:
return f(*args, **kw)
def setValue(self, value):
"""
None <- setValue(value)
Call the self.setFunction() to set the value on the object.
If set.selfFunction is None, this method will call self.setMethod() instead
(if it is not None).
"""
if self.setFunction:
self.setFunction( *(self, value) )
elif self.setMethod:
self.setMethod(value)
def setValueAt(self, frame, off):
"""
None <- setValueAt(frame, off)
        arguments: frame - position in time;
off - offset value.
The method first gets a value (at specified frame and offset) from the actions object.
It then sets the value on the object with self.setValue() call.
"""
# call the function to set the value on the object
value = self.actions.getValue(frame-off)
if value != 'Nothing There':
self.setValue(value)
|
numbers = [int(n) for n in input().split(', ')]
number_beggars = int(input())
result = []
for beggar in range(number_beggars):
beggar_result = 0
for index in range(beggar, len(numbers), number_beggars):
if index < len(numbers):
beggar_result += numbers[index]
result.append(beggar_result)
print(result)
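# Worked example (illustrative): input "1, 2, 3, 4, 5" with 2 beggars gives
#   beggar 0 -> indices 0, 2, 4 -> 1 + 3 + 5 = 9
#   beggar 1 -> indices 1, 3    -> 2 + 4     = 6
# so the printed result is [9, 6].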
|
"""
Copyright 2017 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_CVXcanon', [dirname(__file__)])
except ImportError:
import _CVXcanon
return _CVXcanon
if fp is not None:
try:
_mod = imp.load_module('_CVXcanon', fp, pathname, description)
finally:
fp.close()
return _mod
_CVXcanon = swig_import_helper()
del swig_import_helper
else:
import _CVXcanon
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _CVXcanon.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _CVXcanon.SwigPyIterator_value(self)
def incr(self, n=1):
return _CVXcanon.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _CVXcanon.SwigPyIterator_decr(self, n)
def distance(self, x):
return _CVXcanon.SwigPyIterator_distance(self, x)
def equal(self, x):
return _CVXcanon.SwigPyIterator_equal(self, x)
def copy(self):
return _CVXcanon.SwigPyIterator_copy(self)
def next(self):
return _CVXcanon.SwigPyIterator_next(self)
def __next__(self):
return _CVXcanon.SwigPyIterator___next__(self)
def previous(self):
return _CVXcanon.SwigPyIterator_previous(self)
def advance(self, n):
return _CVXcanon.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _CVXcanon.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _CVXcanon.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _CVXcanon.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _CVXcanon.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _CVXcanon.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _CVXcanon.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _CVXcanon.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
_CVXcanon.VARIABLE_swigconstant(_CVXcanon)
VARIABLE = _CVXcanon.VARIABLE
_CVXcanon.PROMOTE_swigconstant(_CVXcanon)
PROMOTE = _CVXcanon.PROMOTE
_CVXcanon.MUL_swigconstant(_CVXcanon)
MUL = _CVXcanon.MUL
_CVXcanon.RMUL_swigconstant(_CVXcanon)
RMUL = _CVXcanon.RMUL
_CVXcanon.MUL_ELEM_swigconstant(_CVXcanon)
MUL_ELEM = _CVXcanon.MUL_ELEM
_CVXcanon.DIV_swigconstant(_CVXcanon)
DIV = _CVXcanon.DIV
_CVXcanon.SUM_swigconstant(_CVXcanon)
SUM = _CVXcanon.SUM
_CVXcanon.NEG_swigconstant(_CVXcanon)
NEG = _CVXcanon.NEG
_CVXcanon.INDEX_swigconstant(_CVXcanon)
INDEX = _CVXcanon.INDEX
_CVXcanon.TRANSPOSE_swigconstant(_CVXcanon)
TRANSPOSE = _CVXcanon.TRANSPOSE
_CVXcanon.SUM_ENTRIES_swigconstant(_CVXcanon)
SUM_ENTRIES = _CVXcanon.SUM_ENTRIES
_CVXcanon.TRACE_swigconstant(_CVXcanon)
TRACE = _CVXcanon.TRACE
_CVXcanon.RESHAPE_swigconstant(_CVXcanon)
RESHAPE = _CVXcanon.RESHAPE
_CVXcanon.DIAG_VEC_swigconstant(_CVXcanon)
DIAG_VEC = _CVXcanon.DIAG_VEC
_CVXcanon.DIAG_MAT_swigconstant(_CVXcanon)
DIAG_MAT = _CVXcanon.DIAG_MAT
_CVXcanon.UPPER_TRI_swigconstant(_CVXcanon)
UPPER_TRI = _CVXcanon.UPPER_TRI
_CVXcanon.CONV_swigconstant(_CVXcanon)
CONV = _CVXcanon.CONV
_CVXcanon.HSTACK_swigconstant(_CVXcanon)
HSTACK = _CVXcanon.HSTACK
_CVXcanon.VSTACK_swigconstant(_CVXcanon)
VSTACK = _CVXcanon.VSTACK
_CVXcanon.SCALAR_CONST_swigconstant(_CVXcanon)
SCALAR_CONST = _CVXcanon.SCALAR_CONST
_CVXcanon.DENSE_CONST_swigconstant(_CVXcanon)
DENSE_CONST = _CVXcanon.DENSE_CONST
_CVXcanon.SPARSE_CONST_swigconstant(_CVXcanon)
SPARSE_CONST = _CVXcanon.SPARSE_CONST
_CVXcanon.NO_OP_swigconstant(_CVXcanon)
NO_OP = _CVXcanon.NO_OP
_CVXcanon.KRON_swigconstant(_CVXcanon)
KRON = _CVXcanon.KRON
class LinOp(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LinOp, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LinOp, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _CVXcanon.LinOp_type_set
__swig_getmethods__["type"] = _CVXcanon.LinOp_type_get
if _newclass:
type = _swig_property(_CVXcanon.LinOp_type_get, _CVXcanon.LinOp_type_set)
__swig_setmethods__["size"] = _CVXcanon.LinOp_size_set
__swig_getmethods__["size"] = _CVXcanon.LinOp_size_get
if _newclass:
size = _swig_property(_CVXcanon.LinOp_size_get, _CVXcanon.LinOp_size_set)
__swig_setmethods__["args"] = _CVXcanon.LinOp_args_set
__swig_getmethods__["args"] = _CVXcanon.LinOp_args_get
if _newclass:
args = _swig_property(_CVXcanon.LinOp_args_get, _CVXcanon.LinOp_args_set)
__swig_setmethods__["sparse"] = _CVXcanon.LinOp_sparse_set
__swig_getmethods__["sparse"] = _CVXcanon.LinOp_sparse_get
if _newclass:
sparse = _swig_property(_CVXcanon.LinOp_sparse_get, _CVXcanon.LinOp_sparse_set)
__swig_setmethods__["sparse_data"] = _CVXcanon.LinOp_sparse_data_set
__swig_getmethods__["sparse_data"] = _CVXcanon.LinOp_sparse_data_get
if _newclass:
sparse_data = _swig_property(_CVXcanon.LinOp_sparse_data_get, _CVXcanon.LinOp_sparse_data_set)
__swig_setmethods__["dense_data"] = _CVXcanon.LinOp_dense_data_set
__swig_getmethods__["dense_data"] = _CVXcanon.LinOp_dense_data_get
if _newclass:
dense_data = _swig_property(_CVXcanon.LinOp_dense_data_get, _CVXcanon.LinOp_dense_data_set)
__swig_setmethods__["slice"] = _CVXcanon.LinOp_slice_set
__swig_getmethods__["slice"] = _CVXcanon.LinOp_slice_get
if _newclass:
slice = _swig_property(_CVXcanon.LinOp_slice_get, _CVXcanon.LinOp_slice_set)
def __init__(self):
this = _CVXcanon.new_LinOp()
try:
self.this.append(this)
except Exception:
self.this = this
def has_constant_type(self):
return _CVXcanon.LinOp_has_constant_type(self)
def set_dense_data(self, matrix):
return _CVXcanon.LinOp_set_dense_data(self, matrix)
def set_sparse_data(self, data, row_idxs, col_idxs, rows, cols):
return _CVXcanon.LinOp_set_sparse_data(self, data, row_idxs, col_idxs, rows, cols)
__swig_destroy__ = _CVXcanon.delete_LinOp
__del__ = lambda self: None
LinOp_swigregister = _CVXcanon.LinOp_swigregister
LinOp_swigregister(LinOp)
cvar = _CVXcanon.cvar
CONSTANT_ID = cvar.CONSTANT_ID
class ProblemData(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ProblemData, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ProblemData, name)
__repr__ = _swig_repr
__swig_setmethods__["V"] = _CVXcanon.ProblemData_V_set
__swig_getmethods__["V"] = _CVXcanon.ProblemData_V_get
if _newclass:
V = _swig_property(_CVXcanon.ProblemData_V_get, _CVXcanon.ProblemData_V_set)
__swig_setmethods__["I"] = _CVXcanon.ProblemData_I_set
__swig_getmethods__["I"] = _CVXcanon.ProblemData_I_get
if _newclass:
I = _swig_property(_CVXcanon.ProblemData_I_get, _CVXcanon.ProblemData_I_set)
__swig_setmethods__["J"] = _CVXcanon.ProblemData_J_set
__swig_getmethods__["J"] = _CVXcanon.ProblemData_J_get
if _newclass:
J = _swig_property(_CVXcanon.ProblemData_J_get, _CVXcanon.ProblemData_J_set)
__swig_setmethods__["const_vec"] = _CVXcanon.ProblemData_const_vec_set
__swig_getmethods__["const_vec"] = _CVXcanon.ProblemData_const_vec_get
if _newclass:
const_vec = _swig_property(_CVXcanon.ProblemData_const_vec_get, _CVXcanon.ProblemData_const_vec_set)
__swig_setmethods__["id_to_col"] = _CVXcanon.ProblemData_id_to_col_set
__swig_getmethods__["id_to_col"] = _CVXcanon.ProblemData_id_to_col_get
if _newclass:
id_to_col = _swig_property(_CVXcanon.ProblemData_id_to_col_get, _CVXcanon.ProblemData_id_to_col_set)
__swig_setmethods__["const_to_row"] = _CVXcanon.ProblemData_const_to_row_set
__swig_getmethods__["const_to_row"] = _CVXcanon.ProblemData_const_to_row_get
if _newclass:
const_to_row = _swig_property(_CVXcanon.ProblemData_const_to_row_get, _CVXcanon.ProblemData_const_to_row_set)
def getV(self, values):
return _CVXcanon.ProblemData_getV(self, values)
def getI(self, values):
return _CVXcanon.ProblemData_getI(self, values)
def getJ(self, values):
return _CVXcanon.ProblemData_getJ(self, values)
def getConstVec(self, values):
return _CVXcanon.ProblemData_getConstVec(self, values)
def __init__(self):
this = _CVXcanon.new_ProblemData()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _CVXcanon.delete_ProblemData
__del__ = lambda self: None
ProblemData_swigregister = _CVXcanon.ProblemData_swigregister
ProblemData_swigregister(ProblemData)
class IntVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.IntVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.IntVector___nonzero__(self)
def __bool__(self):
return _CVXcanon.IntVector___bool__(self)
def __len__(self):
return _CVXcanon.IntVector___len__(self)
def __getslice__(self, i, j):
return _CVXcanon.IntVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _CVXcanon.IntVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _CVXcanon.IntVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _CVXcanon.IntVector___delitem__(self, *args)
def __getitem__(self, *args):
return _CVXcanon.IntVector___getitem__(self, *args)
def __setitem__(self, *args):
return _CVXcanon.IntVector___setitem__(self, *args)
def pop(self):
return _CVXcanon.IntVector_pop(self)
def append(self, x):
return _CVXcanon.IntVector_append(self, x)
def empty(self):
return _CVXcanon.IntVector_empty(self)
def size(self):
return _CVXcanon.IntVector_size(self)
def swap(self, v):
return _CVXcanon.IntVector_swap(self, v)
def begin(self):
return _CVXcanon.IntVector_begin(self)
def end(self):
return _CVXcanon.IntVector_end(self)
def rbegin(self):
return _CVXcanon.IntVector_rbegin(self)
def rend(self):
return _CVXcanon.IntVector_rend(self)
def clear(self):
return _CVXcanon.IntVector_clear(self)
def get_allocator(self):
return _CVXcanon.IntVector_get_allocator(self)
def pop_back(self):
return _CVXcanon.IntVector_pop_back(self)
def erase(self, *args):
return _CVXcanon.IntVector_erase(self, *args)
def __init__(self, *args):
this = _CVXcanon.new_IntVector(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _CVXcanon.IntVector_push_back(self, x)
def front(self):
return _CVXcanon.IntVector_front(self)
def back(self):
return _CVXcanon.IntVector_back(self)
def assign(self, n, x):
return _CVXcanon.IntVector_assign(self, n, x)
def resize(self, *args):
return _CVXcanon.IntVector_resize(self, *args)
def insert(self, *args):
return _CVXcanon.IntVector_insert(self, *args)
def reserve(self, n):
return _CVXcanon.IntVector_reserve(self, n)
def capacity(self):
return _CVXcanon.IntVector_capacity(self)
__swig_destroy__ = _CVXcanon.delete_IntVector
__del__ = lambda self: None
IntVector_swigregister = _CVXcanon.IntVector_swigregister
IntVector_swigregister(IntVector)
class DoubleVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.DoubleVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.DoubleVector___nonzero__(self)
def __bool__(self):
return _CVXcanon.DoubleVector___bool__(self)
def __len__(self):
return _CVXcanon.DoubleVector___len__(self)
def __getslice__(self, i, j):
return _CVXcanon.DoubleVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _CVXcanon.DoubleVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _CVXcanon.DoubleVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _CVXcanon.DoubleVector___delitem__(self, *args)
def __getitem__(self, *args):
return _CVXcanon.DoubleVector___getitem__(self, *args)
def __setitem__(self, *args):
return _CVXcanon.DoubleVector___setitem__(self, *args)
def pop(self):
return _CVXcanon.DoubleVector_pop(self)
def append(self, x):
return _CVXcanon.DoubleVector_append(self, x)
def empty(self):
return _CVXcanon.DoubleVector_empty(self)
def size(self):
return _CVXcanon.DoubleVector_size(self)
def swap(self, v):
return _CVXcanon.DoubleVector_swap(self, v)
def begin(self):
return _CVXcanon.DoubleVector_begin(self)
def end(self):
return _CVXcanon.DoubleVector_end(self)
def rbegin(self):
return _CVXcanon.DoubleVector_rbegin(self)
def rend(self):
return _CVXcanon.DoubleVector_rend(self)
def clear(self):
return _CVXcanon.DoubleVector_clear(self)
def get_allocator(self):
return _CVXcanon.DoubleVector_get_allocator(self)
def pop_back(self):
return _CVXcanon.DoubleVector_pop_back(self)
def erase(self, *args):
return _CVXcanon.DoubleVector_erase(self, *args)
def __init__(self, *args):
this = _CVXcanon.new_DoubleVector(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _CVXcanon.DoubleVector_push_back(self, x)
def front(self):
return _CVXcanon.DoubleVector_front(self)
def back(self):
return _CVXcanon.DoubleVector_back(self)
def assign(self, n, x):
return _CVXcanon.DoubleVector_assign(self, n, x)
def resize(self, *args):
return _CVXcanon.DoubleVector_resize(self, *args)
def insert(self, *args):
return _CVXcanon.DoubleVector_insert(self, *args)
def reserve(self, n):
return _CVXcanon.DoubleVector_reserve(self, n)
def capacity(self):
return _CVXcanon.DoubleVector_capacity(self)
__swig_destroy__ = _CVXcanon.delete_DoubleVector
__del__ = lambda self: None
DoubleVector_swigregister = _CVXcanon.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class IntVector2D(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntVector2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntVector2D, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.IntVector2D_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.IntVector2D___nonzero__(self)
def __bool__(self):
return _CVXcanon.IntVector2D___bool__(self)
def __len__(self):
return _CVXcanon.IntVector2D___len__(self)
def __getslice__(self, i, j):
return _CVXcanon.IntVector2D___getslice__(self, i, j)
def __setslice__(self, *args):
return _CVXcanon.IntVector2D___setslice__(self, *args)
def __delslice__(self, i, j):
return _CVXcanon.IntVector2D___delslice__(self, i, j)
def __delitem__(self, *args):
return _CVXcanon.IntVector2D___delitem__(self, *args)
def __getitem__(self, *args):
return _CVXcanon.IntVector2D___getitem__(self, *args)
def __setitem__(self, *args):
return _CVXcanon.IntVector2D___setitem__(self, *args)
def pop(self):
return _CVXcanon.IntVector2D_pop(self)
def append(self, x):
return _CVXcanon.IntVector2D_append(self, x)
def empty(self):
return _CVXcanon.IntVector2D_empty(self)
def size(self):
return _CVXcanon.IntVector2D_size(self)
def swap(self, v):
return _CVXcanon.IntVector2D_swap(self, v)
def begin(self):
return _CVXcanon.IntVector2D_begin(self)
def end(self):
return _CVXcanon.IntVector2D_end(self)
def rbegin(self):
return _CVXcanon.IntVector2D_rbegin(self)
def rend(self):
return _CVXcanon.IntVector2D_rend(self)
def clear(self):
return _CVXcanon.IntVector2D_clear(self)
def get_allocator(self):
return _CVXcanon.IntVector2D_get_allocator(self)
def pop_back(self):
return _CVXcanon.IntVector2D_pop_back(self)
def erase(self, *args):
return _CVXcanon.IntVector2D_erase(self, *args)
def __init__(self, *args):
this = _CVXcanon.new_IntVector2D(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _CVXcanon.IntVector2D_push_back(self, x)
def front(self):
return _CVXcanon.IntVector2D_front(self)
def back(self):
return _CVXcanon.IntVector2D_back(self)
def assign(self, n, x):
return _CVXcanon.IntVector2D_assign(self, n, x)
def resize(self, *args):
return _CVXcanon.IntVector2D_resize(self, *args)
def insert(self, *args):
return _CVXcanon.IntVector2D_insert(self, *args)
def reserve(self, n):
return _CVXcanon.IntVector2D_reserve(self, n)
def capacity(self):
return _CVXcanon.IntVector2D_capacity(self)
__swig_destroy__ = _CVXcanon.delete_IntVector2D
__del__ = lambda self: None
IntVector2D_swigregister = _CVXcanon.IntVector2D_swigregister
IntVector2D_swigregister(IntVector2D)
class DoubleVector2D(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector2D, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector2D, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.DoubleVector2D_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.DoubleVector2D___nonzero__(self)
def __bool__(self):
return _CVXcanon.DoubleVector2D___bool__(self)
def __len__(self):
return _CVXcanon.DoubleVector2D___len__(self)
def __getslice__(self, i, j):
return _CVXcanon.DoubleVector2D___getslice__(self, i, j)
def __setslice__(self, *args):
return _CVXcanon.DoubleVector2D___setslice__(self, *args)
def __delslice__(self, i, j):
return _CVXcanon.DoubleVector2D___delslice__(self, i, j)
def __delitem__(self, *args):
return _CVXcanon.DoubleVector2D___delitem__(self, *args)
def __getitem__(self, *args):
return _CVXcanon.DoubleVector2D___getitem__(self, *args)
def __setitem__(self, *args):
return _CVXcanon.DoubleVector2D___setitem__(self, *args)
def pop(self):
return _CVXcanon.DoubleVector2D_pop(self)
def append(self, x):
return _CVXcanon.DoubleVector2D_append(self, x)
def empty(self):
return _CVXcanon.DoubleVector2D_empty(self)
def size(self):
return _CVXcanon.DoubleVector2D_size(self)
def swap(self, v):
return _CVXcanon.DoubleVector2D_swap(self, v)
def begin(self):
return _CVXcanon.DoubleVector2D_begin(self)
def end(self):
return _CVXcanon.DoubleVector2D_end(self)
def rbegin(self):
return _CVXcanon.DoubleVector2D_rbegin(self)
def rend(self):
return _CVXcanon.DoubleVector2D_rend(self)
def clear(self):
return _CVXcanon.DoubleVector2D_clear(self)
def get_allocator(self):
return _CVXcanon.DoubleVector2D_get_allocator(self)
def pop_back(self):
return _CVXcanon.DoubleVector2D_pop_back(self)
def erase(self, *args):
return _CVXcanon.DoubleVector2D_erase(self, *args)
def __init__(self, *args):
this = _CVXcanon.new_DoubleVector2D(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _CVXcanon.DoubleVector2D_push_back(self, x)
def front(self):
return _CVXcanon.DoubleVector2D_front(self)
def back(self):
return _CVXcanon.DoubleVector2D_back(self)
def assign(self, n, x):
return _CVXcanon.DoubleVector2D_assign(self, n, x)
def resize(self, *args):
return _CVXcanon.DoubleVector2D_resize(self, *args)
def insert(self, *args):
return _CVXcanon.DoubleVector2D_insert(self, *args)
def reserve(self, n):
return _CVXcanon.DoubleVector2D_reserve(self, n)
def capacity(self):
return _CVXcanon.DoubleVector2D_capacity(self)
__swig_destroy__ = _CVXcanon.delete_DoubleVector2D
__del__ = lambda self: None
DoubleVector2D_swigregister = _CVXcanon.DoubleVector2D_swigregister
DoubleVector2D_swigregister(DoubleVector2D)
class IntIntMap(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, IntIntMap, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, IntIntMap, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.IntIntMap_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.IntIntMap___nonzero__(self)
def __bool__(self):
return _CVXcanon.IntIntMap___bool__(self)
def __len__(self):
return _CVXcanon.IntIntMap___len__(self)
def __iter__(self):
return self.key_iterator()
def iterkeys(self):
return self.key_iterator()
def itervalues(self):
return self.value_iterator()
def iteritems(self):
return self.iterator()
def __getitem__(self, key):
return _CVXcanon.IntIntMap___getitem__(self, key)
def __delitem__(self, key):
return _CVXcanon.IntIntMap___delitem__(self, key)
def has_key(self, key):
return _CVXcanon.IntIntMap_has_key(self, key)
def keys(self):
return _CVXcanon.IntIntMap_keys(self)
def values(self):
return _CVXcanon.IntIntMap_values(self)
def items(self):
return _CVXcanon.IntIntMap_items(self)
def __contains__(self, key):
return _CVXcanon.IntIntMap___contains__(self, key)
def key_iterator(self):
return _CVXcanon.IntIntMap_key_iterator(self)
def value_iterator(self):
return _CVXcanon.IntIntMap_value_iterator(self)
def __setitem__(self, *args):
return _CVXcanon.IntIntMap___setitem__(self, *args)
def asdict(self):
return _CVXcanon.IntIntMap_asdict(self)
def __init__(self, *args):
this = _CVXcanon.new_IntIntMap(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def empty(self):
return _CVXcanon.IntIntMap_empty(self)
def size(self):
return _CVXcanon.IntIntMap_size(self)
def swap(self, v):
return _CVXcanon.IntIntMap_swap(self, v)
def begin(self):
return _CVXcanon.IntIntMap_begin(self)
def end(self):
return _CVXcanon.IntIntMap_end(self)
def rbegin(self):
return _CVXcanon.IntIntMap_rbegin(self)
def rend(self):
return _CVXcanon.IntIntMap_rend(self)
def clear(self):
return _CVXcanon.IntIntMap_clear(self)
def get_allocator(self):
return _CVXcanon.IntIntMap_get_allocator(self)
def count(self, x):
return _CVXcanon.IntIntMap_count(self, x)
def erase(self, *args):
return _CVXcanon.IntIntMap_erase(self, *args)
def find(self, x):
return _CVXcanon.IntIntMap_find(self, x)
def lower_bound(self, x):
return _CVXcanon.IntIntMap_lower_bound(self, x)
def upper_bound(self, x):
return _CVXcanon.IntIntMap_upper_bound(self, x)
__swig_destroy__ = _CVXcanon.delete_IntIntMap
__del__ = lambda self: None
IntIntMap_swigregister = _CVXcanon.IntIntMap_swigregister
IntIntMap_swigregister(IntIntMap)
class LinOpVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, LinOpVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, LinOpVector, name)
__repr__ = _swig_repr
def iterator(self):
return _CVXcanon.LinOpVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _CVXcanon.LinOpVector___nonzero__(self)
def __bool__(self):
return _CVXcanon.LinOpVector___bool__(self)
def __len__(self):
return _CVXcanon.LinOpVector___len__(self)
def __getslice__(self, i, j):
return _CVXcanon.LinOpVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _CVXcanon.LinOpVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _CVXcanon.LinOpVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _CVXcanon.LinOpVector___delitem__(self, *args)
def __getitem__(self, *args):
return _CVXcanon.LinOpVector___getitem__(self, *args)
def __setitem__(self, *args):
return _CVXcanon.LinOpVector___setitem__(self, *args)
def pop(self):
return _CVXcanon.LinOpVector_pop(self)
def append(self, x):
return _CVXcanon.LinOpVector_append(self, x)
def empty(self):
return _CVXcanon.LinOpVector_empty(self)
def size(self):
return _CVXcanon.LinOpVector_size(self)
def swap(self, v):
return _CVXcanon.LinOpVector_swap(self, v)
def begin(self):
return _CVXcanon.LinOpVector_begin(self)
def end(self):
return _CVXcanon.LinOpVector_end(self)
def rbegin(self):
return _CVXcanon.LinOpVector_rbegin(self)
def rend(self):
return _CVXcanon.LinOpVector_rend(self)
def clear(self):
return _CVXcanon.LinOpVector_clear(self)
def get_allocator(self):
return _CVXcanon.LinOpVector_get_allocator(self)
def pop_back(self):
return _CVXcanon.LinOpVector_pop_back(self)
def erase(self, *args):
return _CVXcanon.LinOpVector_erase(self, *args)
def __init__(self, *args):
this = _CVXcanon.new_LinOpVector(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _CVXcanon.LinOpVector_push_back(self, x)
def front(self):
return _CVXcanon.LinOpVector_front(self)
def back(self):
return _CVXcanon.LinOpVector_back(self)
def assign(self, n, x):
return _CVXcanon.LinOpVector_assign(self, n, x)
def resize(self, *args):
return _CVXcanon.LinOpVector_resize(self, *args)
def insert(self, *args):
return _CVXcanon.LinOpVector_insert(self, *args)
def reserve(self, n):
return _CVXcanon.LinOpVector_reserve(self, n)
def capacity(self):
return _CVXcanon.LinOpVector_capacity(self)
__swig_destroy__ = _CVXcanon.delete_LinOpVector
__del__ = lambda self: None
LinOpVector_swigregister = _CVXcanon.LinOpVector_swigregister
LinOpVector_swigregister(LinOpVector)
def build_matrix(*args):
return _CVXcanon.build_matrix(*args)
build_matrix = _CVXcanon.build_matrix
# This file is compatible with both classic and new-style classes.
|
from memesv4 import do_experiment
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("seed")
args = parser.parse_args()
seed = int(args.seed)
params = {
"sigma": 4,
"RES": 32,
"mutation": 0.0,
"select": False,
"uniform_init": True,
"output_dir": "no_sel_no_mut_uni",
"seed": seed,
}
do_experiment(params)
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import copy
from utils import OUNoise
# random seed
np.random.seed(1)
class Actor_TD3(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor_TD3, self).__init__()
self.l1 = nn.Linear(state_dim, 5)
self.l2 = nn.Linear(5, 3)
self.l3 = nn.Linear(3, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic_TD3(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic_TD3, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 7)
self.l2 = nn.Linear(7, 6)
self.l3 = nn.Linear(6, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 7)
self.l5 = nn.Linear(7, 6)
self.l6 = nn.Linear(6, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
# section 4.2 eq (10)
return torch.min(q1, q2)
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class Actor_DDPG(nn.Module):
def __init__(self, state_dim, action_dim, max_action, hidden_dim):
super(Actor_DDPG, self).__init__()
self.preprocess = nn.BatchNorm1d(state_dim)
self.preprocess.weight.data.fill_(1)
self.preprocess.bias.data.fill_(0)
# self.preprocess = lambda x: x
self.l1 = nn.Linear(state_dim, hidden_dim*2)
self.l2 = nn.Linear(hidden_dim*2, hidden_dim)
self.l3 = nn.Linear(hidden_dim, action_dim)
self.tanh = nn.Tanh()
self.l3.weight.data.uniform_(-3e-3, 3e-3)
init = lambda x: nn.init.zeros_(x)
#init(self.l1.weight)
#init(self.l2.weight)
#init(self.l3.weight)
self.max_action = max_action
def forward(self, state):
#print("state shape is ", state.size())
if len(state.size()) == 1:
state = state.unsqueeze(0)
state = self.preprocess(state)
#print("state shape is ", state.size())
#raise NotImplementedError
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
# print(self.l3(a))
return self.max_action * self.tanh(self.l3(a))
class Critic_DDPG(nn.Module):
def __init__(self, state_dim, action_dim, num_agents, hidden_dim):
super(Critic_DDPG, self).__init__()
self.preprocess = nn.BatchNorm1d((state_dim + action_dim) * num_agents)
self.preprocess.weight.data.fill_(1)
self.preprocess.bias.data.fill_(0)
# self.preprocess = lambda x: x
self.l1 = nn.Linear((state_dim + action_dim) * num_agents, hidden_dim*2)
self.l2 = nn.Linear(hidden_dim*2, hidden_dim)
self.l3 = nn.Linear(hidden_dim, 1)
#self.l3.weight.data.uniform_(-3e-3, 3e-3)
init = lambda x: nn.init.zeros_(x)
#init(self.l1.weight)
#init(self.l2.weight)
#init(self.l3.weight)
def forward(self, X):
#if len(X.size()) == 1:
# X = X.unsqueeze(0)
X = self.preprocess(X)
q = F.relu(self.l1(X))
q = F.relu(self.l2(q))
# print(self.l3(q))
return self.l3(q)
class TD3_single():
def __init__(self, state_dim, action_dim, max_action, expl_noise_init, expl_noise_final, expl_noise_decay_rate):
        # default params follow https://github.com/sfujim/TD3/blob/master/TD3.py
self.max_action = max_action
self.expl_noise_init = expl_noise_init
self.expl_noise_final = expl_noise_final
self.expl_noise_decay_rate = expl_noise_decay_rate
self.actor = Actor_TD3(state_dim, action_dim, max_action)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic_TD3(state_dim, action_dim)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
# counter
self.iter = 0
def select_action(self, obs):
expl_noise = max(self.expl_noise_final, self.expl_noise_init * (1 - self.iter * self.expl_noise_decay_rate))
action = self.actor(obs) # assume they are on the same device
action += torch.Tensor(expl_noise * np.random.normal(loc=0, scale=self.max_action, size=action.size())).to(
action.device)
action = action.clamp(-self.max_action, self.max_action)
return action
class DDPG_single():
def __init__(self, state_dim, action_dim, max_action, num_agents, learning_rate, discrete_action = True, grid_per_action = 20, hidden_dim=32):
self.max_action = max_action
self.actor = Actor_DDPG(state_dim, action_dim, max_action, hidden_dim)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=learning_rate)
self.critic = Critic_DDPG(state_dim, action_dim, num_agents, hidden_dim)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=learning_rate)
self.exploration = OUNoise(action_dim)
self.iter = 0
def scale_noise(self, scale):
self.exploration.scale = scale
def reset_noise(self):
self.exploration.reset()
def select_action(self, obs, explore=False):
self.actor.eval()
action = self.actor(obs)
self.actor.train()
if explore:
device = action.device
action += torch.Tensor(self.exploration.noise()).to(device)
action = action.clamp(-self.max_action, self.max_action)
return action
def get_params(self):
return {'actor': self.actor.state_dict(),
'actor_target': self.actor_target.state_dict(),
'critic': self.critic.state_dict(),
'critic_target': self.critic_target.state_dict(),
'actor_optimizer': self.actor_optimizer.state_dict(),
'critic_optimizer': self.critic_optimizer.state_dict()
}
def load_params(self, params):
self.actor.load_state_dict(params['actor'])
self.actor_target.load_state_dict(params['actor_target'])
self.actor_optimizer.load_state_dict(params['actor_optimizer'])
self.critic.load_state_dict(params['critic'])
self.critic_target.load_state_dict(params['critic_target'])
self.critic_optimizer.load_state_dict(params['critic_optimizer'])
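# Illustrative sketch (not part of the original training code): constructing a
# single TD3 agent with the toy layer sizes defined above and sampling one
# exploratory action. The dimensions and noise schedule are made-up values.
if __name__ == "__main__":
    agent = TD3_single(state_dim=3, action_dim=1, max_action=1.0,
                       expl_noise_init=0.1, expl_noise_final=0.01,
                       expl_noise_decay_rate=1e-4)
    obs = torch.randn(1, 3)            # a single fake observation
    action = agent.select_action(obs)  # actor output plus Gaussian exploration noise
    print(action.shape)                # torch.Size([1, 1]), clipped to [-1, 1]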
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing policy gradient losses.
Policy gradient algorithms directly update the policy of an agent based on
a stochastic estimate of the direction of steepest ascent in a score function
representing the expected return of that policy. This subpackage provides a
number of utility functions for implementing policy gradient algorithms for
discrete and continuous policies.
"""
from typing import Optional
import chex
import jax
import jax.numpy as jnp
from rlax._src import distributions
from rlax._src import losses
Array = chex.Array
Scalar = chex.Scalar
def _clip_by_l2_norm(x: Array, max_norm: float) -> Array:
"""Clip gradients to maximum l2 norm `max_norm`."""
norm = jnp.sqrt(jnp.sum(jnp.vdot(x, x)))
return jnp.where(norm > max_norm, x * (max_norm / norm), x)
def dpg_loss(
a_t: Array,
dqda_t: Array,
dqda_clipping: Optional[Scalar] = None
) -> Array:
"""Calculates the deterministic policy gradient (DPG) loss.
See "Deterministic Policy Gradient Algorithms" by Silver, Lever, Heess,
Degris, Wierstra, Riedmiller (http://proceedings.mlr.press/v32/silver14.pdf).
Args:
a_t: continuous-valued action at time t.
dqda_t: gradient of Q(s,a) wrt. a, evaluated at time t.
dqda_clipping: clips the gradient to have norm <= `dqda_clipping`.
Returns:
DPG loss.
"""
chex.assert_rank([a_t, dqda_t], 1)
chex.assert_type([a_t, dqda_t], float)
if dqda_clipping is not None:
dqda_t = _clip_by_l2_norm(dqda_t, dqda_clipping)
target_tm1 = dqda_t + a_t
return losses.l2_loss(jax.lax.stop_gradient(target_tm1) - a_t)
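# Illustrative sketch (not part of the original rlax module): evaluating
# dpg_loss on a toy action/gradient pair. All numbers below are made up.
def _example_dpg_loss():
    a_t = jnp.array([0.3, -0.2])      # continuous action at time t
    dqda_t = jnp.array([2.0, -0.5])   # dQ(s, a)/da evaluated at a_t
    # With dqda_clipping=1.0 the gradient is rescaled to have l2 norm <= 1.
    return dpg_loss(a_t, dqda_t, dqda_clipping=1.0)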
def policy_gradient_loss(
logits_t: Array,
a_t: Array,
adv_t: Array,
w_t: Array,
) -> Array:
"""Calculates the policy gradient loss.
See "Simple Gradient-Following Algorithms for Connectionist RL" by Williams.
(http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
a_t: a sequence of actions sampled from the preferences `logits_t`.
adv_t: the observed or estimated advantages from executing actions `a_t`.
w_t: a per timestep weighting for the loss.
Returns:
Loss whose gradient corresponds to a policy gradient update.
"""
chex.assert_rank([logits_t, a_t, adv_t, w_t], [2, 1, 1, 1])
chex.assert_type([logits_t, a_t, adv_t, w_t], [float, int, float, float])
log_pi_a_t = distributions.softmax().logprob(a_t, logits_t)
adv_t = jax.lax.stop_gradient(adv_t)
loss_per_timestep = -log_pi_a_t * adv_t
return jnp.mean(loss_per_timestep * w_t)
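# Illustrative sketch (not part of the original rlax module): a REINFORCE-style
# loss on a toy two-step trajectory. Logits, actions and advantages are made up.
def _example_policy_gradient_loss():
    logits_t = jnp.array([[1.0, 0.0, -1.0],
                          [0.5, 0.5, 0.5]])   # [T, num_actions]
    a_t = jnp.array([0, 2])                   # actions sampled from the logits
    adv_t = jnp.array([1.0, -0.5])            # advantage estimates
    w_t = jnp.ones(2)                         # uniform per-timestep weights
    return policy_gradient_loss(logits_t, a_t, adv_t, w_t)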
def entropy_loss(
logits_t: Array,
w_t: Array,
) -> Array:
"""Calculates the entropy regularization loss.
See "Function Optimization using Connectionist RL Algorithms" by Williams.
(https://www.tandfonline.com/doi/abs/10.1080/09540099108946587)
Args:
logits_t: a sequence of unnormalized action preferences.
w_t: a per timestep weighting for the loss.
Returns:
Entropy loss.
"""
chex.assert_rank([logits_t, w_t], [2, 1])
chex.assert_type([logits_t, w_t], float)
entropy_per_timestep = distributions.softmax().entropy(logits_t)
return -jnp.mean(entropy_per_timestep * w_t)
def _compute_baseline(pi_t, q_t):
"""Computes baseline given a policy and action values at a state."""
return jnp.sum(pi_t * q_t, axis=1)
def _compute_advantages(logits_t: Array,
q_t: Array,
use_stop_gradient=True) -> Array:
"""Computes summed advantage using logits and action values."""
policy_t = jax.nn.softmax(logits_t, axis=1)
# Avoid computing gradients for action_values.
if use_stop_gradient:
q_t = jax.lax.stop_gradient(q_t)
baseline_t = _compute_baseline(policy_t, q_t)
adv_t = q_t - jnp.expand_dims(baseline_t, 1)
return policy_t, adv_t
def qpg_loss(
logits_t: Array,
q_t: Array,
) -> Array:
"""Computes the QPG (Q-based Policy Gradient) loss.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
Returns:
QPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
policy_advantages = -policy_t * jax.lax.stop_gradient(advantage_t)
loss = jnp.mean(jnp.sum(policy_advantages, axis=1), axis=0)
return loss
def rm_loss(
logits_t: Array,
q_t: Array,
) -> Array:
"""Computes the RMPG (Regret Matching Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with thresholded regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
Returns:
RM Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
action_regret_t = jax.nn.relu(advantage_t)
policy_regret = -policy_t * jax.lax.stop_gradient(action_regret_t)
loss = jnp.mean(jnp.sum(policy_regret, axis=1), axis=0)
return loss
def rpg_loss(
logits_t: Array,
q_t: Array,
) -> Array:
"""Computes the RPG (Regret Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
Returns:
RPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
_, adv_t = _compute_advantages(logits_t, q_t)
regrets_t = jnp.sum(jax.nn.relu(adv_t), axis=1)
total_regret_t = jnp.mean(regrets_t, axis=0)
return total_regret_t
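# Illustrative sketch (not part of the original rlax module): the three
# advantage/regret based losses above evaluated on the same toy batch, to show
# they share the [T, num_actions] input shapes for logits and action values.
def _example_regret_based_losses():
    logits_t = jnp.array([[1.0, 0.0], [0.0, 1.0]])
    q_t = jnp.array([[0.5, 0.1], [0.2, 0.8]])
    return (qpg_loss(logits_t, q_t),
            rm_loss(logits_t, q_t),
            rpg_loss(logits_t, q_t))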
def clipped_surrogate_pg_loss(
prob_ratios_t: Array,
adv_t: Array,
epsilon: Scalar) -> Array:
"""Computes the clipped surrogate policy gradient loss.
L_clipₜ(θ) = - min(rₜ(θ)Âₜ, clip(rₜ(θ), 1-ε, 1+ε)Âₜ)
Where rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ) and Âₜ are the advantages.
See Proximal Policy Optimization Algorithms, Schulman et al.:
https://arxiv.org/abs/1707.06347
Args:
prob_ratios_t: Ratio of action probabilities for actions a_t:
rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ)
adv_t: the observed or estimated advantages from executing actions a_t.
    epsilon: Scalar value corresponding to how much to clip the objective.
Returns:
Loss whose gradient corresponds to a clipped surrogate policy gradient
update.
"""
chex.assert_rank([prob_ratios_t, adv_t], [1, 1])
chex.assert_type([prob_ratios_t, adv_t], [float, float])
clipped_ratios_t = jnp.clip(prob_ratios_t, 1. - epsilon, 1. + epsilon)
clipped_objective = jnp.fmin(prob_ratios_t * adv_t, clipped_ratios_t * adv_t)
return -jnp.mean(clipped_objective)
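# Illustrative sketch (not part of the original rlax module): PPO's clipped
# surrogate objective on toy probability ratios and advantages.
def _example_clipped_surrogate_pg_loss():
    prob_ratios_t = jnp.array([0.9, 1.3, 1.0])   # pi_new(a|s) / pi_old(a|s)
    adv_t = jnp.array([1.0, 1.0, -1.0])          # advantage estimates
    # With epsilon=0.2 the 1.3 ratio is clipped to 1.2 before weighting.
    return clipped_surrogate_pg_loss(prob_ratios_t, adv_t, epsilon=0.2)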
|
from datetime import datetime
import handle_json.handle_weather_json
import nonebot
import pytz
from aiocqhttp.exceptions import Error as CQHttpError
city = '绵阳'
msg = handle_json.handle_weather_json.get_weather_dict(city)
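# Translation note for the Chinese literals below: city = '绵阳' is Mianyang.
# The 07:20 job sends "Good morning~ it's {hour}:{minute} now! The weather in
# Mianyang is {weather}, with a high of {max_tep} and a low of {min_tep} degrees.",
# the 22:15 job sends "Good night~" (晚安~), and the 07:10 job sends "Morning~" (早啊~).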
@nonebot.scheduler.scheduled_job('cron',day='*', hour='7', minute='20')
async def _():
bot = nonebot.get_bot()
now = datetime.now(pytz.timezone('Asia/Shanghai'))
try:
await bot.send_private_msg(user_id=1090693441,
message=f'早安~现在{now.hour}点{now.minute}分啦!现在{city}的天气是'
f'{msg["weather"]},最高温度为{msg["max_tep"]}度,最低温度为{msg["min_tep"]}度.')
except CQHttpError:
pass
@nonebot.scheduler.scheduled_job('cron', hour='22', minute='15')
async def _():
bot = nonebot.get_bot()
now = datetime.now(pytz.timezone('Asia/Shanghai'))
try:
await bot.send_private_msg(user_id=1090693441,message=f'晚安~')
except CQHttpError:
pass
@nonebot.scheduler.scheduled_job('cron', hour='7', minute='10')
async def _():
bot = nonebot.get_bot()
now = datetime.now(pytz.timezone('Asia/Shanghai'))
try:
await bot.send_private_msg(user_id=1090693441,message=f'早啊~')
except CQHttpError:
pass
|
from gundala import EPP, Contact
from config import config, contacts, nameserver
data = {
'id': '7654323',
'name': 'Admin 3',
'org': 'Biznetgio',
'street': 'Jl. Sudirman',
'city': 'Jakarta Pusat',
'sp': '',
'pc': '',
'cc': 'ID',
'voice': '',
'fax': '',
'email': 'admin@biznetgio.com',
}
epp = EPP(**config)
""" Create new contact. """
contact = Contact(epp, False, **data)
print(contact.create())
|
""" Script to export matplotlib plots from training to tikz """
from hierarchical_policy.decision_maker.ppo_decision_maker import PPO
from hierarchical_policy.updraft_exploiter import model_updraft_exploiter
from policy_evaluation import run_episode
from hierarchical_policy.vertex_tracker.waypoint_controller import ControllerWrapper
from hierarchical_policy.decision_maker import params_decision_maker
import torch
from glider.envs.glider_env_3D import GliderEnv3D
device = torch.device('cpu')
# env = gym.make('glider3D-v0', agent='decision_maker')
env = GliderEnv3D(agent='decision_maker')
# set seed to fix updraft distribution and trajectory
#env.seed(42)
#np.random.seed(42)
waypoint_controller = ControllerWrapper(env)
updraft_exploiter = model_updraft_exploiter.UpdraftExploiterActorCritic().to(device)
updraft_exploiter.load_state_dict(torch.load(
"../resources/results_paper/policies/updraft_exploiter_actor_critic_final_17-October-2021_20-21.pt", map_location=torch.device('cpu')))
ppo = PPO(waypoint_controller, updraft_exploiter, env)
ppo.model.actor.load_state_dict(torch.load(
"../resources/results_paper/policies/decision_maker_actor_final_30-October-2021_11-02.pt", map_location=torch.device('cpu')))
_params_agent = params_decision_maker.AgentParameters()
iterations = 10
for plot_number in range(0, iterations):
print("Running iteration number {}!".format(plot_number))
run_episode.main(env, ppo, plot_number, _params_agent, validation_mask=True)
|
# -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
class TemplateError(Exception):
"""Baseclass for all template errors."""
def __init__(self, message=None):
if message is not None:
message = unicode(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
def __init__(self, name):
IOError.__init__(self, name)
self.name = name
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __unicode__(self):
# for translated errors we only return the message
if self.translated:
return self.message.encode('utf-8')
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
def __str__(self):
return unicode(self).encode('utf-8')
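# Illustrative sketch (not part of the original module, Python 2 semantics):
# how a syntax error renders when a template name is known.
# >>> err = TemplateSyntaxError('unexpected end of template', 42, name='index.html')
# >>> print unicode(err)
# unexpected end of template
#  File "index.html", line 42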
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : server.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 05.09.2019
'''The core sensor server. This handles sensor reporting
and timing and makes the collected data available over
UNIX socket and WebSocket protocols.
'''
import json
import os
import signal
import sys
from typing import Awaitable, Optional, Callable, Tuple
import curio
from curio.socket import *
from .database import SensorDB
from .utils import notifies, now
from .ws_server import serve_ws
# Type definition for sensor-reporter coroutines
ReporterType = Callable[[curio.Queue, float], Awaitable[None]]
DEFAULT_SOCKET = '/tmp/brego.gpio.sock'
class SensorServer:
def __init__(self, sensor_db: SensorDB,
broadcast_socket: str = DEFAULT_SOCKET,
database_write_interval: float = 0.5,
write_results: bool = False,
report_status: bool = False):
"""
Args:
            sensor_db (SensorDB): Database to write readings to.
            broadcast_socket (str): Path of the UNIX socket readings are broadcast on.
            database_write_interval (float): Data write op min interval.
            write_results (bool): If True, persist readings to the database.
            report_status (bool): If True, periodically report subscriber queue sizes.
"""
try:
os.unlink(broadcast_socket)
except OSError:
if os.path.exists(broadcast_socket):
raise
self.readings = curio.Queue()
self.sensor_db = sensor_db
self.write_results = write_results
self.database_write_interval = database_write_interval
self.broadcast_socket = broadcast_socket
self.subscribers = set()
self.subscriber_names = {}
        self.reporters = set()
self.report_status = report_status
def register_subscriber(self, q: curio.Queue, name: str) -> None:
if q not in self.subscribers:
self.subscribers.add(q)
self.subscriber_names[q] = name
def unsubscribe(self, q: curio.Queue) -> None:
try:
self.subscribers.remove(q)
del self.subscriber_names[q]
        except KeyError:
pass
def register_reporter(self, reporter: ReporterType) -> None:
"""Register a new reporter coroutine. These act as the
producers and push their results on to the readings queue.
Args:
reporter (ReporterType): Reporter coroutine.
Returns:
None:
"""
self.reporters.add(reporter)
async def dispatcher(self) -> None:
try:
async for reading in self.readings:
for q in list(self.subscribers):
await q.put(reading)
except curio.CancelledError:
raise
async def status_reporter(self) -> None:
try:
while True:
await curio.sleep(5)
sizes = {name: q.qsize() for q, name in self.subscriber_names.items()}
print(f'Subscriber queue sizes: {sizes}', file=sys.stderr)
except curio.CancelledError:
raise
async def database_writer(self) -> None:
try:
write_q = curio.Queue()
self.register_subscriber(write_q, 'database_writer')
while True:
block = await write_q.get()
self.sensor_db.insert_readings(block)
await write_q.task_done()
except curio.CancelledError:
raise
finally:
self.unsubscribe(write_q)
async def broadcast_client(self, client: curio.io.Socket, addr: Tuple[str, int]) -> None:
        client_name = hash(client)  # getpeername() is not meaningful for AF_UNIX sockets, so use the hash as an id
print(f'Unix socket connection: {client_name}', file=sys.stderr)
stream = client.as_stream()
bcast_q = curio.Queue()
self.register_subscriber(bcast_q, f'broadcast_client:{client_name}')
n_readings = 0
last_report = now()
try:
while True:
block = await bcast_q.get()
n_readings += len(block)
delta = block[-1][0] - last_report
                if delta >= 5.0:
print(f'Broadcasting {n_readings / delta} readings/second.', file=sys.stderr)
last_report = now()
n_readings = 0
string = json.dumps(block) + '\n'
await curio.timeout_after(60, stream.write, string.encode('ascii'))
except curio.CancelledError:
await stream.write(json.dumps([(0, 'END_STREAM', -1)]).encode('ascii'))
raise
except (BrokenPipeError, curio.TaskTimeout):
print(f'Unix socket closed: {client_name}', file=sys.stderr)
finally:
self.unsubscribe(bcast_q)
async def broadcaster(self) -> None:
async with curio.SignalQueue(signal.SIGHUP) as restart:
while True:
print(f'Starting broadcast server on {self.broadcast_socket}.', file=sys.stderr)
broadcast_task = await curio.spawn(curio.unix_server,
self.broadcast_socket,
self.broadcast_client)
await restart.get()
await broadcast_task.cancel()
async def run(self) -> None:
async with curio.TaskGroup() as g:
cancel = curio.SignalEvent(signal.SIGINT, signal.SIGTERM)
await g.spawn(self.dispatcher)
if self.report_status:
await g.spawn(self.status_reporter)
if self.write_results:
await g.spawn(self.database_writer)
await g.spawn(self.broadcaster)
for reporter in self.reporters:
await g.spawn(reporter,
self.readings)
await cancel.wait()
del cancel
print('Shutting down server...', file=sys.stderr)
await g.cancel_remaining()
self.sensor_db.end_session()
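# Illustrative sketch (not part of the original server): a minimal reporter
# coroutine matching ReporterType. It pushes blocks of (timestamp, name, value)
# readings onto the server's queue; the sensor name, value and interval are
# made-up placeholders, and the block format is inferred from broadcast_client.
async def example_random_reporter(readings: curio.Queue,
                                  interval: float = 1.0) -> None:
    import random
    while True:
        await readings.put([(now(), 'example_sensor', random.random())])
        await curio.sleep(interval)
# Usage (hypothetical): server.register_reporter(example_random_reporter)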
def run(args):
from gpiozero import MCP3008
from brego.database import SensorDB
from brego.sensors import (find_onewire_devices,
MultiOneWireSensor,
DS18B20Sensor,
ADCManager)
database = SensorDB.request_instance()
server = SensorServer(database,
broadcast_socket=args.broadcast_socket,
report_status=args.report_status)
# one-wire temperature sensors
onewire_devices = [DS18B20Sensor(fn) for fn in find_onewire_devices()]
onewire_sensors = MultiOneWireSensor(onewire_devices)
for device in onewire_devices:
database.add_device(device.device_name, 'temperature')
server.register_reporter(onewire_sensors.reporter)
# ADCs
adc_devices = [MCP3008(channel=0), MCP3008(channel=7)]
adc_names = ['Potentiometer', 'Tachometer']
for name in adc_names:
database.add_device(name, 'ADC')
adc_manager = ADCManager(adc_devices, adc_names)
adc_manager.start()
server.register_reporter(adc_manager.reporter)
curio.run(server.run, with_monitor=True)
|
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
# Hook for the zeep module: https://pypi.python.org/pypi/zeep
# Tested with zeep 0.13.0, Python 2.7, Windows
from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata('zeep')
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script defines a PySpark job to enhance newline-delimited JSON files exported from the BigQuery public dataset
nyc-tlc:yellow.trips with an additional average speed column. This is a simple Spark job
which demonstrates using Cloud Composer to automate spinning up a
Dataproc cluster to run a spark job and tear it down once the job completes.
"""
import argparse
import io
import csv
from contextlib import closing
import datetime
import json
import sys
from pyspark import SparkConf, SparkContext
class AverageSpeedEnhancer(object):
"""This Class serves as a namespace for the business logic function to calculate an average_speed
field from trip_distance, pickup_datetime and drop off date_time.
"""
output_schema = [ # This is the schema of nyc-tlc:yellow.trips
"vendor_id", "pickup_datetime", "dropoff_datetime", "pickup_longitude",
"pickup_latitude", "dropoff_longitude", "dropoff_latitude",
"rate_code", "passenger_count", "trip_distance", "payment_type",
"fare_amount", "extra", "mta_tax", "imp_surcharge", "tip_amount",
"tolls_amount", "total_amount", "store_and_fwd_flag", "average_speed"
]
def dict_to_csv(self, dictionary):
"""This funciton converts a python dictionary to a CSV line. Note keys in output schema that
are missing in the dictionary or that contains commas (which happens occasionally with the
store_and_fwd_flags fields due to a data quality issue) will result in empty values.
Arguments:
dictionary: A dictionary containing the data of interest.
"""
with closing(io.StringIO()) as csv_string:
writer = csv.DictWriter(csv_string,
AverageSpeedEnhancer.output_schema)
writer.writerow(dictionary)
# Our desired output is a csv string not a line in a file so we strip the
# newline character written by the writerow function by default.
return csv_string.getvalue().strip()
def enhance_with_avg_speed(self, record):
"""
        This is the business logic that computes the average_speed column for each record.
The desired units are miles per hour.
Arguments:
record: A dict record from the nyc-tlc:yellow Public BigQuery table to be transformed
with an average_speed field. (This argument object gets modified in place by
this method).
"""
# There is some data quality issue in the public table chosen for this example.
if record.get('store_and_fwd_flag') and record.get(
'store_and_fwd_flag') not in 'YN':
record['store_and_fwd_flag'] = None
if (record['pickup_datetime'] and record['dropoff_datetime']
and record['trip_distance'] > 0):
_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S UTC'
# Parse strings output by BigQuery to create datetime objects
pickup = datetime.datetime.strptime(record['pickup_datetime'],
_DATETIME_FORMAT)
dropoff = datetime.datetime.strptime(record['dropoff_datetime'],
_DATETIME_FORMAT)
elapsed = dropoff - pickup
if elapsed > datetime.timedelta(
0): # Only calculate if drop off after pick up.
_SECONDS_IN_AN_HOUR = 3600.0
# Calculate speed in miles per hour.
record['average_speed'] = _SECONDS_IN_AN_HOUR * record['trip_distance'] / \
elapsed.total_seconds()
else: # Speed is either negative or undefined.
record['average_speed'] = None
elif record['trip_distance'] == 0.0:
record['average_speed'] = 0.0
else: # One of the fields required for calculation is None.
record['average_speed'] = None
return self.dict_to_csv(record)
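# Illustrative sketch (not part of the original job): exercising the average
# speed logic on a single in-memory record. All field values are made up.
def _example_enhance_single_record():
    ase = AverageSpeedEnhancer()
    record = {
        'pickup_datetime': '2018-01-01 00:00:00 UTC',
        'dropoff_datetime': '2018-01-01 00:30:00 UTC',
        'trip_distance': 10.0,
        'store_and_fwd_flag': 'N',
    }
    # 10 miles in 30 minutes yields average_speed == 20.0 mph in the CSV line;
    # fields absent from the record come out as empty CSV values.
    return ase.enhance_with_avg_speed(record)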
def main(sc, gcs_path_raw, gcs_path_transformed):
ase = AverageSpeedEnhancer()
file_strings_rdd = sc.textFile(gcs_path_raw)
# Apply the speed enhancement logic defined in the AverageSpeedEnhancer class
# Read the newline delimited json into dicts (note that this is automatically applied per line).
records_rdd = file_strings_rdd.map(
lambda record_string: json.loads(record_string))
transformed_records_rdd = records_rdd.map(ase.enhance_with_avg_speed)
transformed_records_rdd.saveAsTextFile(gcs_path_transformed)
if __name__ == '__main__':
spark_conf = SparkConf()
spark_conf.setAppName('AverageSpeedEnhancement')
spark_context = SparkContext(conf=spark_conf)
parser = argparse.ArgumentParser()
parser.add_argument(
'--gcs_path_raw',
dest='gcs_path_raw',
required=True,
help='Specify the full GCS wildcard path to the json files to enhance.'
)
parser.add_argument(
'--gcs_path_transformed',
dest='gcs_path_transformed',
required=True,
help='Specify the full GCS path prefix for the transformed json files. '
)
known_args, _ = parser.parse_known_args(None)
main(sc=spark_context,
gcs_path_raw=known_args.gcs_path_raw,
gcs_path_transformed=known_args.gcs_path_transformed)
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*500)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave zixxds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop zixxds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing zixxd/zixx-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
else:
print("Note: zixxds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
# Can override the num_nodes variable to indicate how many nodes to run.
def __init__(self):
self.num_nodes = 2
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ZIXXD", "zixxd"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("ZIXXD", "zixxd"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
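# Illustrative sketch (not part of the original framework): the smallest test
# built on BitcoinTestFramework, wiring run_test() into the main() entry point.
# The block-count check simply mirrors the base class's default expectation.
class ExampleBlockCountTest(BitcoinTestFramework):
    def run_test(self):
        for node in self.nodes:
            assert_equal(node.getblockcount(), 200)

if __name__ == '__main__':
    ExampleBlockCountTest().main()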
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'menderLoginGUI.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(640, 480)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(52, 122, 135))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 122, 135))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(52, 122, 135))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
MainWindow.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Liberation Serif")
font.setPointSize(12)
MainWindow.setFont(font)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(210, 40, 220, 72))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("mender_logo.png"))
self.label.setObjectName("label")
self.usr = QtWidgets.QLineEdit(self.centralwidget)
self.usr.setGeometry(QtCore.QRect(230, 225, 180, 30))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.usr.sizePolicy().hasHeightForWidth())
self.usr.setSizePolicy(sizePolicy)
self.usr.setStatusTip("")
self.usr.setInputMask("")
self.usr.setMaxLength(128)
self.usr.setObjectName("usr")
self.pwd = QtWidgets.QLineEdit(self.centralwidget)
self.pwd.setGeometry(QtCore.QRect(230, 280, 180, 30))
self.pwd.setStatusTip("")
self.pwd.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText|QtCore.Qt.ImhSensitiveData)
self.pwd.setMaxLength(256)
self.pwd.setFrame(True)
self.pwd.setEchoMode(QtWidgets.QLineEdit.Password)
self.pwd.setObjectName("pwd")
self.logInBtn = QtWidgets.QPushButton(self.centralwidget)
self.logInBtn.setGeometry(QtCore.QRect(355, 355, 70, 24))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.logInBtn.setPalette(palette)
self.logInBtn.setObjectName("logInBtn")
self.quitBtn = QtWidgets.QPushButton(self.centralwidget)
self.quitBtn.setGeometry(QtCore.QRect(215, 355, 70, 24))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.quitBtn.setPalette(palette)
self.quitBtn.setCheckable(False)
self.quitBtn.setChecked(False)
self.quitBtn.setAutoRepeat(False)
self.quitBtn.setDefault(False)
self.quitBtn.setFlat(False)
self.quitBtn.setObjectName("quitBtn")
self.server_addr = QtWidgets.QLineEdit(self.centralwidget)
self.server_addr.setEnabled(True)
self.server_addr.setGeometry(QtCore.QRect(230, 150, 180, 30))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server_addr.sizePolicy().hasHeightForWidth())
self.server_addr.setSizePolicy(sizePolicy)
self.server_addr.setStatusTip("")
self.server_addr.setInputMask("")
self.server_addr.setMaxLength(256)
self.server_addr.setFrame(False)
self.server_addr.setReadOnly(True)
self.server_addr.setClearButtonEnabled(False)
self.server_addr.setObjectName("server_addr")
self.domain_label = QtWidgets.QLabel(self.centralwidget)
self.domain_label.setGeometry(QtCore.QRect(230, 137, 100, 14))
self.domain_label.setObjectName("domain_label")
self.serverCheckBox = QtWidgets.QCheckBox(self.centralwidget)
self.serverCheckBox.setGeometry(QtCore.QRect(290, 180, 120, 16))
self.serverCheckBox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.serverCheckBox.setObjectName("serverCheckBox")
self.showPwd = QtWidgets.QCheckBox(self.centralwidget)
self.showPwd.setGeometry(QtCore.QRect(260, 308, 150, 20))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.showPwd.sizePolicy().hasHeightForWidth())
self.showPwd.setSizePolicy(sizePolicy)
self.showPwd.setLayoutDirection(QtCore.Qt.RightToLeft)
self.showPwd.setObjectName("showPwd")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 32))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Mender User Log In"))
self.usr.setPlaceholderText(_translate("MainWindow", "User name"))
self.pwd.setPlaceholderText(_translate("MainWindow", "Password"))
self.logInBtn.setText(_translate("MainWindow", "Log in"))
self.quitBtn.setText(_translate("MainWindow", "Quit"))
self.server_addr.setPlaceholderText(_translate("MainWindow", "https://hosted.mender.io/"))
self.domain_label.setText(_translate("MainWindow", "Server address"))
self.serverCheckBox.setText(_translate("MainWindow", "Custom server"))
self.showPwd.setText(_translate("MainWindow", "Show password"))
|
import matplotlib
#matplotlib.use("Agg")
from mirnylib.plotting import nicePlot
import os
import pickle
from openmmlib import contactmaps
from mirnylib.numutils import zoomArray
from openmmlib import polymerutils
import matplotlib.pyplot as plt
import numpy as np
from mirnylib.h5dict import h5dict
from mirnylib.genome import Genome
from mirnylib.numutils import completeIC, coarsegrain
from mirnylib.systemutils import setExceptionHook
from openmmlib.contactmapManager import averageContacts
import pandas as pd
from mirnylib.numutils import coarsegrain
setExceptionHook()
import mirnylib.plotting
filename = "/net/levsha/share/nezar/ctcf_sites/GM12878.ctcf_narrowPeak.loj.encodeMotif.rad21.txt"
SEPARATION = 400
LIFETIME = 200
class simulator(object):
def __init__(self, i, forw, rev, blocks, steps):
import pyximport; pyximport.install()
from smcTranslocatorDirectional import smcTranslocator
import numpy as np
N = len(forw)
birthArray = np.zeros(N, dtype=np.double) + 0.1
deathArray = np.zeros(N, dtype=np.double) + 1. / LIFETIME
stallArrayLeft = forw
stallArrayRight = rev
stallDeathArray = np.zeros(N, dtype=np.double) + 1 / LIFETIME
pauseArray = np.zeros(N, dtype=np.double)
smcNum = N // SEPARATION
myDeathArray = deathArray
SMCTran = smcTranslocator(birthArray, myDeathArray, stallArrayLeft, stallArrayRight, pauseArray, stallDeathArray, smcNum)
self.SMCTran = SMCTran
self.steps = steps
self.blocks = blocks
self.i = 0
def next(self):
self.i += 1
if self.i == self.blocks:
raise StopIteration
self.SMCTran.steps(self.steps)
conts = self.SMCTran.getSMCs()
if self.i % 1000 == 500:
print(self.i, conts[0][0])
return np.array(conts) // 20
def getForwBacv(mu = 3):
df = pd.read_csv(filename, sep = "\t")
df = df.loc[(~pd.isnull(df["summitDist"]) ) ]
mychr = 14
df = df.loc[df["chrom"] == "chr{0}".format(mychr)]
start = df["start"].values
end = df["end"].values
strand = df["summitDist"].values > 0
strength = df["fc"]
mid_1k = (start + end) // 1200
M = mid_1k.max() + 1
forw = np.bincount(mid_1k[strand], weights = (strength[strand] / 20), minlength=M)
rev = np.bincount(mid_1k[~strand], weights = (strength[~strand] / 20), minlength = M)
low = 60000
high = 75000
lowMon = low * 1000 // 600
highMon = high * 1000 // 600
forw = forw[lowMon:highMon]
rev = rev[lowMon:highMon]
def logistic(x, mu = 3):
x[x==0] = -99999999
return 1 / (1 + np.exp(-(x - mu)))
forw = logistic(forw,mu)
rev = logistic(rev, mu)
return forw, rev
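# Illustrative note (not part of the original script): the logistic above maps a
# CTCF site whose summed strength/20 equals mu to a capture probability of 0.5,
# a site at mu + 3 to roughly 0.95, and empty bins (forced to -99999999) to ~0.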
# uncomment this to just display a simulated heatmap.
#hm = averageContacts(simulator, range(30), 1500, classInitArgs=[forw, rev, 5000, 150], bucketNum = 20, nproc=30)
#exit()
#print(hm.shape)
class contactCalculator:
def __init__(self, filenames, cutoff, coarsegrainBy, method):
self.filenames = filenames
self.cutoff = cutoff
self.coarsegrain = coarsegrainBy
self.method = method
def next(self):
if len(self.filenames) == 0:
raise StopIteration
data = polymerutils.load(self.filenames.pop())
contacts = self.method(data, cutoff=self.cutoff) // self.coarsegrain
return contacts
def getCmap(prefix = "", radius = 6):
"""
This is a function to calculate a simulated Hi-C contact map from one or several folders with conformations, defined by "prefix".
"""
n = 20 # number of processes to use = number of cores
coarsegrainBy = 5 # how many monomers per pixel in a heatmap
print(os.getcwd())
folders = [i for i in os.listdir(".") if i.startswith(prefix)]
    folders = [i for i in folders if os.path.exists(i)]
print(folders)
files = sum([polymerutils.scanBlocks(i)["files"] for i in folders], [])
filegroups = [files[i::n] for i in range(n)]
data = polymerutils.load(files[0])
N = len(data)
method = contactmaps.findMethod(data,radius)
cmapN = int(np.ceil(N / coarsegrainBy))
cmap = averageContacts(contactCalculator, filegroups, cmapN, classInitArgs=[radius, coarsegrainBy, method ], nproc=n,
bucketNum = 60, useFmap=True)
pickle.dump(cmap, open("cmaps/cmap{0}_r={1}.pkl".format(prefix, radius), 'wb'))
def getAllCmaps():
"""This functions iterates over different contact radii and over different contactmap names,
and calculates contact maps. Right now set only for one contact map.
"""
for radius in [8]:
for prefix in [
#"lessCTCF","lessLifetime","moreLifetime","moreSeparation","steps=500",
# "flagship_try","flagshipLessCTCF","flagshipMoreCtcf","flagship_cellType"
#"flagshipMod_", "flagshipModLessCtcf", "flagshipModMoreCtcf"
#"flagshipBoundaryStallLifetime100Mu3","flagshipBoundaryStallLifetime200Mu3",
#"flagshipBoundaryStallLifetime300Mu3",
#"flagshipLifetime100Mu3","flagshipLifetime200Mu3",
"flagshipLifetime300Mu3"
#,"flagshipLifetime300Mu2","flagshipLifetime300Mu4",
]:
print(prefix, radius)
getCmap(prefix, radius)
exit()
#getAllCmaps()
def pivotHeatmap(heatmap, diags = 20):
N = len(heatmap)
newdata = np.zeros((diags, 2*N), dtype = float)
for i in range(diags):
diag = np.diagonal(heatmap,i)
pad = N - len(diag)
newdiag = np.zeros(2 * len(diag), dtype = float)
newdiag[::2] = diag
newdiag[1::2] = diag
if pad == 0:
newdata[i] = newdiag
else:
newdata[i][pad:-pad] = newdiag
return newdata
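# A minimal sketch (an assumption, not part of the original pipeline) of what
# pivotHeatmap produces: each of the first `diags` diagonals of the square
# contact map becomes one row of a (diags, 2*N) band, with every value
# duplicated so the band can later be drawn wider than it is tall.
#   m = np.arange(16).reshape(4, 4)
#   pivotHeatmap(m, diags=2).shape   # -> (2, 8)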
def showCmap():
"""Shows Hi-C data together with the simulated data. Hi-C data created by hiclib is needed for that,
but you can replace the line mydict=h5dict()... and the following line with your own data loading code. """
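    # For instance (a hypothetical substitution, not part of this script), a dense
    # matrix saved with numpy could replace the h5dict loading below:
    #   hicdata = np.load("my_hic_10kb_chr13.npy")[low20:high20, low20:high20]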
low = 60000
high = 75000
lowMon = low * 1000 // 600
highMon = high * 1000 // 600
low20 = low // 10
high20 = high // 10
    # Hi-C data is loaded here for display purposes only; replace this with your own loading code if your data is in a different format
mydict = h5dict("/home/magus/HiC2011/Erez2014/hg19/GM12878_inSitu-all-combined-10k_HighRes.byChr",'r')
hicdata = mydict.get_dataset("13 13")[low20:high20, low20:high20]
hicdata = completeIC(hicdata)
curshape = hicdata.shape
newshape = (1000 * (high - low)) // (600 * 5)
print(hicdata.shape, newshape)
hicdata = zoomArray(hicdata, (newshape, newshape))
hicdata = np.clip(hicdata, 0, np.percentile(hicdata, 99.99))
hicdata /= np.mean(np.sum(hicdata, axis=1))
#hicdata = hm / np.mean(np.sum(hm, axis=1))
for fname in os.listdir("cmaps"):
cmap = pickle.load(open(os.path.join("cmaps", fname), 'rb'))
#arr = coarsegrain(cmap, 2)
arr = cmap
if arr.shape[0] != hicdata.shape[0]:
continue
print(arr.shape)
arr = arr / np.mean(np.sum(arr, axis=1))
ran = np.arange(len(arr))
mask = ran[:,None] > ran[None,:]
arr[mask] = hicdata[mask]
logarr = np.log(arr + 0.0001)
# noinspection PyTypeChecker
plt.imshow(logarr, vmax = np.percentile(logarr, 99.99), vmin = np.percentile(logarr, 10), extent = [low, high, high, low], interpolation = "none")
plt.savefig(os.path.join("heatmaps", fname+".png"))
plt.savefig(os.path.join("heatmaps", fname+".pdf"))
plt.show()
plt.clf()
# getCmap()
#showCmap()
# plt.show()
def showCmapNew():
"""Saves a bunch of heatmaps at high resolutions."""
plt.figure(figsize=(8,8))
low = 60000
high = 75000
lowMon = low * 1000 // 600
highMon = high * 1000 // 600
low20 = low // 10
high20 = high // 10
mydict = h5dict("/home/magus/HiC2011/Erez2014/hg19/GM12878_inSitu-all-combined-10k_HighRes.byChr",'r')
hicdata = mydict.get_dataset("13 13")[low20:high20, low20:high20]
hicdata = completeIC(hicdata)
curshape = hicdata.shape
resolutionMon = 5
newshape = (1000 * (high - low)) // (600 * resolutionMon)
print(hicdata.shape, newshape)
hicdata = zoomArray(hicdata, (newshape, newshape))
hicdata = np.clip(hicdata, 0, np.percentile(hicdata, 99.99))
hicdata /= np.mean(np.sum(hicdata, axis=1))
#hicdata = hm / np.mean(np.sum(hm, axis=1))
#for fname in os.listdir("cmaps"):
for fname in ["cmapflagshipLifetime300Mu3_r=8.pkl"]:
if ("r=8" not in fname) or ("Lifetime" not in fname):
print("not going", fname)
continue
try:
mu = float(fname.split("_r=")[0].split("Mu")[1])
except:
continue
forw, rev = getForwBacv(mu)
cmap = pickle.load(open(os.path.join("cmaps", fname), 'rb'))
#arr = coarsegrain(cmap, 2)
arr = cmap
if arr.shape[0] != hicdata.shape[0]:
continue
arr = arr / np.mean(np.sum(arr,axis=1))
hicdata *= 1.5
diags = 1000
print(arr.shape)
ax = plt.subplot(211)
turned = pivotHeatmap(arr, diags)[::-1] * 3
turned2 = pivotHeatmap(hicdata, diags)
turned = np.concatenate([turned, turned2], axis=0)
myextent = [low, high, -(high - low) * diags/ len(arr) , (high - low) * diags/ len(arr) ]
plt.imshow(np.log(turned + 0.0001) , aspect=0.5,cmap = "fall", vmax = -4, vmin = -8,
extent=myextent , interpolation = "none")
#plt.colorbar()
#plt.ylim([-(high - low) * diags/ len(arr) , (high - low) * diags/ len(arr) ])
#nicePlot(show=False)
plt.subplot(413, sharex = ax)
xaxis=np.arange(len(forw)// 20) * 12 + 60000
forwcg = coarsegrain(forw,20)
revcg = coarsegrain(rev, 20)
plt.vlines(xaxis[forwcg>0], 0, forwcg[forwcg>0], color = "blue")
plt.vlines(xaxis[revcg>0], 0, revcg[revcg>0], color = "green")
#plt.scatter(xaxis[forwcg>0], forwcg[forwcg>0], label = "forward CTCF")
#plt.scatter(xaxis[revcg > 0],revcg[revcg>0], label = "reverse CTCF")
plt.xlim([60000, 75000])
plt.title(fname)
plt.legend()
plt.show()
continue
#nicePlot(show=False)
#plt.subplot(414, sharex = ax)
#plt.plot(xaxis, data)
#plt.show()
#arr = arr / np.mean(np.sum(arr, axis=1))
#ran = np.arange(len(arr))
#mask = ran[:,None] > ran[None,:]
#arr[mask] = hicdata[mask]
#logarr = np.log(arr + 0.0001)
# noinspection PyTypeChecker
#plt.imshow(logarr, vmax = np.percentile(logarr, 99.9), extent = [low, high, high, low], interpolation = "none")
for st in range(60000, 75000, 1000):
for size in [2000, 3000, 5000]:
end = st + size
if end > 75000:
continue
plt.xlim([st, end])
plt.savefig(os.path.join("heatmaps", "{0}_st={1}_end={2}_r=2.png".format(fname, st, end)))
plt.savefig(os.path.join("heatmaps", "{0}_st={1}_end={2}_r=2.pdf".format(fname, st, end)))
plt.clf()
plt.show()
# getCmap()
showCmapNew()
# plt.show()
|
from django.apps import AppConfig
class SignaldataConfig(AppConfig):
name = 'signalData'
|
# coding: utf-8
"""
Justap API
欢迎阅读 Justap Api 文档 Justap 是为移动端应用和PC端应用打造的下一代聚合支付SAAS服务平台,通过一个 SDK 即可快速的支持各种形式的应用,并且一次接口完成多个不同支付渠道的接入。平台除了支持服务商子商户模式,同时还对商家自有商户(即自己前往微信、支付宝等机构开户)提供了完整的支持。 感谢您的支持,我们将不断探索,为您提供更优质的服务!如需技术支持可前往商户中心提交工单,支持工程师会尽快与您取得联系! # 文档说明 采用 REST 风格设计。所有接口请求地址都是可预期的以及面向资源的。使用规范的 HTTP 响应代码来表示请求结果的正确或错误信息。使用 HTTP 内置的特性,如 HTTP Authentication 和 HTTP 请求方法让接口易于理解。 ## HTTP 状态码 HTTP 状态码可以用于表明服务的状态。服务器返回的 HTTP 状态码遵循 [RFC 7231](http://tools.ietf.org/html/rfc7231#section-6) 和 [IANA Status Code Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) 标准。 ## 认证 在调用 API 时,必须提供 API Key 作为每个请求的身份验证。你可以在管理平台内管理你的 API Key。API Key 是商户在系统中的身份标识,请安全存储,确保其不要被泄露。如需获取或更新 API Key ,也可以在商户中心内进行操作。 Api Key 在使用自定义的 HTTP Header 进行传递。 ``` X-Justap-Api-Key ``` API Key 分为 live 和 test 两种模式。分别对应真实交易环境和模拟测试交易环境并且可以实时切换。 测试模式下的 API Key 会模拟交易等请求,但是不会产生任何真实交易行为和费用,便于调试和接入。 **⚠️ 注意**:在使用 live 模式前,需要先前往 `商户中心 -> 应用设置 -> 开发参数` 开启 live 模式。 <SecurityDefinitions /> ## 请求类型 所有的 API 请求只支持 HTTPS 方式调用。 ## 路由参数 路由参数是指出现在 URL 路径中的可变变量。在本文档中,使用 `{}` 包裹的部分。 例如: `{charge_id}`,在实际使用是,需要将 `{charge_id}` 替换为实际值 `charge_8a8sdf888888` ## MIME Type MIME 类型用于指示服务器返回的数据格式。服务器目前默认采用 `application/json`。 例如: ``` application/json ``` ## 错误 服务器使用 HTTP 状态码 (status code) 来表明一个 API 请求的成功或失败状态。返回 HTTP 2XX 表明 API 请求成功。返回 HTTP 4XX 表明在请求 API 时提供了错误信息,例如参数缺失、参数错误、支付渠道错误等。返回 HTTP 5XX 表明 API 请求时,服务器发生了错误。 在返回错误的状态码时,回同时返回一些错误信息提示出错原因。 具体的错误码我们正在整理当中。 ## 分页 所有的 Justap 资源都可以被 list API 方法支持,例如分页 charges 和 refunds。这些 list API 方法拥有相同的数据结构。Justap 是基于 cursor 的分页机制,使用参数 starting_after 来决定列表从何处开始,使用参数 ending_before 来决定列表从何处结束。 ## 参数说明 请求参数中包含的以下字段释义请参考: - REQUIRED: 必填参数 - OPTIONAL: 可选参数,可以在请求当前接口时按需传入 - CONDITIONAL: 在某些条件下必传 - RESPONSE-ONLY: 标示该参数仅在接口返回参数中出现,调用 API 时无需传入 # 如何保证幂等性 如果发生请求超时或服务器内部错误,客户端可能会尝试重发请求。您可以在请求中设置 ClientToken 参数避免多次重试带来重复操作的问题。 ## 什么是幂等性 在数学计算或者计算机科学中,幂等性(idempotence)是指相同操作或资源在一次或多次请求中具有同样效果的作用。幂等性是在分布式系统设计中具有十分重要的地位。 ## 保证幂等性 通常情况下,客户端只需要在500(InternalErrorInternalError)或503(ServiceUnavailable)错误,或者无法获取响应结果时重试。充实时您可以从客户端生成一个参数值不超过64个的ASCII字符,并将值赋予 ClientToken,保证重试请求的幂等性。 ## ClientToken 详解 ClientToken参数的详细信息如下所示。 - ClientToken 是一个由客户端生成的唯一的、大小写敏感、不超过64个ASCII字符的字符串。例如,`ClientToken=123e4567-e89b-12d3-a456-426655440000`。 - 如果您提供了一个已经使用过的 ClientToken,但其他请求参数**有变化**,则服务器会返回 IdempotentParameterMismatch 的错误代码。 - 如果您提供了一个已经使用过的 ClientToken,且其他请求参数**不变**,则服务器会尝试返回 ClientToken 对应的记录。 ## API列表 以下为部分包含了 ClientToken 参数的API,供您参考。具体哪些API支持 ClientToken 参数请以各 API 文档为准,此处不一一列举。 - [申请退款接口](https://www.justap.cn/docs#operation/TradeService_Refunds) # 签名 为保证安全,JUSTAP 所有接口均需要对请求进行签名。服务器收到请求后进行签名的验证。如果签名验证不通过,将会拒绝处理请求,并返回 401 Unauthorized。 签名算法: ``` base64Encode(hamc-sha256(md5(请求 body + 请求时间戳 + 一次性随机字符串) + 一次性随机字符串)) ``` ## 准备 首先需要在 Justap 创建一个应用,商户需要生成一对 RSA 密钥对,并将公钥配置到 `商户中心 -> 开发配置`。 RSA 可以使用支付宝提供的 [密钥生成工具](https://opendocs.alipay.com/common/02kipl) 来生成。 商户在使用时,可以按照下述步骤生成请求的签名。 ## 算法描述: - 在请求发送前,取完整的**请求 body** - 生成一个随机的32位字符串,得到 **一次性随机字符串** - 获取当前时间的时间戳,得到 **请求时间戳** - 在请求字符串后面拼接上 **请求时间戳** 和 **一次性随机字符串**,得到 **待 Hash 字符串** - 对 **待 Hash 字符串** 计算 md5,得到 **待签名字符串** - **待签名字符串** 后面拼接上 一次性随机字符串,得到完整的 **待签名字符串** - 使用商户 RSA 私钥,对 **待签名字符串** 计算签名,并对 结果 进行 base64 编码,即可得到 **签名** ## 设置HTTP头 Justap 要求请求通过 自定义头部 来传递签名。具体定义如下: ``` X-Justap-Signature: 签名 X-Justap-Request-Time: 请求时间戳 X-Justap-Nonce: 一次性随机字符串 X-Justap-Body-Hash: 待签名字符串 ``` 具体的签名算法实现,可参考我们提供的各语言 SDK。 # WebHooks # noqa: E501
OpenAPI spec version: 1.0
Contact: support@justap.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "justap-server-sdk-python"
VERSION = ""
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"certifi>=2017.4.17",
"python-dateutil>=2.1",
"six>=1.10",
"urllib3>=1.23"
]
setup(
name=NAME,
version=VERSION,
description="Justap API",
author_email="support@justap.net",
url="",
keywords=["Swagger", "Justap API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
欢迎阅读 Justap Api 文档 Justap 是为移动端应用和PC端应用打造的下一代聚合支付SAAS服务平台,通过一个 SDK 即可快速的支持各种形式的应用,并且一次接口完成多个不同支付渠道的接入。平台除了支持服务商子商户模式,同时还对商家自有商户(即自己前往微信、支付宝等机构开户)提供了完整的支持。 感谢您的支持,我们将不断探索,为您提供更优质的服务!如需技术支持可前往商户中心提交工单,支持工程师会尽快与您取得联系! # 文档说明 采用 REST 风格设计。所有接口请求地址都是可预期的以及面向资源的。使用规范的 HTTP 响应代码来表示请求结果的正确或错误信息。使用 HTTP 内置的特性,如 HTTP Authentication 和 HTTP 请求方法让接口易于理解。 ## HTTP 状态码 HTTP 状态码可以用于表明服务的状态。服务器返回的 HTTP 状态码遵循 [RFC 7231](http://tools.ietf.org/html/rfc7231#section-6) 和 [IANA Status Code Registry](http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) 标准。 ## 认证 在调用 API 时,必须提供 API Key 作为每个请求的身份验证。你可以在管理平台内管理你的 API Key。API Key 是商户在系统中的身份标识,请安全存储,确保其不要被泄露。如需获取或更新 API Key ,也可以在商户中心内进行操作。 Api Key 在使用自定义的 HTTP Header 进行传递。 ``` X-Justap-Api-Key ``` API Key 分为 live 和 test 两种模式。分别对应真实交易环境和模拟测试交易环境并且可以实时切换。 测试模式下的 API Key 会模拟交易等请求,但是不会产生任何真实交易行为和费用,便于调试和接入。 **⚠️ 注意**:在使用 live 模式前,需要先前往 `商户中心 -> 应用设置 -> 开发参数` 开启 live 模式。 <SecurityDefinitions /> ## 请求类型 所有的 API 请求只支持 HTTPS 方式调用。 ## 路由参数 路由参数是指出现在 URL 路径中的可变变量。在本文档中,使用 `{}` 包裹的部分。 例如: `{charge_id}`,在实际使用是,需要将 `{charge_id}` 替换为实际值 `charge_8a8sdf888888` ## MIME Type MIME 类型用于指示服务器返回的数据格式。服务器目前默认采用 `application/json`。 例如: ``` application/json ``` ## 错误 服务器使用 HTTP 状态码 (status code) 来表明一个 API 请求的成功或失败状态。返回 HTTP 2XX 表明 API 请求成功。返回 HTTP 4XX 表明在请求 API 时提供了错误信息,例如参数缺失、参数错误、支付渠道错误等。返回 HTTP 5XX 表明 API 请求时,服务器发生了错误。 在返回错误的状态码时,回同时返回一些错误信息提示出错原因。 具体的错误码我们正在整理当中。 ## 分页 所有的 Justap 资源都可以被 list API 方法支持,例如分页 charges 和 refunds。这些 list API 方法拥有相同的数据结构。Justap 是基于 cursor 的分页机制,使用参数 starting_after 来决定列表从何处开始,使用参数 ending_before 来决定列表从何处结束。 ## 参数说明 请求参数中包含的以下字段释义请参考: - REQUIRED: 必填参数 - OPTIONAL: 可选参数,可以在请求当前接口时按需传入 - CONDITIONAL: 在某些条件下必传 - RESPONSE-ONLY: 标示该参数仅在接口返回参数中出现,调用 API 时无需传入 # 如何保证幂等性 如果发生请求超时或服务器内部错误,客户端可能会尝试重发请求。您可以在请求中设置 ClientToken 参数避免多次重试带来重复操作的问题。 ## 什么是幂等性 在数学计算或者计算机科学中,幂等性(idempotence)是指相同操作或资源在一次或多次请求中具有同样效果的作用。幂等性是在分布式系统设计中具有十分重要的地位。 ## 保证幂等性 通常情况下,客户端只需要在500(InternalErrorInternalError)或503(ServiceUnavailable)错误,或者无法获取响应结果时重试。充实时您可以从客户端生成一个参数值不超过64个的ASCII字符,并将值赋予 ClientToken,保证重试请求的幂等性。 ## ClientToken 详解 ClientToken参数的详细信息如下所示。 - ClientToken 是一个由客户端生成的唯一的、大小写敏感、不超过64个ASCII字符的字符串。例如,`ClientToken=123e4567-e89b-12d3-a456-426655440000`。 - 如果您提供了一个已经使用过的 ClientToken,但其他请求参数**有变化**,则服务器会返回 IdempotentParameterMismatch 的错误代码。 - 如果您提供了一个已经使用过的 ClientToken,且其他请求参数**不变**,则服务器会尝试返回 ClientToken 对应的记录。 ## API列表 以下为部分包含了 ClientToken 参数的API,供您参考。具体哪些API支持 ClientToken 参数请以各 API 文档为准,此处不一一列举。 - [申请退款接口](https://www.justap.cn/docs#operation/TradeService_Refunds) # 签名 为保证安全,JUSTAP 所有接口均需要对请求进行签名。服务器收到请求后进行签名的验证。如果签名验证不通过,将会拒绝处理请求,并返回 401 Unauthorized。 签名算法: ``` base64Encode(hamc-sha256(md5(请求 body + 请求时间戳 + 一次性随机字符串) + 一次性随机字符串)) ``` ## 准备 首先需要在 Justap 创建一个应用,商户需要生成一对 RSA 密钥对,并将公钥配置到 `商户中心 -> 开发配置`。 RSA 可以使用支付宝提供的 [密钥生成工具](https://opendocs.alipay.com/common/02kipl) 来生成。 商户在使用时,可以按照下述步骤生成请求的签名。 ## 算法描述: - 在请求发送前,取完整的**请求 body** - 生成一个随机的32位字符串,得到 **一次性随机字符串** - 获取当前时间的时间戳,得到 **请求时间戳** - 在请求字符串后面拼接上 **请求时间戳** 和 **一次性随机字符串**,得到 **待 Hash 字符串** - 对 **待 Hash 字符串** 计算 md5,得到 **待签名字符串** - **待签名字符串** 后面拼接上 一次性随机字符串,得到完整的 **待签名字符串** - 使用商户 RSA 私钥,对 **待签名字符串** 计算签名,并对 结果 进行 base64 编码,即可得到 **签名** ## 设置HTTP头 Justap 要求请求通过 自定义头部 来传递签名。具体定义如下: ``` X-Justap-Signature: 签名 X-Justap-Request-Time: 请求时间戳 X-Justap-Nonce: 一次性随机字符串 X-Justap-Body-Hash: 待签名字符串 ``` 具体的签名算法实现,可参考我们提供的各语言 SDK。 # WebHooks # noqa: E501
"""
)
|
# -*- coding: utf-8 -*-
# @Brief: IoU-related helpers
import tensorflow as tf
import math
def box_iou(b1, b2):
"""
计算iou
:param b1:
:param b2:
:return:
"""
# 13,13,3,1,4
# 计算左上角的坐标和右下角的坐标
b1 = tf.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh/2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# 1,n,4
    # compute the top-left and bottom-right corner coordinates
b2 = tf.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh/2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
    # compute the intersection area
intersect_mins = tf.maximum(b1_mins, b2_mins)
intersect_maxes = tf.minimum(b1_maxes, b2_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
def box_ciou(b1, b2):
"""
    Compute the CIoU (complete IoU) between predicted and ground-truth boxes.
:param b1: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
:param b2: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh
:return:tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)
"""
    # top-left and bottom-right corners of the predicted boxes
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh / 2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
    # top-left and bottom-right corners of the ground-truth boxes
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh / 2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
    # IoU between the ground-truth and predicted boxes
intersect_mins = tf.maximum(b1_mins, b2_mins)
intersect_maxes = tf.minimum(b1_maxes, b2_maxes)
    # bottom-right minus top-left: positive means the boxes overlap, zero means they do not
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
union_area = b1_area + b2_area - intersect_area
iou = intersect_area / (union_area + 1e-7)
    # squared distance between the box centers
center_distance = tf.reduce_sum(tf.square(b1_xy - b2_xy), axis=-1)
    # corners of the smallest box enclosing both boxes, used for the diagonal term
enclose_mins = tf.minimum(b1_mins, b2_mins)
enclose_maxes = tf.maximum(b1_maxes, b2_maxes)
enclose_wh = tf.maximum(enclose_maxes - enclose_mins, 0.0)
    # squared diagonal length of the enclosing box
enclose_diagonal = tf.reduce_sum(tf.square(enclose_wh), axis=-1)
diou = iou - 1.0 * center_distance / (enclose_diagonal + 1e-7)
v = 4 * tf.square(tf.math.atan2(b1_wh[..., 0], b1_wh[..., 1]) -
tf.math.atan2(b2_wh[..., 0], b2_wh[..., 1])) / (math.pi * math.pi)
    # alpha * v is the aspect-ratio penalty term
alpha = v / (1.0 - iou + v)
ciou = diou - alpha * v
ciou = tf.expand_dims(ciou, -1)
return ciou
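# A minimal usage sketch (an assumption, not part of the original module): both
# functions take boxes in (x, y, w, h) format. box_iou broadcasts b1 against b2
# and returns a pairwise matrix, while box_ciou compares element-wise and keeps
# a trailing dimension of 1. Note that for two exactly identical boxes
# alpha = v / (1 - iou + v) becomes 0/0, so box_ciou would return NaN there.
if __name__ == "__main__":
    pred = tf.constant([[0.5, 0.5, 1.0, 1.0],
                        [1.0, 1.0, 2.0, 2.0]], dtype=tf.float32)
    gt = tf.constant([[1.5, 1.5, 1.0, 1.0],
                      [5.0, 5.0, 1.0, 1.0],
                      [0.5, 0.4, 1.0, 2.0]], dtype=tf.float32)
    print(box_iou(pred, gt))        # pairwise IoU matrix, shape (2, 3)
    print(box_ciou(pred, gt[:2]))   # element-wise CIoU, shape (2, 1)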
|
import numpy as np
import matplotlib.pyplot as pp
from mpl_toolkits.mplot3d import Axes3D
from fenics import Mesh
from static import run_static
from visualisation import *
from axes_world import one_by_one
# =============================================================================
# Mesh
#name = 'straight_50'
name = 'straight_100'
#name = 'straight_300'
mesh_file = '../xml_files/%s.xml' % name
# Define speed direction
u0_th = (0.50)*np.pi
u0_ph = (0.00)*np.pi
# u0 is unitary by definition.
u0 = np.array([np.sin(u0_th)*np.cos(u0_ph),
np.sin(u0_th)*np.sin(u0_ph),
np.cos(u0_th)])
# Drag coefficient and Cauchy number
Cd = 1.2
Cy = 50
# Relaxation parameter
rlx = 0.8
# Import mesh
meshy = Mesh(mesh_file)
# Successive simulations
def run_succession(list_Cy):
list_results = []
meshy = Mesh(mesh_file)
for Cy in list_Cy:
print('Cy = %s' % Cy)
results = run_static(meshy, u0, 'drag', Cd*Cy, rlx)
list_results.append(results)
print('')
return list_results
def draw_succession(ax, list_Cy, list_results, colors):
for Cy, results, color in zip(list_Cy, list_results, colors):
pos_x, pos_y = results[3,:,0], results[3,:,1]
ax.plot(pos_y, -pos_x, linewidth=1.5, linestyle='-', color=color,
label=r'$C_{\mathrm{Y}} = %.1f$' % Cy)
ax.plot(-pos_y, -pos_x, linewidth=1.5, linestyle='-', color=color)
ax.set_aspect('equal')
ax.axis('off')
ax.legend(loc='best',
fontsize=10,
frameon=False,
ncol=1,
labelspacing=0.3,
handlelength=1.5)
# Parameters used to plot Figure 1(a) in the JOSS paper
list_Cy_tilde = np.array([0.001, 2, 4.7, 9.0, 27, 72, 99])
list_Cy = list_Cy_tilde/Cd
colors = ['lightgray', 'yellow', 'khaki', 'goldenrod', 'olive', 'orange', 'red']
list_results = run_succession(list_Cy)
ax = one_by_one()
draw_succession(ax, list_Cy, list_results, colors)
pp.show()
|
# -*- coding: utf-8 -*-
from __future__ import print_function # (at top of module)
import sys, requests
import uuid
from props.localtest_mapper import *
import lib.obp
# test payment workflow
# prerequisites:
# 1 run OBP-API and run OBP-Kafka_Python
# 2 in props
# connector=mapped
# 3 prepare your own accounts info
# Reference : localtest_mapper.py
#
# test endpoint list:
# 1 Create counterparty for an account - V210
# 2 Create Transaction Request. (SANDBOX_TAN)- V210
# 3 Create Transaction Request. (SEPA)- V210
# 4 Create Transaction Request. (COUNTERPARTY)- V210
# 5 Answer Transaction Request Challenge. (SANDBOX_TAN)-V210
# 6 Answer Transaction Request Challenge. (SEPA)-V210
# 7 Answer Transaction Request Challenge. (COUNTERPARTY)-V210
# 8 Get Transaction by Id. -V121
# 9 Get Transactions for Account (Full)-- V210
#########################Step1 : Direct Login process ################
obp = lib.obp
obp.setBaseUrl(BASE_URL)
obp.setApiVersion(API_VERSION_V210)
print("Call API - 0 'DirectLogin'")
obp.login(USERNAME, PASSWORD, CONSUMER_KEY)
# set the fromAccount info:
from_bank_id = FROM_BANK_ID
from_account_id = FROM_ACCOUNT_ID
######################### Step2 - make a payment - SANDBOX_TAN ################
print("")
print("")
# set the toAccount for SANDBOX_TAN
to_bank_id = TO_BANK_ID
to_account_id = TO_ACCOUNT_ID
TRANSACTION_REQUEST_TYPE_SANDBOX_TAN = "SANDBOX_TAN"
print("--------- TRANSACTION_REQUEST_TYPE : {0}".format(TRANSACTION_REQUEST_TYPE_SANDBOX_TAN))
print("Call API - 1 'Create Transaction Request. -- V210' (no challenge)")
# set up a small value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_SANDBOX_TAN,
to_bank_id=to_bank_id,
to_account_id=to_account_id,
to_counterparty_id="", # used for SEPA
to_counterparty_iban="") # used for COUNTERPARTY
# There was no challenge, transaction was created immediately
obp.printMessageNoChallenge(initiate_response)
#####Case2: with challenge
print("")
print("Call API - 2 'Create Transaction Request. -- V210' (with challenge)")
# set up a large value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE_LARGE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_SANDBOX_TAN,
to_bank_id=to_bank_id,
to_account_id=to_account_id,
to_counterparty_id="", # used for SEPA
to_counterparty_iban="") # used for COUNTERPARTY
obp.printMessageWithChallenge(initiate_response)
print("")
print("Call API - 3 'Answer Transaction Request Challenge. -- V210'")
challenge_id = initiate_response['challenge']['id']
transaction_req_id = initiate_response['id']
challenge_response = obp.answerChallengeV210(from_bank_id,
from_account_id,
transaction_req_id,
TRANSACTION_REQUEST_TYPE_SANDBOX_TAN,
challenge_id)
obp.printMessageAfterAnswerChallenge(challenge_response)
######################### Step3 - make a payment - SEPA ################
print("")
print("")
TRANSACTION_REQUEST_TYPE_SEPA = "SEPA"
print("--------- TRANSACTION_REQUEST_TYPE : {0}".format(TRANSACTION_REQUEST_TYPE_SEPA))
print("Call API - 1 'Create counterparty for an account. -- V210'")
to_counterparty_iban = str(uuid.uuid4())
create_counterparty_response = obp.createCounterparty(bank_id=from_bank_id,
account_id=from_account_id,
name=str(uuid.uuid4()),
other_account_routing_scheme="IBAN",
other_account_routing_address=to_counterparty_iban,
other_bank_routing_scheme="test",
other_bank_routing_address="test")
obp.printCreateCounterparty(create_counterparty_response)
print("")
print("Call API - 2 'Create Transaction Request. -- V210' (no challenge)")
# set up a small value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_SEPA,
to_bank_id="", # used for SANDBOX_TAN
to_account_id="", # used for SANDBOX_TAN
to_counterparty_id="", # used for COUNTERPARTY
to_counterparty_iban=to_counterparty_iban)
obp.printMessageNoChallenge(initiate_response)
print("")
print("Call API - 2 'Create Transaction Request. -- V210' (with challenge)")
# set up a large value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE_LARGE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_SEPA,
to_bank_id="", # used for SANDBOX_TAN
to_account_id="", # used for SANDBOX_TAN
to_counterparty_id="", # used for COUNTERPARTY
to_counterparty_iban=to_counterparty_iban)
obp.printMessageWithChallenge(initiate_response)
print("")
print("Call API - 3 'Answer Transaction Request Challenge. -- V210'")
print("Transaction is done , and the transaction_request is 'COMPLETED' and new Transaction id is created: :")
challenge_response = obp.answerChallengeV210(from_bank_id,
from_account_id,
transaction_req_id,
TRANSACTION_REQUEST_TYPE_SEPA,
challenge_id)
challenge_id = initiate_response['challenge']['id']
transaction_req_id = initiate_response['id']
obp.printMessageAfterAnswerChallenge(challenge_response)
######################### Step4 - make a payment - COUNTERPARTY ################
print("")
print("")
TRANSACTION_REQUEST_TYPE_COUNTERPARTY = "COUNTERPARTY"
print("--------- TRANSACTION_REQUEST_TYPE : {0}".format(TRANSACTION_REQUEST_TYPE_COUNTERPARTY))
print("Call API - 1 'Create counterparty for an account. -- V210'")
create_counterparty_response = obp.createCounterparty(bank_id=from_bank_id,
account_id=from_account_id,
name=str(uuid.uuid4()),
other_account_routing_scheme="OBP",
other_account_routing_address="test",
other_bank_routing_scheme="OBP",
other_bank_routing_address="test")
obp.printCreateCounterparty(create_counterparty_response)
print("")
print("Call API - 1 'Create Transaction Request. -- V210' (no challenge)")
to_counterparty_id = create_counterparty_response['counterparty_id']
# set up a small value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_COUNTERPARTY,
to_bank_id="", # used for SANDBOX_TAN
to_account_id="", # used for SANDBOX_TAN
to_counterparty_id=to_counterparty_id,
to_counterparty_iban="") # used for SEPA
# There was no challenge, transaction was created immediately
obp.printMessageNoChallenge(initiate_response)
print("")
print("Call API - 3 'Create Transaction Request. -- V210' (with challenge)")
# set up a large value in payment detail
obp.setPaymentDetails(OUR_CURRENCY, OUR_VALUE_LARGE)
initiate_response = obp.createTransactionRequestV210(from_bank_id=from_bank_id,
from_account_id=from_account_id,
transaction_request_type=TRANSACTION_REQUEST_TYPE_COUNTERPARTY,
to_bank_id="", # used for SANDBOX_TAN
to_account_id="", # used for SANDBOX_TAN
to_counterparty_id=to_counterparty_id,
to_counterparty_iban="") # used for SEPA
obp.printMessageWithChallenge(initiate_response)
print("")
print("Call API - 3 'Answer Transaction Request Challenge. -- V210'")
print("Transaction is done , and the transaction_request is 'COMPLETED' and new Transaction id is created: :")
# we need to answer the challenge
challenge_id = initiate_response['challenge']['id']
transaction_req_id = initiate_response['id']
challenge_response = obp.answerChallengeV210(from_bank_id,
from_account_id,
transaction_req_id,
TRANSACTION_REQUEST_TYPE_COUNTERPARTY,
challenge_id)
obp.printMessageAfterAnswerChallenge(challenge_response)
######################## Step5 - Get Transactions ################
print("")
print("")
print("--------- Check the new transaction records")
print("Call API - 1 'Get Transaction by Id.-- V121'")
newTransactionId = challenge_response["transaction_ids"]
getTransaction_response = obp.getTransaction(from_bank_id, from_account_id, newTransactionId)
obp.printGetTransaction(getTransaction_response, newTransactionId)
print("Call API - 2 'Get Transactions for Account (Full)-- V121'")
getTransactions_response = obp.getTransactions(FROM_BANK_ID, FROM_ACCOUNT_ID)
obp.printGetTransactions(getTransactions_response)
|
# Find integers i, j with i**5 - j**5 == X (j may become negative).
X = int(input())
f = False
i = 0
while i < 3000:
    if f:
        break
    j = i - 1
    # The difference i**5 - j**5 grows as j decreases, so keep scanning while it
    # has not yet exceeded X; ">=" (not ">") so an exact match is still tested.
    while X >= (i**5 - j**5):
        if X == (i**5 - j**5):
            print(i, j)
            f = True
            break
        j -= 1
    i += 1
|
import json
from dojo.models import Finding
class GitlabDepScanParser(object):
def get_scan_types(self):
return ["GitLab Dependency Scanning Report"]
def get_label_for_scan_types(self, scan_type):
return scan_type # no custom label for now
def get_description_for_scan_types(self, scan_type):
return "Import GitLab SAST Report vulnerabilities in JSON format."
def get_findings(self, json_output, test):
if json_output is None:
return
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree, test)
def parse_json(self, json_output):
try:
data = json_output.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
except:
raise Exception("Invalid format")
return tree
def get_items(self, tree, test):
items = {}
for node in tree['vulnerabilities']:
item = get_item(node, test)
if item:
items[item.unique_id_from_tool] = item
return list(items.values())
def get_item(vuln, test):
if vuln['category'] != 'dependency_scanning':
# For Dependency Scanning reports, value must always be "dependency_scanning"
return None
unique_id_from_tool = None
if 'id' in vuln:
unique_id_from_tool = vuln['id']
else:
# If the new unique id is not provided, fall back to deprecated "cve" fingerprint (old version)
unique_id_from_tool = vuln['cve']
title = ''
if 'name' in vuln:
title = vuln['name']
elif 'message' in vuln:
title = vuln['message']
elif 'description' in vuln:
title = vuln['description']
else:
# All other fields are optional, if none of them has a value, fall back on the unique id
title = unique_id_from_tool
description = 'Scanner: {}\n'.format(vuln['scanner']['name'])
if 'message' in vuln:
description += '{}\n'.format(vuln['message'])
if 'description' in vuln:
description += '{}\n'.format(vuln['description'])
location = vuln['location']
file_path = location['file'] if 'file' in location else None
component_name = None
component_version = None
if 'dependency' in location:
component_version = location['dependency']['version'] if 'version' in location['dependency'] else None
if 'package' in location['dependency']:
component_name = location['dependency']['package']['name'] if 'name' in location['dependency']['package'] else None
severity = vuln['severity']
if severity == 'Undefined' or severity == 'Unknown':
# Severity can be "Undefined" or "Unknown" in report
# In that case we set it as Info and specify the initial severity in the title
title = '[{} severity] {}'.format(severity, title)
severity = 'Info'
# Dependency Scanning analyzers doesn't provide confidence property
# See https://docs.gitlab.com/ee/user/application_security/dependency_scanning/analyzers.html#analyzers-data
scanner_confidence = False
mitigation = ''
if 'solution' in vuln:
mitigation = vuln['solution']
cwe = None
vulnerability_id = None
references = ''
if 'identifiers' in vuln:
for identifier in vuln['identifiers']:
if identifier['type'].lower() == 'cwe':
cwe = identifier['value']
elif identifier['type'].lower() == 'cve':
vulnerability_id = identifier['value']
else:
references += 'Identifier type: {}\n'.format(identifier['type'])
references += 'Name: {}\n'.format(identifier['name'])
references += 'Value: {}\n'.format(identifier['value'])
if 'url' in identifier:
references += 'URL: {}\n'.format(identifier['url'])
references += '\n'
finding = Finding(title=vulnerability_id + ": " + title if vulnerability_id else title,
test=test,
description=description,
severity=severity,
scanner_confidence=scanner_confidence,
mitigation=mitigation,
unique_id_from_tool=unique_id_from_tool,
references=references,
file_path=file_path,
component_name=component_name,
component_version=component_version,
cwe=cwe,
static_finding=True,
dynamic_finding=False)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
return finding
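# Minimal illustrative input (an assumption based on the fields read above, not
# an official GitLab sample): one report entry that this parser would turn into
# a single Finding, with component name/version taken from location.dependency.
#
# {
#   "vulnerabilities": [{
#     "id": "b9f6...",
#     "category": "dependency_scanning",
#     "name": "Regular Expression Denial of Service",
#     "message": "ReDoS in package foo",
#     "severity": "High",
#     "solution": "Upgrade foo to 1.2.3",
#     "scanner": {"name": "gemnasium"},
#     "location": {"file": "package-lock.json",
#                  "dependency": {"package": {"name": "foo"}, "version": "1.0.0"}},
#     "identifiers": [{"type": "cve", "name": "CVE-2020-0001", "value": "CVE-2020-0001"}]
#   }]
# }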
|
import logging
from yapsy.IPlugin import IPlugin
from modules.common.Downloads import Downloads
logger = logging.getLogger(__name__)
"""
"""
class GO(IPlugin):
def __init__(self):
self._logger = logging.getLogger(__name__)
def process(self, conf, output, cmd_conf):
self._logger.info("GO step")
Downloads(output.prod_dir).exec(conf)
|
# -*- coding: utf-8 -*-
import redis
import re
'''
Usage:
moon.py -u redis http://127.0.0.1:6379
Check for the Redis unauthenticated access vulnerability.
'''
def attack(URL):
    print('[+] Start check - Redis unauthenticated access. [+]')
    #print(re.findall('//(.*?):',URL)[0])  # extract the IP
    #print(re.findall(':(\w*?)$',URL)[0])  # extract the port
    try:
        r = redis.StrictRedis(host=re.findall('//(.*?):',URL)[0], port=re.findall(':(\w*?)$',URL)[0], db=0)
        print('Connection succeeded. Client list: '+str(r.client_list()))
    except IndexError:
        try:
            r = redis.StrictRedis(host=re.findall('(.*?):', URL)[0], port=re.findall(':(\w*?)$', URL)[0], db=0)
            print('Connection succeeded. Client list: ' + str(r.client_list()))
        except redis.exceptions.ResponseError:
            print('[-] Access restricted: NOAUTH Authentication required')
    except redis.exceptions.ConnectionError:
        print('Connection failed.')
    print('[+] Check finished - Redis unauthenticated access. [+]')
if __name__ == "__main__":
    import sys
    # allow standalone testing; the target URL is normally passed in by the calling framework
    attack(sys.argv[1] if len(sys.argv) > 1 else 'http://127.0.0.1:6379')
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the scaffold contract definition."""
from typing import Any, Dict, Optional
from aea.contracts.base import Contract
from aea.crypto.base import LedgerApi
class HegicBTCOptions(Contract):
"""The scaffold contract class for a smart contract."""
@classmethod
def create_option(
cls,
ledger_api: LedgerApi,
contract_address: str,
deployer_address: str,
amount: int,
period: int,
strike: int,
type: int,
data: Optional[bytes] = b"",
gas: int = 300000,
) -> Dict[str, Any]:
"""
Get the transaction to create a single token.
:param ledger_api: the ledger API
:param contract_address: the address of the contract
:param deployer_address: the address of the deployer
:param token_id: the token id for creation
:param data: the data to include in the transaction
:param gas: the gas to be used
:return: the transaction object
"""
# create the transaction dict
nonce = ledger_api.api.eth.getTransactionCount(deployer_address)
instance = cls.get_instance(ledger_api, contract_address)
fee_estimate = instance.functions.fees(period, amount, strike, type).call()
tx = instance.functions.create(period, amount, strike, type).buildTransaction(
{"from": deployer_address, "value": fee_estimate[1], "nonce": nonce}
)
tx = cls._try_estimate_gas(ledger_api, tx)
return tx
@classmethod
def estimate(
cls,
ledger_api: LedgerApi,
contract_address: str,
deployer_address: str,
amount: int,
period: int,
strike: int,
type: int,
data: Optional[bytes] = b"",
gas: int = 300000,
) -> Dict[str, Any]:
"""
Get the transaction to create a single token.
:param ledger_api: the ledger API
:param contract_address: the address of the contract
:param deployer_address: the address of the deployer
:param token_id: the token id for creation
:param data: the data to include in the transaction
:param gas: the gas to be used
:return: the transaction object
"""
# create the transaction dict
instance = cls.get_instance(ledger_api, contract_address)
fee_estimate = instance.functions.fees(period, amount, strike, type).call()
option_id = instance.functions.create(period, amount, strike, type).call()
return {"option_id": option_id, "fee_estimate": fee_estimate}
@classmethod
def get_pool(
cls,
ledger_api: LedgerApi,
contract_address: str,
deployer_address: str,
data: Optional[bytes] = b"",
gas: int = 300000,
) -> Dict[str, Any]:
"""
Get the transaction to create a batch of tokens.
:param ledger_api: the ledger API
:param deployer_address: the address of the deployer
:param args: the price
:param gas: the gas to be used
:return: the transaction object
"""
# create the transaction dict
instance = cls.get_instance(ledger_api, contract_address)
tx = instance.functions.pool().call()
return tx
@classmethod
def get_deploy_transaction(
cls,
ledger_api: LedgerApi,
deployer_address: str,
args: list,
gas: int = 60000000,
) -> Dict[str, Any]:
"""
Get the transaction to create a batch of tokens.
:param ledger_api: the ledger API
:param deployer_address: the address of the deployer
:param args: the price
:param gas: the gas to be used
:return: the transaction object
"""
contract_interface = cls.contract_interface.get(ledger_api.identifier, {})
nonce = ledger_api.api.eth.getTransactionCount(deployer_address)
instance = ledger_api.get_contract_instance(contract_interface)
constructed = instance.constructor(*args)
data = constructed.buildTransaction()["data"]
tx = {
"from": deployer_address, # only 'from' address, don't insert 'to' address!
"value": 0, # transfer as part of deployment
"gas": gas,
"gasPrice": gas, # TODO: refine
"nonce": nonce,
"data": data,
}
tx = cls._try_estimate_gas(ledger_api, tx)
return tx
@classmethod
def exercise(
cls,
ledger_api: LedgerApi,
contract_address: str,
deployer_address: str,
option_id: int,
data: Optional[bytes] = b"",
gas: int = 300000,
) -> Dict[str, Any]:
"""
Get the transaction to create a single token.
:param ledger_api: the ledger API
:param contract_address: the address of the contract
:param deployer_address: the address of the deployer
:param token_id: the token id for creation
:param data: the data to include in the transaction
:param gas: the gas to be used
:return: the transaction object
"""
# create the transaction dict
nonce = ledger_api.api.eth.getTransactionCount(deployer_address)
instance = cls.get_instance(ledger_api, contract_address)
tx = instance.functions.exercise(option_id).buildTransaction(
{
"from": deployer_address,
"gas": gas,
"gasPrice": ledger_api.api.toWei("50", "gwei"),
"nonce": nonce,
"value": 0,
}
)
tx = cls._try_estimate_gas(ledger_api, tx)
return tx
@classmethod
def get_raw_transaction(
cls, ledger_api: LedgerApi, contract_address: str, **kwargs
) -> Dict[str, Any]:
"""
Handler method for the 'GET_RAW_TRANSACTION' requests.
Implement this method in the sub class if you want
to handle the contract requests manually.
:param ledger_api: the ledger apis.
:param contract_address: the contract address.
:return: the tx
"""
raise NotImplementedError
@classmethod
def get_raw_message(
cls, ledger_api: LedgerApi, contract_address: str, **kwargs
) -> Dict[str, Any]:
"""
Handler method for the 'GET_RAW_MESSAGE' requests.
Implement this method in the sub class if you want
to handle the contract requests manually.
:param ledger_api: the ledger apis.
:param contract_address: the contract address.
:return: the tx
"""
raise NotImplementedError
@classmethod
def get_state(
cls, ledger_api: LedgerApi, contract_address: str, **kwargs
) -> Dict[str, Any]:
"""
Handler method for the 'GET_STATE' requests.
Implement this method in the sub class if you want
to handle the contract requests manually.
:param ledger_api: the ledger apis.
:param contract_address: the contract address.
:return: the tx
"""
raise NotImplementedError
@staticmethod
def _try_estimate_gas(ledger_api: LedgerApi, tx: Dict[str, Any]) -> Dict[str, Any]:
"""
Attempts to update the transaction with a gas estimate.
:param ledger_api: the ledger API
:param tx: the transaction
:return: the transaction (potentially updated)
"""
try:
# try estimate the gas and update the transaction dict
gas_estimate = ledger_api.api.eth.estimateGas(transaction=tx)
tx["gas"] = gas_estimate
except Exception as e: # pylint: disable=broad-except
raise e
return tx
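# Hypothetical usage sketch (all names and values below are placeholders, not
# part of this package): estimating the fee for an option and then building the
# unsigned transaction that would purchase it.
#
#   quote = HegicBTCOptions.estimate(ledger_api, contract_address, buyer_address,
#                                    amount=10**8, period=2 * 24 * 3600,
#                                    strike=50000 * 10**8, type=1)
#   tx = HegicBTCOptions.create_option(ledger_api, contract_address, buyer_address,
#                                      amount=10**8, period=2 * 24 * 3600,
#                                      strike=50000 * 10**8, type=1)
#   # `tx` is an unsigned transaction dict; signing and sending it is left to the
#   # surrounding agent/ledger machinery.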
|
from abc import *
from pandas import DataFrame
from Common.Measures.Time.TimeSpan import TimeSpan
class AbstractPlotter(ABC):
_data_frame: DataFrame
_src: str
_col: str
_legend_place: str
_ticker: str
_time_span: TimeSpan
|
#!/usr/bin/python
from sys import argv, stdout
from os.path import basename
from struct import pack, unpack
def clamp( x, low, high ):
if x < low:
x = low
if x > high:
x = high
return x
class BitStream:
def __init__( self ):
self.bits=[]
def add_int( self, x, bits ):
for n in range( bits ):
bit = ( x >> n ) & 0x1
self.bits += [bit]
def pad( self, pad_size ):
n = len( self.bits ) % pad_size
if n != 0:
self.add_int( 0, pad_size-n )
def get_byte_string( self ):
s=""
byte=0
shift=7
self.pad( 8 )
for bit in self.bits:
byte |= ( bit << shift )
shift -= 1
if shift < 0:
s += pack( "B", byte )
byte=0
shift=7
return s
def get_formatted_byte_string( self ):
s=""
for byte in self.get_byte_string():
s+=hex( unpack("B",byte)[0] ) + ","
s=s[:-1] # Remove the last comma
return s
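# Small illustrative example (not part of the original tool): packing two signed
# 5-bit values into the stream; bits are stored LSB-first per value and emitted
# MSB-first per byte, so the call below yields "0xbf,0x0" after padding.
#   bs = BitStream()
#   bs.add_int(-3, 5)
#   bs.add_int(7, 5)
#   bs.get_formatted_byte_string()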
class Mesh:
def __init__( self, obj_file=None ):
self.dim = 0 # Number of dimensions (2 or 3)
# Multiply normalized vertex coords with this scale to get the actual coordinates:
self.scale = [1.0,1.0,1.0]
self.verts = []
self.faces = []
if ( obj_file is not None ):
self.parse_wavefront_obj( obj_file )
def add_vertex( self, vert ):
self.verts += [vert]
def add_quad( self, quad ):
self.faces += [quad]
def get_bounds( self ):
""" Returns the bounding box """
min_co=[10000.0,10000.0,10000.0]
max_co=[-10000.0,-10000.0,-10000.0]
for vert in self.verts:
for c in range(3):
if vert[c] < min_co[c]:
min_co[c] = vert[c]
if vert[c] > max_co[c]:
max_co[c] = vert[c]
return [[min_co[n],max_co[n]] for n in range(3)]
#return [min_co, max_co]
def parse_wavefront_obj( self, file ):
""" Loads the mesh from a OBJ file """
self.verts=[]
self.faces=[]
do_close=False
if ( type(file) == str ):
# The file argument was a filename instead of a file
# Need to open/close the file
file=open( file, "r" )
do_close=True
for line in file.readlines():
if ( line[0] == 'v' ):
self.add_vertex( [float(s) for s in line.split()[1:]] )
elif ( line[0] == 'f' ):
                # OBJ indices are 1-based, so convert them to 0-based here.
self.add_quad( [(int(s)-1) for s in line.split() if s.isdigit()] )
# Discard comments, materials, etc..
if ( do_close ):
file.close()
# Compute mesh scale
bounds=self.get_bounds()
self.scale=[max(abs(co[0]), abs(co[1])) for co in bounds]
# Normalize the vertices to range [-1,1]
for v in self.verts:
for n in range(3):
scale=self.scale[n]
if ( scale < 0.00001 ):
v[n] = 0
else:
v[n] /= scale
# Check if the mesh is 2D or not
if self.scale[2] < 0.00001:
self.dim = 2
else:
self.dim = 3
def get_vertex_array_string( self, prefix="" ):
s="const signed char %s_verts[%d*%d] = {" % ( prefix, len( self.verts ), self.dim )
for vert in self.verts:
for b in [ int( vert[n] * 127.5 - 1.0 ) for n in range( self.dim ) ]:
s += "%d," % clamp( b, -128, 127 )
s = s[:-1] + "};"
return s
def get_index_array_string( self, prefix="" ):
s="const unsigned char %s_indices[%d] = {" % ( prefix, len(self.faces) * 4 )
for quad in self.faces:
for b in quad:
s += "%d," % b
s = s[:-1] + "};"
return s
def dump_text( self, f, prefix ):
f.write( "/* scale: %g, %g, %g */\n" %( self.scale[0], self.scale[1], self.scale[2] ))
f.write( self.get_vertex_array_string( prefix ) + "\n" )
f.write( self.get_index_array_string( prefix ) + "\n" )
def write_obj( self, f ):
for v in self.verts:
f.write( "v %g %g %g\n" % (v[0],v[1],v[2]) )
for q in self.faces:
f.write( "f %d %d %d %d\n" % (q[0]+1,q[1]+1,q[2]+1,q[3]+1) )
def get_vertex_array_string_5bit_s( self ):
bs=BitStream()
for vert in self.verts:
for n in range( self.dim ):
x=int( vert[n] * 15.5 - 1.0 )
x=clamp( x, -16, 15 )
bs.add_int( x, 5 )
return bs.get_formatted_byte_string()
def get_index_array_string_5bit_u( self ):
bs=BitStream()
for face in self.faces:
for index in face:
x=( index & 0b11111 )
bs.add_int( x, 5 )
return bs.get_formatted_byte_string()
def dump_text_5bit( self, f, prefix ):
f.write( "/* %s\n" )
f.write( " scale: %g, %g, %g\n" %( self.scale[0], self.scale[1], self.scale[2] ))
f.write( " verts=%d (%d-D); indices=%d */\n" %( len(self.verts), self.dim, len(self.faces)*4 ))
f.write( "/* %s 5-bit verts */ %s\n" % (prefix, self.get_vertex_array_string_5bit_s()) )
f.write( "/* %s 5-bit indices */ %s\n" % (prefix, self.get_index_array_string_5bit_u()) )
def test_dump( me, filename ):
print "Test dump ->", filename
file=open(filename,"w")
me.write_obj( file )
file.close()
def main():
if len(argv) < 2:
print "Usage:", argv[0], "[FILENAME(s)]"
print " Formats data from wavefront OBJ file(s) to C-like arrays"
else:
for filename in argv[1:]:
print "\n/* filename:", filename, "*/"
me = Mesh()
me.parse_wavefront_obj( filename )
name=basename( filename )
name=name.rsplit('.',1)[0]
me.dump_text( stdout, name )
#me.dump_text_5bit( stdout, name )
#test_dump( me, filename+".test_dump.obj" )
if __name__ == "__main__":
main()
|
import re
mapping_units = {
"0":"",
"zero":"zero",
"1":"um",
"2":"dois",
"3":"três",
"4":"quatro",
"5":"cinco",
"6":"seis",
"7":"sete",
"8":"oito",
"9":"nove"
}
mapping_dozens = {
"0":"",
"10":"dez",
"11":"onze",
"12":"doze",
"13":"treze",
"14":"catorze",
"15":"quinze",
"16":"dezasseis",
"17":"dezassete",
"18":"dezoito",
"19":"dezanove",
"2":"vinte",
"3":"trinta",
"4":"quarenta",
"5":"cinquenta",
"6":"sessenta",
"7":"setenta",
"8":"oitenta",
"9":"noventa"
}
mapping_hundreds = {
"0":"",
"100":"cem",
"1":"cento",
"2":"duzentos",
"3":"trezentos",
"4":"quatrocentos",
"5":"quinhentos",
"6":"seiscentos",
"7":"setecentos",
"8":"oitocentos",
"9":"novecentos"
}
mapping_ord = {
0: "",
1: "mil",
2: "milhões",
3: "mil milhões",
4: "bilhões",
5: "triliões"
}
mapping_ord_um = {
0: "um",
1: "mil",
2: "um milhão",
3: "mil milhões",
4: "um bilhão",
5: "um trilião"
}
mapping_ord_rev = {
"cem":"00",
"mil":"000",
"milhão":"000000",
"milhões":"000000",
"bilhão":"000000000000",
"bilhões":"000000000000",
"trilião":"000000000000000",
"triliões":"000000000000000"
}
def toString(text):
    # string to return
    out = ""
    if text[0]==",": # if it starts with a comma, add it to the final result right away
        out += " vírgula "
    # remove spaces and commas
    text = [t for t in text if t!=" " and t!=","]
    # length of the received number
    sizeT = size = len(text)
    # if it is just 0
    if sizeT==1 and text[0]=="0":
        out += mapping_units["zero"];
    else:
        # the number is traversed from the most significant digit to the least significant;
        # to know the magnitude, the counter has to go from the total length of the number down to 0,
        # so that the position of each digit is known.
        # The remainder gives the order within hundreds (0), tens (2) and units (1);
        # the integer division gives how many groups of 3 digits there are, i.e. hundreds, tens, units.
        while size>0:
            rest = size % 3 # compute the order
            if rest == 1: # units position
                div = size // 3
                if sizeT-size-1>=0:
                    if text[sizeT-size-1] != "1": # if the preceding digit is not 1, so it does not clash with the special tens case
                        out += mapping_units[text[sizeT-size]]
                        if div!=0 and ((sizeT-size-2>=0 and text[sizeT-size-2]!="0") or text[sizeT-size-1]!="0" or text[sizeT-size]!="0"):
                            out += " " + mapping_ord[div]
                    else:
                        if div != 0:
                            out += " "
                        out += mapping_ord[div]
                else: # otherwise there is no preceding digit, this is the first one
                    if text[sizeT-size] != "1": # if the digit is not 1
                        out += mapping_units[text[sizeT-size]]
                        if div != 0:
                            out += " "
                            out += mapping_ord[div]
                    else:
                        out += mapping_ord_um[div]
                if sizeT-size+1 < sizeT and text[sizeT-size+1]!="0": # if there is a following digit/comma/space and it is not 0
                    out += " e "
            elif rest == 2: # tens position
                if text[sizeT-size] == "1": # if the tens digit is 1, the spelling is special
                    out += mapping_dozens[text[sizeT-size]+text[sizeT-size+1]]
                else: # otherwise
                    out += mapping_dozens[text[sizeT-size]]
                    if text[sizeT-size+1]!="0": # if the units digit is not 0
                        out += " e "
            else: # rest == 0, hundreds position
                if text[sizeT-size]+text[sizeT-size+1]+text[sizeT-size+2]=="100":
                    out += mapping_hundreds["100"]
                else:
                    out += mapping_hundreds[text[sizeT-size]]
                    if text[sizeT-size+1]!="0": # if the tens digit is not 0
                        out += " e "
            size -= 1
    return out
def toNumber(text):
    # value to return with the number
    out = ""
    # split the string on spaces
    words = text.split()
    # remove "e" and reverse the list of words
    words = [word for word in words if word!="e"][::-1]
    # find out whether there is a comma ("vírgula")
    haveComma = False
    for word in words:
        if word=="vírgula":
            haveComma = True
    # convert each word to digits; the values are added in reverse order
    # since the number itself is being traversed in reverse order
    i=0 # general counter
    num=0 # digit counter, reset when the comma appears
    for word in words:
        # if it is a comma, emit one
        if word=="vírgula":
            out = "," + out
            num = 0
        # otherwise it is not a comma
        else:
            # flag to know whether the word has been matched
            found = False
            # look it up in the hundreds and add it to out
            for item in list(mapping_hundreds.items())[1:]:
                if item[1]==word:
                    # if there is no digit after it
                    if num % 3 == 0 and len(item[0])==1:
                        out = item[0] + "00" + out
                        num += 3
                    # if there is only one digit after it
                    elif num % 3 == 1 and len(item[0])==1:
                        out = item[0] + "0" + out
                        num += 2
                    # if there are two digits after it
                    else:
                        out = item[0] + out
                        num += len(item[0])
                    found = True
                    break
            # if it was not in the hundreds
            if not found:
                # look it up in the tens
                for item in list(mapping_dozens.items())[1:]:
                    if item[1]==word:
                        # if there is no digit after it
                        if num % 3 == 0 and len(item[0])==1:
                            out = item[0] + "0" + out
                            num += 2
                        # if there is a digit after it
                        else:
                            out = item[0] + out
                            num += len(item[0])
                        found = True
                        break
                # if it was not in the tens
                if not found:
                    # look it up in the units
                    for item in list(mapping_units.items())[2:]:
                        if item[1]==word:
                            out = item[0] + out
                            found = True
                            num += 1
                            break
                    # if it was not in the units either
                    if not found:
                        # if "zero", emit a zero
                        if word == "zero":
                            out = "0" + out
                        else:
                            # check whether the number has no comma
                            if not haveComma:
                                # if at the end of the number, emit the zeros for its magnitude
                                if i==0 or (i==1 and words[0]=="milhões"):
                                    out = mapping_ord_rev[word] + out
                                else:
                                    # emit the required number of zeros
                                    out = "0"*(len(mapping_ord_rev[word])-num) + out
                                    num += len(mapping_ord_rev[word])-num
                                # if it is the last word, prepend a 1
                                if len(words) == i+1:
                                    out = "1" + out
                            # if there is a comma, do not convert the magnitude words to digits
                            else:
                                out = " " + word + out
        i += 1
    return out
def number_to_text(text):
    # add a space between numbers and special symbols
    text = re.sub(r"([0-9]+)([€%$])",r"\1 \2",text)
    # match the decimal part of the number
    text = re.sub(r" ?,([0-9 ]+)?[0-9]",lambda x: toString(x[0]),text)
    # match the integer part, which must start and end with a digit and may contain spaces in between
    text = re.sub(r"[0-9]([0-9 ]+)?[0-9]",lambda x: toString(x[0]),text)
    # match single-digit numbers not caught by the previous case
    text = re.sub(r"[0-9]",lambda x: toString(x[0]),text)
    return text
def text_to_number(text):
    # words that are valid in a spelled-out number
    validWords = list(mapping_ord.values())[1:][::-1]
    validWords += list(mapping_ord_um.values())[1:][::-1]
    validWords += list(mapping_hundreds.values())[1:]
    validWords += list(mapping_dozens.values())[1:][::-1]
    validWords += list(mapping_units.values())[1:]
    # first word
    validWordsF = "(" + "|".join(validWords) + ")"
    # remaining words
    validWords = "( " + "| ".join(validWords) + "| e(?= )| vírgula)"
    # whenever a number written out in words is found, convert it to digits
    text = re.sub(r""+validWordsF+validWords+"*",lambda x: toNumber(x[0]),text)
    return text
def parse_number(text):
    return text_to_number(number_to_text(text))
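# A minimal demonstration sketch (not part of the original module); it simply
# prints a few round trips so both conversion directions can be inspected.
if __name__ == "__main__":
    for sample in ["123", "2,5", "mil e quinhentos"]:
        print(sample, "->", parse_number(sample))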
|
import os
from iconsdk.builder.transaction_builder import DeployTransactionBuilder
from iconsdk.builder.call_builder import CallBuilder
from iconsdk.icon_service import IconService
from iconsdk.libs.in_memory_zip import gen_deploy_data_content
from iconsdk.providers.http_provider import HTTPProvider
from iconsdk.signed_transaction import SignedTransaction
from tbears.libs.icon_integrate_test import IconIntegrateTestBase, SCORE_INSTALL_ADDRESS
from BattleBombRoyale.tests.utils import *
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
class TestBattleBombRoyale(IconIntegrateTestBase):
TEST_HTTP_ENDPOINT_URI_V3 = "http://127.0.0.1:9000/api/v3"
SCORE_PROJECT= os.path.abspath(os.path.join(DIR_PATH, '..'))
_PARTICIPATION_COST = 1 * 10**18
def setUp(self):
super().setUp()
self.icon_service = None
# if you want to send request to network, uncomment next line and set self.TEST_HTTP_ENDPOINT_URI_V3
# self.icon_service = IconService(HTTPProvider(self.TEST_HTTP_ENDPOINT_URI_V3))
# install SCORE
self._score_address = self._deploy_score()['scoreAddress']
def _deploy_score(self, to: str = SCORE_INSTALL_ADDRESS) -> dict:
# Generates an instance of transaction for deploying SCORE.
transaction = DeployTransactionBuilder() \
.from_(self._test1.get_address()) \
.to(to) \
.step_limit(100_000_000_000) \
.nid(3) \
.nonce(100) \
.content_type("application/zip") \
.content(gen_deploy_data_content(self.SCORE_PROJECT)) \
.build()
# Returns the signed transaction object having a signature
signed_transaction = SignedTransaction(transaction, self._test1)
# process the transaction in local
result = self.process_transaction(signed_transaction, self.icon_service)
self.assertTrue('status' in result)
self.assertEqual(1, result['status'])
self.assertTrue('scoreAddress' in result)
return result
def test_score_update(self):
# update SCORE
result = self._deploy_score(self._score_address)
self.assertEqual(self._score_address, result['scoreAddress'])
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
from webkitpy.common.system import outputcapture
from webkitpy.tool import mocktool
from webkitpy.layout_tests.port import chromium_win
from webkitpy.layout_tests.port import port_testcase
class ChromiumWinTest(port_testcase.PortTestCase):
class RegisterCygwinOption(object):
def __init__(self):
self.register_cygwin = True
self.results_directory = '/'
def setUp(self):
self.orig_platform = sys.platform
def tearDown(self):
sys.platform = self.orig_platform
self._port = None
def port_maker(self, platform):
if platform not in ('cygwin', 'win32'):
return None
return chromium_win.ChromiumWinPort
def _mock_path_from_chromium_base(self, *comps):
return self._port._filesystem.join("/chromium/src", *comps)
def test_default_worker_model(self):
port = self.make_port()
if not port:
return
self.assertEqual(port.default_worker_model(), 'old-threads')
def test_setup_environ_for_server(self):
port = self.make_port()
if not port:
return
port._executive = mocktool.MockExecutive(should_log=True)
self._port = port
port.path_from_chromium_base = self._mock_path_from_chromium_base
output = outputcapture.OutputCapture()
orig_environ = os.environ.copy()
env = output.assert_outputs(self, port.setup_environ_for_server)
self.assertEqual(orig_environ["PATH"], os.environ["PATH"])
self.assertNotEqual(env["PATH"], os.environ["PATH"])
def test_setup_environ_for_server_register_cygwin(self):
port = self.make_port(options=ChromiumWinTest.RegisterCygwinOption())
if not port:
return
port._executive = mocktool.MockExecutive(should_log=True)
port.path_from_chromium_base = self._mock_path_from_chromium_base
self._port = port
setup_mount = self._mock_path_from_chromium_base("third_party",
"cygwin",
"setup_mount.bat")
expected_stderr = "MOCK run_command: %s\n" % [setup_mount]
output = outputcapture.OutputCapture()
output.assert_outputs(self, port.setup_environ_for_server,
expected_stderr=expected_stderr)
def assert_name(self, port_name, windows_version, expected):
port = chromium_win.ChromiumWinPort(port_name=port_name,
windows_version=windows_version)
self.assertEquals(expected, port.name())
def test_versions(self):
port = chromium_win.ChromiumWinPort()
self.assertTrue(port.name() in ('chromium-win-xp', 'chromium-win-vista', 'chromium-win-win7'))
self.assert_name(None, (5, 1), 'chromium-win-xp')
self.assert_name('chromium-win', (5, 1), 'chromium-win-xp')
self.assert_name('chromium-win-xp', (5, 1), 'chromium-win-xp')
self.assert_name('chromium-win-xp', (6, 0), 'chromium-win-xp')
self.assert_name('chromium-win-xp', (6, 1), 'chromium-win-xp')
self.assert_name(None, (6, 0), 'chromium-win-vista')
self.assert_name('chromium-win', (6, 0), 'chromium-win-vista')
self.assert_name('chromium-win-vista', (5, 1), 'chromium-win-vista')
self.assert_name('chromium-win-vista', (6, 0), 'chromium-win-vista')
self.assert_name('chromium-win-vista', (6, 1), 'chromium-win-vista')
self.assert_name(None, (6, 1), 'chromium-win-win7')
self.assert_name('chromium-win', (6, 1), 'chromium-win-win7')
self.assert_name('chromium-win-win7', (5, 1), 'chromium-win-win7')
self.assert_name('chromium-win-win7', (6, 0), 'chromium-win-win7')
self.assert_name('chromium-win-win7', (6, 1), 'chromium-win-win7')
self.assertRaises(KeyError, self.assert_name, None, (4, 0), 'chromium-win-xp')
self.assertRaises(KeyError, self.assert_name, None, (5, 0), 'chromium-win-xp')
self.assertRaises(KeyError, self.assert_name, None, (5, 2), 'chromium-win-xp')
self.assertRaises(KeyError, self.assert_name, None, (7, 1), 'chromium-win-xp')
def test_baseline_path(self):
port = chromium_win.ChromiumWinPort(port_name='chromium-win-xp')
self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win-xp'))
port = chromium_win.ChromiumWinPort(port_name='chromium-win-vista')
self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win-vista'))
port = chromium_win.ChromiumWinPort(port_name='chromium-win-win7')
self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win'))
if __name__ == '__main__':
unittest.main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from loss.Dist import Dist
class GCPLoss(nn.CrossEntropyLoss):
def __init__(self, **options):
super(GCPLoss, self).__init__()
self.weight_pl = options['weight_pl']
self.temp = options['temp']
self.Dist = Dist(num_classes=options['num_classes'], feat_dim=options['feat_dim']) #
def forward(self, x, y, labels=None):
dist = self.Dist(x)
logits = F.softmax(-dist, dim=1)
if labels is None: return logits, 0
loss = F.cross_entropy(-dist / self.temp, labels)
center_batch = self.Dist.centers[labels, :]
loss_r = F.mse_loss(x, center_batch) / 2
loss = loss + self.weight_pl * loss_r
return logits, loss
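# Usage sketch (option values and tensor names below are placeholders for
# illustration, not part of the original module):
#
#     options = {'weight_pl': 0.1, 'temp': 1.0, 'num_classes': 10, 'feat_dim': 128}
#     criterion = GCPLoss(**options)
#     logits, loss = criterion(features, None, labels=targets)  # features: (batch, feat_dim)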
|
import jwt
from fastapi import Security
from fastapi.exceptions import HTTPException
from fastapi.security.api_key import APIKeyHeader
from starlette.status import HTTP_401_UNAUTHORIZED
from settings import config
api_key_header = APIKeyHeader(
scheme_name=config.API_KEY_SCHEME,
name=config.API_KEY_NAME,
description=config.DESCRIPTION_TOKEN,
auto_error=False,
)
def clear_token(token):
"""
    Checks that the token has been passed in the form:
    Token xxxxxx.yyyyyyy.zzzzzz.
    Raises an exception otherwise.
"""
try:
split_token = token.split()
except AttributeError:
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
if len(split_token) == 2:
scheme, credentials = split_token
if scheme == config.API_KEY_SCHEME:
return credentials
raise HTTPException(status_code=HTTP_401_UNAUTHORIZED)
def encode_jwt(email, password):
"""
    Create a token from the given email and password.
"""
token_jwt = jwt.encode({email: password}, config.SECRET, algorithm=config.ALGORITHM)
return token_jwt
def check_token(raw_token: str = Security(api_key_header)):
"""
    Checks the token received from the "Authorization" header.
Returns a token.
"""
return clear_token(raw_token)
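# Minimal wiring sketch (an assumption, not part of this module): a route can
# require the header token by declaring check_token as a dependency, e.g.
#
#     from fastapi import Depends, FastAPI
#
#     app = FastAPI()
#
#     @app.get("/protected")
#     def protected(token: str = Depends(check_token)):
#         # clear_token() has already validated the "<scheme> <credentials>" form
#         return {"token": token}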
|
import unittest
from numpy.testing import assert_array_equal
import numpy as np
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from imbalanceddl.dataset.imbalance_svhn import IMBALANCESVHN
class TestCIFAR10(unittest.TestCase):
def test_svhn10_exp100(self):
train_dataset = IMBALANCESVHN(root='./data',
imb_type="exp",
imb_factor=0.01,
rand_number=0,
split='train',
download=True,
transform=None)
true_cls_num = np.array([10, 1000, 599, 359, 215, 129, 77, 46, 27, 16])
gen_cls_num = train_dataset.get_cls_num_list()
assert_array_equal(gen_cls_num, true_cls_num)
def test_svhn10_step100(self):
train_dataset = IMBALANCESVHN(root='./data',
imb_type="step",
imb_factor=0.01,
rand_number=0,
split='train',
download=True,
transform=None)
true_cls_num = np.array(
[10, 1000, 1000, 1000, 1000, 1000, 10, 10, 10, 10])
gen_cls_num = train_dataset.get_cls_num_list()
assert_array_equal(gen_cls_num, true_cls_num)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, re, json
from xml.etree import ElementTree as ET
import common as beerlib
curl_ua = 'curl/7.54.1'
# first we need the post ID
html = beerlib.download_html('https://m.facebook.com/page_content_list_view/more/?page_id=1871132519814729&start_cursor=10000&num_to_fetch=10&surface_type=timeline', curl_ua)
if not html:
exit(-1)
reg = re.compile('(<p>.*?</p>)')
# find post ids
ids = re.findall('top_level_post_id":"([0-9]+)', html)
# Look at all articles until some beers are found
for content_id in ids:
post_url = "https://m.facebook.com/story.php?story_fbid=%s&id=%s" % (content_id, '1871132519814729')
# print(post_url)
# Okay, let's get the post
post_html = beerlib.download_html(post_url, curl_ua)
if not post_html:
continue
paragraphs = reg.findall(post_html)
# Hope that some paragraph of post contains beers
for p in paragraphs:
beers = ET.XML(p)
# Nothing? Give up
if not beers:
continue
beers = list(beers.itertext())
# Hope that the beer list format is the same
headers = ['Pivo', 'Alk.', 'Pivovar', 'Typ']
output = []
for line in beers:
# Black Label #4 8,1% (Raven, Wild Ale)
m = re.match(' *(.+?)(?: -)? +([0-9,\.]+%) +\(([^,]+), ?([^\)]+)\)?', line)
if not m:
# Zlaté Prasátko 6,5%
m = re.match(' *(.+?)(?: -)? +([0-9,\.]+%)()()', line)
if m:
output = output + [list(m.groups())]
if output:
beerlib.parser_output(output, headers, 'Craftbeer bottle shop & bar', sys.argv)
exit(0)
# nothing was found
exit(1)
|
import numpy as np
from pqdict import pqdict
def mins(df):
'''
Function for exploring a bidimensional PES in order to find minima in the surface.
'''
lims = list(df.shape) #shape is a tuple containing the (y,x) shape
mins = dict()
for x in range(lims[0]):
for y in range(lims[1]):
is_min = []
            if str(df.iloc[x][y]) != 'nan': # this avoids failures in incomplete PESs
try :
is_min.append(df.iloc[x][y] < df.iloc[x-1][y-1])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x][y-1])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x+1][y-1])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x-1][y])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x+1][y])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x-1][y+1])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x][y+1])
except (ValueError, IndexError):
pass
try :
is_min.append(df.iloc[x][y] < df.iloc[x+1][y+1])
except (ValueError, IndexError):
pass
if False not in is_min:
mins[(x,y)] = round(df.iloc[x][y], 2)
return mins
def path(pes, starting_point, ending_point):
'''
    Dijkstra's algorithm applied to the search for lowest-energy paths between two pre-selected points in a 2D PES.
It is an adaptation of the BBSysDyn version uploaded to https://math.stackexchange.com/questions/3088292/finding-lowest-elevation-path-between-two-points
'''
def get_neighbor_idx(x,y,dims):
res = []
for i in ([0,-1,1]):
for j in ([0,-1,1]):
if i==0 and j==0: continue
if x+i<(dims[0]) and x+i>-1 and y+j<(dims[1]) and y+j>-1:
res.append((x+i,y+j))
return res
D = {}
P = {}
Q = pqdict()
Q[starting_point] = 0
while len(Q)>0:
(v,vv) = Q.popitem()
D[v] = vv
neighs = get_neighbor_idx(v[0],v[1],pes.shape)
for w in neighs:
vwLength = D[v] + np.abs(pes[v[0],v[1]] - pes[w[0],w[1]])
if w in D:
if vwLength < D[v]:
raise ValueError
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
path = []
while 1:
path.append(ending_point)
if ending_point == starting_point: break
ending_point = P[ending_point]
path.reverse()
return path
def stationary_points(path_energies):
'''
    Function for outputting a dict with all the stationary points found in a given path.
    The dict contains each point as a key and, as its value, a list with the type of stationary point and its energy.
'''
stationary_points = dict()
TSs = 0; MINs = 0
for i in range(len(path_energies)):
if i not in (0, len(path_energies) -1):
if list(path_energies.values())[i] < list(path_energies.values())[i-1] and list(path_energies.values())[i] < list(path_energies.values())[i+1]:
stationary_points[list(path_energies.keys())[i]] = ['min', list(path_energies.values())[i]]
MINs += 1
if list(path_energies.values())[i] > list(path_energies.values())[i-1] and list(path_energies.values())[i] > list(path_energies.values())[i+1]:
stationary_points[list(path_energies.keys())[i]] = ['TS', list(path_energies.values())[i]]
TSs += 1
elif i == 0:
if list(path_energies.values())[i] < list(path_energies.values())[i+1]:
stationary_points[list(path_energies.keys())[i]] = ['min', list(path_energies.values())[i]]
MINs += 1
elif i == (len(path_energies) -1):
if list(path_energies.values())[i] < list(path_energies.values())[i-1]:
stationary_points[list(path_energies.keys())[i]] = ['min', list(path_energies.values())[i]]
MINs += 1
TS = 0; INT = 0
for i in range(len(stationary_points)):
if i == 0:
stationary_points[list(stationary_points.keys())[i]][0] = 'RC'
elif i != 0 and i != (len(stationary_points) -1):
            if list(stationary_points.values())[i][0] == 'TS':
                TS += 1
                if TSs > 1:
                    stationary_points[list(stationary_points.keys())[i]][0] = 'TS%s' % TS
                else:
                    stationary_points[list(stationary_points.keys())[i]][0] = 'TS'
if list(stationary_points.values())[i][0] == 'min':
if MINs > 2:
stationary_points[list(stationary_points.keys())[i]][0] = 'INT%s' % INT
INT +=1
elif MINs == 2:
stationary_points[list(stationary_points.keys())[i]][0] = 'PD'
elif i == (len(stationary_points) -1):
stationary_points[list(stationary_points.keys())[i]][0] = 'PD'
return stationary_points
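# Minimal end-to-end sketch (illustrative assumptions, not part of the original
# module): find the minima of a PES stored in a pandas DataFrame, trace the
# lowest-energy route between two of them with path(), and classify the
# stationary points along that route.
#
#     import pandas as pd
#     pes_df = pd.read_csv("pes.csv", index_col=0)      # hypothetical PES grid
#     minima = mins(pes_df)                             # {(x, y): energy}
#     start, end = list(minima.keys())[0], list(minima.keys())[-1]
#     route = path(pes_df.values, start, end)           # path() expects array-style indexing
#     energies = {pt: pes_df.iloc[pt[0]][pt[1]] for pt in route}
#     print(stationary_points(energies))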
|
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from djangobmf.categories import SALES
from djangobmf.currencies import BaseCurrency
from djangobmf.models import BMFModel
from djangobmf.settings import CONTRIB_ACCOUNT
from djangobmf.settings import CONTRIB_TAX
from djangobmf.settings import CONTRIB_PRODUCT
from djangobmf.fields import CurrencyField
from djangobmf.fields import MoneyField
from djangobmf.contrib.accounting.models import ACCOUNTING_INCOME, ACCOUNTING_EXPENSE
from decimal import Decimal
PRODUCT_SERVICE = 1
PRODUCT_CONSUMABLE = 2
PRODUCT_STOCKABLE = 3
PRODUCT_TYPES = (
(PRODUCT_SERVICE, _("Service")),
# (PRODUCT_CONSUMABLE,_("Consumable")),
# (PRODUCT_STOCKABLE,_("Stockable")),
)
PRODUCT_NO_BATCH = 1
PRODUCT_NO_SERIAL = 2
PRODUCT_NO = (
(PRODUCT_NO_BATCH, _("Has batch number")),
(PRODUCT_NO_SERIAL, _("Has serial number")),
)
# =============================================================================
@python_2_unicode_compatible
class AbstractProduct(BMFModel):
"""
"""
name = models.CharField(
_("Name"),
max_length=255,
null=False,
blank=False,
)
code = models.CharField(
_("Product Code"),
max_length=255,
null=False,
blank=True,
db_index=True,
)
type = models.PositiveSmallIntegerField(
_("Product type"),
null=False,
blank=False,
choices=PRODUCT_TYPES,
default=PRODUCT_SERVICE,
)
can_sold = models.BooleanField(
_("Can be sold"), null=False, blank=True, default=False, db_index=True,
)
can_purchased = models.BooleanField(
_("Can be purchased"), null=False, blank=True, default=False, db_index=True,
)
description = models.TextField(_("Description"), null=False, blank=True)
price_currency = CurrencyField()
price_precision = models.PositiveSmallIntegerField(
default=0, blank=True, null=True, editable=False,
)
price = MoneyField(_("Price"), blank=False)
taxes = models.ManyToManyField(
CONTRIB_TAX,
blank=True,
related_name="product_taxes",
limit_choices_to={'is_active': True},
through='ProductTax',
)
# discount = models.FloatField(_('Max. discount'), default=0.0)
# Accounting
income_account = models.ForeignKey(
CONTRIB_ACCOUNT,
null=False,
blank=False,
related_name="product_income",
limit_choices_to={'type': ACCOUNTING_INCOME, 'read_only': False},
on_delete=models.PROTECT,
)
expense_account = models.ForeignKey(
CONTRIB_ACCOUNT,
null=False,
blank=False,
related_name="product_expense",
limit_choices_to={'type': ACCOUNTING_EXPENSE, 'read_only': False},
on_delete=models.PROTECT,
)
# warehouse
# number = models.PositiveSmallIntegerField( _("Product number"), null=True, blank=True, choices=PRODUCT_NO)
# uos = models.CharField( "UOS", max_length=255, null=False, blank=True, help_text=_("Unit of Service"))
# uom = models.CharField( "UOM", max_length=255, null=False, blank=True, help_text=_("Unit of Measurement"))
# customer_taxes
# supplier_taxes
# image
# category
# warehouse
# description_web
# validation method / FIFO or Running average - first in first out
# aktiv posten
# garantie
# end of live
# netto weight
# UOM weight
# supplier
# cost-center
# pricelist
# inspection
# manufactoring
# online available
# discount
# sale_price
# product_manager
# warranty: months
# description_quotation
# description_suppliers
# customer_lead_time: days
# FIFO - First in First out
# LIFO - Last-in-First-Out
# sku Product SKU required string new_product
# name Product name required
# meta_title Product meta title optional string new product
# meta_description
# price Product price required
# weight Product weight required
# visibility Product visibility. Can have the following values:
# 1 - Not Visible Individually, 2 - Catalog, 3 - Search, 4 - Catalog, Search. required
# description Product description. required
# short_description Product short description. required
# UOM to UOS
# Unit weight (Kg)
# Sales price 0.00
# Sales currency EUR
# Max sales discount (%) 0.00
# Sales tax (%) 0.00
# Description empty
# Categories empty
# Tags empty
class Meta(BMFModel.Meta): # only needed for abstract models
verbose_name = _('Product')
verbose_name_plural = _('Products')
ordering = ['name']
abstract = True
swappable = "BMF_CONTRIB_PRODUCT"
class BMFMeta:
category = SALES
search_fields = ['name', 'code']
def __str__(self):
return self.name
# def calc_default_price(self, project, amount, price):
# return self.get_price(1.0, self.price)
def calc_tax(self, amount, price):
# TODO add currency for calculation of taxes
if not isinstance(amount, Decimal):
amount = Decimal(str(amount))
if isinstance(price, BaseCurrency):
price = price.value
elif not isinstance(price, Decimal):
price = Decimal(str(price))
if price.as_tuple().exponent > -2:
price = price.quantize(Decimal('0.01'))
taxes = self.product_tax.select_related('tax')
tax_inc_sum = Decimal(1)
for tax in taxes:
if tax.included:
tax_inc_sum += tax.tax.get_rate()
# net price of one unit
unit_exact = (price / tax_inc_sum).quantize(price)
used_taxes = []
net = (amount * unit_exact).quantize(Decimal('0.01'))
gross = (amount * unit_exact).quantize(Decimal('0.01'))
for tax in taxes:
tax_value = (net * tax.tax.get_rate()).quantize(Decimal('0.01'))
gross += tax_value
used_taxes.append((tax.tax, tax_value))
return unit_exact, net, gross, used_taxes
class Product(AbstractProduct):
pass
class ProductTax(models.Model):
product = models.ForeignKey(
CONTRIB_PRODUCT,
null=True,
blank=True,
related_name="product_tax",
on_delete=models.CASCADE,
)
tax = models.ForeignKey(
CONTRIB_TAX,
null=True,
blank=True,
related_name="product_tax",
on_delete=models.PROTECT,
)
included = models.BooleanField(_("Is the tax included in the price?"), default=False)
class Meta:
unique_together = ("product", "tax")
|
import os
import pandas as pd
from typing import Any, List, Dict
def extract_data_from(root: str, system: str) -> pd.DataFrame:
tests: List[Dict[str, Any]] = list()
path: str = os.path.join(root, system)
files: List[str] = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
for f in files:
params = f.split('.')
d = dict(
system=params[1],
device=params[2],
version=params[3],
precision=params[4],
matrix_size=f'{params[5]}x{params[6]}',
k=params[7],
)
with open(os.path.join(path, f), 'r') as fin:
content: str = fin.read()
content = content.split('100%')[1].split(' EXEC TIME')[0].strip() #clean non-kernels content
lines: List[str] = content.split('\n')
for line in lines:
name: str = line.split('time =')[0].strip()
                time: float = float(line.split('=')[1].split('(us)')[0].strip())
time = time / 1000000 # us -> s
d['kernel'] = name
d['time'] = time
aux: Dict = d.copy()
tests.append(aux)
return pd.DataFrame(tests)
if __name__ == '__main__':
df: pd.DataFrame = pd.DataFrame()
path: str = os.path.join('.', 'datawarehouse', 'system')
systems: List[str] = [s for s in os.listdir(path) if os.path.isdir(os.path.join(path, s))]
for s in systems:
df = df.append(extract_data_from(path, s))
out: str = os.path.join(path, 'system_times.csv')
df.to_csv(out, index=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# ************* 1 **********
EtabsAPI illustration: 0.0.1
Author: Francisco J.Mucho
Date: 25/03/21
Time: start 10:10am finish 12:00pm
# ************* 1 **********
"""
from collections import OrderedDict
import os, sys
# import EtabsAPIFile
from coreapi.EAPIFile import EtabsAPIcFile as efile
nombre='Etabs API'
# ************* 1 *************
def tipoStructura(op):
"""Elegir un tipo de plantilla/modelo"""
nombrets = "Menu estructura"
cerrar = False
menu_tipostruct = OrderedDict(
[
('1', efile.nuevaHojaEnBanco),
('2', efile.nuevaHojaConGrilla),
('3', efile.nuevaHojaConPlanta),
('4', efile.cancelar),
('5', efile.cerrarAplicacion)
])
# ************* 1.2 *************
while not cerrar:
print("="*len(nombrets))
print(nombrets)
print("="*len(nombrets))
ubicacion = None
for opcion, funcion in menu_tipostruct.items():
ubicacion = funcion.__doc__
print(f"[{opcion}] {funcion.__doc__}")
opts = input(f"[{nombre}\\{nombrets}]> ")
        # exit options
if opts == "4":
bienvenidos()
cerrar = True
cerrar = opts=='5'
        # access options
funcionalidad = menu_tipostruct.get(opts, None)
print(funcionalidad) # test
# if funcionalidad:
# efile.nuevaHojaEnBanco
else:
print("Salir")
return 0
def docs():
'''Ver la documentacion'''
    # still need to add the path properly; the problem is with the editor, not with Python
fileName = APIPath = "."+os.sep+'docs'+os.sep # os.path.pardir
try:
# print(fileName)
docn = open(fileName+"leame.txt",'r')
print(docn.read())
input("continuar (Enter)")
docn.close()
bienvenidos()
    except (IOError, OSError) as e:
        print(f"The file doesn't exist. {e}\n")
cerrar()
def bienvenidos():
menu_bienvenida = OrderedDict(
[
('1', tipoStructura),
('2', docs),
('3', cerrar)
])
priTitulo = f" Bienvenido a <{nombre}!> "
# ************* 1.1 *************
print("="*len(priTitulo))
print(priTitulo)
print("="*len(priTitulo))
print(f"[1] {menu_bienvenida['1'].__doc__}")
print(f"[2] {menu_bienvenida['2'].__doc__}")
print(f"[3] {menu_bienvenida['3'].__doc__}")
opcion = input(f"[{nombre}] (1)> ")
if opcion == "1" or opcion=="":
tipoStructura(opcion)
elif opcion == "2":
print("As elegido primero ver la <documentacion>")
docs()
elif opcion == "3":
print("As elegidos <salir> " '\r\nAs salido de la apliacion...')
sys.exit(-1)
else:
mierror()
return 0
def cerrar():
'''salir de la aplicación'''
print("ctrl+c, para salir o ecriba una letra diferente de 'si'.\n")
opc = input("volver al menu(si) ")
if opc == "" or opc.lower() == "si":
bienvenidos()
sys.exit(-1)
def mierror():
print("La opción que has seleccionado, no esta implementado ...")
opc = input("Volver an menu(si)> ")
if opc == "" or opc.lower() == "si":
bienvenidos()
sys.exit(-1)
|
"""Test suite of unit tests for simple PyBind module"""
import unittest
import numpy as np
# import module created using PyBind
import utils
class UtilsTest(unittest.TestCase):
"""Test utility functions"""
def setUp(self) -> None:
"""Provide some local variables for each test"""
self.i3 = 3
self.i4 = 4
self.i5 = 5
self.f3 = float(3.0)
self.f4 = float(4.0)
self.f5 = float(5.0)
self.d3 = np.float64(3.0)
self.d4 = np.float64(4.0)
self.d5 = np.float64(5.0)
def test_add(self):
"""Expectation: add two numbers using correct template"""
with self.subTest("Testing add() using ints"):
computed = utils.add(self.i3, self.i4)
self.assertIsInstance(computed, int)
self.assertEqual(computed, int(3 + 4))
with self.subTest("Testing add() using floats"):
computed = utils.add(self.f3, self.f4)
self.assertIsInstance(computed, float)
self.assertEqual(computed, self.f3 + self.f4)
def test_subtract(self):
"""Expectation: subtract one number from another using C++ lambda function"""
with self.subTest("Testing subtract() using ints"):
computed = utils.subtract(self.i3, self.i4)
self.assertIsInstance(computed, int)
self.assertEqual(computed, int(3 - 4))
with self.subTest("Testing subtract() using floats"):
computed = utils.subtract(self.f3, self.f4)
self.assertIsInstance(computed, float)
self.assertEqual(computed, self.f3 - self.f4)
def test_divide(self):
"""Expectation: divide one number by another using the correct template"""
with self.subTest("Testing divide() using integers"):
computed = utils.divide(self.i3, self.i4)
self.assertIsInstance(computed, int)
self.assertEqual(computed, int(self.i3 / self.i4))
with self.subTest("Testing divide() using floats"):
computed = utils.divide(self.f3, self.f4)
self.assertIsInstance(computed, float)
self.assertEqual(computed, self.f3 / self.f4)
def test_hyp(self):
"""Expectation: hypotenuse is computed using the correct template"""
with self.subTest(msg="Testing hyp() using ints"):
computed = utils.hyp(self.i3, self.i4)
hyp_int = int(np.power(self.i3 ** 2 + self.i4 ** 2, 0.5))
self.assertIsInstance(computed, int)
self.assertEqual(computed, hyp_int)
with self.subTest(msg="Testing hyp() using floats"):
computed = utils.hyp(self.f3, self.f4)
hyp_float = np.power(self.f3 ** 2 + self.f4 ** 2, 0.5)
self.assertIsInstance(computed, float)
self.assertEqual(computed, hyp_float)
if __name__ == "__main__":
unittest.main(verbosity=3)
|
from modules.Load import *
from modules.SpikeTracker import *
from modules.Plotter import *
# test via manual selection of data source ( explicit run of this file alone )
if __name__ == "__main__":
run = LoadMultiRun()
ad_accel = run["accel"]
ad_omega = run["omega"]
# for automated testing via test_main.py
else:
dataDir = "../../data/2019 06 12/0 degrees/run1/"
ad_accel = [LoadAccelFile(dataDir+"run1.accel.x2.CSV")]
ad_omega = [Load_Omega(dataDir+"run1.omega.pasco.csv")]
ad_accel, ad_omega = SpikeAdjust(ad_accel, ad_omega)
Plot(ad_accel, ad_omega)
|
# -*- coding: utf-8 -*-
'''
List Functions
Created on Sun Jul 07 14:23:47 2013
@author: Will Rhodes
'''
import math,operator
from collections import OrderedDict
import itertools
def convertAllToFloat(lst):
'''
    utility method that converts every item in a list to float without raising exceptions
'''
fltLst = []
if type(lst) == list:
for each in lst:
            each = each.strip()
try:
fltLst.append(float(each))
except:
print("problem with ",each)
return fltLst
else:
print(lst,' is not a list')
return lst
def chunks(l, n):
'''
splits a list, l, into a list of lists with each sublist of size n
'''
newList = []
for i in range(0, len(l), n):
newList.append(l[i:i+n])
return newList
def getNumPerRange(tempList,sampleSize = 50,dSize = None,upTo=False,returnList=False):
'''
gets the number of items in a list within the range 0-> limit in dSize chunks
returns: dictionary with x labels and y count
'''
if dSize is None:
limit = max(tempList)
dSize = limit/sampleSize
else:
limit = sampleSize*dSize
upperBound = dSize
lowerBound = upperBound - dSize
returning = OrderedDict()
while upperBound <= limit:
tag = upperBound#+"-"+str(upperBound-0.001)+"..."
if upTo:
returning[tag] = sum(1 for item in tempList if (item<upperBound))
else:
returning[tag] = sum(1 for item in tempList if ((item<upperBound) and (item>= lowerBound)))
upperBound = upperBound+dSize
lowerBound = lowerBound+dSize
if returnList:
return list(returning.values())
return returning
def partitionByAttributeSize(mylist,attribute,dSize,maxVal=None):
'''
    sorts a list by a dynamic attribute name and partitions it into dSize chunks
'''
try:
getattr(mylist[0], attribute)
except:
print("Object does not have attribute, try again")
return None
dSize = float(dSize)
# sort first
mylist.sort(key=operator.attrgetter(attribute), reverse=False)
returning = []
if maxVal is None:
maxVal = float(max([getattr(each, attribute) for each in mylist]))
maxIndex = int(math.ceil(maxVal/dSize))
print(maxIndex)
for count in range(maxIndex):
returning.append([])
for each in mylist:
indexPlacement = int(math.ceil((float(getattr(each, attribute))/maxVal)*float(maxIndex)))-1
returning[indexPlacement].append(each)
return returning
def getNumPerRange_v2(keys,tempList,upTo=False,returnList=True):
'''
gets the number of items in a list within the range 0-> limit in dSize chunks
returns: dictionary with x labels and y count
'''
returning = OrderedDict()
for i in range(1,len(keys)):
temp = []
for each in tempList:
if upTo:
if each <=keys[i]:
temp.append(each)
else:
if each >= keys[i-1] and each < keys[i]:
temp.append(each)
returning[keys[i]] = len(temp)
if returnList:
return list(returning.values())
else:
return returning
def averageCorroIndices(listoflists):
'''
    Average Corresponding Indices
Get the averages across multiple lists where they have the same index
e.g.: [[3,4],[5,6]] yields [4,5]
assumption: each sublist is of the same size
'''
if type(listoflists[0]) != list:
print("Not a list of lists")
return None
size = len(listoflists)
subListSize = len(listoflists[0])
returning = []
for j in range(subListSize):
temp = 0
for each in listoflists:
temp+=each[j]
returning.append(temp)
returning = [x/size for x in returning]
return returning
def flatten(lst):
'''
takes a list of lists and makes it one list
'''
#print lst
return list(itertools.chain.from_iterable(lst))
def subtractLists(major,minor):
'''
subtracts elements from the minor list
    from the major list and returns the resulting list
'''
subtracted = []
if len(minor)==0:
return major
if len(major) == 0:
return None
if type(minor[0]) == list:
minor = list(itertools.chain.from_iterable(minor)) #FLATTEN THE LIST
for each in major:
if getIndex(minor,each) <0: #each major is NOT in minor
subtracted.append(each)
return subtracted
def first(iterable, default=None):
'''
    returns the first item in a list or iterable
'''
for item in iterable:
return item
return default
def getIndex(mylist,obj):
'''
similar to list.index(item) but without the error, just returns -1
'''
try:
idx = mylist.index(obj)
return idx
except Exception as e:
#print e
return -1
def is_sorted(lst):
'''
tests to see if the list is sorted numerically
'''
it = iter(lst)
try:
prev = next(it)
except StopIteration:
return True
for x in it:
if prev > x:
return False
prev = x
return True
def normalizeList(myList):
'''
normalizes a list of numbers
'''
try:
maxval = max(myList)
return [float(x)/float(maxval) for x in myList]
except:
print("Can't normalize this list")
return []
def distinct(l):
'''
returns a list of distinct items
'''
return list(set(l))
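# Quick illustrative examples (not part of the original module):
#
#     chunks([1, 2, 3, 4, 5], 2)             # -> [[1, 2], [3, 4], [5]]
#     subtractLists([1, 2, 3, 4], [2, 4])    # -> [1, 3]
#     averageCorroIndices([[3, 4], [5, 6]])  # -> [4.0, 5.0]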
|
from importlib import reload #python3 only
import markhov
import numpy as np
import random
a="a"
aaa = ['a','a a','a a a']
ops = {'S':{'NotCL':['mg']}, # from start we have to merge
'NotCL':{'NotCL':['mg','copy'], # this state is the state in which the last "special" operation was *not* Clear. Either we've done none or the last was copy. From here we can do everything including end
'CLEAR_S':['clear'], # go here to clear the buffer
'F':['end'] # go here to end
},
'CLEAR_S':{'CLEAR':['mg']}, # this is where we've just cleared. Buffer is empty so you can only Merge
'CLEAR':{'CLEAR':['mg'], # the last special op was Clear so we can Copy or Merge.
'NotCL':['copy'] # if we Copy, the last special op was Copy so go to NotCL
},
'F':{} #final state
}
ops = {'S':[('NotCL','mg')], # from start we have to merge
'NotCL':[('NotCL','mg'),('NotCL','copy'), # this state is the state in which the last "special" operation was *not* Clear. Either we've done none or the last was copy. From here we can do everything including end
('CLEAR_S','clear'), # go here to clear the buffer
('F','end') # go here to end
],
'CLEAR_S':[('CLEAR','mg')], # this is where we've just cleared. Buffer is empty so you can only Merge
'CLEAR':[('CLEAR','mg'), # the last special op was Clear so we can Copy or Merge.
('NotCL','copy') # if we Copy, the last special op was Copy so go to NotCL
],
'F':[] #final state
}
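# Hypothetical helper (an illustration, not part of the original grammar file):
# random-walk the operation FSA above from the start state 'S' until the final
# state 'F' is reached, returning the sequence of operations taken.
def sample_op_sequence(ops, rng=random):
    state, sequence = 'S', []
    while state != 'F':
        state, op = rng.choice(ops[state])
        sequence.append(op)
    return sequence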
trans = {'a':['a','b'],
'b':['b','a'],
'[':['a','b']
}
# bigrams = {'a':{']':0.25,'a':0.25,'b':0.5},
# 'b':{']':0.25,'b':0.25,'a':0.5},
# '':{'a':0.5,'b':0.5}
# }
def ops_log(ops):
for a in ops:
for b in ops[a]:
for w in ops[a][b]:
ops[a][b][w]=np.log(ops[a][b][w])
return ops
def bis_log(bigrams):
for a in bigrams:
for b in bigrams[a]:
bigrams[a][b]=np.log(bigrams[a][b])
return bigrams
trans_probs = {'a':{'a':0.5,'b':0.5},
'b':{'b':0.5,'a':0.5},
'[':{'a':0.5,'b':0.5}
}
trans_probs=bis_log(trans_probs)
bigrams_ends = {'a':{'a':0.5,'b':0.25,']':0.25},
'b':{'b':0.5,'a':0.25,']':0.25},
'[':{'a':0.5,'b':0.5}
}
bigrams_ends=bis_log(bigrams_ends)
# markhov chain of operations
# when you go to Merge, you also make a move in the bigram FSM
# when you go to copy, you make a copy of the buffer and append it to the string and buffer
# when you go to clear, you clear the buffer
# ops = {'mg':{'mg':0.8,'copy':0.1,'clear':0.1},
# 'copy':{'mg':0.3,'copy':0.2,'clear':0.5},
# 'clear':{'mg':1.}
# }
# (state : state: transition: prob, final)
ops_probs = {'S':{'NotCL':{'mg':1.}}, # from start we have to merge
'NotCL':{'NotCL':{'mg':0.3,'copy':0.1}, # this state is the state in which the last "special" operation was *not* Clear. Either we've done none or the last was copy. From here we can do everything including end
'CLEAR_S':{'clear':0.1}, # go here to clear the buffer
'F':{'end':0.5} # go here to end
},
'CLEAR_S':{'CLEAR':{'mg':1.}}, # this is where we've just cleared. Buffer is empty so you can only Merge
'CLEAR':{'CLEAR':{'mg':0.5}, # the last special op was Clear so we can Copy or Merge.
'NotCL':{'copy':0.5} # if we Copy, the last special op was Copy so go to NotCL
},
'F':{} #final state
}
ops_probs=ops_log(ops_probs)
bi_ops = {'S':{'S':{'mg':0.5},
'F':{'end':0.5}
},
'F':{}
}
bi_ops=ops_log(bi_ops)
s1 = "mg mg end".split(' ')
s2 = "mg copy end".split(' ')
bis1 = "[ a a".split(' ')
bis2= "[ a".split(' ')
s1="mg clear mg copy mg end".split(' ')
s2="mg mg mg mg end".split(' ')
bis1="[ a b a".split(' ')
bis2="[ a b b a".split(' ')
parses = [(s1,bis1),(s2,bis2)]
for lhs in ops_psg:
for (rhs,p) in ops_psg[lhs]:
        print(lhs, rhs, get_c_phi((lhs, rhs), parses, ops_psg, bigrams))
rule=('S',['MG','NotCL'])
mg = ('MG',['mg'])
copy= ('COPY',['copy'])
f=open('../corpus/cath8.txt','r')
corpus = f.readlines()
f.close()
corpus = [line.rstrip('\n') for line in corpus]
|
import django
from . import views
from django.conf.urls import url
from django.contrib.auth import views as auth_views
urlpatterns = [
# post views
# url(r'^login/$', views.user_login, name='login'),
# login / logout urls
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^logout/$', auth_views.logout, name='logout'),
    url(r'^logout-then-login/$', auth_views.logout_then_login, name='logout_then_login'),
    url(r'^$', views.dashboard, name='dashboard'),
    url(r'^password-change/$', auth_views.password_change, name='password_change'),
    url(r'^password-change/done/$', auth_views.password_change_done, name='password_change_done'),
url(r'^register/$', views.register, name='register'),
url(r'^edit/$', views.edit, name='edit'),
]
|
# Blueprints provide a nice API for encapsulating a group of related routes
# and templates. When an application has distinct components, blueprints
# can be used to separate the various moving parts.
from flask import Blueprint
from helpers import object_list
from models import Entry, Tag
entries = Blueprint('entries', __name__, template_folder='templates')
@entries.route('/')
def index():
entries = Entry.query.order_by(Entry.created_timestamp.desc())
return object_list('entries/index.html', entries)
@entries.route('/tags/')
def tag_index():
pass
@entries.route('/tags/<slug>/')
def tag_detail(slug):
pass
@entries.route('/<slug>/')
def detail(slug):
pass
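# Registration sketch (the import path is an assumption about the surrounding
# project layout, not part of this module): the blueprint only becomes active
# once it is registered on the application, e.g.
#
#     from flask import Flask
#     from entries.blueprint import entries
#
#     app = Flask(__name__)
#     app.register_blueprint(entries, url_prefix='/entries')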
|
import json
import typing
import factory
from flask import Flask, url_for
from flask.testing import FlaskClient
from app.extensions import get_session
from auth.models import User
from .helpers import LoggedInState, random_password
class UserFactory(factory.alchemy.SQLAlchemyModelFactory):
first_name = factory.Faker("first_name")
last_name = factory.Faker("last_name")
email = factory.Faker("email")
password = factory.LazyFunction(random_password)
id = factory.Sequence(lambda n: n)
class Meta:
model = User
sqlalchemy_session = get_session()
@staticmethod
def build_invalid_users_for_register() -> typing.List[User]:
return [
UserFactory.build(email="invalid email"),
UserFactory.build(password="in"),
UserFactory.build(first_name=""),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def assign_creator(apps, schema_editor):
Quest = apps.get_model("coordination", "Quest")
for quest in Quest.objects.all():
quest.creator = quest.organizer
quest.save()
def move_organizer_and_players(apps, schema_editor):
Quest = apps.get_model("coordination", "Quest")
Membership = apps.get_model("coordination", "Membership")
for quest in Quest.objects.all():
Membership.objects.create(quest=quest, user=quest.organizer, role="O")
for player in quest.players.all():
Membership.objects.create(quest=quest, user=player, role="P")
class Migration(migrations.Migration):
dependencies = [
('coordination', '0006_create_membership_model'),
]
operations = [
migrations.RunPython(assign_creator),
migrations.RunPython(move_organizer_and_players),
]
|
from sqlalchemy import MetaData, Table, Column, Integer, NVARCHAR, BOOLEAN
meta = MetaData()
t = Table(
"task_status_type",
meta,
Column("id", Integer, primary_key=True),
Column("name", NVARCHAR(255)),
Column("is_complete", BOOLEAN),
Column("is_active", BOOLEAN),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
t.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("task_status_type", meta, autoload=True)
t.drop()
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
#Set up plot style
font = {'size' : 12}
matplotlib.rc('font', **font)
matplotlib.rc('font', serif='Computer Modern Roman')
#Define colours
martaRed = "#c24c51"
martaGreen = "#54a666"
martaBlue = "#4c70b0"
martaPurple = "#7f70b0"
martaGold = "#ccb873"
Icolour = '#DB2420' #Central line red
Ocolour = '#00A0E2' #Victoria line blue
O2colour = '#868F98' #Jubilee line grey
D2colour = '#F386A0' #Hammersmith line pink
D4colour = '#97015E' #Metropolitan line magenta
D6colour = '#B05F0F' #Bakerloo line brown
D5colour = '#00843D' #District line green
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
fnames = []
fnames.append("data/cascade_s_vs_t_q75.txt")
fnames.append("data/cascade_s_vs_t_q10.txt")
fnames.append("data/cascade_s_vs_t_q121.txt")
fnames.append("data/cascade_s_vs_t_q20.txt")
alphavals = [0.75, 1, 1.25, 2]
for i, fname in enumerate(fnames):
# start with a rectangular Figure
plt.figure()
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_histy = plt.axes(rect_histy)
ax_histy.tick_params(direction='in', labelleft=False)
dat = np.genfromtxt(fname)
y, x = np.hsplit(dat,2)
x = x[:,0]
y = y[:,0]
xy = np.vstack([x,y])
z = gaussian_kde(xy)(xy)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# the scatter plot:
ax_scatter.scatter(x, y, c=z, s=30) #, edgecolor='', cmap="viridis")
# now determine nice limits by hand:
binwidth = 0.25
lim = np.ceil(np.abs([x, y]).max() / binwidth) * binwidth
ax_scatter.set_xlim((-1, 101))
ax_scatter.set_ylim((-0.05, 1.05))
ax_histx.hist(x, bins=70, density=True, width=max(x)/50.0, color=martaRed)
ax_histy.hist(y, bins=70, orientation='horizontal', density=True, height=max(y)/50,color=martaBlue)
ax_histx.set_xlim(ax_scatter.get_xlim())
ax_histy.set_ylim(ax_scatter.get_ylim())
plt.ylabel("$\\mathcal{S}$", rotation=0, labelpad=340)
plt.xlabel("$T$")
title = "$\\alpha=$" + str(alphavals[i])
plt.title(title)
plt.tight_layout()
plt.show()
|
import os
import itertools as it
import numpy as np
import conv_cart_internal_geoms as trimerGeom
def getE0DataObjFromCalcObjs(calcObjList, geomRepStr):
allDeltaE0 = list()
allGeoms = list()
for x in calcObjList:
allDeltaE0.append( x.e0Diff )
for x in calcObjList:
allGeoms.append( x.trimerGeom )
return ThreeBodyE0Contribs(allGeoms, allDeltaE0, geomRep=geomRepStr)
class ThreeBodyE0Contribs():
def __init__(self, trimerGeomList:list, deltaE0Values, geomRep="sankey", folder=None, outPath=None):
self._eqTol = 1e-6
self.trimerGeomList = trimerGeomList
self.deltaE0Values = deltaE0Values
self.geomRep = geomRep.lower()
self._folder = folder
self._outPath = outPath
@classmethod
def fromFile(cls, inpPath):
with open(inpPath,"rt") as f:
fileAsList = f.readlines()
counter = 0
dataList = list()
while counter < len(fileAsList):
currLine = fileAsList[counter].lower()
if "elements" in currLine:
counter += 1
elements = [x for x in fileAsList[counter].strip().split(",")]
elif currLine.startswith("#"):
pass
elif "geomrepstr" in currLine:
counter += 1
geomStr= fileAsList[counter].strip().lower()
elif "data" in currLine:
trimerGeomList, deltaE0Values, counter = cls._parseDataSectionOfInpFile(fileAsList, counter, elements, geomStr)
counter += 1
return cls(trimerGeomList, deltaE0Values, geomStr, outPath=inpPath)
@classmethod
def _parseDataSectionOfInpFile(self, fileAsList, counter, elements, geomRep):
geomStrToMethod = {"sankey":trimerGeom.TrimerInternGeom.fromSankeyRep,
"bondLengths".lower():trimerGeom.TrimerInternGeom.fromBondLengths}
getInternGeom = geomStrToMethod[geomRep]
counter+= 1
nPoints = int( fileAsList[counter].strip() )
counter+=1
allGeoms, allE0 = list(), list()
for x in range(nPoints):
currParams = [float(x) for x in fileAsList[counter].strip().split(",")]
internCoords = getInternGeom(*currParams[:3])
allGeoms.append( trimerGeom.TrimerGeom(elements, internCoords) )
allE0.append( currParams[-1] )
counter += 1
return allGeoms, allE0, counter
def getArrayParamsAgainstValues(self):
keywordToRepProp = {"sankey":"sankeyRep", "bondLengths".lower():"bondLengths"}
nCols = len( getattr(self.trimerGeomList[0].internCoords, keywordToRepProp[self.geomRep]) ) + 1
nRows = len( self.trimerGeomList )
outArray = np.zeros( (nRows,nCols) )
for rowIdx, (geom, deltaE0) in enumerate(it.zip_longest(self.trimerGeomList, self.deltaE0Values)):
geomParams = getattr( geom.internCoords, keywordToRepProp[self.geomRep] )
outArray[rowIdx,:nCols-1] = geomParams
outArray[rowIdx,-1] = deltaE0
#Sort final array by each column in turn (Never worked + mine was just naturally in the correct order anyway so....)
# nDecimals = 1
# intArray = np.array(outArray*(10**nDecimals))
# intArray = intArray.astype(int) #Im relying on behaviour that numbers after the decimal are truncated on conversion
# intArrayDtypes = [('colA',int), ('colB',int), ('colC',int), ('colD',int)]
# structIntArray = np.array(intArray, dtype=intArrayDtypes)
# sortOrder = np.argsort(structIntArray,order=('colA','colB','colC'))
# sortOrder = np.lexsort( (intArray[:,2],intArray[:,1], intArray[:,0]) )
## intArray = intArray[sortOrder]
return outArray
@property
def outPath(self):
if self._outPath is not None:
return self._outPath
if self._folder is None:
folder = os.path.abspath( os.getcwd() )
else:
folder = self._folder
fileName = "{}_{}_{}.e03b".format(*self.elements)
return os.path.join(folder,fileName)
@outPath.setter
def outPath(self,value):
self._outPath = value
@property
def elements(self):
return list(self.trimerGeomList[0].elements)
def writeFile(self):
outStr = ""
outStr += "geomRepStr\n{}\n".format(self.geomRep)
outStr += "elements\n" + ",".join(self.elements) + "\n"
outStr += "#" + ",".join(self._getArrayHeadings() + ["Delta E0 / eV"]) + "\n"
outStr += "data\n"
outStr += "{}\n".format(len(self.trimerGeomList))
for row in self.getArrayParamsAgainstValues():
outStr += "{:17.10g}, {:17.10g}, {:17.10g}, {:17.10g}\n".format(*row)
with open(self.outPath, "wt") as f:
f.write(outStr)
def _getArrayHeadings(self):
if self.geomRep=="sankey":
headings = ["r_ij", "r_ck", "theta_ick"]
elif self.geomRep.lower()=="bondlengths":
headings = ["r_ij", "r_jk", "r_kj"]
else:
raise ValueError("{} is an unsupported geomRep".format(self.geomRep))
return headings
def __eq__(self,other):
absTol = min(self._eqTol, other._eqTol)
if not np.allclose( self.getArrayParamsAgainstValues(), other.getArrayParamsAgainstValues() ,atol=absTol):
return False
if self.outPath != other.outPath:
return False
return True
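# Sketch of the on-disk layout written by writeFile() and read back by
# fromFile(); the element symbols and numeric values below are illustrative only:
#
#     geomRepStr
#     sankey
#     elements
#     Mg,Mg,Mg
#     #r_ij,r_ck,theta_ick,Delta E0 / eV
#     data
#     1
#          3.2000000000,      3.2000000000,     90.0000000000,     -0.0123456789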
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/5/8
@Author : AnNing
"""
from __future__ import print_function
import os
import numpy as np
import sys
from hdf5 import write_hdf5_and_compress
from initialize import load_yaml_file
from load import LoadH8Ndvi
def main(yaml_file):
"""
    :param yaml_file: (str) interface yaml file
:return:
"""
    # ######################## Initialization ###########################
    # load the interface file
print("main: interface file <<< {}".format(yaml_file))
interface_config = load_yaml_file(yaml_file)
    i_in_files = interface_config['PATH']['ipath'] # absolute paths of the files to process (list)
    i_out_file = interface_config['PATH']['opath'] # absolute path of the output file (str)
    # skip if the output file already exists
if os.path.isfile(i_out_file):
print("***Warning***File is already exist, skip it: {}".format(i_out_file))
return
in_files = list()
for in_file in i_in_files:
if os.path.isfile(in_file):
in_files.append(in_file)
else:
            print('***WARNING***File does not exist: {}'.format(in_file))
file_count = len(in_files)
if len(in_files) <= 0:
print('###ERROR###The count of Valid files is 0')
return
else:
print('---INFO---File count: {}'.format(file_count))
combine(in_files, i_out_file)
def combine(in_files, out_file):
ndvi = None
flag = None
for in_file in in_files:
print('<<< {}'.format(in_file))
loder = LoadH8Ndvi(in_file)
ndvi_part = loder.get_ndvi()
flag_part = loder.get_flag()
if ndvi is None:
ndvi = ndvi_part
flag = flag_part
else:
            # compare the new values with the old ones
index_max = np.logical_and(flag == 0, flag_part == 0)
ndvi[index_max] = np.maximum(ndvi[index_max], ndvi_part[index_max])
            # assign the new values
index_new = np.logical_and(flag != 0, flag_part == 0)
ndvi[index_new] = ndvi_part[index_new]
flag[index_new] = 0
            # assign cloud or water values to currently invalid pixels
index_nan = np.logical_and(flag == 3, flag_part != 3)
ndvi[index_nan] = ndvi_part[index_nan]
flag[index_nan] = flag_part[index_nan]
    # write the HDF5 file
result = {'NDVI': ndvi, 'Flag': flag}
write_hdf5_and_compress(out_file, result)
# ######################## Program entry point ##############################
if __name__ == "__main__":
    # get the program arguments
ARGS = sys.argv[1:]
HELP_INFO = \
u"""
[arg1]:yaml_path
[example]: python app.py arg1
"""
if "-h" in ARGS:
print(HELP_INFO)
sys.exit(-1)
if len(ARGS) != 1:
print(HELP_INFO)
sys.exit(-1)
else:
ARG1 = ARGS[0]
main(ARG1)
|
from PySide2.QtWidgets import QApplication, QWidget, QSplitter, QTextEdit, QVBoxLayout, QToolButton
from PySide2.QtCore import Qt
class CustomSplitter(QWidget):
def __init__(self):
QWidget.__init__(self)
self.splitter = QSplitter(self)
self.splitter.addWidget(QTextEdit(self))
self.splitter.addWidget(QTextEdit(self))
layout = QVBoxLayout(self)
layout.addWidget(self.splitter)
handle = self.splitter.handle(1)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
button = QToolButton(handle)
button.setArrowType(Qt.LeftArrow)
button.clicked.connect(
lambda: self.handleSplitterButton(True))
layout.addWidget(button)
button = QToolButton(handle)
button.setArrowType(Qt.RightArrow)
button.clicked.connect(
lambda: self.handleSplitterButton(False))
layout.addWidget(button)
handle.setLayout(layout)
def handleSplitterButton(self, left=True):
if not all(self.splitter.sizes()):
self.splitter.setSizes([1, 1])
elif left:
self.splitter.setSizes([0, 1])
else:
self.splitter.setSizes([1, 0])
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = CustomSplitter()
window.setGeometry(500, 300, 300, 300)
window.show()
sys.exit(app.exec_())
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
import json
import re
import datetime
# Regex pattern definitions
from jrnl.util import date2string, datetime2string
DATE_PATTERN = r'\d\d\d\d\-\d\d\-\d\d' # YEAR-MONTH-DAY e.g. 1984-01-24
STATUS_REGEX_DICT = {
r'\[ \]': 'incomplete',
r'\{%s\}(?!\[)' % DATE_PATTERN: 'incomplete',
r'\[x\]': 'complete',
r'\{%s\}\[x\]' % DATE_PATTERN: 'complete',
r'\{%(date)s\}\[%(date)s\]' % dict(date=DATE_PATTERN): 'complete',
}
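# Examples of the markers matched above (illustrative strings only):
#   "[ ] buy milk"                        -> incomplete
#   "{2019-01-24} call the landlord"      -> incomplete, due 2019-01-24
#   "[x] send report"                     -> complete
#   "{2019-01-24}[2019-01-25] pay rent"   -> complete, due 2019-01-24, completed 2019-01-25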
CONTENT_PATTERN = r' ?(?P<content>.*)'
ANY_TODOS_REGEX = r''
# Or's together all possible status patterns.
i = 0
for pattern in STATUS_REGEX_DICT.iterkeys():
if i > 0:
ANY_TODOS_REGEX = r'{}|'.format(ANY_TODOS_REGEX)
ANY_TODOS_REGEX = r'{}(?:{})'.format(ANY_TODOS_REGEX, pattern)
i += 1
ANY_TODOS_REGEX = re.compile(r'(?P<all>(?:%s)%s)' % (ANY_TODOS_REGEX, CONTENT_PATTERN), re.MULTILINE)
class Todo:
def __init__(self, text_repr, entry):
self.text_repr = text_repr
self.entry = entry
self.status = None
self.completed_date = None
self.due_date = None
self.content = None
self.parse_text_repr()
@property
def is_complete(self):
return self.status == 'complete'
def extract_date(self, start_pattern=None, end_pattern=None):
"""
Extracts a date from the text_repr, isolating which date if specified by the start and end patterns.
:rtype: datetime.date
"""
if start_pattern is None:
start_pattern = r''
if end_pattern is None:
end_pattern = r''
regex = re.compile(r'.*%s(?P<date>%s)%s.*' % (start_pattern, DATE_PATTERN, end_pattern))
match = regex.match(self.text_repr)
if not match:
return None
date_string = match.group('date')
return datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
def parse_text_repr(self):
if not isinstance(self.text_repr, (str, unicode)):
return
# Sets status
for pattern, status in STATUS_REGEX_DICT.iteritems():
if re.compile(pattern).match(self.text_repr):
self.status = status
if self.is_complete:
self.completed_date = self.extract_date(r'\[', r'\]')
self.due_date = self.extract_date(r'\{', r'\}')
match = ANY_TODOS_REGEX.match(self.text_repr)
if match is not None:
self.content = match.group('content')
@classmethod
def parse_entry_todos(cls, entry):
"""
:type entry: Entry.Entry
:rtype: list[Todo]
"""
fulltext = entry.get_fulltext(lower=False)
todos_matches = [m.group('all') for m in re.finditer(ANY_TODOS_REGEX, fulltext)]
todos = []
for match in todos_matches:
todos.append(Todo(match, entry))
return todos
def to_dict(self):
return {
'text_repr': self.text_repr,
'status': self.status,
'content': self.content,
'completed_date': date2string(self.completed_date),
'due_date': date2string(self.due_date),
}
def to_item_format(self):
ret = "* {}".format(self.content)
ret += "\n Entry: {}".format(datetime2string(self.entry.date, self.entry.journal))
ret += "\n {}".format(self.entry.title)
if self.due_date:
ret += "\n Due: {}".format(date2string(self.due_date))
if self.completed_date:
ret += "\n Completed: {}".format(date2string(self.completed_date))
return ret
def __unicode__(self):
return self.text_repr
def __repr__(self):
return "<Todo '{}'>".format(self.text_repr)
|
__author__ = 'royrusso'
import jmespath
class TestNodes_v2:
def test_get_node_stats(self, fixture):
response = fixture.app.get('/api/nodes/%s/_stats' % fixture.cluster_v2_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_NODE_STATS, jmespath.search('*', res['data'][0]['nodes'])[0]) is True
def test_get_one_node_stats(self, fixture):
response = fixture.app.get('/api/nodes/%s/_stats' % fixture.cluster_v2_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
# get node ID:
node_id = list(jmespath.search('nodes', res['data'][0]).keys())[0]
response = fixture.app.get('/api/nodes/%s/%s/_stats' % (fixture.cluster_v2_name, node_id))
res = fixture.get_response_data(response)
assert list(jmespath.search('nodes', res['data'][0]).keys())[0] == node_id
assert fixture.has_all_keys(fixture.config.KEYS_NODE_STATS, res['data'][0]['nodes'][node_id].keys()) is True
def test_get_node_info(self, fixture):
response = fixture.app.get('/api/nodes/%s/_info' % fixture.cluster_v2_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
assert fixture.has_all_keys(fixture.config.KEYS_NODE_INFO, jmespath.search('*', res['data'][0]['nodes'])[0]) is True
def test_get_one_node_info(self, fixture):
response = fixture.app.get('/api/nodes/%s/_info' % fixture.cluster_v2_name)
assert 200 == response.status_code
res = fixture.get_response_data(response)
node_id = list(jmespath.search('nodes', res['data'][0]).keys())[0]
response = fixture.app.get('/api/nodes/%s/%s/_info' % (fixture.cluster_v2_name, node_id))
res = fixture.get_response_data(response)
assert list(jmespath.search('nodes', res['data'][0]).keys())[0] == node_id
assert fixture.has_all_keys(fixture.config.KEYS_NODE_INFO, res['data'][0]['nodes'][node_id]) is True
|
###############################################################################
# Copyright 2012-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import logging
import os
import time
import stat
import sys
from ipf.error import StepError
#######################################################################################################################
logger = logging.getLogger(__name__)
#######################################################################################################################
class LogFileWatcher(object):
    """Watches a single log file and sends complete lines to the callback."""

    def __init__(self, callback, path, posdb_path=None):
        self.callback = callback
        self.path = path
        self.keep_running = True
        self.pos_db = PositionDB(posdb_path)

    def run(self):
        file = LogFile(self.path, self.callback, self.pos_db)
        file.open()
        while self.keep_running:
            try:
                file.handle()
            except IOError:
                # try to reopen in case of stale NFS file handle or similar
                file.reopen()
                file.handle()
            time.sleep(1)
        file.close()

    def stop(self):
        self.keep_running = False
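
# A minimal usage sketch (not part of the original module; the log path and the
# position-file path below are hypothetical):
#
#     def print_line(path, line):
#         print("%s: %s" % (path, line.rstrip("\n")))
#
#     watcher = LogFileWatcher(print_line, "/var/log/myapp.log", "/tmp/myapp.pos")
#     watcher.run()  # blocks; call watcher.stop() from another thread to exit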
#######################################################################################################################
class LogDirectoryWatcher(object):
    """Discovers new lines in log files and sends them to the callback."""

    def __init__(self, callback, dir, posdb_path=None):
        if not os.path.exists(dir):
            raise StepError("%s doesn't exist" % dir)
        if not os.path.isdir(dir):
            raise StepError("%s isn't a directory" % dir)
        self.callback = callback
        self.dir = dir
        self.pos_db = PositionDB(posdb_path)
        self.files = {}
        self.last_update = -1
        logger.info("created watcher for directory %s", dir)

    def run(self):
        while True:
            self._updateFiles()
            for file in list(self.files.values()):
                try:
                    file.handle()
                except IOError:
                    # try to reopen in case of stale NFS file handle or similar
                    file.reopen()
                    file.handle()
            time.sleep(1)

    def _updateFiles(self):
        cur_time = time.time()
        if cur_time - self.last_update < 60:  # update the file list at most once a minute
            return
        logger.debug("updating files")
        cur_files = self._getCurrentFiles()
        for file in cur_files:
            if file.id in self.files:
                self._handleExistingFile(file)
            else:
                self._handleNewFile(file)
        self._handleDeletedFiles(cur_files)
        self.last_update = cur_time

    def _getCurrentFiles(self):
        cur_files = []
        for file_name in os.listdir(self.dir):
            path = os.path.join(self.dir, file_name)
            if not os.path.isfile(path):  # only regular files
                continue
            if os.path.islink(path):  # but not soft links
                continue
            cur_files.append(LogFile(path, self.callback, self.pos_db))
        return cur_files

    def _handleExistingFile(self, file):
        logger.debug("existing file %s", file.path)
        if file.path != self.files[file.id].path:  # file has been rotated
            logger.info("log file %s rotated to %s", self.files[file.id].path, file.path)
            self.files[file.id].path = file.path
        # close the tracked file if it has gone idle
        self.files[file.id].closeIfNeeded()

    def _handleNewFile(self, file):
        logger.info("new file %s %s", file.id, file.path)
        file.openIfNeeded()
        self.files[file.id] = file

    def _handleDeletedFiles(self, cur_files):
        cur_file_ids = set([file.id for file in cur_files])
        for id in [id for id in list(self.files.keys()) if id not in cur_file_ids]:
            if self.files[id].file is not None:
                self.files[id].file.close()
            del self.files[id]
        for id in self.pos_db.ids():
            if id not in self.files:
                self.pos_db.remove(id)
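
# Rotation-handling sketch (restates the logic above; the paths are made up):
# because files are keyed by (device, inode), a rename done by log rotation
# keeps the same id, so the watcher just updates the stored path and continues
# reading from the saved position.
#
#     /var/log/app.log  (id 2049-12345)  ->  /var/log/app.log.1  (id 2049-12345, path updated)
#     /var/log/app.log  (newly created)      gets a new id and is opened fresh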
#######################################################################################################################
class LogFile(object):
    """A single log file being tailed; its read position is tracked in a PositionDB."""

    def __init__(self, path, callback, pos_db=None):
        self.path = path
        st = os.stat(path)
        self.id = self._getId(st)
        self.callback = callback
        self.file = None
        if pos_db is None:
            self.pos_db = PositionDB()
        else:
            self.pos_db = pos_db

    def _getId(self, st):
        # device and inode identify the file even if it is renamed (e.g. by log rotation)
        return "%s-%d" % (st.st_dev, st.st_ino)

    def openIfNeeded(self):
        if self._shouldOpen():
            self.open()

    def _shouldOpen(self):
        if self.file is not None:
            return False
        st = os.stat(self.path)
        if st.st_mtime > time.time() - 15*60:  # only open files modified in the last 15 minutes
            return True
        return False

    def _seek(self):
        position = self.pos_db.get(self.id)
        if position is not None:
            self.file.seek(position)
        else:
            self.file.seek(0, os.SEEK_END)
            self._savePosition()

    def _savePosition(self):
        return self.pos_db.set(self.id, self.file.tell())

    def _forgetPosition(self):
        self.pos_db.remove(self.id)

    def open(self):
        logger.info("opening file %s (%s)", self.id, self.path)
        if self.file is not None:
            logger.warning("attempting to open already open file %s", self.path)
        self.file = open(self.path, "r")
        self._seek()

    def reopen(self):
        logger.info("reopening file %s (%s)", self.id, self.path)
        self.file = open(self.path, "r")
        self._seek()

    def closeIfNeeded(self):
        if self._shouldClose():
            self.close()
            #self._forgetPosition()

    def _shouldClose(self):
        if self.file is None:
            return False
        st = os.stat(self.path)
        if st.st_mtime < time.time() - 15*60:  # close files not modified in the last 15 minutes
            return True
        return False

    def close(self):
        logger.info("closing file %s (%s)", self.id, self.path)
        if self.file is None:
            logger.warning("attempting to close already closed file %s", self.path)
            return
        self.file.close()
        self.file = None

    def handle(self):
        if self.file is None:
            return
        logger.debug("checking log file %s", self.path)
        line = "junk"  # non-empty sentinel so the loop runs at least once
        self._seek()
        while line:
            line = self.file.readline()
            if line.endswith("\n"):
                self.callback(self.path, line)
                self._savePosition()
            else:
                break
#######################################################################################################################
class PositionDB(object):
    """Maps a file id to the byte offset read so far, optionally persisted to disk."""

    def __init__(self, path=None):
        self.position = {}
        self.path = path
        self._read()

    def set(self, id, position):
        if id not in self.position or position != self.position[id]:
            self.position[id] = position
            self._write()

    def get(self, id):
        return self.position.get(id, None)

    def remove(self, id):
        del self.position[id]
        self._write()

    def ids(self):
        return list(self.position.keys())

    def _read(self):
        if self.path is None:
            return
        self.position = {}
        if not os.path.exists(self.path):
            return
        try:
            file = open(self.path, "r")
            for line in file:
                (id, pos_str) = line.split()
                self.position[id] = int(pos_str)
            file.close()
        except IOError as e:
            logger.error("failed to read position database %s: %s" % (self.path, e))

    def _write(self):
        if self.path is None:
            return
        try:
            file = open(self.path, "w")
            for key in self.position:
                file.write("%s %d\n" % (key, self.position[key]))
            file.close()
        except IOError as e:
            logger.error("failed to write position database %s: %s" % (self.path, e))
#######################################################################################################################
# testing

def echo(path, message):
    print("%s: %s" % (path, message[:-1]))

def doNothing(path, message):
    pass

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("usage: log.py <log directory> [position file]")
        sys.exit(1)

    import logging.config
    from ipf.paths import IPF_ETC_PATH

    if len(sys.argv) >= 3:
        watcher = LogDirectoryWatcher(echo, sys.argv[1], sys.argv[2])
    else:
        watcher = LogDirectoryWatcher(echo, sys.argv[1])
    watcher.run()
|
from random import randint


class Task(object):
    """Base class: restores the environment from a saved checkpoint and defines a goal test."""

    def __init__(self, env, start):
        self.env = env
        self.env.new_game()
        self.start = self.env.load_state('./rl/game_state_ckpts/{}.npy'.format(start))

    def finished(self):
        # overridden by each concrete task
        pass

    def reached_pos(self, x_, y_):
        # true when the agent is within a 5-pixel box around (x_, y_)
        x, y = self.env.agent_pos()
        return (x_ - 5 <= x <= x_ + 5) and (y_ - 5 <= y <= y_ + 5)


class Task1(Task):
    def __init__(self, env):
        super(Task1, self).__init__(env, 'ckpt-1-2')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 0 and self.reached_pos(27, 235) and self.env.orb_collected()


class Task2(Task):
    def __init__(self, env):
        super(Task2, self).__init__(env, 'ckpt-1-2')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 0 and self.reached_pos(77, 138)


class Task3(Task):
    def __init__(self, env):
        super(Task3, self).__init__(env, 'ckpt-3')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 0 and self.reached_pos(77, 138) and self.env.orb_collected()


class Task4(Task):
    def __init__(self, env):
        super(Task4, self).__init__(env, 'ckpt-4-5')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 4 and self.reached_pos(77, 137)


class Task5(Task):
    def __init__(self, env):
        super(Task5, self).__init__(env, 'ckpt-4-5')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 4 and self.reached_pos(21, 235)


class Task6(Task):
    def __init__(self, env):
        super(Task6, self).__init__(env, 'ckpt-6')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 3 and self.reached_pos(77, 137)


class Task7(Task):
    def __init__(self, env):
        super(Task7, self).__init__(env, 'ckpt-7-8')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 10 and self.reached_pos(13, 235) and self.env.orb_collected()


class Task8(Task):
    def __init__(self, env):
        super(Task8, self).__init__(env, 'ckpt-7-8')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 10 and self.reached_pos(138, 235)


class Task9(Task):
    def __init__(self, env):
        super(Task9, self).__init__(env, 'ckpt-9')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 10 and self.reached_pos(138, 235)


class Task10(Task):
    def __init__(self, env):
        super(Task10, self).__init__(env, 'ckpt-10')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 9 and self.reached_pos(8, 235)


class Task11(Task):
    def __init__(self, env):
        super(Task11, self).__init__(env, 'ckpt-11-12')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 8 and self.reached_pos(77, 155)


class Task12(Task):
    def __init__(self, env):
        super(Task12, self).__init__(env, 'ckpt-11-12')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 8 and self.reached_pos(77, 235)


class Task13(Task):
    def __init__(self, env):
        super(Task13, self).__init__(env, 'ckpt-13')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 8 and self.reached_pos(77, 235) and self.env.has_key()


class Task14(Task):
    def __init__(self, env):
        super(Task14, self).__init__(env, 'ckpt-14')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 8 and self.reached_pos(77, 155) and self.env.has_key()


class Task15(Task):
    def __init__(self, env):
        super(Task15, self).__init__(env, 'ckpt-15')
        self.env.repeat_action(0, 4)

    def finished(self):
        return self.env.room() == 8 and self.reached_pos(149, 235)
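
# A minimal driver sketch (not part of the original file): a caller typically
# restores a task and acts until it reports completion. `make_env` and
# `choose_action` are hypothetical stand-ins for the caller's environment
# wrapper and policy.
#
#     env = make_env()
#     task = Task3(env)
#     while not task.finished():
#         action = choose_action(env)
#         env.repeat_action(action, 4)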
|
# @lc app=leetcode id=322 lang=python3
#
# [322] Coin Change
#
# https://leetcode.com/problems/coin-change/description/
#
# algorithms
# Medium (38.38%)
# Likes: 7686
# Dislikes: 210
# Total Accepted: 699.3K
# Total Submissions: 1.8M
# Testcase Example: '[1,2,5]\n11'
#
# You are given an integer array coins representing coins of different
# denominations and an integer amount representing a total amount of money.
#
# Return the fewest number of coins that you need to make up that amount. If
# that amount of money cannot be made up by any combination of the coins,
# return -1.
#
# You may assume that you have an infinite number of each kind of coin.
#
#
# Example 1:
#
#
# Input: coins = [1,2,5], amount = 11
# Output: 3
# Explanation: 11 = 5 + 5 + 1
#
#
# Example 2:
#
#
# Input: coins = [2], amount = 3
# Output: -1
#
#
# Example 3:
#
#
# Input: coins = [1], amount = 0
# Output: 0
#
#
# Example 4:
#
#
# Input: coins = [1], amount = 1
# Output: 1
#
#
# Example 5:
#
#
# Input: coins = [1], amount = 2
# Output: 2
#
#
#
# Constraints:
#
#
# 1 <= coins.length <= 12
# 1 <= coins[i] <= 2^31 - 1
# 0 <= amount <= 10^4
#
#
#
# @lc tags=dynamic-programming
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Given the coin denominations and a target amount, find the minimum number of coins needed.
# Straightforward bottom-up DP: sweep the table once per coin.
#
# @lc idea=end
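#
# Cost note (follows from the loops in the code below): the DP fills a table of
# size amount + 1 once per coin, so time is O(len(coins) * amount) and extra
# space is O(amount). For Example 1 (coins = [1, 2, 5], amount = 11) the table
# ends with counts[11] == 3, matching 5 + 5 + 1.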
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        # counts[i] = fewest coins summing to i; amount + 1 marks "unreachable"
        counts = [amount + 1] * (amount + 1)
        counts[0] = 0
        for coin in coins:
            for i in range(amount - coin + 1):
                counts[i + coin] = min(counts[i + coin], counts[i] + 1)
        return -1 if counts[-1] == amount + 1 else counts[-1]
# @lc code=end
# @lc main=start
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('coins = [1,2,5], amount = 11')
    print('Expected :')
    print('3')
    print('Output :')
    print(str(Solution().coinChange([1, 2, 5], 11)))
    print()

    print('Example 2:')
    print('Input : ')
    print('coins = [2], amount = 3')
    print('Expected :')
    print('-1')
    print('Output :')
    print(str(Solution().coinChange([2], 3)))
    print()

    print('Example 3:')
    print('Input : ')
    print('coins = [1], amount = 0')
    print('Expected :')
    print('0')
    print('Output :')
    print(str(Solution().coinChange([1], 0)))
    print()

    print('Example 4:')
    print('Input : ')
    print('coins = [1], amount = 1')
    print('Expected :')
    print('1')
    print('Output :')
    print(str(Solution().coinChange([1], 1)))
    print()

    print('Example 5:')
    print('Input : ')
    print('coins = [1], amount = 2')
    print('Expected :')
    print('2')
    print('Output :')
    print(str(Solution().coinChange([1], 2)))
    print()

    pass
# @lc main=end
|