max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
projects/models.py | maattss/tdt4242-agreelance | 0 | 12765851 | <gh_stars>0
from django.db import models
from user.models import Profile
from taggit.managers import TaggableManager
from django.core.files.storage import FileSystemStorage
from django.conf import settings
import os
class OverwriteStorage(FileSystemStorage):
    """File storage that overwrites existing files instead of renaming uploads.

    Based on http://djangosnippets.org/snippets/976/ — an alternative to
    overriding ``save()`` per model as proposed in
    https://code.djangoproject.com/ticket/11663.
    """

    def get_available_name(self, name, max_length):
        """Free up *name* on the target storage and return it unchanged.

        If a file with the requested name already exists it is removed
        from MEDIA_ROOT, so the new upload replaces it rather than being
        stored under a suffixed name.
        """
        if self.exists(name):
            os.remove(os.path.join(settings.MEDIA_ROOT, name))
        return name
class ProjectCategory(models.Model):
    """A category projects can be filed under, with optional tags."""

    name = models.CharField(max_length=200)
    tags = TaggableManager(blank=True)

    def __str__(self):
        # Categories are identified by name in the admin and templates.
        return self.name
class Project(models.Model):
    """A project owned by a profile, holding participants, tags and a status."""

    # Status codes stored in the database.
    OPEN = 'o'
    INPROG = 'i'
    FINISHED = 'f'
    STATUS_CHOICES = (
        (OPEN, 'Open'),
        (INPROG, 'In progress'),
        (FINISHED, 'Finished'),
    )

    user = models.ForeignKey(Profile, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    description = models.TextField(max_length=500)
    participants = models.ManyToManyField(Profile, related_name='project_participants')
    category = models.ForeignKey(ProjectCategory, on_delete=models.CASCADE, related_name='project_category')
    tags = TaggableManager(blank=True)
    status = models.CharField(max_length=11, choices=STATUS_CHOICES, default=OPEN)

    def __str__(self):
        return self.title
class Task(models.Model):
    """A unit of work within a project, with a budget and a delivery workflow."""

    # Delivery workflow states stored in the database.
    AWAITING_DELIVERY = 'ad'
    PENDING_ACCEPTANCE = 'pa'
    PENDING_PAYMENT = 'pp'
    PAYMENT_SENT = 'ps'
    DECLINED_DELIVERY = 'dd'
    STATUS_CHOICES = (
        (AWAITING_DELIVERY, 'Waiting for delivery'),
        (PENDING_ACCEPTANCE, 'Delivered and waiting for acceptance'),
        (PENDING_PAYMENT, 'Delivery has been accepted, awaiting payment'),
        (PAYMENT_SENT, 'Payment for delivery is done'),
        (DECLINED_DELIVERY, 'Declined delivery, please revise'),
    )

    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="tasks")
    title = models.CharField(max_length=200)
    description = models.TextField(max_length=500)
    budget = models.IntegerField(default=0)
    status = models.CharField(max_length=50, choices=STATUS_CHOICES, default=AWAITING_DELIVERY)
    feedback = models.TextField(max_length=500, default="")
    # Per-profile permission sets for the task's files.
    read = models.ManyToManyField(Profile, related_name='task_participants_read')
    write = models.ManyToManyField(Profile, related_name='task_participants_write')
    modify = models.ManyToManyField(Profile, related_name='task_participants_modify')

    def __str__(self):
        return "{} {}".format(self.id, self.title)
def accepted_task_offer(task):
    """Return the accepted TaskOffer for *task*, or None if there is none."""
    try:
        return task.taskoffer_set.get(status='a')
    except TaskOffer.DoesNotExist:
        return None
class Team(models.Model):
    """A named group of profiles attached to a task, with a write flag."""

    name = models.CharField(max_length=200)
    task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name="teams")
    members = models.ManyToManyField(Profile, related_name='teams')
    write = models.BooleanField(default=False)

    def __str__(self):
        # "<project> - <task> - <team>" for easy identification.
        return "{} - {} - {}".format(self.task.project.title, self.task.title, self.name)
def directory_path(instance, filename):
    """Upload handler: files are stored under static/uploads/tasks/<task id>/."""
    task_id = instance.task.id
    return 'static/uploads/tasks/{0}/{1}'.format(task_id, filename)
class TaskFile(models.Model):
    """A file uploaded to a task; re-uploads overwrite via OverwriteStorage."""

    task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name="files")
    file = models.FileField(upload_to=directory_path, storage=OverwriteStorage())

    def name(self):
        """Return the bare filename: the last '/'-separated path component."""
        return self.file.path.split("/")[-1]
class TaskFileTeam(models.Model):
    """Per-team permission flags (read/write/modify) on a single task file."""

    file = models.ForeignKey(TaskFile, on_delete=models.CASCADE, related_name="teams")
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name="file")
    name = models.CharField(max_length=200)
    read = models.BooleanField(default=False)
    write = models.BooleanField(default=False)
    modify = models.BooleanField(default=False)
class Delivery(models.Model):
    """A delivered piece of work for a task, tracked through acceptance."""

    # Review states stored in the database.
    ACCEPTED = 'a'
    PENDING = 'p'
    DECLINED = 'd'
    STATUS_CHOICES = (
        (ACCEPTED, 'Accepted'),
        (PENDING, 'Pending'),
        (DECLINED, 'Declined'),
    )

    task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name="delivery")
    file = models.FileField(upload_to=directory_path)
    comment = models.TextField(max_length=500)
    delivery_user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="deliveries")
    delivery_time = models.DateTimeField(auto_now=True)
    responding_user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="responded_deliveries", blank=True, null=True)
    responding_time = models.DateTimeField(blank=True, null=True)
    status = models.CharField(max_length=8, choices=STATUS_CHOICES, default=PENDING)
    feedback = models.TextField(max_length=500)
class TaskOffer(models.Model):
    """An offer made by a profile to complete a task at a given price."""

    ACCEPTED = 'a'
    PENDING = 'p'
    DECLINED = 'd'
    STATUS_CHOICES = (
        (ACCEPTED, 'Accepted'),
        (PENDING, 'Pending'),
        (DECLINED, 'Declined'),
    )

    task = models.ForeignKey(Task, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    description = models.TextField(max_length=500)
    price = models.IntegerField(default=0)
    offerer = models.ForeignKey(Profile, on_delete=models.CASCADE)
    status = models.CharField(max_length=8, choices=STATUS_CHOICES, default=PENDING)
    feedback = models.TextField(max_length=500)

    def calculate_average_task_offer(self):
        # NOTE(review): unimplemented stub — confirm whether it is still needed.
        pass

    def get_taskOffer(profile):
        # NOTE(review): no ``self`` — the first positional argument is named
        # ``profile``, so this only behaves as intended when called as
        # ``TaskOffer.get_taskOffer(some_profile)``. Verify callers, or mark
        # it explicitly as a @staticmethod.
        return TaskOffer.objects.filter(offerer=profile)
| 2.109375 | 2 |
generate_result.py | SeitaroShinagawa/chainer-partial_convolution_image_inpainting | 116 | 12765852 | <gh_stars>100-1000
#!/usr/bin/env python
import argparse
import os
import chainer
from chainer import training
from chainer import cuda, serializers
from chainer.training import extension
from chainer.training import extensions
import sys
import common.net as net
import datasets
from updater import *
from evaluation import *
from chainer.links import VGG16Layers
import common.paths as paths
def main():
    """Run a trained completion model over one validation batch and dump images."""
    parser = argparse.ArgumentParser(
        description='Train Completion Network')
    parser.add_argument('--batch_size', '-b', type=int, default=8)
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--eval_folder', '-e', default='generated_results',
                        help='Directory to output the evaluation result')
    parser.add_argument("--load_model", help='completion model path')
    parser.add_argument("--resize_to", type=int, default=256, help='resize the image to')
    parser.add_argument("--crop_to", type=int, default=256, help='crop the resized image to')
    parser.add_argument("--load_dataset", default='place2_test', help='load dataset')
    args = parser.parse_args()
    print(args)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()

    # Completion network and the VGG16 feature extractor.
    model = getattr(net, "PartialConvCompletion")(ch0=3, input_size=args.crop_to)
    print("loading vgg16 ...")
    vgg = VGG16Layers()
    print("ok")

    if args.load_model != '':
        serializers.load_npz(args.load_model, model)
        print("Completion model loaded")

    if not os.path.exists(args.eval_folder):
        os.makedirs(args.eval_folder)

    if args.gpu >= 0:
        model.to_gpu()
        vgg.to_gpu()
        print("use gpu {}".format(args.gpu))

    val_dataset = getattr(datasets, args.load_dataset)(
        paths.val_place2, mask_path="mask/256",
        resize_to=args.resize_to, crop_to=args.crop_to)
    val_iter = chainer.iterators.SerialIterator(val_dataset, args.batch_size)

    # Assemble one batch of images (x) and masks (m) on the model's device.
    xp = model.xp
    batch = val_iter.next()
    batchsize = len(batch)
    image_size = args.crop_to
    x = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")
    m = xp.zeros((batchsize, 3, image_size, image_size)).astype("f")
    for i in range(batchsize):
        x[i, :] = xp.asarray(batch[i][0])
        m[i, :] = xp.asarray(batch[i][1])
    mask_b = xp.array(m.astype("bool"))

    I_gt = Variable(x)
    M = Variable(m)
    M_b = Variable(mask_b)

    # Inpaint, then composite the known (masked) pixels back over the output.
    I_out = model(x, m)
    I_comp = F.where(M_b, I_gt, I_out)

    # Save ground truth, composite, raw output and mask as image grids.
    img = batch_postprocess_images(x.get(), batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_3_Igt.jpg")
    img = batch_postprocess_images(I_comp.data.get(), batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_2_Icomp.jpg")
    img = batch_postprocess_images(I_out.data.get(), batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_1_Iout.jpg")
    img = batch_postprocess_images(M.data.get(), batchsize, 1)
    Image.fromarray(img).save(args.eval_folder + "/generated_0_mask.jpg")


if __name__ == '__main__':
    main()
| 2.1875 | 2 |
def even_odd(*args):
    """Filter numbers by parity.

    The last positional argument is the command, "even" or "odd"; every
    other argument is a number.  Returns the numbers matching the command
    (any command other than "even" selects the odd numbers).
    """
    *numbers, command = args
    wanted_remainder = 0 if command == "even" else 1
    return [n for n in numbers if n % 2 == wanted_remainder]
# Demo: the trailing argument selects which parity is returned.
print(even_odd(1, 2, 3, 4, 5, 6, "even"))
print(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "odd"))
| 4.03125 | 4 |
applications/interface/test.py | beckerrh/simfemsrc | 0 | 12765854 | <reponame>beckerrh/simfemsrc<filename>applications/interface/test.py
from __future__ import print_function
import sys, os, shutil
from argparse import ArgumentParser
sys.path.append("@simfempythonpath@")
sys.path.append("@libpythonpath@")
import mesh.geometry
import tools.plot
import tools.testerrors
import numpy as np
import simfempy
import simfeminterface
import time
#---------------------------------------------------------#
def main():
    """Drive a convergence study for the selected interface-FEM test type."""
    # Which parameter is swept over: mesh size, Nitsche penalty,
    # material coefficients, or the interface position.
    testtype = 'hmean'
    # testtype = 'gamma'
    # testtype = 'kex'
    # testtype = 'kin'
    # testtype = 'xgamma'

    if len(sys.argv) > 1:
        dimension = sys.argv[1]
        if dimension == "1D":
            meshtypes = ["LineMesh"]
        elif dimension == "2D":
            meshtypes = ["TriangleMesh"]
        elif dimension == "3D":
            meshtypes = ["TetrahedralMesh"]
        else:
            raise KeyError("unknwon argument", sys.argv[1])
    # NOTE(review): the unconditional assignments below override any
    # command-line selection made above — confirm this is intended.
    meshtypes = ["LineMesh", "TriangleMesh", "TetrahedralMesh"]
    meshtypes = ["TriangleMesh"]

    paramdict = {
        "fem": "P1",
        # alternatives: "linear_straight", "quadratic_straight"
        "application": "quadratic_circle",
        "kin": 1.0,
        "kex": 100.0,
        "gamma": 5.0,
    }

    methods = ["nitsche", "strong", "newnitsche", "P1IFEM", "CR1IFEM"]
    # methods=["nitsche","P1IFEM","CR1IFEM"]
    hmean = 0.02
    newmeshperiteration = False

    if testtype == 'hmean':
        hmean = None
        params = [0.5 * 0.5 ** i for i in range(5)]
    elif testtype == 'gamma':
        params = [0.1 * 4.0 ** i for i in range(15)]
    elif testtype == 'kex':
        params = [0.1 * 5.0 ** i for i in range(12)]
    elif testtype == 'kin':
        paramdict["kex"] = 1.0
        params = [0.1 * 5.0 ** i for i in range(12)]
    elif testtype == 'xgamma':
        newmeshperiteration = True
        paramdict["application"] = "quadratic_straight"
        params = [-0.1 + 0.02 * i for i in range(11)]

    errs = ["H1", "E", "L2", "Linf"]
    simfemplot = tools.plot.SimFemPlot(methods, params=params, param=testtype)
    # Short method labels used in the plots.
    simfemplot.methodsnames["strong"] = "str"
    simfemplot.methodsnames["nitsche"] = "nit"
    simfemplot.methodsnames["P1IFEM"] = "p1iim"
    simfemplot.methodsnames["CR1IFEM"] = "cr1iim"
    simfemplot.methodsnames["nitschestabc"] = "sc"
    simfemplot.methodsnames["nitschestabnc"] = "snc"
    simfemplot.methodsnames["newnitsche"] = "new"
    simfemplot.methodsnames["newnitsche2"] = "new2"

    solver = simfeminterface.Solver()
    for meshtype in meshtypes:
        tester = tools.testerrors.SimFemTestErrors(
            solver=solver, errs=errs, plotter=simfemplot, meshtype=meshtype,
            paramdict=paramdict, methods=methods, param=testtype,
            params=params, hmean=hmean, newmeshperiteration=newmeshperiteration)
        tester.run()


#---------------------------------------------------------#
if __name__ == '__main__':
    main()
#
# #---------------------------------------------------------#
# def main():
# testtype = 'hmean'
# testtype = 'gamma'
# testtype = 'kex'
# testtype = 'xgamma'
#
# paramdict = {}
# paramdict["application"] = "quadratic_circle"
# # paramdict["application"] = "linear_straight"
# # paramdict["application"] = "quadratic_straight"
# paramdict["kin"] = 1.0
# paramdict["kex"] = 100.0
# paramdict["gamma"] = 5.0
#
# if testtype == 'hmean':
# hmeans=[0.25*0.5**i for i in range(6)]
# methods=["nitsche","nitschestabc","nitschestabnc","newnitsche"]
# methods=["nitsche","CR1IFEM","P1IFEM"]
# methods=["nitsche","newnitsche","newnitsche2"]
# methods=["nitsche","strong","newnitsche2"]
# testerrors(paramdict, methods, paramname='hmean', params=hmeans)
# if testtype == 'gamma':
# hmean=0.02
# gammas=[0.1* 4.0**i for i in range(15)]
# methods=["P1IFEM","CR1IFEM"]
# methods=["nitsche","newnitsche","newnitsche2"]
# methods=["nitsche","strong","newnitsche2"]
# testerrors(paramdict, methods, paramname='gamma', params=gammas, hmean=hmean)
# if testtype == 'kex':
# hmean=0.02
# kexs=[0.1* 5.0**i for i in range(12)]
# methods=["P1IFEM"]
# methods=["nitsche","CR1IFEM","P1IFEM"]
# methods=["nitsche","newnitsche","newnitsche2"]
# methods=["nitsche","strong","newnitsche2"]
# testerrors(paramdict, methods, paramname='kin', params=kexs, hmean=hmean)
# if testtype == 'xgamma':
# hmean=0.02
# paramdict["kin"] = 1.0
# paramdict["kex"] = 10000.0
# paramdict["application"] = "quadratic_straight"
# params=[-0.1 + 0.2*i for i in range(11)]
# methods=["nitsche","newnitsche","newnitsche2"]
# methods=["nitsche","strong","CR1IFEM","P1IFEM"]
# methods=["nitsche","strong","newnitsche2"]
# # methods=["nitsche","strong"]
# testerrors(paramdict, methods, paramname='xgamma', params=params, hmean=hmean, scale='toto')
#
# #---------------------------------------------------------#
# def testerrors(paramdict, methods, paramname, params, hmean=None, scale='loglog'):
# meshtype = 'TriangleMesh'
# from mesh.geometries.unitsquare import GeometryClass
# errLinf = {}
# errL2 = {}
# errH1 = {}
# errE = {}
# times = {}
# simfemplot = tools.plot.SimFemPlot(methods, params=params, param=paramname)
# if paramname!='hmean':
# geom=GeometryClass(hmean=hmean)
# geom.runGmsh(outdir="Data")
# for method in methods:
# times[method] = 0.0
# errL2[method] = np.zeros(len(params))
# errLinf[method] = np.zeros(len(params))
# errH1[method] = np.zeros(len(params))
# errE[method] = np.zeros(len(params))
# for ip,param in enumerate(params):
# print("---- param=", param)
# if paramname=='hmean':
# geom=GeometryClass(hmean=param)
# geom.runGmsh(outdir="Data")
# for im,method in enumerate(methods):
# print("####### %s #####" %(method))
# start = time.time()
# partion_id = 1
# construct_bdrymeshes = False
# mesh = simfempy.create(meshtype, partion_id, construct_bdrymeshes)
# mesh.readGmsh("Data/"+geom.name+'.msh')
# solver = simfeminterface.Solver()
# solver.setMesh(mesh)
# for key, value in paramdict.items():
# solver.setParameter(key,value)
# solver.setParameter("method",method)
# if paramname!='hmean':
# solver.setParameter(paramname,param)
# solver.init()
# info = solver.getInfo()
# print(info)
# d = solver.run()
# errL2[method][ip] = d["L2"]
# errLinf[method][ip] = d["Linf"]
# errH1[method][ip] = d["H1"]
# errE[method][ip] = d["E"]
# # print("d=",d)
# solver.writeXdmf()
# times[method] = time.time()-start
# for method in methods:
# print("errL2 %-20s" %(method), errL2[method])
# print()
# for method in methods:
# print("errLinf %-20s" %(method), errLinf[method])
# print()
# for method in methods:
# print("errH1 %-20s" %(method), errH1[method])
# for method in methods:
# print("errE %-20s" %(method), errE[method])
# # for method in methods:
# # errH1[method] = np.sqrt( eps*errH1[method]**2 + errL2[method]**2)
# for method in methods: print("%-20s %10g" %(method,times[method]))
#
# datatoplot = {}
# # datatoplot['L1'] = errL1
# datatoplot['L2'] = errL2
# datatoplot['Linf'] = errLinf
# datatoplot['H1'] = errH1
# datatoplot['E'] = errE
# methodsnames={}
# for method in methods: methodsnames[method]=method.lower()
# methodsnames["P1IFEM"] = "p1iim"
# methodsnames["CR1IFEM"] = "cr1iim"
# methodsnames["nitsche"] = "nit"
# methodsnames["nitschestabc"] = "sc"
# methodsnames["nitschestabnc"] = "snc"
# methodsnames["newnitsche"] = "new"
# methodsnames["newnitsche2"] = "new2"
#
# simfemplot.ploterrors(datatoplot, scale=scale)
#
#
# #---------------------------------------------------------#
# if __name__ == '__main__':
# main()
| 2.109375 | 2 |
examples/issues/issue529_obj.py | tgolsson/appJar | 666 | 12765855 | <reponame>tgolsson/appJar
import sys
sys.path.append("../../")
from numpy import sin, pi, arange
from appJar import gui
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg as addToolbar
import random
from mpl_toolkits.mplot3d import Axes3D
# Minimal reproduction for appJar issue #529: embed a matplotlib figure with
# a 3-D axes inside an appJar window and scatter a few points.
with gui() as app:
    fig = app.addPlotFig("p1", showNav=True)
    axes = fig.add_subplot(111, projection='3d')
    axes.scatter([1, 2], [1, 2], [1, 2])
| 2.1875 | 2 |
ttarray/core/creation.py | sonnerm/ttarray | 0 | 12765856 | from .array import TensorTrainArray
from .slice import TensorTrainSlice
from .dispatch import implement_function
from ..raw import find_balanced_cluster,trivial_decomposition
import numpy as np
def _get_cluster_chi_array(shape,cluster,chi):
if cluster is None:
cluster=find_balanced_cluster(shape)
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([1]+list(chi)+[1])
return cluster,chi
def _get_cluster_chi_slice(shape,cluster,chi):
if len(shape)<2:
raise ValueError("TensorTrainSlice has at least 2 dimensions.")
if cluster is None:
cluster=find_balanced_cluster(shape[1:-1])
if isinstance(chi,int):
chi=[chi]*(len(cluster)-1)
chi=tuple([shape[0]]+list(chi)+[shape[-1]])
return cluster,chi
@implement_function("empty","array")
def empty(shape, dtype=np.float64, cluster=None, chi=1):
    """Create an uninitialized TensorTrainArray.

    ``cluster`` controls how the dimensions are split over the cores
    (balanced automatically if None); ``chi`` sets the internal bond sizes.
    """
    cluster, chi = _get_cluster_chi_array(shape, cluster, chi)
    cores = [np.empty((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    return TensorTrainArray.frommatrices(cores)
@implement_function("empty","slice")
def empty_slice(shape, dtype=np.float64, cluster=None, chi=1):
    """Create an uninitialized TensorTrainSlice; shape[0]/shape[-1] are boundary bonds."""
    cluster, chi = _get_cluster_chi_slice(shape, cluster, chi)
    cores = [np.empty((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    return TensorTrainSlice.frommatrices(cores)
@implement_function()
def empty_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
    """Uninitialized TT object mirroring *prototype* (array or slice)."""
    if dtype is None:
        dtype = prototype.dtype
    if shape is None:
        # Inherit the full TT structure from the prototype.
        shape, cluster, chi = prototype.shape, prototype.cluster, prototype.chi
    if isinstance(prototype, TensorTrainArray):
        return empty(shape, dtype, cluster, chi)
    if isinstance(prototype, TensorTrainSlice):
        return empty_slice(shape, dtype, cluster, chi)
    return NotImplemented
@implement_function("zeros","array")
def zeros(shape, dtype=np.float64, cluster=None, chi=1):
    """All-zero TensorTrainArray with the given shape/cluster/bond dimensions."""
    cluster, chi = _get_cluster_chi_array(shape, cluster, chi)
    cores = [np.zeros((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    return TensorTrainArray.frommatrices(cores)
@implement_function("zeros","slice")
def zeros_slice(shape, dtype=np.float64, cluster=None, chi=1):
    """All-zero TensorTrainSlice with the given shape/cluster/bond dimensions."""
    cluster, chi = _get_cluster_chi_slice(shape, cluster, chi)
    cores = [np.zeros((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    return TensorTrainSlice.frommatrices(cores)
@implement_function()
def zeros_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
    """All-zero TT object mirroring *prototype* (array or slice)."""
    if dtype is None:
        dtype = prototype.dtype
    if shape is None:
        shape, cluster, chi = prototype.shape, prototype.cluster, prototype.chi
    if isinstance(prototype, TensorTrainArray):
        return zeros(shape, dtype, cluster, chi)
    if isinstance(prototype, TensorTrainSlice):
        return zeros_slice(shape, dtype, cluster, chi)
    return NotImplemented
@implement_function("ones","array")
def ones(shape, dtype=np.float64, cluster=None, chi=1,*,order=None):
    """TensorTrainArray of ones.

    Each core is zero except for its (0, ..., 0) bond slice, which is set to
    ones; the TT contraction of those slices yields 1 everywhere.
    """
    cluster, chi = _get_cluster_chi_array(shape, cluster, chi)
    cores = []
    for left, dims, right in zip(chi[:-1], cluster, chi[1:]):
        core = np.zeros((left,) + tuple(dims) + (right,), dtype)
        core[0, ..., 0] = np.ones(core.shape[1:-1], dtype)
        cores.append(core)
    return TensorTrainArray.frommatrices(cores)
@implement_function("ones","slice")
def ones_slice(shape, dtype=np.float64, cluster=None, chi=1,*,order=None):
    """TensorTrainSlice of ones; the boundary cores carry full boundary bonds."""
    cluster, chi = _get_cluster_chi_slice(shape, cluster, chi)
    cores = [np.zeros((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    if len(cores) == 1:
        cores[0] = np.ones(cores[0].shape, dtype)
    else:
        cores[0][..., 0] = np.ones(cores[0].shape[:-1], dtype)
        cores[-1][0, ...] = np.ones(cores[-1].shape[1:], dtype)
        for core in cores[1:-1]:
            core[0, ..., 0] = np.ones(core.shape[1:-1], dtype)
    return TensorTrainSlice.frommatrices(cores)
@implement_function()
def ones_like(prototype, dtype=None, shape=None, cluster=None, chi=1,*,order=None,subok=None):
    """TT object of ones mirroring *prototype* (array or slice)."""
    if dtype is None:
        dtype = prototype.dtype
    if shape is None:
        shape, cluster, chi = prototype.shape, prototype.cluster, prototype.chi
    if isinstance(prototype, TensorTrainArray):
        return ones(shape, dtype, cluster, chi)
    if isinstance(prototype, TensorTrainSlice):
        return ones_slice(shape, dtype, cluster, chi)
    return NotImplemented
@implement_function("full","array")
def full(shape, fill_value, dtype=None, cluster=None, chi=1,*,order=None):
    """TensorTrainArray filled with *fill_value* (dtype inferred if omitted)."""
    cluster, chi = _get_cluster_chi_array(shape, cluster, chi)
    if dtype is None:
        dtype = np.array(fill_value).dtype
    cores = []
    for left, dims, right in zip(chi[:-1], cluster, chi[1:]):
        core = np.zeros((left,) + tuple(dims) + (right,), dtype)
        core[0, ..., 0] = np.ones(core.shape[1:-1], dtype)
        cores.append(core)
    # Scaling a single core scales the whole contraction by fill_value.
    cores[-1] *= fill_value
    return TensorTrainArray.frommatrices(cores)
@implement_function("full","slice")
def full_slice(shape, fill_value, dtype=None, cluster=None, chi=1,*,order=None):
    """TensorTrainSlice filled with *fill_value* (dtype inferred if omitted)."""
    cluster, chi = _get_cluster_chi_slice(shape, cluster, chi)
    if dtype is None:
        dtype = np.array(fill_value).dtype
    cores = [np.zeros((left,) + tuple(dims) + (right,), dtype)
             for left, dims, right in zip(chi[:-1], cluster, chi[1:])]
    if len(cores) == 1:
        cores[0] = np.ones(cores[0].shape, dtype)
    else:
        cores[0][..., 0] = np.ones(cores[0].shape[:-1], dtype)
        cores[-1][0, ...] = np.ones(cores[-1].shape[1:], dtype)
        for core in cores[1:-1]:
            core[0, ..., 0] = np.ones(core.shape[1:-1], dtype)
    # Scaling a single core scales the whole contraction by fill_value.
    cores[-1] *= fill_value
    return TensorTrainSlice.frommatrices(cores)
@implement_function()
def full_like(prototype, fill_value, dtype=None, shape=None, cluster=None, chi=1):
    """TT object filled with *fill_value*, mirroring *prototype* (array or slice)."""
    if dtype is None:
        dtype = prototype.dtype
    if shape is None:
        shape, cluster, chi = prototype.shape, prototype.cluster, prototype.chi
    if isinstance(prototype, TensorTrainArray):
        return full(shape, fill_value, dtype, cluster, chi)
    if isinstance(prototype, TensorTrainSlice):
        return full_slice(shape, fill_value, dtype, cluster, chi)
    return NotImplemented
@implement_function("eye","array")
def eye(N, M=None, k=0, dtype=np.float64, cluster=None):
    """Identity-like TensorTrainArray (square only; rectangular M unsupported).

    NOTE(review): this delegates to :func:`diag`, which is currently an
    unimplemented stub, so the call returns None until diag is implemented.
    """
    # Idiom fix: compare against None with ``is not`` rather than ``!=``.
    if M is not None:
        raise NotImplementedError("not implemented yet ...")
    return diag(ones((N,), dtype=dtype, cluster=cluster), k)
@implement_function("identity","array")
def identity(n, dtype=None, cluster=None):
    """Square identity TensorTrainArray of size *n* (thin wrapper over eye)."""
    return eye(N=n, dtype=dtype, cluster=cluster)
@implement_function("diag","array")
def diag(v, k=0):
    """Extract a diagonal or construct a diagonal TT array.

    NOTE(review): unimplemented stub — returns None; ``eye`` and
    ``identity`` depend on it and therefore also return None.
    """
    pass
@implement_function("array","array")
def array(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
    """Convert *ar* to a TensorTrainArray.

    TensorTrainSlice input is recast by reshaping its first core; dense
    input is decomposed via ``fromdense``.

    NOTE(review): the TensorTrainArray branch (copy/recluster) is an
    unimplemented stub and currently falls through, returning None.
    """
    if isinstance(ar, TensorTrainArray):
        # TODO: copy if necessary, recluster if necessary.
        pass
    elif isinstance(ar, TensorTrainSlice):
        matrices = ar.asmatrices()
        if not copy:
            matrices[0] = matrices[0][None, ...]
        else:
            # NOTE(review): the copying path is an unimplemented stub.
            pass
        result = TensorTrainArray.frommatrices(matrices)
        if cluster is not None:
            result.recluster(cluster)
        return result
    else:
        return TensorTrainArray.fromdense(ar, dtype, cluster)
@implement_function("array","slice")
def slice(ar, dtype=None, cluster=None, copy=True,*,ndim=0):
    """Convert *ar* to a TensorTrainSlice.

    NOTE(review): the TT-slice and TT-array input branches are unimplemented
    stubs and currently fall through, returning None; only dense input works.
    """
    if isinstance(ar, TensorTrainSlice):
        # TODO: copy if requested.
        pass
    elif isinstance(ar, TensorTrainArray):
        # TODO: recluster, then recast.
        pass
    else:
        return TensorTrainSlice.fromdense(ar, dtype, cluster)
@implement_function("asarray","array")
def asarray(ar, dtype=None, cluster=None):
    """Like :func:`array`, but never copies an existing TT object."""
    return array(ar, dtype, cluster=cluster, copy=False)
@implement_function("asarray","slice")
def asslice(ar, dtype=None, cluster=None):
    """Like :func:`slice`, but never copies an existing TT object."""
    return slice(ar, dtype, cluster=cluster, copy=False)
@implement_function("asanyarray","array")
def asanyarray(ar, dtype=None, cluster=None):
    """Alias of :func:`asarray` (no TT subclasses to pass through)."""
    return array(ar, dtype, cluster=cluster, copy=False)
@implement_function("asanyarray","slice")
def asanyslice(ar, dtype=None, cluster=None):
    """Alias of :func:`asslice` (no TT subclasses to pass through)."""
    return slice(ar, dtype, cluster=cluster, copy=False)
@implement_function("frombuffer","array")
def frombuffer(buffer, dtype=float, count=- 1, offset=0, cluster=None):
    """TT array from a raw byte buffer (see ``numpy.frombuffer``)."""
    dense = np.frombuffer(buffer, dtype, count, offset)
    return array(dense, dtype=dtype, cluster=cluster)
@implement_function("fromiter","array")
def fromiter(iter, dtype, count=- 1, cluster=None):
    """TT array from an iterable (see ``numpy.fromiter``)."""
    dense = np.fromiter(iter, dtype, count)
    return array(dense, dtype=dtype, cluster=cluster)
@implement_function("fromfunction","array")
def fromfunction(function, shape, dtype=float, cluster=None, **kwargs):
    """TT array built by evaluating *function* densely first.

    Should be upgraded to support ttcross eventually, so behaviour may
    change if *function* is not sane.
    """
    dense = np.fromfunction(function, shape, dtype=dtype, **kwargs)
    return array(dense, dtype=dtype, cluster=cluster)
@implement_function("fromfunction","slice")
def fromfunction_slice(function, shape, dtype=float, cluster=None, **kwargs):
    """TT slice built by evaluating *function* densely first.

    Should be upgraded to support ttcross eventually, so behaviour may
    change if *function* is not sane.
    """
    dense = np.fromfunction(function, shape, dtype=dtype, **kwargs)
    return slice(dense, dtype=dtype, cluster=cluster)
@implement_function()
def copy(a,*,order=None,subok=None):
    """Deep-copy a TensorTrainArray/TensorTrainSlice; NotImplemented otherwise."""
    if isinstance(a, (TensorTrainArray, TensorTrainSlice)):
        return a.copy()
    return NotImplemented
@implement_function("arange","array")
def arange(*args, **kwargs):
    """TT version of ``numpy.arange``.

    The clustering of the result can be selected either as a fifth
    positional argument (after start, stop, step, dtype) or via the
    ``cluster`` keyword.

    Bug fix: the original built the array but never returned it, so every
    call yielded None.
    """
    if len(args) == 5:
        # arange(start, stop, step, dtype, cluster)
        return array(np.arange(*args[:-1], **kwargs), cluster=args[-1])
    if "cluster" in kwargs:
        cluster = kwargs.pop("cluster")
        return array(np.arange(*args, **kwargs), cluster=cluster)
    return array(np.arange(*args, **kwargs))
@implement_function("linspace","array")
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, cluster=None):
    """TT version of ``numpy.linspace``.

    Bug fix: the original discarded the result instead of returning it.

    NOTE(review): ``todense`` is applied unconditionally to *start*/*stop*,
    so plain scalars are not accepted — confirm whether that is intended.
    """
    dense = np.linspace(todense(start), todense(stop), num, endpoint, retstep, dtype, axis)
    return array(dense, cluster=cluster)
# @implement_function("linspace","slice")
# def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0,cluster=None):
# slice(np.linspace(todense(start),todense(stop),num,endpoint,retstep,dtype,axis),cluster=cluster)
@implement_function("logspace","array")
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, cluster=None):
    """TT version of ``numpy.logspace`` — not implemented yet."""
    raise NotImplementedError("not yet")
# @implement_function("logspace","slice")
# def logspace_slice(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0,cluster=None):
# slice(np.logspace(todense(start),todense(stop),num,endpoint,base,dtype,axis),cluster=cluster)
@implement_function("geomspace","array")
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0, cluster=None):
    """TT version of ``numpy.geomspace`` — not implemented yet."""
    raise NotImplementedError("not yet")
# @implement_function("geomspace","slice")
# def geomspace_slice(start, stop, num=50, endpoint=True, dtype=None, axis=0,cluster=None):
# slice(np.geomspace(todense(start),todense(stop),num,endpoint,dtype,axis),cluster=cluster)
# def fromdense(ar,dtype=None,cluster=None):
# return TensorTrainArray.fromdense(ar,dtype,cluster)
# def fromdense_slice(ar,dtype,cluster):
# return TensorTrainSlice.fromdense(ar,dtype,cluster)
def todense(ttar):
    """Materialize a TT object as a dense array via its ``todense`` method."""
    return ttar.todense()
@implement_function("asfarray","array")
def asfarray(ttar, dtype=None):
    """Coerce to a TensorTrainArray with a floating (inexact) dtype."""
    dtype = dtype if np.issubdtype(dtype, np.inexact) else float
    return asarray(ttar, dtype=dtype)
@implement_function("asfarray","slice")
def asfslice(ttar, dtype=None):
    """Coerce to a TensorTrainSlice with a floating (inexact) dtype."""
    dtype = dtype if np.issubdtype(dtype, np.inexact) else float
    return asslice(ttar, dtype=dtype)
| 2.6875 | 3 |
dcms/media/urls.py | yifei-fu/dcms | 1 | 12765857 | from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from .views import *
# REST router exposing the image and file viewsets, plus raw upload views
# that take the target filename from the URL.
router = DefaultRouter()
router.register('image', ImageViewSet)
router.register('file', FileViewSet)

urlpatterns = [
    url(r'', include(router.urls)),
    url(r'^upload_image/(?P<filename>[^/]+)$', ImageUploadView.as_view()),
    url(r'^upload_file/(?P<filename>[^/]+)$', FileUploadView.as_view()),
]
| 1.914063 | 2 |
werf_chart_repo_doit_tasks/__init__.py | ilya-lesikov/werf-chart-repo-tasks | 0 | 12765858 | <filename>werf_chart_repo_doit_tasks/__init__.py
"""werf_chart_repo_doit_tasks - Doit Tasks for managing Werf chart repo"""
# Package metadata consumed by packaging tools; __all__ is empty because the
# package exposes no public names from this module.
__version__ = '1.0.6'
__author__ = '<NAME> <<EMAIL>>'
__all__ = []
| 1.039063 | 1 |
signal_processing.py | sydneyhill3901/Heart-rate-measurement-using-camera | 0 | 12765859 | import os, csv
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from scipy import signal
class ProcessSignalData(object):
def __init__(self):
# path to video data from signal_output.py
self.dir = './processed_new/videos'
self.full_path = ''
self.dataframe = pd.DataFrame()
self.real_data = pd.DataFrame()
self.fake_data = pd.DataFrame()
self.dataset = pd.DataFrame()
self.real_data_mean = {}
self.fake_data_mean = {}
self.real_data_var = {}
self.fake_data_var = {}
self.real_data_std = {}
self.fake_data_std = {}
self.real_data_psd = {}
self.fake_data_psd = {}
self.real_data_csd = {}
self.fake_data_csd = {}
self.real_data_f1 = {}
self.fake_data_f1 = {}
self.real_data_test = {}
self.fake_data_test = {}
self.real_data_RCCE = {}
self.real_data_LCCE = {}
self.real_data_LCRC = {}
self.fake_data_RCCE = {}
self.fake_data_LCCE = {}
self.fake_data_LCRC = {}
self.real_count = 0
self.fake_count = 0
self.vid_count = 0
self.data_path_lcce = './lcce250.csv'
self.data_path_lcrc = './lcrc250.csv'
self.data_path_rcce = './rcce250.csv'
self.data_path_m = './mean_data16.csv'
self.data_path_v = './new_chrom/var_data16.csv'
self.data_path_s = './new_chrom/std_data16.csv'
self.data_path_p = './new_chrom/psd_data16.csv'
self.data_path_c = './new_chrom/csd_data_128.csv'
self.data_path_c = './f1_data_128.csv'
self.log_path = './process_log.csv'
self.test_data_lcce_path = './new_chrom/test_lcce.csv'
self.test_data_lcrc_path = './new_chrom/test_lcrc.csv'
self.test_data_rcce_path = './new_chrom/test_rcce.csv'
self.train_data_lcce_path = './new_chrom/train_lcce.csv'
self.train_data_lcrc_path = './new_chrom/train_lcrc.csv'
self.train_data_rcce_path = './new_chrom/train_rcce.csv'
self.test_data_v_path = './new_chrom/train_data_v32c.csv'
self.train_data_v_path = './new_chrom/test_data_v32c.csv'
self.test_data_m_path = './new_chrom/train_data_m32c.csv'
self.train_data_m_path = './new_chrom/test_data_m32c.csv'
self.test_data_s_path = './new_chrom/train_data_s32c.csv'
self.train_data_s_path = './new_chrom/test_data_s32c.csv'
self.test_data_p_path = './new_chrom/train_data_p128c.csv'
self.train_data_p_path = './new_chrom/test_data_p128c.csv'
self.test_data_c_path = './train_data_c128c.csv'
self.train_data_c_path = './test_data_c128c.csv'
self.test_data_f1_path = './train_data_f1-128c.csv'
self.train_data_f1_path = './test_data_f1-128c.csv'
self.test_data_test_path = './train_data_test.csv'
self.train_data_test_path = './test_data_test.csv'
self.main()
def new_chrom(self, red, green, blue):
# calculation of new X and Y
Xcomp = 3 * red - 2 * green
Ycomp = (1.5 * red) + green - (1.5 * blue)
# standard deviations
sX = np.std(Xcomp)
sY = np.std(Ycomp)
alpha = sX / sY
# -- rPPG signal
bvp = Xcomp - alpha * Ycomp
return bvp
def main(self):
    """Walk ``self.dir`` for per-video colour-signal CSVs, derive
    green-channel and chrominance difference features per video, and
    write the accumulated real/fake feature tables as train/test CSVs.

    Side effects: sets ``self.full_path``/``self.dataset`` while walking,
    fills the ``self.real_*``/``self.fake_*`` accumulator dicts, bumps
    the counters, and writes the CSV files whose paths were configured
    in ``__init__``.

    Changes vs. original: fixed ``r_p``/``f_p`` transposing the *std*
    frames instead of the *psd* frames; removed dead stores and unused
    locals (unused RGB column reads, unused welch/csd products); replaced
    the deprecated ``DataFrame.append`` with ``pd.concat``.
    """
    # number of leading video frames to process per file
    sample_length = 250
    # window (in frames) for the mean/var/std aggregations
    group_size = 32
    # FFT segment length for the power spectral densities
    psd_size = 128

    def windows(series):
        # consecutive, non-overlapping group_size chunks of a series
        return [series.iloc[i:i + group_size]
                for i in range(0, len(series) - group_size + 1, group_size)]

    def f1_feature(csd_vals, re_part, im_part):
        # f1 = (real, imag) at the peak-magnitude CSD bin plus the mean
        # real/imag parts over all bins
        j = np.argmax(np.abs(csd_vals))
        return np.array([re_part[j], im_part[j],
                         np.mean(np.asarray(re_part)),
                         np.mean(np.asarray(im_part))])

    for paths, subdir, files in os.walk(self.dir):
        for file in files:
            if not file.endswith('.csv'):
                continue
            self.full_path = os.path.join(paths, file)
            lowered = self.full_path.lower()
            # skip rejected takes, stray text files and imposter videos
            if 'rejected' in lowered or '.txt' in lowered or 'imposter' in lowered:
                continue
            print(self.full_path)
            self.dataset = pd.read_csv(self.full_path)
            # per-region chrominance signals, truncated to sample_length
            # (only the regions actually consumed below are read; the
            # original also read every R/G/B column into unused locals)
            right_C = self.dataset['RC-chrom'].iloc[:sample_length]
            left_C = self.dataset['LC-Chrom'].iloc[:sample_length]
            center_C = self.dataset['CE-chrom'].iloc[:sample_length]
            outerR_C = self.dataset['OR-chrom'].iloc[:sample_length]
            outerL_C = self.dataset['OL-chrom'].iloc[:sample_length]
            chrom_R = right_C
            chrom_L = left_C
            chrom_CE = center_C
            chrom_OL = outerL_C
            chrom_OR = outerR_C
            # green-channel absolute differences between paired regions
            difg_LCRC = (self.dataset['RC-G'].iloc[:sample_length]
                         - self.dataset['LC-G'].iloc[:sample_length]).abs()
            difg_o1 = (self.dataset['C-G'].iloc[:sample_length]
                       - self.dataset['F-G'].iloc[:sample_length]).abs()
            difg_o2 = (self.dataset['OR-G'].iloc[:sample_length]
                       - self.dataset['OL-G'].iloc[:sample_length]).abs()
            # chrominance absolute differences between paired regions
            # (the original computed difc_LCRC/LCCe/RCCe twice with
            # identical values; only the surviving definitions are kept)
            difc_o1 = (self.dataset['C-chrom'].iloc[:sample_length]
                       - self.dataset['F-chrom'].iloc[:sample_length]).abs()
            difc_o2 = (self.dataset['OR-chrom'].iloc[:sample_length]
                       - self.dataset['OL-chrom'].iloc[:sample_length]).abs()
            difc_LCRC = (chrom_R.iloc[:sample_length] - chrom_L.iloc[:sample_length]).abs()
            difc_LCCe = (chrom_L.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
            difc_RCCe = (chrom_R.iloc[:sample_length] - chrom_CE.iloc[:sample_length]).abs()
            # non-overlapping windows for the aggregate statistics
            difg_LCRC_lst = windows(difg_LCRC)
            difg_o1_lst = windows(difg_o1)
            difg_o2_lst = windows(difg_o2)
            difc_LCRC_lst = windows(difc_LCRC)
            difc_o1_lst = windows(difc_o1)
            difc_o2_lst = windows(difc_o2)
            # windowed means
            difg_LCRC_mean = np.array([w.mean() for w in difg_LCRC_lst])
            difc_LCRC_mean = np.array([w.mean() for w in difc_LCRC_lst])
            print("MEAN")
            print(difc_LCRC_mean)
            difg_o1_mean = np.array([w.mean() for w in difg_o1_lst])
            difc_o1_mean = np.array([w.mean() for w in difc_o1_lst])
            difg_o2_mean = np.array([w.mean() for w in difg_o2_lst])
            difc_o2_mean = np.array([w.mean() for w in difc_o2_lst])
            # windowed variances
            difg_LCRC_var = np.array([w.var() for w in difg_LCRC_lst])
            difc_LCRC_var = np.array([w.var() for w in difc_LCRC_lst])
            print("VAR")
            print(difc_LCRC_var)
            difg_o1_var = np.array([w.var() for w in difg_o1_lst])
            difc_o1_var = np.array([w.var() for w in difc_o1_lst])
            difg_o2_var = np.array([w.var() for w in difg_o2_lst])
            difc_o2_var = np.array([w.var() for w in difc_o2_lst])
            # windowed standard deviations
            difg_LCRC_std = np.array([w.std() for w in difg_LCRC_lst])
            difc_LCRC_std = np.array([w.std() for w in difc_LCRC_lst])
            print("STD")
            print(difc_LCRC_std)
            difg_o1_std = np.array([w.std() for w in difg_o1_lst])
            difc_o1_std = np.array([w.std() for w in difc_o1_lst])
            difg_o2_std = np.array([w.std() for w in difg_o2_lst])
            difc_o2_std = np.array([w.std() for w in difc_o2_lst])
            # power spectral densities (Welch) of the chrominance diffs;
            # only the three consumed below are computed (the original
            # also ran welch on difg_LCRC / o1 / o2 and discarded them)
            f, difc_LCCe_psd = signal.welch(difc_LCCe, nperseg=psd_size)
            f, difc_RCCe_psd = signal.welch(difc_RCCe, nperseg=psd_size)
            f, difc_LCRC_psd = signal.welch(difc_LCRC, nperseg=psd_size)
            print("PSD")
            print(difc_LCRC_psd)
            # cross power spectral densities between raw chrominance
            # signals; csd cannot handle NaNs, so zero-fill in place first
            left_C.fillna(0, inplace=True)
            center_C.fillna(0, inplace=True)
            right_C.fillna(0, inplace=True)
            outerL_C.fillna(0, inplace=True)
            outerR_C.fillna(0, inplace=True)
            f, difc_LCCe_v_csd = signal.csd(left_C, center_C, nperseg=128)
            f, difc_LCRC_v_csd = signal.csd(left_C, right_C, nperseg=128)
            f, difc_RCCe_v_csd = signal.csd(right_C, center_C, nperseg=128)
            f, difc_LCOL_v_csd = signal.csd(left_C, outerL_C, nperseg=128)
            f, difc_RCOR_v_csd = signal.csd(right_C, outerR_C, nperseg=128)
            # split the complex CSDs into real (…_0) and imaginary (…_1)
            difc_LCCe_csd_0 = [v.real for v in difc_LCCe_v_csd]
            difc_LCCe_csd_1 = [v.imag for v in difc_LCCe_v_csd]
            difc_LCRC_csd_0 = [v.real for v in difc_LCRC_v_csd]
            difc_LCRC_csd_1 = [v.imag for v in difc_LCRC_v_csd]
            difc_RCCe_csd_0 = [v.real for v in difc_RCCe_v_csd]
            difc_RCCe_csd_1 = [v.imag for v in difc_RCCe_v_csd]
            difc_LCOL_csd_0 = [v.real for v in difc_LCOL_v_csd]
            difc_LCOL_csd_1 = [v.imag for v in difc_LCOL_v_csd]
            difc_RCOR_csd_0 = [v.real for v in difc_RCOR_v_csd]
            difc_RCOR_csd_1 = [v.imag for v in difc_RCOR_v_csd]
            # f1 features per region pair
            f1LCCe = f1_feature(difc_LCCe_v_csd, difc_LCCe_csd_0, difc_LCCe_csd_1)
            f1LCRC = f1_feature(difc_LCRC_v_csd, difc_LCRC_csd_0, difc_LCRC_csd_1)
            f1RCCe = f1_feature(difc_RCCe_v_csd, difc_RCCe_csd_0, difc_RCCe_csd_1)
            f1LCOL = f1_feature(difc_LCOL_v_csd, difc_LCOL_csd_0, difc_LCOL_csd_1)
            f1RCOR = f1_feature(difc_RCOR_v_csd, difc_RCOR_csd_0, difc_RCOR_csd_1)
            # assemble the per-video feature vectors
            derived_data_mean = np.concatenate([difg_LCRC_mean, difc_LCRC_mean, difg_o1_mean, difc_o1_mean,
                                                difg_o2_mean, difc_o2_mean])
            derived_data_var = np.concatenate([difg_LCRC_var, difc_LCRC_var, difg_o1_var, difc_o1_var,
                                               difg_o2_var, difc_o2_var])
            derived_data_std = np.concatenate([difg_LCRC_std, difc_LCRC_std, difg_o1_std, difc_o1_std,
                                               difg_o2_std, difc_o2_std])
            derived_data_psd = np.concatenate([difc_LCCe_psd, difc_LCRC_psd, difc_RCCe_psd])
            derived_data_csd = np.concatenate([difc_LCCe_csd_0, difc_LCCe_csd_1, difc_LCRC_csd_0,
                                               difc_LCRC_csd_1, difc_RCCe_csd_0, difc_RCCe_csd_1])
            derived_data_f1 = np.concatenate([f1LCCe, f1LCRC, f1RCCe])
            derived_data_test = np.concatenate([f1LCCe, f1LCRC, f1RCCe, f1LCOL, f1RCOR, difc_LCRC_std,
                                                difc_LCRC_var, difc_LCRC_psd, difc_LCRC_mean])
            # route the features into the real/fake accumulators
            # ('fake' anywhere in the lower-cased path marks a fake video)
            if 'fake' in lowered:
                self.fake_data_LCCE[self.fake_count] = difc_LCCe
                self.fake_data_LCRC[self.fake_count] = difc_LCRC
                self.fake_data_RCCE[self.fake_count] = difc_RCCe
                self.fake_data_mean[self.fake_count] = derived_data_mean
                self.fake_data_var[self.fake_count] = derived_data_var
                self.fake_data_std[self.fake_count] = derived_data_std
                self.fake_data_psd[self.fake_count] = derived_data_psd
                self.fake_data_csd[self.fake_count] = derived_data_csd
                self.fake_data_f1[self.fake_count] = derived_data_f1
                self.fake_data_test[self.fake_count] = derived_data_test
                self.fake_count += 1
            else:
                self.real_data_LCCE[self.real_count] = difc_LCCe
                self.real_data_LCRC[self.real_count] = difc_LCRC
                self.real_data_RCCE[self.real_count] = difc_RCCe
                self.real_data_mean[self.real_count] = derived_data_mean
                self.real_data_var[self.real_count] = derived_data_var
                self.real_data_std[self.real_count] = derived_data_std
                self.real_data_psd[self.real_count] = derived_data_psd
                self.real_data_csd[self.real_count] = derived_data_csd
                self.real_data_f1[self.real_count] = derived_data_f1
                self.real_data_test[self.real_count] = derived_data_test
                self.real_count += 1
            self.vid_count += 1
    # ---- all videos processed: build dataframes (one column per video)
    self.real_df_LCCE = pd.DataFrame(self.real_data_LCCE)
    self.real_df_LCRC = pd.DataFrame(self.real_data_LCRC)
    self.real_df_RCCE = pd.DataFrame(self.real_data_RCCE)
    self.fake_df_LCCE = pd.DataFrame(self.fake_data_LCCE)
    self.fake_df_LCRC = pd.DataFrame(self.fake_data_LCRC)
    self.fake_df_RCCE = pd.DataFrame(self.fake_data_RCCE)
    self.real_df_m = pd.DataFrame(self.real_data_mean)
    self.fake_df_m = pd.DataFrame(self.fake_data_mean)
    self.real_df_v = pd.DataFrame(self.real_data_var)
    self.fake_df_v = pd.DataFrame(self.fake_data_var)
    self.real_df_s = pd.DataFrame(self.real_data_std)
    self.fake_df_s = pd.DataFrame(self.fake_data_std)
    self.real_df_p = pd.DataFrame(self.real_data_psd)
    self.fake_df_p = pd.DataFrame(self.fake_data_psd)
    self.real_df_csp = pd.DataFrame(self.real_data_csd)
    self.fake_df_csp = pd.DataFrame(self.fake_data_csd)
    self.real_df_f1 = pd.DataFrame(self.real_data_f1)
    self.fake_df_f1 = pd.DataFrame(self.fake_data_f1)
    self.real_df_test = pd.DataFrame(self.real_data_test)
    self.fake_df_test = pd.DataFrame(self.fake_data_test)
    # transpose to one row per video, one column per feature
    r_lcce = self.real_df_LCCE.transpose()
    r_lcrc = self.real_df_LCRC.transpose()
    r_rcce = self.real_df_RCCE.transpose()
    f_lcce = self.fake_df_LCCE.transpose()
    f_lcrc = self.fake_df_LCRC.transpose()
    f_rcce = self.fake_df_RCCE.transpose()
    r_m = self.real_df_m.transpose()
    f_m = self.fake_df_m.transpose()
    r_v = self.real_df_v.transpose()
    f_v = self.fake_df_v.transpose()
    r_s = self.real_df_s.transpose()
    f_s = self.fake_df_s.transpose()
    # BUG FIX: these two transposed real_df_s/fake_df_s (the std frames),
    # silently writing std features into every *_p (PSD) output file
    r_p = self.real_df_p.transpose()
    f_p = self.fake_df_p.transpose()
    r_c = self.real_df_csp.transpose()
    f_c = self.fake_df_csp.transpose()
    r_f = self.real_df_f1.transpose()
    f_f = self.fake_df_f1.transpose()
    r_t = self.real_df_test.transpose()
    f_t = self.fake_df_test.transpose()
    # written *before* the Target column is added (as in the original)
    r_f.to_csv("./real_f1.csv", index=False)
    f_f.to_csv("./fake_f1.csv", index=False)
    # class labels: 1 = real, 0 = fake
    for real_df in (r_lcce, r_lcrc, r_rcce, r_m, r_v, r_s, r_p, r_c, r_f, r_t):
        real_df['Target'] = 1
    for fake_df in (f_lcce, f_lcrc, f_rcce, f_m, f_v, f_s, f_p, f_c, f_f, f_t):
        fake_df['Target'] = 0
    # stack real on top of fake (pd.concat replaces deprecated .append)
    rf_lcce = pd.concat([r_lcce, f_lcce])
    rf_lcrc = pd.concat([r_lcrc, f_lcrc])
    rf_rcce = pd.concat([r_rcce, f_rcce])
    rf_m = pd.concat([r_m, f_m])
    rf_v = pd.concat([r_v, f_v])
    rf_s = pd.concat([r_s, f_s])
    rf_p = pd.concat([r_p, f_p])
    rf_c = pd.concat([r_c, f_c])
    rf_f = pd.concat([r_f, f_f])
    rf_t = pd.concat([r_t, f_t])
    # NOTE(review): train_test_split returns (train, test); the original
    # binds the first (80%) share to the *test_* variable. Preserved
    # as-is — together with the swapped path names configured in
    # __init__ this may be intentional; confirm before changing.
    test_v, train_v = train_test_split(rf_v, test_size=0.2)
    test_m, train_m = train_test_split(rf_m, test_size=0.2)
    test_s, train_s = train_test_split(rf_s, test_size=0.2)
    test_p, train_p = train_test_split(rf_p, test_size=0.2)
    test_c, train_c = train_test_split(rf_c, test_size=0.2)
    test_f, train_f = train_test_split(rf_f, test_size=0.2)
    test_t, train_t = train_test_split(rf_t, test_size=0.2)
    test_lcce, train_lcce = train_test_split(rf_lcce, test_size=0.2)
    test_lcrc, train_lcrc = train_test_split(rf_lcrc, test_size=0.2)
    test_rcce, train_rcce = train_test_split(rf_rcce, test_size=0.2)
    # persist every feature family to its configured path
    train_lcce.to_csv(self.train_data_lcce_path, index=False)
    train_lcrc.to_csv(self.train_data_lcrc_path, index=False)
    train_rcce.to_csv(self.train_data_rcce_path, index=False)
    test_lcce.to_csv(self.test_data_lcce_path, index=False)
    test_lcrc.to_csv(self.test_data_lcrc_path, index=False)
    test_rcce.to_csv(self.test_data_rcce_path, index=False)
    train_s.to_csv(self.train_data_s_path, index=False)
    test_s.to_csv(self.test_data_s_path, index=False)
    train_v.to_csv(self.train_data_v_path, index=False)
    test_v.to_csv(self.test_data_v_path, index=False)
    train_m.to_csv(self.train_data_m_path, index=False)
    test_m.to_csv(self.test_data_m_path, index=False)
    train_p.to_csv(self.train_data_p_path, index=False)
    test_p.to_csv(self.test_data_p_path, index=False)
    train_c.to_csv(self.train_data_c_path, index=False)
    test_c.to_csv(self.test_data_c_path, index=False)
    train_f.to_csv(self.train_data_f1_path, index=False)
    test_f.to_csv(self.test_data_f1_path, index=False)
    train_t.to_csv(self.train_data_test_path, index=False)
    test_t.to_csv(self.test_data_test_path, index=False)
    # full labelled CSD tables (written after Target was added)
    r_c.to_csv("./csd_real128.csv", index=False)
    f_c.to_csv("./csd_fake128.csv", index=False)
# Kick off the full feature-extraction pipeline at import time.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so that
# importing this module does not trigger the whole run — confirm intent.
p = ProcessSignalData()
| 2.4375 | 2 |
m31hst/paths.py | jonathansick/m31hst | 0 | 12765860 | <filename>m31hst/paths.py<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
"""
Access to HST data sets.
The environment variables $PHATDATA and $BROWNDATA should point to root
directories for the PHAT and Brown HLSP datasets.
2013-12-02 - Created by <NAME>
"""
import os
import glob
# Brown GO-10265 HLSP field identifiers and the two available ACS/WFC bands,
# used to validate arguments of brown_phot_path / brown_image_path below.
BROWNFIELDS = ('halo11', 'stream', 'disk', 'halo21', 'halo35a', 'halo35b')
BROWNBANDS = ('f606w', 'f814w')
def _instr_name(band):
"""Map instrument names to bands."""
if band in ["f475w", "f814w"]:
instr = "acs-wfc"
elif band in ["f110w", "f160w"]:
instr = "wfc3-ir"
elif band in ["f275w", "f336w"]:
instr = "wfc3-uvis"
return instr
def _phat_basedir(brick):
"""Base directory for PHAT brick data"""
return os.path.join(os.getenv('PHATDATA', None), "brick%02i" % brick)
class MissingData(Exception):
    """Raised when an expected data product is absent on disk.

    Derives from ``Exception`` (not ``BaseException``): ``BaseException``
    is reserved for system-exiting exceptions, and subclassing it made
    ``MissingData`` invisible to generic ``except Exception`` handlers.
    """
    pass
def phat_v2_phot_path(brick):
    """Return the v2 photometry catalog path for a PHAT brick.

    Parameters
    ----------
    brick : int
        Number of the brick (1-23).

    Raises
    ------
    MissingData
        If no unique matching ``*_v2_st.fits`` file exists under
        ``$PHATV2DATA``.
    """
    # Debug output of the data root; converted from a Python 2 print
    # statement (which is a SyntaxError on Python 3) to a call — output
    # is identical on both interpreters for a single argument.
    print(os.getenv('PHATV2DATA', None))
    paths = glob.glob(os.path.join(os.getenv('PHATV2DATA', None),
                                   "*-b%02i_*_v2_st.fits" % (brick)))
    # exactly one existing match is required
    if not len(paths) == 1:
        raise MissingData
    if not os.path.exists(paths[0]):
        raise MissingData
    return paths[0]
def phat_v2_ast_path():
    """Return the path of the v2 artificial star test table (table6.dat)."""
    data_root = os.getenv('PHATV2DATA')
    return os.path.join(data_root, 'table6.dat')
def phat_v2_ast_summary_path():
    """Return the path of the v2 artificial star summary table (table7.dat)."""
    data_root = os.getenv('PHATV2DATA')
    return os.path.join(data_root, 'table7.dat')
def phat_phot_path(brick, field, filterset, kind='gst'):
    """Get path to a photometry product for a specific PHAT field.

    Parameters
    ----------
    brick : int
        Number of the brick (1-23).
    field : int
        Number of the field within the brick.
    filterset : str
        Name of the filter set: ``f275w-f336w``.
    kind : str
        Product kind suffix (default ``gst``).

    Raises
    ------
    MissingData
        If no unique existing file matches the pattern.
    """
    pattern = "*-b%02i-f%02i_%s_v1_%s.fits" % (brick, field, filterset, kind)
    matches = glob.glob(os.path.join(_phat_basedir(brick), pattern))
    # exactly one existing match is required
    if len(matches) != 1 or not os.path.exists(matches[0]):
        raise MissingData
    return matches[0]
def phat_brick_path(brick, band):
    """Get path to the drizzled FITS image for a PHAT brick.

    Parameters
    ----------
    brick : int
        Number of the brick (1-23).
    band : str
        Name of the filter: e.g. ``f275w``.

    Raises
    ------
    MissingData
        If no unique existing image matches.
    """
    filename = "_".join(("hlsp_phat_hst",
                         _instr_name(band),
                         "*-m31-b%02i" % brick,
                         band,
                         "v1_drz.fits"))
    matches = glob.glob(os.path.join(_phat_basedir(brick), filename))
    if len(matches) != 1 or not os.path.exists(matches[0]):
        raise MissingData
    return matches[0]
def phat_field_path(brick, field, band):
    """Get path to the drizzled FITS image for a specific PHAT field.

    Parameters
    ----------
    brick : int
        Number of the brick (1-23).
    field : int
        Number of the field within the brick.
    band : str
        Name of the filter: e.g. ``f275w``.

    Raises
    ------
    MissingData
        If no unique existing image matches.
    """
    filename = "_".join(("hlsp_phat_hst",
                         _instr_name(band),
                         "*-m31-b%02i-f%02i" % (brick, field),
                         band,
                         "v1_drz.fits"))
    matches = glob.glob(os.path.join(_phat_basedir(brick), filename))
    if len(matches) != 1 or not os.path.exists(matches[0]):
        raise MissingData
    return matches[0]
def brown_phot_path(field, kind='cat'):
    """Get path for a Brown GO-10265 HLSP photometry product.

    Parameters
    ----------
    field : str
        Field name: halo11, stream, disk, halo21, halo35a, halo35b
    kind : str
        Data type. `cat` for photometry catalog, `art` for artificial stars
        and `msk` for a mask image.

    Raises
    ------
    ValueError
        For an unknown *field* or *kind*. (Explicit validation replaces
        ``assert``, which is silently stripped under ``python -O``.)
    MissingData
        If the resolved file does not exist.
    """
    if field not in BROWNFIELDS:
        raise ValueError("Unknown field: %r" % (field,))
    # extension per product kind
    extensions = {'cat': '_v2_cat.txt',
                  'art': '_v2_art.fits',
                  'msk': '_v2_msk.fits'}
    if kind not in extensions:
        raise ValueError("Unknown kind: %r" % (kind,))
    path = os.path.join(
        os.getenv('BROWNDATA', None),
        'hlsp_andromeda_hst_acs-wfc_%s_f606w-f814w%s' % (field, extensions[kind]))
    if not os.path.exists(path):
        raise MissingData
    return path
def brown_image_path(field, band):
    """Get path for a Brown GO-10265 HLSP drizzled image product.

    Parameters
    ----------
    field : str
        Field name: halo11, stream, disk, halo21, halo35a, halo35b
    band : str
        Filter name. Either f814w or f606w.

    Raises
    ------
    ValueError
        For an unknown *field* or *band*. (Explicit validation replaces
        ``assert``, which is silently stripped under ``python -O``.)
    MissingData
        If the resolved file does not exist.
    """
    if field not in BROWNFIELDS:
        raise ValueError("Unknown field: %r" % (field,))
    if band not in BROWNBANDS:
        raise ValueError("Unknown band: %r" % (band,))
    path = os.path.join(
        os.getenv('BROWNDATA', None),
        'hlsp_andromeda_hst_acs-wfc_%s_%s_v2_img.fits' % (field, band))
    if not os.path.exists(path):
        raise MissingData
    return path
| 2.40625 | 2 |
Nas/auto_keras.py | motkeg/experimental | 0 | 12765861 | import autokeras as ak
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
# Training hyper-parameters and dataset location for the AutoKeras search.
EPOCHS = 50
# per-step batch size fed to flow_from_directory
BATCH = 5
# run name used for TensorBoard logs and checkpoint filenames
NAME = "autokeras_classification"
DATASET_PATH = "/hdd/4celebs_training_set"
def build_train_set(image_size):
    """Return a tf.data.Dataset of augmented (images, labels) training batches.

    Wraps the 'training' subset (85%, via validation_split=0.15) of a Keras
    ImageDataGenerator over DATASET_PATH in a generator-backed Dataset of
    float16 pairs.
    """
    def make_train_generator():
        augmenter = image.ImageDataGenerator(
            rescale=1. / 255,
            zoom_range=0.1,
            horizontal_flip=True,
            validation_split=0.15,
            width_shift_range=0.2,
            height_shift_range=0.2)
        return augmenter.flow_from_directory(
            DATASET_PATH,
            target_size=(image_size, image_size),
            batch_size=BATCH,
            class_mode='categorical',
            subset='training')

    return tf.data.Dataset.from_generator(make_train_generator,
                                          (tf.float16, tf.float16))
def build_val_set(image_size):
    """Return a tf.data.Dataset of (images, labels) validation batches.

    Uses the 'validation' subset of an ImageDataGenerator over the same
    directory as the training set (no augmentation beyond rescaling).
    """
    def make_val_generator():
        # CONSISTENCY FIX: validation_split must match build_train_set
        # (0.15); it was 0.1 here, so the two generators partitioned the
        # directory differently and the subsets overlapped/misaligned.
        data_val_gen = image.ImageDataGenerator(rescale=1. / 255,
                                                validation_split=0.15)
        return data_val_gen.flow_from_directory(
            DATASET_PATH,
            target_size=(image_size, image_size),
            class_mode='categorical',
            subset='validation')

    return tf.data.Dataset.from_generator(make_val_generator,
                                          (tf.float32, tf.float32))
def build_model():
    """Create an AutoKeras image classifier that searches up to 100
    architectures, selecting by validation accuracy."""
    classifier = ak.ImageClassifier(max_trials=100, objective="val_acc")
    return classifier
# Fit *model* on the streaming training set, checkpointing the best weights
# (by val_acc) and logging graphs/images/gradients to TensorBoard.
def train(model, image_size):
tensorboard = TensorBoard(log_dir=f'output/logs/{NAME}',
write_graph=True,
# histogram_freq=5,
write_images=True,
write_grads=True,
profile_batch=3)
# keep only the best checkpoint, keyed on validation accuracy
checkpointer = ModelCheckpoint(
filepath=f'output/weights/{NAME}_clooney.best.hdf5',
verbose=1,
save_best_only=True,
monitor='val_acc')
train_set = build_train_set(image_size)
# val_set = build_val_set(image_size)
model.fit(
train_set,
# NOTE(review): EPOCHS * BATCH (= 250) as steps_per_epoch looks arbitrary
# rather than derived from the dataset size — confirm intent.
steps_per_epoch=EPOCHS * BATCH,
epochs=EPOCHS,
# validation_data=val_set,
# NOTE(review): Keras does not support validation_split together with a
# generator-backed Dataset — presumably validation_data=val_set was the
# intended path (see the commented lines); confirm this run validates.
validation_split=0.15,
verbose=2,
callbacks=[checkpointer, tensorboard])
# Script entry point: build the AutoKeras searcher and run training on
# 312x312 inputs when executed directly.
if __name__ == '__main__':
model = build_model()
train(model, image_size=312)
| 2.5625 | 3 |
tests/test_core.py | yanglbme/actions-toolk | 10 | 12765862 | import asyncio
import io
import os
import sys
from actions_toolkit import core
from actions_toolkit.utils import to_command_properties, AnnotationProperties
# Environment fixture consumed by the actions_toolkit.core helpers under
# test (inputs, states, PATH and the file-command targets).
test_env_vars = {
'my var': '',
'special char var \r\n];': '',
'my var2': '',
'my secret': '',
'special char secret \r\n];': '',
'my secret2': '',
'PATH': f'path1{os.pathsep}path2',
# Set inputs
'INPUT_MY_INPUT': 'val',
'INPUT_MISSING': '',
'INPUT_SPECIAL_CHARS_\'\t"\\': '\'\t"\\ response ',
'INPUT_MULTIPLE_SPACES_VARIABLE': 'I have multiple spaces',
'INPUT_BOOLEAN_INPUT': 'true',
'INPUT_BOOLEAN_INPUT_TRUE1': 'true',
'INPUT_BOOLEAN_INPUT_TRUE2': 'True',
'INPUT_BOOLEAN_INPUT_TRUE3': 'TRUE',
'INPUT_BOOLEAN_INPUT_FALSE1': 'false',
'INPUT_BOOLEAN_INPUT_FALSE2': 'False',
'INPUT_BOOLEAN_INPUT_FALSE3': 'FALSE',
'INPUT_WRONG_BOOLEAN_INPUT': 'wrong',
'INPUT_WITH_TRAILING_WHITESPACE': ' some val ',
'INPUT_MY_INPUT_LIST': 'val1\nval2\nval3',
# Save inputs
'STATE_TEST_1': 'state_val',
# File Commands
'GITHUB_PATH': '',
'GITHUB_ENV': ''
}
# Export the whole fixture into the real process environment.
for k, v in test_env_vars.items():
os.environ[k] = v
# Scratch directory used by the file-command tests below.
file_path = os.path.join(os.getcwd(), 'test')
if not os.path.isdir(file_path):
os.makedirs(file_path)
def call(func, *args, **kw):
    """Run *func* capturing everything it writes to stdout; return the text.

    BUG FIX: the original never restored ``sys.stdout`` when *func*
    raised, leaving stdout permanently redirected to a dead StringIO
    for the rest of the test run. The restore now happens in ``finally``
    so the exception still propagates but stdout is always put back.
    """
    output = io.StringIO()
    sys.stdout = output
    try:
        func(*args, **kw)
    finally:
        # always restore, even if func raised
        sys.stdout = sys.__stdout__
    return output.getvalue()
def create_file_command_file(command: str):
    """Create an empty file-command file under *file_path* and point the
    ``GITHUB_<command>`` environment variable at it."""
    target = os.path.join(file_path, command)
    os.environ['GITHUB_' + command] = target
    with open(target, 'a', encoding='utf-8', newline='') as fs:
        fs.write('')
def verify_file_command(command: str, expected_contents: str):
    """Assert the file-command file for *command* holds exactly
    *expected_contents*, then delete the file and unset the env var."""
    target = os.path.join(file_path, command)
    with open(target, 'r', encoding='utf-8', newline='') as fs:
        actual = fs.read()
    assert actual == expected_contents
    # clean up both the file and the GITHUB_<command> pointer
    os.unlink(target)
    os.environ.pop('GITHUB_' + command, None)
# --- export_variable: legacy ::set-env workflow command ---
assert call(core.export_variable, 'my var', 'var val') == f'::set-env name=my var::var val{os.linesep}'
assert call(core.export_variable, 'special char var \r\n,:',
'special val') == f'::set-env name=special char var %0D%0A%2C%3A::special val{os.linesep}'
assert os.getenv('special char var \r\n,:') == 'special val'
assert call(core.export_variable, 'my var2', 'var val\r\n') == f'::set-env name=my var2::var val%0D%0A{os.linesep}'
assert os.getenv('my var2') == 'var val\r\n'
assert call(core.export_variable, 'my var', True) == f'::set-env name=my var::true{os.linesep}'
assert call(core.export_variable, 'my var', 5) == f'::set-env name=my var::5{os.linesep}'
# --- export_variable: GITHUB_ENV file-command path (heredoc delimiter) ---
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', 'var val')
verify_file_command(command,
f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}var val'
f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', True)
verify_file_command(command,
f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}true'
f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
command = 'ENV'
create_file_command_file(command)
core.export_variable('my var', 5)
verify_file_command(command,
f'my var<<_GitHubActionsFileCommandDelimeter_{os.linesep}5'
f'{os.linesep}_GitHubActionsFileCommandDelimeter_{os.linesep}')
# --- set_secret ---
assert call(core.set_secret, 'secret val') == f'::add-mask::secret val{os.linesep}'
# --- add_path: file-command path, then legacy ::add-path ---
command = 'PATH'
create_file_command_file(command)
core.add_path('myPath')
assert os.getenv('PATH') == f'myPath{os.pathsep}path1{os.pathsep}path2'
verify_file_command(command, f'myPath{os.linesep}')
assert call(core.add_path, 'myPath') == f'::add-path::myPath{os.linesep}'
# --- get_input / get_multiline_input (env-backed INPUT_* variables) ---
assert core.get_input('my input') == 'val'
assert core.get_input('my input', required=True) == 'val'
try:
core.get_input('missing', required=True)
except Exception as e:
assert str(e) == 'Input required and not supplied: MISSING'
else:
raise Exception('Expected raise Exception but it did not')
assert core.get_input('missing', required=False) == ''
assert core.get_input('My InPuT') == 'val'
assert core.get_input('special chars_\'\t"\\') == '\'\t"\\ response'
assert core.get_input('multiple spaces variable') == 'I have multiple spaces'
assert core.get_multiline_input('my input list') == ['val1', 'val2', 'val3']
assert core.get_input('with trailing whitespace') == 'some val'
assert core.get_input('with trailing whitespace', trim_whitespace=True) == 'some val'
assert core.get_input('with trailing whitespace', trim_whitespace=False) == ' some val '
# --- get_boolean_input: YAML 1.2 core-schema booleans only ---
assert core.get_boolean_input('boolean input') is True
assert core.get_boolean_input('boolean input', required=True) is True
assert core.get_boolean_input('boolean input true1') is True
assert core.get_boolean_input('boolean input true2') is True
assert core.get_boolean_input('boolean input true3') is True
assert core.get_boolean_input('boolean input false1') is False
assert core.get_boolean_input('boolean input false2') is False
assert core.get_boolean_input('boolean input false3') is False
try:
core.get_boolean_input('wrong boolean input')
except Exception as e:
assert str(e) == 'Input does not meet YAML 1.2 "Core Schema" specification: wrong boolean input\n' \
'Support boolean input list: `true | True | TRUE | false | False | FALSE`'
else:
raise Exception('Expected raise Exception but it did not')
# --- set_output ---
assert call(core.set_output, 'some output', 'some value') == \
f'{os.linesep}::set-output name=some output::some value{os.linesep}'
assert call(core.set_output, 'some output', False) == f'{os.linesep}::set-output name=some output::false{os.linesep}'
assert call(core.set_output, 'some output', 1.01) == f'{os.linesep}::set-output name=some output::1.01{os.linesep}'
# --- error / warning / notice annotations (message escaping + properties) ---
assert call(core.error, 'Error message') == f'::error::Error message{os.linesep}'
assert call(core.error, 'Error message\r\n\n') == f'::error::Error message%0D%0A%0A{os.linesep}'
message = 'this is my error message'
error = Exception(message)
assert call(core.error, error) == f'::error::Error: {message}{os.linesep}'
assert call(core.error, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::error title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
assert call(core.warning, 'Warning') == f'::warning::Warning{os.linesep}'
assert call(core.warning, '\r\nwarning\n') == f'::warning::%0D%0Awarning%0A{os.linesep}'
assert call(core.warning, error) == f'::warning::Error: {message}{os.linesep}'
assert call(core.warning, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::warning title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
assert call(core.notice, '\r\nnotice\n') == f'::notice::%0D%0Anotice%0A{os.linesep}'
assert call(core.notice, error) == f'::notice::Error: {message}{os.linesep}'
assert call(core.notice, error, title='A title', file='root/test.txt',
start_column=1, end_column=2, start_line=5, end_line=5) == \
f'::notice title=A title,file=root/test.txt,line=5,endLine=5,col=1,endColumn=2::Error: {message}{os.linesep}'
# --- to_command_properties: python names -> workflow-command keys ---
annotation_properties = AnnotationProperties(title='A title', file='root/test.txt', start_column=1, end_column=2,
start_line=5, end_line=5)
command_properties = to_command_properties(annotation_properties)
assert command_properties['title'] == 'A title'
assert command_properties['file'] == 'root/test.txt'
assert command_properties['col'] == 1
assert command_properties['endColumn'] == 2
assert command_properties['line'] == 5
assert command_properties['endLine'] == 5
assert command_properties.get('startColumn') is None
assert command_properties.get('startLine') is None
# --- start_group / end_group / async group wrapper ---
assert call(core.start_group, 'my-group') == f'::group::my-group{os.linesep}'
assert call(core.end_group) == f'::endgroup::{os.linesep}'
async def f():
async def in_group():
sys.stdout.write('in my group\n')
return True
result = await core.group('mygroup', in_group)
assert result is True
assert call(asyncio.run, f()) == f'::group::mygroup{os.linesep}in my group\n::endgroup::{os.linesep}'
# --- debug ---
assert call(core.debug, 'Debug') == f'::debug::Debug{os.linesep}'
assert call(core.debug, '\r\ndebug\n') == f'::debug::%0D%0Adebug%0A{os.linesep}'
# --- save_state / get_state ---
assert call(core.save_state, 'state_1', 'some value') == f'::save-state name=state_1::some value{os.linesep}'
assert call(core.save_state, 'state_1', 1) == f'::save-state name=state_1::1{os.linesep}'
assert call(core.save_state, 'state_1', True) == f'::save-state name=state_1::true{os.linesep}'
assert core.get_state('TEST_1') == 'state_val'
# --- is_debug: driven by RUNNER_DEBUG ---
os.environ.pop('RUNNER_DEBUG', None)
assert core.is_debug() is False
os.environ['RUNNER_DEBUG'] = '1'
assert core.is_debug() is True
os.environ.pop('RUNNER_DEBUG', None)
# --- set_command_echo ---
assert call(core.set_command_echo, True) == f'::echo::on{os.linesep}'
assert call(core.set_command_echo, False) == f'::echo::off{os.linesep}'
test/test_meal_plan_sections.py | kingo55/pygrocy | 0 | 12765863 | import datetime
import pytest
# Integration tests for pygrocy meal-plan sections. HTTP traffic is
# replayed from recorded VCR cassettes (@pytest.mark.vcr); `grocy` is a
# pytest fixture providing a configured client — presumably defined in
# conftest.py (not visible here).
class TestMealPlanSections:
@pytest.mark.vcr
def test_get_sections_valid(self, grocy):
# the cassette contains exactly two sections
sections = grocy.meal_plan_sections()
assert len(sections) == 2
section = sections[1]
assert section.id == 1
assert section.name == "Breakfast"
assert section.sort_number == 3
assert isinstance(section.row_created_timestamp, datetime.datetime)
@pytest.mark.vcr
def test_get_section_by_id_valid(self, grocy):
section = grocy.meal_plan_section(1)
assert section.id == 1
@pytest.mark.vcr
def test_get_section_by_id_invalid(self, grocy):
# an unknown id yields None rather than raising
section = grocy.meal_plan_section(1000)
assert section is None
| 2.265625 | 2 |
problems/chapter15/nicetak/aoj1350.py | tokuma09/algorithm_problems | 1 | 12765864 | class UnionFind():
def __init__(self, n) -> None:
self.par = [-1] * n
self.siz = [1] * n
# return root of x
def root(self, x):
if self.par[x] == -1:
return x
else:
self.par[x] = self.root(self.par[x])
return self.par[x]
# return if x and y are in the same group
def issame(self, x, y):
return self.root(x) == self.root(y)
# combine group with x and one with y
def unite(self, x, y):
x = self.root(x)
y = self.root(y)
if x == y:
return False
if self.siz[x] < self.siz[y]:
x, y = y, x
# make y child of x
self.par[y] = x
self.siz[x] += self.siz[y]
return True
def size(self, x):
return self.siz[self.root(x)]
def kruskal(edges, n, id_ommit = -1):
    """Run Kruskal's algorithm and return (used edge indices, total cost).

    Arguments:
        edges: list of [u, v, weight] with 0-based vertex ids, pre-sorted by
            ascending weight.
        n: number of vertices.
        id_ommit: index of one edge to exclude from consideration (-1 = none).

    Returns:
        (edges_used, cost): indices of the chosen edges and their weight sum;
        cost is the 10**13 INF sentinel when no spanning tree exists.
    """
    # BUG FIX: the union-find must be sized by the number of VERTICES, not the
    # number of edges; with fewer edges than vertices the old
    # UnionFind(len(edges)) indexed out of range on vertex ids >= len(edges).
    uf = UnionFind(n)
    edges_used = []
    cost = 0
    for i, (s, t, w) in enumerate(edges):
        # Use short-circuiting `and` (the original bitwise `&` always
        # evaluated the issame() call, even for the omitted edge).
        if i != id_ommit and not uf.issame(s, t):
            uf.unite(s, t)
            edges_used.append(i)
            cost += w
    if len(edges_used) < n - 1:
        cost = 10**13  # INF: the graph is disconnected without the omitted edge
    return edges_used, cost
def main():
    """Read a weighted graph from stdin and report, over all MST edges, how
    many are critical (their removal raises the MST cost) and their weight sum."""
    n, m = map(int, input().split())
    # Read 1-based endpoints and store them 0-based as [u, v, weight].
    edge_list = []
    for _ in range(m):
        u, v, w = map(int, input().split())
        edge_list.append([u - 1, v - 1, w])
    edge_list.sort(key=lambda e: e[2])  # Kruskal expects ascending weights
    mst_edges, base_cost = kruskal(edge_list, n)
    critical_count = 0
    critical_cost = 0
    for edge_id in mst_edges:
        # Re-run Kruskal with this edge forbidden; a strictly higher cost
        # means the edge is indispensable for a minimum spanning tree.
        _, cost_without = kruskal(edge_list, n, edge_id)
        if cost_without > base_cost:
            critical_count += 1
            critical_cost += edge_list[edge_id][2]
    print(str(critical_count) + " " + str(critical_cost))
if __name__=='__main__':
main()
| 3.21875 | 3 |
BacterialTyper/scripts/assembly_stats_caller.py | HCGB-IGTP/BacterialTyper | 2 | 12765865 | <reponame>HCGB-IGTP/BacterialTyper
#!/usr/bin/env python3
############################################################
## <NAME> ##
## Copyright (C) 2019-2021 <NAME>, IGTP, Spain ##
############################################################
from pandas.tests.io.json.conftest import orient
"""
Calls assembly_stats pip module
"""
## useful imports
import time
import io
import os
import re
import sys
from sys import argv
from io import open
from termcolor import colored
import pandas as pd
import assembly_stats
##
import HCGB.functions.aesthetics_functions as HCGB_aes
import HCGB.functions.main_functions as HCGB_main
############
def assembly_stats_caller(fasta_file, out_file, debug):
    """Compute contig and scaffold assembly statistics for a FASTA file.

    Writes two CSV files (<out_file>-contigs.csv, <out_file>-scaffolds.csv)
    and one Excel workbook (<out_file>_stats.xlsx).

    Arguments:
        fasta_file (str): path to the genome assembly in FASTA format.
        out_file (str): output path prefix for the CSV/XLSX files.
        debug (bool): when True, echo intermediate values in colour.

    Returns:
        tuple: (stats dict keyed by 'Contig Stats'/'Scaffold Stats',
        path of the generated Excel file).
    """
    contig_lens, scaffold_lens, gc_cont = assembly_stats.read_genome(fasta_file)
    ## debug messages
    if debug:
        HCGB_aes.debug_message("contig_lens", "yellow")
        print(contig_lens)
        HCGB_aes.debug_message("scaffold_lens", "yellow")
        print(scaffold_lens)
        HCGB_aes.debug_message("gc_cont", "yellow")
        print(gc_cont)
    ## get stats
    contig_stats = assembly_stats.calculate_stats(contig_lens, gc_cont)
    scaffold_stats = assembly_stats.calculate_stats(scaffold_lens, gc_cont)
    ## debug messages
    if debug:
        HCGB_aes.debug_message("contig_stats", "yellow")
        print(contig_stats)
        HCGB_aes.debug_message("scaffold_stats", "yellow")
        print(scaffold_stats)
    stat_output = {'Contig Stats': contig_stats,
                   'Scaffold Stats': scaffold_stats}
    ## save results in file
    HCGB_main.printDict2file(out_file + '-contigs.csv', contig_stats, ",")
    HCGB_main.printDict2file(out_file + '-scaffolds.csv', scaffold_stats, ",")
    ## create stats in excel file
    assembly_stats_file = out_file + '_stats.xlsx'
    parse_stats(stat_output, assembly_stats_file, debug)
    return(stat_output, assembly_stats_file)
####################################
def parse_stats(stat_output, assembly_stats_file, debug):
    """Dump each stats dict in *stat_output* to one sheet of a single Excel file.

    Arguments:
        stat_output (dict): sheet name -> stats dict.
        assembly_stats_file (str): path of the .xlsx workbook to create.
        debug (bool): when True, echo each entry before writing it.
    """
    ## create single excel file
    # Use the writer as a context manager: pandas deprecated (and later
    # removed) ExcelWriter.save(); the context manager closes/saves the
    # workbook correctly in every pandas version.
    with pd.ExcelWriter(assembly_stats_file, engine='xlsxwriter') as writer_Excel:
        ## loop over dictionary and print dataframe
        for name, each_data in stat_output.items():
            if debug:
                print ("Name: ", name)
                print ("Data: ", each_data)
            each_df = pd.DataFrame.from_dict(each_data, orient='index')
            each_df.to_excel(writer_Excel, sheet_name=name) ## write excel handle
############
def help_options():
    """Print a short usage banner for running this script standalone."""
    usage = "\nUSAGE:\npython %s fasta_file out_file_name\n" % os.path.realpath(__file__)
    print(usage)
############
def main():
    """Entry point when the module runs as a standalone script.

    Expects two positional arguments: the input FASTA file and the output
    file prefix. Prints usage and exits when either is missing.
    """
    ## control if options provided or help
    # BUG FIX: the script reads argv[1] AND argv[2]; the old check
    # (len(sys.argv) > 1) let a single-argument call crash with IndexError.
    if len(sys.argv) > 2:
        print ("")
    else:
        help_options()
        exit()
    file_in = os.path.abspath(argv[1])
    file_out = os.path.abspath(argv[2])
    ##
    path_to_sample = assembly_stats_caller(file_in, file_out, True)
############
'''******************************************'''
if __name__== "__main__":
main()
| 2.078125 | 2 |
examples/mock_pipe.py | phip123/glimmer | 1 | 12765866 | <reponame>phip123/glimmer
import logging
import multiprocessing
import os
import sys
from examples.mock_nodes import DevSource, DevSink, DevOperator, DevAvgOperator
from glimmer.processing.operator import LogOperator
from glimmer.processing.sync import SynchronousEnvironment, SynchronousTopology
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class MockApp:
    """Wires dev source/operators/sink into a synchronous topology and runs it."""

    def run(self):
        """Build the pipeline and run it until completion or Ctrl-C."""
        source = DevSource()
        sink = DevSink()
        op1 = DevOperator()
        op2 = DevAvgOperator()
        op3 = LogOperator()
        os.environ['home_controller_sleep'] = '2'
        os.environ['home_controller_host'] = 'localhost'
        # The `-` operator chains the operators into one composed pipeline.
        composed = op1 - op2 - op3
        topology = SynchronousTopology(source, composed, sink)
        env = None
        try:
            env = SynchronousEnvironment(topology)
            env.run()
        except KeyboardInterrupt:
            # BUG FIX: guard against env still being None when the interrupt
            # arrives during SynchronousEnvironment construction.
            if env is not None:
                env.stop()
        finally:
            # BUG FIX: close in a finally block so the environment is released
            # even when run() raises something other than KeyboardInterrupt
            # (the old code skipped close() in that case).
            if env is not None:
                env.close()
if __name__ == "__main__":
app = MockApp()
app.run()
| 1.929688 | 2 |
Web_Server/webapps/bemoss_applications/migrations/0001_initial.py | ajfar-bem/wisebldg | 0 | 12765867 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-17 19:38
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the application_registered and
    application_running tables for the bemoss_applications app."""
    # First migration of this app; depends on buildinginfos' initial tables
    # because ApplicationRunning links to BuildingInfo/ZoneInfo.
    initial = True
    dependencies = [
        ('buildinginfos', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ApplicationRegistered',
            fields=[
                ('application_id', models.AutoField(primary_key=True, serialize=False)),
                ('app_name', models.CharField(blank=True, max_length=50, null=True)),
                ('description', models.CharField(blank=True, max_length=1000, null=True)),
                ('app_folder', models.CharField(max_length=200)),
                ('registered_time', models.DateTimeField()),
            ],
            options={
                'db_table': 'application_registered',
            },
        ),
        migrations.CreateModel(
            name='ApplicationRunning',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_time', models.DateTimeField()),
                ('app_agent_id', models.CharField(max_length=50)),
                ('status', models.CharField(blank=True, max_length=20, null=True)),
                # NOTE(review): default={} is a shared mutable default
                # (Django check fields.W019 recommends default=dict in the
                # model) — confirm the model definition before regenerating.
                ('app_data', django.contrib.postgres.fields.jsonb.JSONField(default={})),
                ('app_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bemoss_applications.ApplicationRegistered')),
                ('building', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.BuildingInfo')),
                ('zone', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='buildinginfos.ZoneInfo')),
            ],
            options={
                'db_table': 'application_running',
            },
        ),
    ]
| 1.75 | 2 |
cerebralcortex/data_import_export/export_diagnostic_data.py | MD2Korg/CerebralCortex-2.0-legacy | 0 | 12765868 | <filename>cerebralcortex/data_import_export/export_diagnostic_data.py
# Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, shutil
from cerebralcortex.CerebralCortex import CerebralCortex
from cerebralcortex.configuration import Configuration
from datetime import timedelta
from pytz import timezone
#create and load CerebralCortex object and configs
configuration_file = os.path.join(os.path.dirname(__file__), '../../cerebralcortex.yml')
CC_driver = CerebralCortex(configuration_file, master="local[*]", name="Data Diagnostic App", load_spark=True)
CC_worker = CerebralCortex(configuration_file, master="local[*]", name="Data Diagnostic App", load_spark=False)
#output folder path
output_folder = "/home/ali/Desktop/DUMP/data/tmp/"
shutil.rmtree(output_folder)
# load data diagnostic configs
config = Configuration(filepath="../data_processor/data_diagnostic/data_diagnostic_config.yml").config
def one_participant_data(participant_ids=None):
    """Export data-diagnostic streams for an explicit list of participants.

    Arguments:
        participant_ids (list): participant UUID strings; when None or
            empty, nothing is processed.
    """
    # BUG FIX: the default participant_ids=None made the old
    # len(participant_ids) > 0 check raise TypeError; a truthiness test
    # handles None and the empty list alike.
    if participant_ids:
        participants_rdd = CC_driver.sc.parallelize(participant_ids)
        # Fan each participant out to a Spark worker for export.
        results = participants_rdd.map(
            lambda participant: export_data(participant, "", CC_worker, config))
        results.count()
    else:
        print("No participant is selected")
def all_participants_data(study_name):
    """Export data-diagnostic streams for every participant of a study.

    Arguments:
        study_name (str): name of the study whose participants are exported.
    """
    # get all participants' name-ids
    participants = CC_driver.get_all_participants(study_name)
    if len(participants)>0:
        participants_rdd = CC_driver.sc.parallelize(participants)
        # One Spark task per participant; export_data runs on the workers.
        results = participants_rdd.map(
            lambda participant: export_data(participant["identifier"], participant["username"], CC_worker, config))
        results.count()
    else:
        print("No participant is selected")
def export_data(participant_id, participant_username, CC, config):
    """Write each DATA-DIAGNOSTIC stream of one participant to per-stream CSV files.

    Arguments:
        participant_id (str): participant UUID.
        participant_username (str): username (used downstream to pick a timezone).
        CC: CerebralCortex instance used to query streams and raw data.
        config (dict): data-diagnostic configuration (not referenced here).
    """
    streams = CC.get_participant_streams(participant_id)
    if streams and len(streams)>0:
        stream_names = filter_stream_names(streams)
        for key,val in stream_names.items():
            # Walk the stream's lifetime one calendar day at a time.
            days = val["end_time"]-val["start_time"]
            for day in range(days.days+1):
                day_date = (val["start_time"]+timedelta(days=day)).strftime('%Y%m%d')
                data = CC.get_cassandra_raw_data(val["identifier"], day_date)
                write_to_bz2(output_folder+val["owner"]+"/", key+".csv", participant_username, data)
        # NOTE(review): this print sits outside the stream loop, so it only
        # reports the LAST stream and raises NameError when there are no
        # DATA-DIAGNOSTIC streams — confirm whether it should be indented
        # into the loop.
        print("Participant ID: ", participant_id, " - Processed stream: ", key)
def filter_stream_names(stream_names):
    """Return only the data-diagnostic entries of a stream-name mapping.

    Arguments:
        stream_names (dict): stream name -> stream metadata.

    Returns:
        dict: the subset whose keys start with 'DATA-DIAGNOSTIC'.
    """
    # Dict comprehension replaces the manual build-and-assign loop.
    return {key: val for key, val in stream_names.items()
            if key.startswith('DATA-DIAGNOSTIC')}
def write_to_bz2(directory, file_name, participant_username, data):
    """Append label/touch rows of *data* as localized CSV lines under *directory*.

    NOTE(review): despite the name, this appends to a plain CSV file, not a
    bz2 archive.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Map username prefixes to their site timezone; anything else falls back
    # to US/Pacific (the same zone as the mperf_9 sites).
    username = str(participant_username)
    prefix_to_tz = (("mperf_1", 'US/Eastern'),
                    ("mperf_5", 'US/Central'),
                    ("mperf_9", 'US/Pacific'))
    tz_name = 'US/Pacific'
    for prefix, name in prefix_to_tz:
        if username.startswith(prefix):
            tz_name = name
            break
    localtz = timezone(tz_name)
    file_name = file_name.replace("IMPROPER-ATTACHMENT", "ATTACHMENT-MARKER")
    with open(directory + file_name, 'a+') as fp:
        for row in data:
            label = row[2]
            # Keep only label / touch annotations (the "no-touch" test is
            # already implied by the "touch" substring check).
            if "label" in label or "touch" in label or "no-touch" in label:
                start_time = localtz.localize(row[0])
                end_time = localtz.localize(row[1])
                fp.write(str(start_time) + "," + str(end_time) + "," + str(label) + "\n")
if __name__ == '__main__':
# run with one participant
one_participant_data(["cd7c2cd6-d0a3-4680-9ba2-0c59d0d0c684"])
# run for all the participants in a study
#all_participants_data("mperf") | 1.648438 | 2 |
simplemfa/structures.py | mwhawkins/django-simple-mfa | 3 | 12765869 | <filename>simplemfa/structures.py<gh_stars>1-10
"""
A custom immutable dictionary data structure
"""
class CustomImmutableDict(dict):
    """A dict subclass that rejects every mutation after construction.

    Values can be read both as items (d['x']) and as attributes (d.x); any
    attempt to add, change or delete an entry raises TypeError.

    NOTE(review): __hash__ is identity-based while equality stays dict
    value-equality, so two equal instances hash differently — confirm this
    is intentional before using instances as dict keys or set members.
    """

    def __getattr__(self, key):
        # Fall back to item lookup so d.key mirrors d['key'].
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        return self._immutable()

    def __delattr__(self, key):
        return self._immutable()

    def __repr__(self):
        return '<CustomImmutableDict ' + dict.__repr__(self) + '>'

    def __hash__(self):
        return id(self)

    def _immutable(self, *args, **kws):
        raise TypeError('This object is immutable')

    # Every dict mutator raises the same TypeError.
    __setitem__ = _immutable
    __delitem__ = _immutable
    clear = _immutable
    update = _immutable
    setdefault = _immutable
    pop = _immutable
    popitem = _immutable
| 2.84375 | 3 |
Python3/Books/Douson/chapter08/attribute_critter_ru.py | neon1ks/Study | 0 | 12765870 | # Attribute Critter
# Demonstrates creating and accessing object attributes
# Зверюшка с атрибутами
# Демонстрирует создание атрибутов объекта и доступ к ним
class Critter(object):
    """A virtual pet (Russian-language UI strings)."""

    def __init__(self, name):
        # Announce the birth and remember the pet's name.
        print("Появилась на свет новая зверюшка!")
        self.name = name

    def __str__(self):
        # Human-readable dump of the object.
        return "Объект класса Critter\n" + "имя: " + self.name + "\n"

    def talk(self):
        # The pet introduces itself.
        print("Привет. <NAME>", self.name, "\n")
# main
crit1 = Critter("Бобик")
crit1.talk()
crit2 = Critter("Мурзик")
crit2.talk()
print("Вывод объекта crit1 на экран:")
print(crit1)
print("Непосредственный доступ к атрибуту crit1.name:")
print(crit1.name)
crit1.years = 10
print("years =", crit1.years)
# input("\n\nPress the enter key to exit.")
| 4.46875 | 4 |
scripts/lelantos_show_qso_catalog.py | corentinravoux/lelantos | 4 | 12765871 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import argparse
from lelantos import tomographic_objects
# Command-line interface for plotting the sky distribution of a QSO catalog.
parser = argparse.ArgumentParser(description='Plot the QSO catalog')
parser.add_argument('-i',
                    '--input', help='Input QSO catalog',required=True)
# BUG FIX: the help strings for -bins and -zname were copy-pasted from
# another script and described unrelated options.
parser.add_argument('-bins', help='Number of bins of the 2D histogram', default=100,required=False)
parser.add_argument('-zname',
                    '--redshift-name', help='Name of the redshift field in the catalog', default="Z",required=False)
args = vars(parser.parse_args())

# Load the quasar catalog and plot a 2D histogram of its sky coordinates.
catalog = tomographic_objects.QSOCatalog.init_from_fits(args["input"],redshift_name=args["redshift_name"])
RA = catalog.coord[:,0]
# Wrap right ascension into [-180, 180] so the map is centred on RA = 0.
RA[RA>180] = RA[RA>180] - 360
DEC = catalog.coord[:,1]
plt.hist2d(RA,DEC,int(args["bins"]))
plt.colorbar()
plt.show()
| 2.609375 | 3 |
tests/node_2_kern_2_1.py | sharm294/shoal | 1 | 12765872 | <reponame>sharm294/shoal<gh_stars>1-10
import os
from sonar.testbench import Testbench, Module, TestVector, Thread
from sonar.interfaces import AXIS
from sonar.profile import Profiler
# Build the sonar testbench for the node_2_kern_2_1 kernel.
node_2_kern_2_1 = Testbench.default('node_2_kern_2_1')
filepath = os.path.join(os.path.dirname(__file__), 'build/node_2_kern_2_1/')
# Device under test: one clock, an active-low reset and an interrupt input.
dut = Module.default("DUT")
dut.add_clock_port('ap_clk', '20ns')
dut.add_reset_port('ap_rst_n')
dut.add_port('interrupt_V', 'input')
# 64-byte AXI-Stream input with TKEEP and an 8-bit TDEST routing channel.
axis_in = AXIS('in_r', 'slave', 'ap_clk')
axis_in.port.init_channels('tkeep', 64, True)
axis_in.port.add_channel('TDEST', 'tdest', 8)
dut.add_interface(axis_in)
# Matching 64-byte AXI-Stream output.
axis_out = AXIS('out_r', 'master', 'ap_clk')
axis_out.port.init_channels('tkeep', 64, True)
axis_out.port.add_channel('TDEST', 'tdest', 8)
dut.add_interface(axis_out)
node_2_kern_2_1.add_module(dut)
# NOTE(review): filepath already ends with '/', so this produces a doubled
# separator in the profile path (harmless on POSIX) — consider os.path.join.
profiled_data = Profiler(filepath + '/' + 'output.txt', 'PROFILE')
################################################################################
# Test Vectors
################################################################################
# Initialization thread (added to each test vector to reset everything)
initT = Thread()
initT.init_signals()
initT.wait_negedge('ap_clk')
initT.add_delay('40ns')
initT.set_signal('ap_rst_n', 1)
initT.set_signal('interrupt_V', 1)
#-------------------------------------------------------------------------------
# case_0
#
#-------------------------------------------------------------------------------
# Single test vector: after reset, replay the profiled stream traffic
# through the DUT and time how long the case takes.
case_0 = TestVector()
case_0.add_thread(initT)
smA_t1 = case_0.add_thread()
smA_t1.add_delay('100ns')
# axis_in.writes(smA_t1, profiled_data.to_stream(profiled_data.read_data))
profiled_data.write(smA_t1, axis_in, axis_out)
smA_t1.print_elapsed_time("case_0")
smA_t1.end_vector()
# smA_t2 = case_0.add_thread()
# axis_out.writes(smA_t2, profiled_data.to_stream(profiled_data.write_data))
# smA_t2.print_elapsed_time("case_0")
# smA_t2.end_vector()
node_2_kern_2_1.add_test_vector(case_0)
# Emit the SystemVerilog testbench into the build directory.
node_2_kern_2_1.generateTB(filepath, 'sv')
| 1.992188 | 2 |
tests/plugins/test_dplay.py | fuglede/streamlink | 0 | 12765873 | import unittest
from streamlink.plugins.dplay import Dplay
class TestPluginDplay(unittest.TestCase):
    """URL-matching tests for the Dplay plugin."""

    def test_can_handle_url(self):
        """The plugin claims www.dplay.* video URLs."""
        valid_urls = (
            'https://www.dplay.dk/videoer/studie-5/season-2-episode-1',
            'https://www.dplay.no/videoer/danskebaten/sesong-1-episode-1',
            'https://www.dplay.se/videos/breaking-news/breaking-news-med-filip-fredrik-750',
        )
        for url in valid_urls:
            self.assertTrue(Dplay.can_handle_url(url))

    def test_can_handle_url_negative(self):
        """The plugin rejects URLs from unrelated sites."""
        invalid_urls = (
            'https://example.com/index.html',
        )
        for url in invalid_urls:
            self.assertFalse(Dplay.can_handle_url(url))
| 3.015625 | 3 |
canbeta/__main__.py | FROM-THE-EARTH/canbeta | 1 | 12765874 | <gh_stars>1-10
import argparse
import subprocess
import os
import sys
# Make the current working directory importable for the delegated script.
sys.path.append(os.path.join(os.getcwd()))
# NOTE(review): the run command lives under "can09", not under this
# package's name ("canbeta") — confirm the directory name is correct.
PATH_CMD_RUN = os.path.join(os.getcwd(), "can09", "cmd", "run.py")
# Only the "run" sub-command is supported; its single argument is passed
# through to the run script verbatim.
parser = argparse.ArgumentParser()
parser.add_argument("command", choices=["run"])
parser.add_argument("args")
args = parser.parse_args()
if args.command == "run":
    subprocess.run(["python", PATH_CMD_RUN, args.args])
| 2.484375 | 2 |
tests/test_types.py | CAENels/frugy | 2 | 12765875 | """
SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2020 Deutsches Elektronen-Synchrotron DESY.
See LICENSE.txt for license details.
"""
import unittest
from frugy.types import FixedField, StringField, StringFmt, GuidField, ArrayField, FruAreaBase
class TestString(unittest.TestCase):
    """Serialization round-trip tests for frugy's StringField encodings."""
    def test_null(self):
        """An empty string serializes to the 0xC0 type/length byte only."""
        nul = StringField()
        ser = nul.serialize()
        self.assertEqual(ser, b'\xc0')
        ser += b'remainder'
        tmp = StringField()
        # deserialize() consumes its bytes and returns the unread remainder.
        self.assertEqual(tmp.deserialize(ser), b'remainder')
        self.assertEqual(tmp.to_dict(), '')
    def test_plain(self):
        """8-bit ASCII keeps the text verbatim after a length/type byte."""
        testStr = 'Hello world'
        tmp = StringField(testStr, format=StringFmt.ASCII_8BIT)
        self.assertEqual(tmp.bit_size(), 12 * 8)
        ser = tmp.serialize()
        self.assertEqual(ser, b'\xcbHello world')
        ser += b'remainder'
        tmp2 = StringField()
        self.assertEqual(tmp2.deserialize(ser), b'remainder')
        self.assertEqual(tmp2.to_dict(), testStr)
    def test_bcd_plus(self):
        """BCD+ packs two digits/symbols per byte."""
        testStr = '123.45-67 890'
        tmp = StringField(testStr, format=StringFmt.BCD_PLUS)
        self.assertEqual(tmp.bit_size(), 8 * 8)
        ser = tmp.serialize()
        self.assertEqual(ser, b'\x47\x12\x3c\x45\xb6\x7a\x89\x0a')
        ser += b'remainder'
        tmp2 = StringField()
        self.assertEqual(tmp2.deserialize(ser), b'remainder')
        self.assertEqual(tmp2.to_dict(), testStr + ' ') # append padding space
    def test_ascii_6bit(self):
        """6-bit ASCII packs four chars into three bytes; case is lost."""
        testStr = 'IPMI Hello world'
        tmp = StringField(testStr, format=StringFmt.ASCII_6BIT)
        self.assertEqual(tmp.bit_size(), 13 * 8)
        ser = tmp.serialize()
        self.assertEqual(ser,
                         b'\x8c\x29\xdc\xa6\x00Z\xb2\xec\x0b\xdc\xaf\xcc\x92')
        ser += b'remainder'
        tmp2 = StringField()
        self.assertEqual(tmp2.deserialize(ser), b'remainder')
        self.assertEqual(tmp2.to_dict(), 'IPMI HELLO WORLD')
class ArrayTest(FruAreaBase):
    """Minimal 3-byte FRU area used as the element type in the ArrayField test:
    two full bytes plus two 4-bit fields packed into the third byte."""
    _schema = [
        ('first_byte', FixedField, 'u8', {'default': 0}),
        ('second_byte', FixedField, 'u8', {'default': 0}),
        ('bits1', FixedField, 'u4', {'default': 0}),
        ('bits2', FixedField, 'u4', {'default': 0}),
    ]
class TestMisc(unittest.TestCase):
    """Serialization tests for frugy's GuidField and ArrayField types."""
    def test_uuid(self):
        """GUIDs serialize to 16 bytes in mixed-endian (RFC 4122 variant) order."""
        testUid = 'cafebabe-1234-5678-d00f-deadbeef4711'
        tmp = GuidField(testUid)
        self.assertEqual(tmp.bit_size(), 128)
        ser = tmp.serialize()
        self.assertEqual(ser, b'\xbe\xba\xfe\xca4\x12xV\xd0\x0f\xde\xad\xbe\xefG\x11')
        ser += b'remainder'
        tmp2 = GuidField()
        # deserialize() consumes its bytes and returns the unread remainder.
        self.assertEqual(tmp2.deserialize(ser), b'remainder')
        self.assertEqual(tmp2.to_dict(), testUid)
    def test_array(self):
        """Three 3-byte ArrayTest elements round-trip through serialize/deserialize."""
        tmp = ArrayField(ArrayTest, initdict=[
            { 'first_byte': 1, 'second_byte': 2, 'bits1': 3, 'bits2': 4, },
            { 'first_byte': 5, 'second_byte': 6, 'bits1': 7, 'bits2': 8, },
            { 'first_byte': 9, 'second_byte': 10, 'bits1': 11, 'bits2': 12, },
        ])
        self.assertEqual(tmp.size_total(), 3 * 3)
        ser = tmp.serialize()
        self.assertEqual(ser, b'\x01\x024\x05\x06x\t\n\xbc')
        tmp2 = ArrayField(ArrayTest)
        tmp2.deserialize(ser)
        self.assertEqual(tmp.__repr__(), tmp2.__repr__())
if __name__ == '__main__':
unittest.main()
| 2.15625 | 2 |
bcs-ui/backend/tests/container_service/cluster_tools/conftest.py | kayinli/bk-bcs | 0 | 12765876 | <reponame>kayinli/bk-bcs
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import mock
import pytest
from backend.container_service.cluster_tools import models
@pytest.fixture
def tool(db):
    """Create and return a Tool fixture row (requires the Django db fixture)."""
    return models.Tool.objects.create(
        name="GameStatefulSet", chart_name='bcs-gamestatefulset-operator', default_version='0.6.0-beta3'
    )
@pytest.fixture(autouse=True)
def patch_get_random_string():
    """Pin the deployer's random-string helper to '12345678' for every test."""
    with mock.patch('backend.helm.toolkit.deployer.get_random_string', new=lambda *args, **kwargs: '12345678'):
        yield
| 1.601563 | 2 |
makahiki/apps/widgets/status/DWGG/views.py | justinslee/Wai-Not-Makahiki | 1 | 12765877 | <filename>makahiki/apps/widgets/status/DWGG/views.py
"""Handle the rendering of the water DEGG widget."""
from apps.widgets.status import status
def supply(request, page_name):
    """Supply the view_objects content."""
    # The widget ignores the request and page name; discard them explicitly
    # to document that they are intentionally unused.
    _ = request
    _ = page_name
    # Delegate to the shared status helper for the "water" resource goal.
    return status.resource_goal_status("water")
| 1.914063 | 2 |
utils/test_images_generator/random_image_generator.py | pgolab/mosaic-maker-opencv-python | 2 | 12765878 | import cv2
import numpy as np
from utils.test_images_generator.generator_config import AVAILABLE_SHAPES_DICT
from utils.test_images_generator.generator_utils import generate_random_color, generate_random_image_points
def generate_random_image(width, height):
    """Generate a width x height 3-channel test image with random shapes.

    Arguments:
        width (int): image width in pixels.
        height (int): image height in pixels.

    Returns:
        numpy.ndarray: uint8 image of shape (height, width, 3).
    """
    # Implements the former ToDo: start from a white canvas so that the
    # randomly coloured shapes stay visible.
    generated_image = np.full((height, width, 3), 255, dtype=np.uint8)

    # Implements the former ToDo: pick a random number of shapes (1..9),
    # sampled with repetition from the available shape identifiers.
    shape_count = np.random.randint(1, 10)
    chosen_shapes = np.random.choice(list(AVAILABLE_SHAPES_DICT.values()), size=shape_count)

    for shape in chosen_shapes:
        if shape == AVAILABLE_SHAPES_DICT['LINE']:
            _draw_random_line(generated_image)
        elif shape == AVAILABLE_SHAPES_DICT['TRIANGLE']:
            _draw_random_triangle(generated_image)
        elif shape == AVAILABLE_SHAPES_DICT['RECTANGLE']:
            _draw_random_rectangle(generated_image)
        elif shape == AVAILABLE_SHAPES_DICT['CIRCLE']:
            _draw_random_circle(generated_image)
    return generated_image
def _draw_random_line(generated_image):
    """Draw one random line on *generated_image* in place. Not implemented yet."""
    # ToDo draw random line (use _generate_random_image_points and _generate_random_color)
    # https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
    return
def _draw_random_triangle(generated_image):
    """Draw one random filled triangle on *generated_image* in place. Not implemented yet."""
    # ToDo draw random triangle (use _generate_random_image_points and _generate_random_color)
    # https://docs.opencv.org/3.1.0/dc/da5/tutorial_py_drawing_functions.html
    # https://docs.opencv.org/master/d6/d6e/group__imgproc__draw.html#ga3069baf93b51565e386c8e591f8418e6
    # format for triangle: reshape((-1, 1, 2)
    # https://numpy.org/doc/1.18/reference/generated/numpy.reshape.html
    return
def _draw_random_rectangle(generated_image):
    """Draw one random rectangle on *generated_image* in place. Not implemented yet."""
    # ToDo draw random rectangle (use _generate_random_image_points and _generate_random_color)
    # https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
    return
def _draw_random_circle(generated_image):
    """Draw one random circle on *generated_image* in place. Not implemented yet."""
    # ToDo draw random circle (use _generate_random_image_points and _generate_random_color)
    # https://docs.opencv.org/master/dc/da5/tutorial_py_drawing_functions.html
    return
| 2.828125 | 3 |
Answers Of The Questions.py | denizgurzihin/Python-Encryption | 0 | 12765879 | <filename>Answers Of The Questions.py
import random
import hashlib
import os
import time
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.backends import default_backend
from Cryptodome.Cipher import DES
#Question 1_Generating RSA public and private key
# generate private/public key pair
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.ciphers.algorithms import AES
# generate private/public key pair
KA_minus = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
KA_plus = KA_minus.public_key()
# store the keys in file
KA_minus_pem = KA_minus.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
)
KA_plus_pem = KA_plus.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
KA_minus_File = open("KA_minus.txt", mode = 'wb') # open a file in the write mode
KA_minus_File.write(KA_minus_pem) # write down the decrypted file
KA_minus_File.close()
KA_plus_File = open("KA_plus.txt", mode = 'wb') # open a file in the write mode
KA_plus_File.write(KA_plus_pem) # write down the decrypted file
KA_plus_File.close()
#QUESTION 2
print("QUESTION 2 : ")
K1_128 = os.urandom(16) # generate 128 bit key, 128/8 = 16byte
K2_256 = os.urandom(32) # generate 256 bit key, 256/8 = 32byte
print(" Generated 128 bit(16byte) key is : " + str(K1_128)) # print the 128 bit key
print(" Generated 256 bit(32byte) key is : " + str(K2_256)) # print the 256 bit key
# encryption of 128 bit key with public key
Encrypted_K1_128 = KA_plus.encrypt(
K1_128,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
# encryption of 256 bit key with public key
Encrypted_K2_256 = KA_plus.encrypt(
K2_256,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
print("\n Encrypted 128 bit key is : " + str(Encrypted_K1_128)) # printing encrypted 128 bit key
print("\n Encrpyted 256 bit key is : " + str(Encrypted_K2_256)) # printing encrypted 256 bit key
# decrypted the 128 bit key
Decrypted_K1_128 = KA_minus.decrypt(
Encrypted_K1_128,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
# decrypted the 256 bit key
Decrypted_K2_256 = KA_minus.decrypt(
Encrypted_K2_256,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
print("")
print(" Decrypted 128 bit key is : " + str(Decrypted_K1_128)) # printing the decrypted 128 bit key
print(" Decrypted 256 bit key is : " + str(Decrypted_K2_256)) # printing the decrypted 256 bit key
#QUESTION3
print("\n\nQUESTION 3 : ")
# Creating Long Text M
Long_Text_M = "\nBu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir.\n" \
"Bu mesaj kendini tekrar eden bir mesajtır ve 10 kere tekrar edecektir."
Question3_File = open("Question3_File.txt", mode = 'w') # open a file in the write mode
Question3_File.write(" The Long Message M is : ")
Question3_File.write(Long_Text_M) # write down the decrypted file
Question3_File.write("\n\n")
# Hashing the Long Text M
Hash_M = hashlib.sha256(Long_Text_M.encode())
Hex_Hash_M = Hash_M.hexdigest() #
Bytes_Hash_M = bytes(str(Hex_Hash_M), 'utf-8') #
Question3_File.write(" Message Digest H(m) is : \n")
Question3_File.write(Hex_Hash_M)
Question3_File.write("\n\n")
# Encrpyt Hash_M with Private Key
Encrypted_Hash_M = KA_minus.sign(
Bytes_Hash_M,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
# Verify The Digital Signature
Decrypted_Hash_M_2 = KA_plus.verify(
Encrypted_Hash_M,
Bytes_Hash_M,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
print(" The Long Message M : " + Long_Text_M)
print("\n The H(m) is : " + str(Bytes_Hash_M))
print("\n Digital signature is : " + str(Encrypted_Hash_M))
Question3_File.write(" Digital signature is : \n")
Question3_File.write(str(Encrypted_Hash_M))
Question3_File.write("\n\n")
Question3_File.close()
# QUESTION 4: time AES-128, AES-256 and DES encryption of the same input file.
print("\n\nQUESTION 4")
IV = os.urandom(16)  # 16 random bytes (128 bits) for the AES-CBC IV
IV_new = os.urandom(16)  # a second 128-bit IV for part (d)
# Read the file and prepare the plain text.
with open("Input.txt", 'r') as f:
    Plain_Text = f.read()
f.close()  # redundant: the "with" block already closed the file
# Pad the message with spaces up to a multiple of 16 (the AES block size).
while len(Plain_Text) % 16 != 0:
    Plain_Text = Plain_Text + " "
block_number = len(Plain_Text) / 16  # number of 16-char blocks (float; int() applied in the loops)
backend = default_backend()
block_Cipher_AES_128 = Cipher(algorithms.AES(K1_128), modes.CBC(IV), backend=backend)  # create cipher for AES-128-CBC
AES_128_encryptor = block_Cipher_AES_128.encryptor()  # streaming encryptor context for AES-128
AES_128_decryptor = block_Cipher_AES_128.decryptor()  # streaming decryptor context for AES-128
final_Encrypted_AES128_File = b''  # accumulates the full AES-128 ciphertext
###### ENCRYPTION FOR AES128
Time_Start_AES_128 = time.time()  # start of the AES-128 timing window
for i in range(0, int(block_number)):  # for every 16-char block of the message
    S = 16 * i  # start index of this block
    E = 16 * i + 16  # end index (exclusive)
    block = Plain_Text[S:E]  # take 16 characters each time
    block_Byte = bytes(block, 'utf-8')  # convert the string block to bytes
    # NOTE(review): utf-8 encoding can yield more than 16 bytes if the
    # input has non-ASCII characters - assumes Input.txt is ASCII; confirm.
    encrypted_block = AES_128_encryptor.update(block_Byte)  # encrypt one 16-byte block
    final_Encrypted_AES128_File = b"".join([final_Encrypted_AES128_File, encrypted_block])  # append to the ciphertext
Time_End_AES_128 = time.time()  # end of the AES-128 timing window
Total_Time_AES_128 = Time_End_AES_128 - Time_Start_AES_128  # total time spent for AES-128
print("This amount of time has been spend for AES_128 encryption : " + str(Total_Time_AES_128))
AES_128_File_Encrypted = open("Encrypted_AES_128.txt", mode = 'w', encoding='utf-8')  # report file, text mode
AES_128_File_Encrypted.write(str(final_Encrypted_AES128_File))  # str() of bytes writes the b'...' repr
AES_128_File_Encrypted.close()  # close the file
###### DECRYPTION FOR AES128
final_Decrypted_AES128_File = b''
for i in range(0, int(block_number)):
    S = 16 * i  # start index of this block
    E = 16 * i + 16  # end index (exclusive)
    block = final_Encrypted_AES128_File[S:E]  # take 16 ciphertext bytes
    decrypted_block = AES_128_decryptor.update(block)  # decrypt one block
    final_Decrypted_AES128_File = b"".join([final_Decrypted_AES128_File, decrypted_block])  # append to the recovered plaintext
AES_128_File_Decrypted = open("Decrypted_AES_128.txt", mode = 'wb')  # open in write-binary mode
AES_128_File_Decrypted.write(final_Decrypted_AES128_File)  # write down the decrypted file
AES_128_File_Decrypted.close()  # close the file
#----------------------------------------------------------------------------------------------------------------------------------------------------------------#
####### ENCRYPTION WITH AES_128 WITH NEW IV VECTOR (same key K1_128, different IV)
block_Cipher_AES_128_New = Cipher(algorithms.AES(K1_128), modes.CBC(IV_new), backend=backend)
AES_128_encryptor_New = block_Cipher_AES_128_New.encryptor()  # encryptor with the new IV
final_Encrypted_AES128_New_File = b''  # accumulates the ciphertext produced with the new IV
###### ENCRYPTION FOR AES128
for i in range(0, int(block_number)):  # for every block of the message
    S = 16 * i  # start index of this block
    E = 16 * i + 16  # end index (exclusive)
    block = Plain_Text[S:E]  # take 16 characters each time
    block_Byte = bytes(block, 'utf-8')  # convert the string block to bytes
    encrypted_block = AES_128_encryptor_New.update(block_Byte)  # encrypt one block with the new IV
    final_Encrypted_AES128_New_File = b"".join([final_Encrypted_AES128_New_File, encrypted_block])  # append to the ciphertext
AES_128_File_Encrypted_New = open("Encrypted_AES_128_NEW.txt", mode = 'w', encoding='utf-8')
AES_128_File_Encrypted_New.write(str(final_Encrypted_AES128_New_File))  # write down the encrypted file
AES_128_File_Encrypted_New.close()
#----------------------------------------------------------------------------------------------------------------------------------------------------------------#
###### PREPARE ENCRYPTOR FOR AES 256
block_Cipher_AES_256 = Cipher(algorithms.AES(K2_256), modes.CBC(IV), backend=backend)  # create cipher for AES-256-CBC
AES_256_encryptor = block_Cipher_AES_256.encryptor()  # streaming encryptor context for AES-256
AES_256_decryptor = block_Cipher_AES_256.decryptor()  # streaming decryptor context for AES-256
Time_Start_AES_256 = time.time()  # start of the AES-256 timing window
final_Encrypted_AES256_File = b''  # accumulates the full AES-256 ciphertext
###### ENCRYPTION FOR AES256
for i in range(0, int(block_number)):  # for every block of the message
    S = 16 * i  # start index of this block
    E = 16 * i + 16  # end index (exclusive)
    block = Plain_Text[S:E]  # take 16 characters each time
    block_Byte = bytes(block, 'utf-8')  # convert the string block to bytes
    encrypted_block = AES_256_encryptor.update(block_Byte)  # encrypt one block with AES-256
    final_Encrypted_AES256_File = b"".join([final_Encrypted_AES256_File, encrypted_block])  # append to the ciphertext
Time_End_AES_256 = time.time()  # end of the AES-256 timing window
Total_Time_AES_256 = Time_End_AES_256 - Time_Start_AES_256  # total time spent for AES-256
print("This amount of time has been spend for AES_256 encryption : " + str(Total_Time_AES_256))
AES_256_File_Encrypted = open("Encrypted_AES_256.txt", mode = 'w', encoding='utf-8')  # open file in write mode
AES_256_File_Encrypted.write(str(final_Encrypted_AES256_File))  # write down the encrypted file
AES_256_File_Encrypted.close()  # close the file
###### DECRYPTION FOR AES256
final_Decrypted_AES256_File = b''
for i in range(0, int(block_number)):
    S = 16 * i  # start index of this block
    E = 16 * i + 16  # end index (exclusive)
    block = final_Encrypted_AES256_File[S:E]  # take 16 ciphertext bytes
    decrypted_block = AES_256_decryptor.update(block)  # decrypt one block
    final_Decrypted_AES256_File = b"".join([final_Decrypted_AES256_File, decrypted_block])  # append to the recovered plaintext
AES_256_File_Decrypted = open("Decrypted_AES_256.txt", mode = 'wb')  # open the file in write-binary mode
AES_256_File_Decrypted.write(final_Decrypted_AES256_File)  # write down the decrypted file
AES_256_File_Decrypted.close()  # close the file
#----------------------------------------------------------------------------------------------------------------------------------------------------------------#
###### DES ENCRYPTION
# Read the file again and prepare the plain text (DES uses 8-byte blocks).
with open("Input.txt", 'r') as f:
    Plain_Text = f.read()
f.close()  # redundant: the "with" block already closed the file
# Pad the message with spaces up to a multiple of 8 (the DES block size).
while len(Plain_Text) % 8 != 0:
    Plain_Text = Plain_Text + " "
block_number_DES = len(Plain_Text) / 8  # number of 8-char blocks (float; int() applied in the loops)
DES_Key = os.urandom(8)  # 8 random bytes; DES uses 56 of the 64 bits (one bit per byte is parity)
DES_IV_Key = os.urandom(8)  # random 64-bit IV for DES-CBC
block_Cipher_DES = DES.new(DES_Key, DES.MODE_CBC, DES_IV_Key)  # DES object for encryption
###### ENCRYPTION FOR DES
Time_Start_DES = time.time()  # start of the DES timing window
final_Encrypted_DES_File = b''
for i in range(0, int(block_number_DES)):  # for every 8-char block of the message
    S = 8 * i  # start index of this block
    E = 8 * i + 8  # end index (exclusive)
    block = Plain_Text[S:E]  # take 8 characters each time
    block_Byte = bytes(block, 'utf-8')  # convert the string block to bytes
    encrypted_block = block_Cipher_DES.encrypt(block_Byte)  # encrypt one 8-byte block with DES
    final_Encrypted_DES_File = b"".join([final_Encrypted_DES_File, encrypted_block])  # append to the ciphertext
Time_End_DES = time.time()  # end of the DES timing window
Time_Spent_DES = Time_End_DES - Time_Start_DES  # calculate how many seconds were spent
print("This amount of time has been spend for DES encryption : " + str(Time_Spent_DES))
DES_File_Encrypted = open("Encrypted_DES.txt", mode = 'w', encoding='utf-8')  # open file in write mode
DES_File_Encrypted.write(str(final_Encrypted_DES_File))  # write down the encrypted file
DES_File_Encrypted.close()  # close the file
##### DECRYPTION FOR DES
final_Decrypted_DES_File = b''
block_Cipher_DES_Decrypt = DES.new(DES_Key, DES.MODE_CBC, DES_IV_Key)  # fresh DES object (CBC state) for decryption
for i in range(0, int(block_number_DES)):  # for every block of the ciphertext
    S = 8 * i  # start index of this block
    E = 8 * i + 8  # end index (exclusive)
    block = final_Encrypted_DES_File[S:E]
    decrypted_block = block_Cipher_DES_Decrypt.decrypt(block)  # decrypt one 8-byte block
    final_Decrypted_DES_File = b"".join([final_Decrypted_DES_File, decrypted_block])  # append to the recovered plaintext
DES_File_Decrypted = open("Decrypted_DES.txt", mode = 'wb')  # open a file in write-binary mode
DES_File_Decrypted.write(final_Decrypted_DES_File)  # write down the decrypted file
DES_File_Decrypted.close()  # close the file
| 3.296875 | 3 |
test/test_23_filterproplist.py | growell/svnhook | 1 | 12765880 | <filename>test/test_23_filterproplist.py
#!/usr/bin/env python
######################################################################
# Test Property List Filter
######################################################################
import os, re, sys, unittest, time
# Prefer local modules: put the project root ahead of site-packages on
# sys.path so the in-tree package is imported during development.
mylib = os.path.normpath(os.path.join(
    os.path.dirname(__file__), '..'))
if os.path.isdir(mylib): sys.path.insert(0, mylib)
from test.base import HookTestCase
class TestFilterPropList(HookTestCase):
    """Pre-Commit Property List Filter Tests

    Each test installs a "pre-commit.xml" hook configuration, applies a
    property change to a working-copy file, attempts to commit it, and
    checks whether the hook allowed or blocked the commit.
    """

    def setUp(self):
        # Derive the test configuration name (e.g. "23_filterproplist")
        # from this file's name ("test_23_filterproplist.py").
        super(TestFilterPropList, self).setUp(
            re.sub(r'^test_?(.+)\.[^\.]+$', r'\1',
                   os.path.basename(__file__)))

    def _commit_with_property(self, conf, name, value):
        """Install the hook conf, set a property, and attempt a commit.

        Returns the "svn commit" process object.
        """
        self.writeConf('pre-commit.xml', conf)
        self.setWcProperty(name, value, 'fileA1.txt')
        return self.commitWc()

    def _assert_commit_blocked(self, p, error_regex):
        """Verify the commit failed and the expected error was sent."""
        # This is NOT the hook script exit code. This is the
        # "svn commit" exit code - it indicates whether the commit
        # succeeded (zero) or failed (one).
        self.assertEqual(
            p.returncode, 1,
            'Error exit code not found:'
            ' exit code = {0}'.format(p.returncode))
        self.assertRegexpMatches(
            p.stderr.read(), error_regex,
            'Expected error message not returned')

    def _assert_commit_allowed(self, p):
        """Verify the commit succeeded with no error output."""
        self.assertEqual(
            p.returncode, 0,
            'Success exit code not found:'
            ' exit code = {0}'.format(p.returncode))
        self.assertRegexpMatches(
            p.stderr.read(), r'(?s)^\s*$',
            'Unexpected error message found')

    def test_01_no_regex(self):
        """No regex tags"""
        # A FilterPropList with neither a name nor a value regex is a
        # configuration error: the hook fails internally instead of
        # rejecting the commit with the configured message.
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<SendError>You're bothering me.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'junk', 'x')
        self._assert_commit_blocked(p, r'Internal hook error')
        # Verify that the detailed error is logged.
        self.assertLogRegexp(
            'pre-commit', r'\nValueError: Required tag missing',
            'Expected error not found in hook log')

    def test_02_name_match(self):
        """Property name match"""
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropNameRegex>junk</PropNameRegex>
<SendError>Cannot mark as junk.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'junk', 'x')
        self._assert_commit_blocked(p, r'Cannot mark as junk')

    def test_03_name_mismatch(self):
        """Property name mismatch"""
        # "zjunk" still contains "junk", but the regex is unanchored;
        # the original suite expects this commit to be allowed.
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropNameRegex>junk</PropNameRegex>
<SendError>Cannot mark as junk.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'zjunk', 'x')
        self._assert_commit_allowed(p)

    def test_04_value_match(self):
        """Property value match"""
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropValueRegex>junk</PropValueRegex>
<SendError>Cannot use junk value.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'zot', 'junk')
        self._assert_commit_blocked(p, r'Cannot use junk value')

    def test_05_value_mismatch(self):
        """Property value mismatch"""
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropValueRegex>junk</PropValueRegex>
<SendError>Cannot mark as junk.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'zot', 'blah')
        self._assert_commit_allowed(p)

    def test_06_both_match(self):
        """Property name and value match"""
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropNameRegex>stuff</PropNameRegex>
<PropValueRegex>junk</PropValueRegex>
<SendError>Cannot use junk value for stuff.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'stuffing', 'some junk')
        self._assert_commit_blocked(p, r'Cannot use junk value for stuff')

    def test_07_both_mismatch(self):
        """Property name and value mismatch"""
        # The value regex is anchored ("^junk"), so "some junk" does not
        # match and the commit must be allowed.
        p = self._commit_with_property('''\
<?xml version="1.0"?>
<Actions>
<FilterCommitList>
<PathRegex>.*</PathRegex>
<FilterPropList>
<PropNameRegex>stuff</PropNameRegex>
<PropValueRegex>^junk</PropValueRegex>
<SendError>Cannot use junk value for stuff.</SendError>
</FilterPropList>
</FilterCommitList>
</Actions>
''', 'stuffing', 'some junk')
        self._assert_commit_allowed(p)
# Allow manual execution of tests.
if __name__ == '__main__':
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    for test_class in [TestFilterPropList]:
        runner.run(loader.loadTestsFromTestCase(test_class))
########################### end of file ##############################
| 2.15625 | 2 |
accelerator/tests/factories/application_factory.py | masschallenge/django-accelerator | 6 | 12765881 | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
import swapper
from factory import LazyFunction, SubFactory
from factory.django import DjangoModelFactory
from pytz import utc
from accelerator.tests.factories.application_type_factory import (
ApplicationTypeFactory
)
from accelerator.tests.factories.program_cycle_factory import (
ProgramCycleFactory
)
from accelerator.tests.factories.startup_factory import StartupFactory
from accelerator_abstract.models.base_application import (
INCOMPLETE_APP_STATUS,
)
Application = swapper.load_model('accelerator', 'Application')
class ApplicationFactory(DjangoModelFactory):
    """Factory for Application instances.

    The concrete model is resolved through swapper, so projects may
    substitute their own Application implementation.
    """

    class Meta:
        model = Application

    cycle = SubFactory(ProgramCycleFactory)
    startup = SubFactory(StartupFactory)
    application_type = SubFactory(ApplicationTypeFactory)
    application_status = INCOMPLETE_APP_STATUS
    # BUG FIX: this datetime used to be computed once at class-definition
    # (import) time, so every instance created afterwards - across an
    # entire test run - shared the same stale timestamp.  LazyFunction
    # re-evaluates "now minus two days" for each generated object.
    submission_datetime = LazyFunction(
        lambda: utc.localize(datetime.now() + timedelta(-2))
    )
| 1.976563 | 2 |
etd_drop/wsgi.py | MetaArchive/etd-drop | 1 | 12765882 | """
WSGI config for etd_drop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "etd_drop.settings")
#Attempt to set environment variables from DOTENV, if we've been provided
#a path
DOTENV_path = os.environ.get('DOTENV', None)
if DOTENV_path is not None:
import dotenv
dotenv.read_dotenv(DOTENV_path)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 1.84375 | 2 |
utils/pylib/flow.py | jianzi123/contrail-vrouter | 0 | 12765883 | <reponame>jianzi123/contrail-vrouter
#
# Copyright (c) 2019 Juniper Networks, Inc. All rights reserved.
#
import constants
from object_base import *
from vr_py_sandesh.vr_py.ttypes import *
class Flow(ObjectBase, vr_flow_req):
    """Base class to create flows

    Thin wrapper around the sandesh ``vr_flow_req`` message.  The
    constructor only populates request fields; nothing is sent to
    vrouter until ``sync()`` (inherited from ObjectBase) is called.
    """
    def __init__(
            self,
            sip_l,
            sip_u,
            dip_l,
            dip_u,
            sport,
            dport,
            proto,
            family,
            idx=-1,
            ridx=-1,
            flags=constants.VR_FLOW_FLAG_ACTIVE,
            flow_nh_idx=None,
            src_nh_idx=None,
            qos_id=-1,
            action=constants.VR_FLOW_ACTION_FORWARD,
            ecmp_nh_index=-1,
            flow_vrf=None,
            rflow_nh_idx=0,
            flags1=None,
            mirr_idx=None,
            extflags=None,
            **kwargs):
        # sip_l/sip_u and dip_l/dip_u are the lower/upper 64-bit words of
        # the source/destination addresses (upper words are 0 for IPv4).
        # sport/dport are given in host byte order and converted below.
        super(Flow, self).__init__()
        vr_flow_req.__init__(self)
        self.fr_op = constants.FLOW_OPER_SET
        self.fr_index = idx
        self.fr_rindex = ridx
        self.fr_flow_sip_l = sip_l
        self.fr_flow_sip_u = sip_u
        self.fr_flow_dip_l = dip_l
        self.fr_flow_dip_u = dip_u
        self.fr_family = family
        self.fr_flow_proto = proto
        # Ports travel in network byte order in the request.
        self.fr_flow_sport = socket.htons(sport)
        self.fr_flow_dport = socket.htons(dport)
        # The ACTIVE bit is always set, regardless of caller-given flags.
        self.fr_flags = constants.VR_FLOW_FLAG_ACTIVE | flags
        self.fr_flow_nh_id = flow_nh_idx
        self.fr_flow_vrf = flow_vrf
        self.fr_src_nh_index = src_nh_idx
        self.fr_ecmp_nh_index = ecmp_nh_index
        self.fr_action = action
        self.fr_qos_id = qos_id
        self.fr_flags1 = flags1
        self.fr_mir_id = mirr_idx
        self.fr_extflags = extflags
        # set reverse flow params as mirror of forward flow by default
        self.rflow_sip_u = self.fr_flow_dip_u
        self.rflow_sip_l = self.fr_flow_dip_l
        self.rflow_dip_u = self.fr_flow_sip_u
        self.rflow_dip_l = self.fr_flow_sip_l
        self.rflow_sport = self.fr_flow_dport
        self.rflow_dport = self.fr_flow_sport
        self.rflow_nh_id = rflow_nh_idx
        self.sreq_class = vr_flow_req.__name__

    def __repr__(self):
        """Display basic details of the flow"""
        return "Flow(sip:{} dip:{} sport:{} dport{})".format(
            self.fr_flow_sip_l, self.fr_flow_dip_l,
            self.fr_flow_sport, self.fr_flow_dport)

    def __str__(self):
        """Display basic details of the flow"""
        # NOTE(review): identical to __repr__; __str__ could be dropped.
        return "Flow(sip:{} dip:{} sport:{} dport{})".format(
            self.fr_flow_sip_l, self.fr_flow_dip_l, self.fr_flow_sport,
            self.fr_flow_dport)

    def sync_and_add_reverse_flow(self):
        """
        Sends a message to add forward flow and
        create corresponding reverse flow, then link both
        """
        # get forward flow index and gen_id
        self.sync(resp_required=True)
        fr_indx = self.get_fr_index()
        fr_genid = self.get_fr_gen_id()
        # create reverse flow by swapping the forward flow's five-tuple
        rflow = Flow(sip_l=self.fr_flow_dip_l, sip_u=self.fr_flow_dip_u,
                     dip_l=self.fr_flow_sip_l, dip_u=self.fr_flow_sip_u,
                     sport=self.fr_flow_dport, dport=self.fr_flow_sport,
                     proto=self.fr_flow_proto, family=self.fr_family)
        rflow.fr_op = constants.FLOW_OPER_SET
        rflow.fr_index = -1
        # Point the reverse flow back at the just-installed forward flow.
        rflow.fr_rindex = fr_indx
        rflow.fr_action = constants.VR_FLOW_ACTION_FORWARD
        rflow.fr_flags = \
            constants.VR_FLOW_FLAG_ACTIVE | constants.VR_RFLOW_VALID
        rflow.fr_flow_nh_id = self.rflow_nh_id
        rflow.fr_src_nh_index = self.rflow_nh_id
        rflow.fr_qos_id = self.fr_qos_id
        rflow.fr_ecmp_nh_index = self.fr_ecmp_nh_index
        rflow.fr_flow_vrf = self.fr_flow_vrf
        rflow.rflow_sip_u = rflow.fr_flow_dip_u
        rflow.rflow_sip_l = rflow.fr_flow_dip_l
        rflow.rflow_dip_u = rflow.fr_flow_sip_u
        rflow.rflow_dip_l = rflow.fr_flow_sip_l
        rflow.rflow_nh_id = rflow.fr_flow_nh_id
        rflow.rflow_sport = self.rflow_sport
        # sync reverse flow
        rflow.sync(resp_required=True)
        rfr_indx = rflow.get_fr_index()
        # Update the forward flow so it points at the reverse flow.
        self.fr_index = fr_indx
        self.fr_rindex = int(rfr_indx)
        self.fr_gen_id = int(fr_genid)
        self.fr_flags |= constants.VR_RFLOW_VALID
        self.sync(resp_required=True)

    def sync_and_link_flow(self, flow2):
        """
        Links the current flow with flow2
        """
        # Install this flow first to obtain its index and gen_id.
        self.sync(resp_required=True)
        fr_indx = self.get_fr_index()
        fr_genid = self.get_fr_gen_id()
        # update flow2 so it references this flow as its reverse
        flow2.fr_rindex = fr_indx
        flow2.fr_flags |= constants.VR_RFLOW_VALID
        flow2.sync(resp_required=True)
        rfr_indx = flow2.get_fr_index()
        # Update forward flow with the reverse-flow linkage.
        self.fr_index = fr_indx
        self.fr_rindex = int(rfr_indx)
        self.fr_gen_id = int(fr_genid)
        self.fr_flags |= constants.VR_RFLOW_VALID
        self.sync()
        # Re-sync flow2 with its now-known index/gen_id.
        flow2.fr_index = flow2.get_fr_index()
        flow2.fr_gen_id = flow2.get_fr_gen_id()
        flow2.sync()

    def get(self, key):
        """
        Queries vrouter and return the key value from the response xml file
        """
        return super(Flow, self).get(key)

    def get_fr_index(self):
        """
        Queries vrouter and returns fresp_index value from the response xml\
        file
        """
        return int(self.get('fresp_index'))

    def get_fr_gen_id(self):
        """
        Queries vrouter and returns fresp_gen_id value from the response xml\
        file
        """
        return int(self.get('fresp_gen_id'))

    def delete(self):
        """Deletes flow"""
        # A delete is a flow-set with all flags cleared; index and gen_id
        # must match the entry currently installed in vrouter.
        self.fr_index = self.get_fr_index()
        self.fr_gen_id = self.get_fr_gen_id()
        self.fr_flags = 0
        self.sync(resp_required=True)
class InetFlow(Flow):
    """Convenience wrapper that builds an IPv4 (AF_INET) flow.

    Mandatory parameters: sip and dip (dotted-quad strings), sport,
    dport and proto (ints).  Optional: flags, flow_nh_idx, flow_vrf and
    rflow_nh_idx, all forwarded to Flow; remaining keyword arguments are
    passed through unchanged.
    """

    def __init__(self, sip, dip, sport, dport, proto,
                 flow_nh_idx=None, flow_vrf=None, rflow_nh_idx=None,
                 flags=constants.VR_FLOW_FLAG_ACTIVE, **kwargs):
        # Convert the dotted-quad strings to integers; for IPv4 the
        # upper 64-bit address words are always zero.
        src_lo = self.vt_ipv4(sip)
        dst_lo = self.vt_ipv4(dip)
        super(InetFlow, self).__init__(
            src_lo, 0, dst_lo, 0,
            sport, dport, proto,
            family=constants.AF_INET,
            flags=flags,
            flow_nh_idx=flow_nh_idx,
            flow_vrf=flow_vrf,
            rflow_nh_idx=rflow_nh_idx,
            **kwargs)
class Inet6Flow(Flow):
    """Convenience wrapper that builds an IPv6 (AF_INET6) flow.

    Mandatory parameters: sip6_str and dip6_str (IPv6 address strings),
    proto, sport and dport (ints).  Optional: mirr_idx (mirror index);
    any other keyword arguments are forwarded to Flow (flags, qos_id,
    action, ecmp_nh_index, flow_vrf, rflow_nh_idx, ...).
    """

    def __init__(self, sip6_str, dip6_str, proto, sport, dport,
                 mirr_idx=None, **kwargs):
        # vt_ipv6 returns the (upper, lower) 64-bit halves of the address.
        src_hi, src_lo = self.vt_ipv6(sip6_str)
        dst_hi, dst_lo = self.vt_ipv6(dip6_str)
        super(Inet6Flow, self).__init__(
            src_lo, src_hi, dst_lo, dst_hi,
            sport, dport, proto,
            constants.AF_INET6,
            mirr_idx=mirr_idx,
            **kwargs)
class NatFlow(Flow):
    """IPv4 flow whose action is NAT.

    Unlike the base class, the reverse-flow tuple is not a mirror of the
    forward tuple: rflow_sip, rflow_dip, rflow_sport and rflow_nh_idx
    supply the translated addresses/port.  flow_dvrf gives the
    destination VRF; sip/dip/sport/dport/proto describe the forward
    five-tuple.  Remaining keyword arguments are forwarded to Flow.
    """

    def __init__(self, sip, dip, sport, dport, proto, flow_dvrf,
                 rflow_sip, rflow_dip, rflow_nh_idx, rflow_sport,
                 **kwargs):
        fwd_src = self.vt_ipv4(sip)
        fwd_dst = self.vt_ipv4(dip)
        super(NatFlow, self).__init__(
            fwd_src, 0, fwd_dst, 0,
            sport, dport, proto,
            family=constants.AF_INET,
            **kwargs)
        # NAT flows always translate, so override the default action.
        self.fr_action = constants.VR_FLOW_ACTION_NAT
        self.fr_flow_dvrf = flow_dvrf
        # Replace the mirrored reverse tuple with the translated one.
        self.rflow_sip_u, self.rflow_sip_l = 0, self.vt_ipv4(rflow_sip)
        self.rflow_dip_u, self.rflow_dip_l = 0, self.vt_ipv4(rflow_dip)
        self.rflow_sport = rflow_sport
        self.rflow_nh_id = rflow_nh_idx
class MirrorFlow(Flow):
    """Placeholder for mirror flows; not implemented yet."""

    def __init__(self):
        # BUG FIX: the original signature was "def __init__():" (missing
        # ``self``), so MirrorFlow() always raised a TypeError.  The
        # stub still does nothing (Flow.__init__ is intentionally not
        # called, matching the original placeholder behavior).
        pass
| 2.03125 | 2 |
kbcqa/method_sp/grounding/_2_1_grounded_graph/node_linking/entity_linking_aqqu_vocab/u.py | nju-websoft/SkeletonKBQA | 6 | 12765884 | """
Copyright 2015, University of Freiburg.
<NAME> <<EMAIL>>
"""
import io
import re
def normalize_entity_name(name):
    """Canonicalize an entity name for matching.

    Lowercases the name and removes a fixed set of punctuation,
    separator and whitespace characters.
    """
    normalized = name.lower()
    for char in ('!', '.', ',', '-', '_', ' ', "'"):
        normalized = normalized.replace(char, '')
    return normalized
def read_abbreviations(abbreviations_file):
    '''
    Return a set of lower-cased abbreviations, one read per line.
    :param abbreviations_file: path to a UTF-8 encoded text file.
    :return: set of abbreviation strings.
    '''
    abbreviations = set()
    # BUG FIX: each line used to be decoded by hand with
    # line.strip().decode('utf-8'), which only works on Python 2 (str
    # has no decode() on Python 3).  io.open with an explicit encoding
    # decodes transparently on both Python 2 and Python 3.
    with io.open(abbreviations_file, 'r', encoding='utf-8') as f:
        for line in f:
            abbreviations.add(line.strip().lower())
    return abbreviations
def remove_abbreviations_from_entity_name(entity_name,
                                          abbreviations):
    """Lowercase *entity_name* and drop tokens that are abbreviations.

    Tokens are split on single spaces; the surviving tokens are joined
    back with single spaces.
    """
    kept = []
    for token in entity_name.lower().split(' '):
        if token not in abbreviations:
            kept.append(token)
    return ' '.join(kept)
def remove_prefixes_from_name(name):
    """Strip a leading 'the' from *name*.

    Note: there is no word-boundary check, so any name beginning with
    the letters 'the' (e.g. 'theory') is truncated, and a following
    space is kept ('the beatles' -> ' beatles').
    """
    return name[3:] if name.startswith('the') else name
def remove_suffixes_from_name(name):
    """Strip trailing ' #<digits>' and ' (<text>)' suffixes, if any.

    The cheap substring test skips the regex work in the common case
    where neither suffix marker appears in the name.
    """
    if '#' not in name and '(' not in name:
        return name
    return remove_bracket_suffix(remove_number_suffix(name))
def remove_number_suffix(name):
    """Strip a trailing ' #<digits>' suffix from *name*, if present."""
    match = re.match(r'.*( #[0-9]+)$', name)
    if not match:
        return name
    # Cut at the point where the suffix group starts.
    return name[:match.start(1)]
def remove_bracket_suffix(name):
    """Strip a trailing ' (<text>)' suffix from *name*, if present."""
    match = re.match(r'.*( \([^\(\)]+\))$', name)
    if not match:
        return name
    # Cut at the point where the suffix group starts.
    return name[:match.start(1)]
test.py | nmningmei/simple_tensorflow_logistic_regression_classifier | 0 | 12765885 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 19 11:30:56 2022
@author: adowa
"""
import numpy as np
import tensorflow as tf
from utils import (build_logistic_regression,
compile_logistic_regression)
from tensorflow.keras import regularizers
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
if __name__ == "__main__":
    n_epochs = int(1e3)  # just a large upper bound; early stopping ends training
    print_train = True  # verbosity flag for model.fit
    # clear memory states
    tf.keras.backend.clear_session()
    # generate random test data (binary classification problem)
    X,y = make_classification(n_samples = 150,
                              n_features = 100,
                              n_informative = 3,
                              n_redundant = 10,
                              n_classes = 2,
                              n_clusters_per_class = 4,
                              flip_y = .01,
                              class_sep = .75,# how easy to separate the two classes
                              shuffle = True,
                              random_state = 12345,
                              )
    # one-hot encoding for softmax: column 0 = class, column 1 = complement
    y = y.reshape((-1,1))
    y = np.hstack([y,1-y])
    # split the data into train, validation, and test
    X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = .1,random_state = 12345)
    X_train,X_valid,y_train,y_valid = train_test_split(X_train,y_train,test_size = .1,random_state = 12345)
    # add some 0.5-labeled (ambiguous) data to the training set - don't use too much
    X_noise = np.random.normal(X_train.mean(),X_train.std(),size = (int(X_train.shape[0]/4),100))
    y_noise = np.array([[0.5,0.5]] * int(X_train.shape[0]/4))
    X_train = np.concatenate([X_train,X_noise])
    y_train = np.concatenate([y_train,y_noise])
    # X_noise = np.random.normal(X_test.mean(),X_test.std(),size = (int(X_test.shape[0]/2),100))
    # y_noise = np.array([[0.5,0.5]] * int(X_test.shape[0]/2))
    # X_test = np.concatenate([X_test,X_noise])
    # y_test = np.concatenate([y_test,y_noise])
    # build the model (seeded for reproducibility)
    tf.random.set_seed(12345)
    logistic_regression = build_logistic_regression(
        input_size = X_train.shape[1],
        output_size = 2,
        special = False,
        kernel_regularizer = regularizers.L2(l2 = 1e-3),
        activity_regularizer = regularizers.L1(l1 = 1e-3),
        print_model = True,
        )
    # compile the model; callbacks implement early stopping (tol/patience)
    logistic_regression,callbacks = compile_logistic_regression(
        logistic_regression,
        model_name = 'temp.h5',
        optimizer = None,
        loss_function = None,
        metric = None,
        callbacks = None,
        learning_rate = 1e-3,
        tol = 1e-4,
        patience = 10,
        )
    # train and validate the model
    logistic_regression.fit(
        X_train,
        y_train,
        batch_size = 4,
        epochs = n_epochs,
        verbose = print_train,
        callbacks = callbacks,
        validation_data = (X_valid,y_valid),
        shuffle = True,
        class_weight = None,# tf has this but I don't think it is the same as sklearn
        )
    # evaluate on the held-out test split and report AUC
    y_pred = logistic_regression.predict(X_test)
    print(roc_auc_score(y_test,y_pred,))
libraries/botbuilder-dialogs/botbuilder/dialogs/choices/choice_factory.py | awaemmanuel/botbuilder-python | 1 | 12765886 | <filename>libraries/botbuilder-dialogs/botbuilder/dialogs/choices/choice_factory.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from botbuilder.core import CardFactory, MessageFactory
from botbuilder.schema import ActionTypes, Activity, CardAction, HeroCard, InputHints
from . import Channel, Choice, ChoiceFactoryOptions
class ChoiceFactory:
@staticmethod
def for_channel(
channel_id: str,
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
) -> Activity:
if channel_id is None:
channel_id = ""
if choices is None:
choices = []
# Find maximum title length
max_title_length = 0
for choice in choices:
if choice.action is not None and choice.action.title not in (None, ""):
l = len(choice.action.title)
else:
l = len(choice.value)
if l > max_title_length:
max_title_length = l
# Determine list style
supports_suggested_actions = Channel.supports_suggested_actions(
channel_id, len(choices)
)
supports_card_actions = Channel.supports_card_actions(channel_id, len(choices))
max_action_title_length = Channel.max_action_title_length(channel_id)
long_titles = max_title_length > max_action_title_length
if not long_titles and not supports_suggested_actions and supports_card_actions:
# SuggestedActions is the preferred approach, but for channels that don't
# support them (e.g. Teams, Cortana) we should use a HeroCard with CardActions
return ChoiceFactory.hero_card(choices, text, speak)
elif not long_titles and supports_suggested_actions:
# We always prefer showing choices using suggested actions. If the titles are too long, however,
# we'll have to show them as a text list.
return ChoiceFactory.suggested_action(choices, text, speak)
elif not long_titles and len(choices) <= 3:
# If the titles are short and there are 3 or less choices we'll use an inline list.
return ChoiceFactory.inline(choices, text, speak, options)
else:
# Show a numbered list.
return List(choices, text, speak, options)
@staticmethod
def inline(
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
) -> Activity:
if choices is None:
choices = []
if options is None:
options = ChoiceFactoryOptions()
opt = ChoiceFactoryOptions(
inline_separator=options.inline_separator or ", ",
inline_or=options.inline_or or " or ",
inline_or_more=options.inline_or_more or ", or ",
include_numbers=options.include_numbers or True,
)
# Format list of choices
connector = ""
txt_builder: List[str] = [text]
txt_builder.append(" ")
for index, choice in enumerate(choices):
title = (
choice.action.title
if (choice.action is not None and choice.action.title is not None)
else choice.value
)
txt_builder.append(connector)
if opt.include_numbers is True:
txt_builder.append("(")
txt_builder.append(f"{index + 1}")
txt_builder.append(") ")
txt_builder.append(title)
if index == (len(choices) - 2):
connector = opt.inline_or if index == 0 else opt.inline_or_more
connector = connector or ""
else:
connector = opt.inline_separator or ""
# Return activity with choices as an inline list.
return MessageFactory.text(
"".join(txt_builder), speak, InputHints.expecting_input
)
@staticmethod
def list_style(
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
):
if choices is None:
choices = []
if options is None:
options = ChoiceFactoryOptions()
if options.include_numbers is None:
include_numbers = True
else:
include_numbers = options.include_numbers
# Format list of choices
connector = ""
txt_builder = [text]
txt_builder.append("\n\n ")
for index, choice in enumerate(choices):
title = (
choice.action.title
if choice.action is not None and choice.action.title is not None
else choice.value
)
txt_builder.append(connector)
if include_numbers:
txt_builder.append(f"{index + 1}")
txt_builder.append(". ")
else:
txt_builder.append("- ")
txt_builder.append(title)
connector = "\n "
# Return activity with choices as a numbered list.
txt = "".join(txt_builder)
return MessageFactory.text(txt, speak, InputHints.expecting_input)
    @staticmethod
    def suggested_action(
        choices: List[Choice], text: str = None, speak: str = None
    ) -> Activity:
        """
        Creates a message activity that presents the choices as suggested
        actions (buttons that disappear after being tapped).

        :param choices: Choices to convert to suggested actions.
        :param text: (Optional) message text.
        :param speak: (Optional) SSML to speak for the message.
        """
        # Return activity with choices as suggested actions
        return MessageFactory.suggested_actions(
            ChoiceFactory._extract_actions(choices),
            text,
            speak,
            InputHints.expecting_input,
        )
    @staticmethod
    def hero_card(
        choices: List[Choice], text: str = None, speak: str = None
    ) -> Activity:
        """
        Creates a message activity that presents the choices as buttons on a
        HeroCard (used for channels without suggested-action support).

        :param choices: Choices to convert to card buttons.
        :param text: (Optional) card text.
        :param speak: (Optional) SSML to speak for the message.
        """
        attachment = CardFactory.hero_card(
            HeroCard(text=text, buttons=ChoiceFactory._extract_actions(choices))
        )

        # Return activity with choices as HeroCard with buttons
        return MessageFactory.attachment(
            attachment, None, speak, InputHints.expecting_input
        )
@staticmethod
def _to_choices(choices: List[str]) -> List[Choice]:
if choices is None:
return []
else:
return [Choice(value=choice.value) for choice in choices]
@staticmethod
def _extract_actions(choices: List[Choice]) -> List[CardAction]:
if choices is None:
choices = []
card_actions: List[CardAction] = []
for choice in choices:
if choice.action is not None:
card_action = choice.action
else:
card_action = CardAction(
type=ActionTypes.im_back, value=choice.value, title=choice.value
)
card_actions.append(card_action)
return card_actions
| 2.515625 | 3 |
The_Basics/The_Basics_Data_Types.py | vishu22314558/py-code | 0 | 12765887 | import datetime
# Tutorial script demonstrating basic Python data types:
# datetime, int/str/float, list, range, dict, and tuple.
mynow = datetime.datetime.now()
print("My datetime is " , mynow)
mynumber = 10
mytext = "Hello"
print(mynumber, mytext)
x = 10
y = "10"
z = 10.1
# int + int adds numerically ...
sum1 = x+x
# ... but str + str concatenates: "10" + "10" == "1010".
sum2 = y+y
print(sum1 , sum2)
print(type(x), type(y), type(z))
## List Type
grade = [9.5,8.5,6.45]
## range - We can use range to create a list automatically
test_range = list(range(1,10))
print(test_range)
# NOTE: despite the variable name, the step is 2 (odd numbers 1..19).
test_range_by_3 = list(range(1,20,2))
print(test_range_by_3)
# Lists are heterogeneous: floats, ints, strings, and nested lists can mix.
rainfall = [10.0,10,'Test',[1,2,3,4]]
print(rainfall[2])
######### use dir() for help(str.upper) ## use q for quit help
## built in function dir(__builtins__)
grade_1 = [9.5,8.5,6.45,21]
grade_1_sum = sum(grade_1)
print(grade_1_sum)
grade_1_len = len(grade_1)
print(grade_1_len)
# Average = sum / count.
grade_avg = grade_1_sum/grade_1_len
print(grade_avg)
## dict - sum()/len() work on the .values() view just like on a list
dict_grade = {"Kar":10,"Tes":10,"Vis":10}
dict_grade_sum = sum(dict_grade.values())
dict_grade_len = len(dict_grade.values())
dict_grade_avg = dict_grade_sum/dict_grade_len
print(dict_grade_avg)
## tuples use () and are immutable (cannot add new values)
day_temperatures = {'morning': (1.1 , 2.2, 3.4), 'noon': (2.3, 4.5, 3.1), 'evening': (2.4, 3.5, 6.5)}
| 3.921875 | 4 |
Basics/flask_send_email.py | abondar24/FlaskDemo | 0 | 12765888 | <filename>Basics/flask_send_email.py
# flask db and email demo
# I have combined db adding ans sending emails demo as it has been done in the book
# Before run setup MAIL_USERNAME,MAIL_PASSWORD
import os
from flask import Flask
from flask_mail import Mail
from flask_mail import Message
from flask import redirect
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask import render_template
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask import session
from flask import url_for
# --- Application + extension configuration --------------------------------
# Mail credentials come from the environment (MAIL_USERNAME/MAIL_PASSWORD);
# Gmail SMTP is used with TLS on port 587.  The SQLite database lives next
# to this file.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = "SS-217."
app.config['FLASKY_MAIL_SENDER'] = '<NAME> <<EMAIL>>'
# Admin notifications go to the mail account itself.
app.config['FLASKY_ADMIN'] =app.config['MAIL_USERNAME'] #os.environ.get('FLASKY_ADMIN')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
# NOTE(review): hardcoded SECRET_KEY is fine for a demo but must come from
# the environment in any real deployment.
app.config['SECRET_KEY'] = 'fffe16hff'
db = SQLAlchemy(app)
class NameForm(FlaskForm):
    """Single-field WTForms form asking for the visitor's name."""
    name = StringField('What is your name?', validators=[DataRequired()])
    submit = SubmitField('Submit')
# models for db
class Role(db.Model):
    """ORM model for the `roles` table; one role has many users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # One-to-many: `role.users` lists users; `user.role` backrefs the role.
    users = db.relationship('User', backref='role')

    def __repr__(self):
        return '<Role %r>' % self.name
class User(db.Model):
    """ORM model for the `users` table; each user may belong to one role."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __repr__(self):
        return '<User %r>' % self.username
# send email in a sep thread
def send_async_email(app, msg):
    """Send ``msg`` from a worker thread, pushing an app context first.

    NOTE(review): this helper is defined but never called in this file —
    ``send_email`` below sends synchronously.  Confirm whether asynchronous
    sending was intended.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render and send an email with both plain-text and HTML bodies.

    :param to: recipient address.
    :param subject: subject line (prefixed with FLASKY_MAIL_SUBJECT_PREFIX).
    :param template: template path without extension; ``<template>.txt`` and
        ``<template>.html`` are rendered with ``kwargs``.
    """
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject,
                  sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    # BUG FIX: the original assigned the rendered HTML template to
    # ``msg.body`` a second time, overwriting the text body and leaving
    # ``msg.html`` unset (so no HTML part was ever sent).
    msg.html = render_template(template + '.html', **kwargs)
    mail.send(msg)
@app.route('/', methods=['GET', 'POST'])
def db_form():
    """Landing page: ask for a name, persist unknown users, notify the admin.

    On POST, the submitted name is looked up in the `users` table; a new user
    is inserted and (when FLASKY_ADMIN is configured) a notification email is
    sent.  Uses the POST/redirect/GET pattern so a browser refresh does not
    resubmit the form.
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            # First time we see this name: persist it and notify the admin.
            user = User(username=form.name.data)
            db.session.add(user)
            session['known'] = False
            if app.config['FLASKY_ADMIN']:
                send_email(app.config['FLASKY_ADMIN'], 'New User',
                           'mail/new_user', user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        # Redirect so a refresh issues a GET instead of re-POSTing.
        return redirect(url_for('db_form'))
    return render_template('dbForm.html', form=form,
                           name=session.get('name'),
                           known=session.get('known', False))
if __name__ == "__main__":
    # Initialize the Flask extensions at startup and run the dev server.
    bootstrap = Bootstrap(app)
    moment = Moment(app)
    mail = Mail(app)
    app.run(debug=True)
| 2.96875 | 3 |
4.3/src/vnfd/dnsserver_vnf/scripts/dnsserver_initial_config.py | RIFTIO/RIFT.io-Descriptor-Packages | 3 | 12765889 | #!/usr/bin/env python3
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
import paramiko
'''
config-agent: {}
nsr_name: ccore_testbed_nsd
parameter: {}
vnfr:
1:
connection_point:
- ip_address: 172.16.58.3
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.180
mgmt_port: 0
name: NS1__homesteadprov_vnfd__1
vdur:
- id: 723aff17-7e10-423f-8780-28b6287608d2
management_ip: 10.66.113.180
name: iovdu_0
vm_management_ip: 10.0.113.16
2:
connection_point:
- ip_address: 192.168.127.12
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.178
mgmt_port: 0
name: NS1__homesteadprov_vnfd__2
vdur:
- id: 10684c1d-13fc-4164-a6d0-381ac725f85c
management_ip: 10.66.113.178
name: iovdu_0
vm_management_ip: 10.0.113.15
3:
connection_point:
- ip_address: 172.16.31.10
name: homesteadprov_vnfd/sigport
mgmt_ip_address: 10.66.113.181
mgmt_port: 0
name: NS1__homesteadprov_vnfd__3
vdur:
- id: 84c3f536-a56a-4040-ac39-152574397300
management_ip: 10.66.113.181
name: iovdu_0
vm_management_ip: 10.0.113.18
4:
connection_point:
- ip_address: 172.16.17.32
name: sipp_vnfd/cp0
mgmt_ip_address: 10.66.113.177
mgmt_port: 2022
name: NS1__sipp_vnfd__4
vdur:
- id: 85035b3b-14b5-4f61-a943-66b0e1ef858a
management_ip: 10.66.113.177
name: iovdu
vm_management_ip: 10.0.113.14
5:
mgmt_ip_address: 10.66.113.179
mgmt_port: 0
name: NS1__dnsserver_vnfd__5
vdur:
- id: 4f365a48-49b5-44c0-b38d-50113ddc7fa4
management_ip: 10.66.113.179
name: iovdu_0
vm_management_ip: 10.0.113.17
6:
connection_point:
- ip_address: 172.16.31.10
name: sprout_vnfd/sigport
mgmt_ip_address: 10.66.113.182
mgmt_port: 0
name: NS1__sprout_vnfd__6
vdur:
- id: 604895c1-a7fe-4341-90d6-9a1c5b56dabd
management_ip: 10.66.113.182
name: iovdu_0
vm_management_ip: 10.0.113.19
vnfr_name: NS1__dnsserver_vnfd__5
'''
class ConfigurationError(Exception):
    """Raised when configuring the DNS server VNF fails."""
def copy_file_ssh_sftp_data(server, username, dirname, filename, data):
    """Write ``data`` to ``dirname/filename`` on ``server`` over SFTP.

    :param server: host to connect to over SSH.
    :param username: SSH login user.
    :param dirname: remote directory (joined with '/' to the filename).
    :param filename: remote file to create/overwrite.
    :param data: content to write (bytes or str).
    """
    sshclient = paramiko.SSHClient()
    sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    sshclient.load_system_host_keys(filename="/dev/null")
    # NOTE(review): the password looks like a scrubbed placeholder — confirm
    # the real credential handling (ideally key-based auth) before use.
    sshclient.connect(server, username=username, password="<PASSWORD>")
    # BUG FIX: the original leaked the SSH connection, the SFTP channel and
    # the remote file handle if any step raised; release them in all paths.
    try:
        sftpclient = sshclient.open_sftp()
        try:
            filehdl = sftpclient.open(dirname + '/' + filename, 'w')
            try:
                filehdl.write(data)
            finally:
                filehdl.close()
        finally:
            sftpclient.close()
    finally:
        sshclient.close()
'''
script to configure DNS server
'''
def configure_dns(logger, run_dir, vnf_mgmt_ip, vm_mgmt_ip):
    """Configure the DNS server VNF over SSH.

    Uploads a static zone file for test.com via SFTP, then generates and runs
    an `expect` script that installs and configures `unbound` on the VM.

    :param logger: logger for progress/debug messages.
    :param run_dir: directory where the generated expect script is written.
    :param vnf_mgmt_ip: management IP of the DNS VNF (SSH target).
    :param vm_mgmt_ip: VM management IP (substituted into the script template;
        not referenced by the current template body).
    :raises ConfigurationError: if the expect script exits non-zero.
    """
    #Add file to DNS server config
    file_str = '''
# Static DNS for IMS solution
local-zone: "test.com." static
# Management A records
# A records for individual Clearwater nodes
# A record load-balancing
# S-CSCF cluster
# I-CSCF cluster
# Reverse lookups for individual nodes
'''.encode('ascii')
    copy_file_ssh_sftp_data(vnf_mgmt_ip, 'fedora', '/tmp/', 'test.com.conf', file_str)
    # Generate a timestamped expect script that drives the remote setup.
    sh_file = "{}/configure_dnsserver-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
    logger.debug("Creating DNS server script file %s", sh_file)
    with open(sh_file, "w") as f:
        f.write(r'''#!/usr/bin/expect -f
set login "fedora"
set pw "fedora"
set success 0
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@{vnf_mgmt_ip}
set spid $spawn_id
set timeout 60
expect -i $spid \
"*?assword:" {{
exp_send -i $spid "$pw\r"
if {{ $success == 0 }} {{
incr success -1
exp_continue
}}
}} "]$ " {{
set success 1
}} "yes/no" {{
exp_send -i $spid "yes\r"
exp_continue
}} timeout {{
set success -1
}}
send "sudo su\r"
expect "]# "
send "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf\r"
expect "]# "
sleep 5
send "yum install -y unbound\r"
expect "]# "
send "sed -iE 's/# interface: 0\.0\.0\.0/interface: 0\.0\.0\.0/' /etc/unbound/unbound.conf\r"
expect "]# "
send "sed -iE 's/# access-control: 0\.0\.0\.0\\/0.*/access-control: 0\.0\.0\.0\\/0 allow/' /etc/unbound/unbound.conf\r"
expect "]# "
send "sed -iE 's/interface-automatic: no/interface-automatic: yes/' /etc/unbound/unbound.conf\r"
expect "]# "
send "cp /tmp/test.com.conf /etc/unbound/local.d/test.com.conf\r"
expect "]# "
exp_close -i $spid
'''.format(vnf_mgmt_ip=vnf_mgmt_ip, vm_mgmt_ip=vm_mgmt_ip))
    # Make the script executable for the owner only, then run it.
    os.chmod(sh_file, stat.S_IRWXU)
    cmd = "{sh_file}".format(sh_file=sh_file)
    logger.debug("Executing shell cmd : %s", cmd)
    rc = subprocess.call(cmd, shell=True)
    if rc != 0:
        raise ConfigurationError("Configuration of {} failed: {}".format(vnf_mgmt_ip, rc))
def main(argv=sys.argv[1:]):
    """Entry point: parse the NS record YAML and configure the DNS VNF.

    Expects one positional argument: a YAML file describing the network
    service (see the sample document at the top of this file).
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
        parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
        args = parser.parse_args()
        run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
        if not os.path.exists(run_dir):
            os.makedirs(run_dir)
        # Log everything to a timestamped file; mirror to the console at
        # DEBUG or INFO depending on --quiet.
        log_file = "{}/dnsserver_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
        logging.basicConfig(filename=log_file, level=logging.DEBUG)
        logger = logging.getLogger()
        ch = logging.StreamHandler()
        if args.verbose:
            ch.setLevel(logging.DEBUG)
        else:
            ch.setLevel(logging.INFO)
        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    except Exception as e:
        print("Got exception:{}".format(e))
        raise
    try:
        yaml_str = args.yaml_cfg_file.read()
        logger.debug("Input YAML file: %s", yaml_str)
        # NOTE(review): yaml.load without an explicit Loader is deprecated in
        # PyYAML >= 5.1 and unsafe on untrusted input; consider yaml.safe_load.
        yaml_cfg = yaml.load(yaml_str)
        logger.debug("Input YAML cfg: %s", yaml_cfg)
        # Helpers for navigating the parsed NS record structure:
        def find_vnfr(vnfr_dict, name):
            # Return the VNF record whose 'name' field matches `name`.
            try:
                for k, v in vnfr_dict.items():
                    if v['name'] == name:
                        return v
            except KeyError:
                logger.warn("Could not find vnfr for name : %s", name)
        def find_cp_ip(vnfr, cp_name):
            # IP address of the named connection point.
            for cp in vnfr['connection_point']:
                logger.debug("Connection point: %s", format(cp))
                if cp_name in cp['name']:
                    return cp['ip_address']
            raise ValueError("Could not find vnfd %s connection point %s", cp_name)
        # This is temporary. All this data should come from VLR
        def get_ipv4_subnet(vnfr, cp_name):
            # Derive a /24 subnet from the connection point address.
            for cp in vnfr['connection_point']:
                logger.debug("Connection point: %s", format(cp))
                if cp_name in cp['name']:
                    nw = cp['ip_address'].split(".")
                    subnet = nw[0]+"."+nw[1]+"."+nw[2]+".0/24"
                    return subnet
            raise ValueError("Could not find vnfd %s connection point %s", cp_name)
        def find_vnfr_mgmt_ip(vnfr):
            #return vnfr['mgmt_interface']['ip_address']
            return vnfr['mgmt_ip_address']
        def find_vdur_mgmt_ip(vnfr):
            # Management IP of the first VDU record.
            return vnfr['vdur'][0]['vm_management_ip']
        def find_param_value(param_list, input_param):
            # Value of the named input parameter, or None if absent.
            for item in param_list:
                logger.debug("Parameter: %s", format(item))
                if item['name'] == input_param:
                    return item['value']
        dns_vnfr = find_vnfr(yaml_cfg['vnfr'], yaml_cfg['vnfr_name'])
        dns_vnf_mgmt_ip = find_vnfr_mgmt_ip(dns_vnfr)
        dns_vm_mgmt_ip = find_vdur_mgmt_ip(dns_vnfr)
        logger.debug("Sleeping for 1 min while we wait for VNFs to boot up ..")
        time.sleep(60)
        logger.debug("Configuring DNS server VNF..")
        configure_dns(logger, run_dir, dns_vnf_mgmt_ip, dns_vm_mgmt_ip)
    except Exception as e:
        logger.exception(e)
        raise
if __name__ == "__main__":
    main()
| 1.5625 | 2 |
python/sheet_meshing.py | jpanetta/Inflatables | 10 | 12765890 | import scipy
import scipy.sparse.csgraph
import wall_generation, mesh
import mesh_operations
import utils
import triangulation, filters
from mesh_utilities import SurfaceSampler, tubeRemesh
import numpy as np
def meshComponents(m, cutEdges):
    """
    Get the connected components of triangles of a mesh cut along the edges `cutEdges`.

    Parameters
    ----------
    m
        The mesh to split
    cutEdges
        The edges (vertex index pairs) splitting the mesh up into disconnected regions

    Returns
    -------
    ncomponents
        Number of connected components
    components
        The component index for each mesh triangle.
    """
    cutEdgeSet = set([(min(fs), max(fs)) for fs in cutEdges])
    tri_sets = [set(t) for t in m.triangles()]
    numTris = len(tri_sets)
    # Build dual graph, excluding dual edges that cross the fused segments.
    def includeDualEdge(u, v):
        common_vertices = tri_sets[u] & tri_sets[v]
        return (len(common_vertices) == 2) and ((min(common_vertices), max(common_vertices)) not in cutEdgeSet)
    dual_edges = [(u, v) for u in range(numTris)
                         for v in m.trisAdjTri(u)
                         if includeDualEdge(u, v)]
    # BUG FIX: pass an explicit shape so that (a) an empty dual-edge list
    # (fully disconnected mesh) no longer crashes coo_matrix, and (b) trailing
    # triangles without any kept dual edge still count as components (the
    # inferred shape was otherwise truncated to the max index present).
    if len(dual_edges) > 0:
        adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges)),
                                      shape=(numTris, numTris)).tocsc()
    else:
        adj = scipy.sparse.csc_matrix((numTris, numTris))
    return scipy.sparse.csgraph.connected_components(adj)
def wallMeshComponents(sheet, distinctTubeComponents = False):
    """
    Get the connected wall components of a sheet's mesh (assigning the tubes
    "component" -1 by default or components -1, -2, ... if
    `distinctTubeComponents` is True).

    Returns
    -------
    numWallComponents
        Number of connected wall components.
    components
        Per-triangle label: >= 0 for walls, negative for tubes.
    """
    m = sheet.mesh()
    nt = m.numTris()
    # BUG FIX: the `np.bool`/`np.int` aliases were removed in NumPy 1.24+;
    # the builtin types are what those aliases always meant.
    iwt = np.array([sheet.isWallTri(ti) for ti in range(nt)], dtype=bool)
    # Dual edges only connect adjacent triangles of the same region type
    # (wall-wall or tube-tube), so walls and tubes split into separate components.
    dual_edges = [(u, v) for u in range(nt)
                         for v in m.trisAdjTri(u)
                         if iwt[u] == iwt[v]]
    # BUG FIX: pass an explicit shape so an empty edge list or trailing
    # isolated triangles can't produce a truncated adjacency matrix.
    if len(dual_edges) > 0:
        adj = scipy.sparse.coo_matrix((np.ones(len(dual_edges)), np.transpose(dual_edges)),
                                      shape=(nt, nt)).tocsc()
    else:
        adj = scipy.sparse.csc_matrix((nt, nt))
    numComponents, components = scipy.sparse.csgraph.connected_components(adj)

    wallLabels = components[iwt].copy()
    renumber = np.empty(numComponents, dtype=int)
    renumber[:] = -1 # This assigns all non-wall triangles the "component" -1
    uniqueWallLabels = np.unique(wallLabels)
    numWallComponents = len(uniqueWallLabels)
    # Renumber the wall components consecutively 0, 1, ...
    renumber[uniqueWallLabels] = np.arange(numWallComponents, dtype=int)
    if distinctTubeComponents:
        # Give each tube component its own negative label -1, -2, ...
        uniqueTubeLabels = np.unique(components[~iwt])
        renumber[uniqueTubeLabels] = -1 - np.arange(len(uniqueTubeLabels), dtype=int)
    components = renumber[components]
    return numWallComponents, components
def remeshWallRegions(m, fuseMarkers, fuseSegments, pointSetLiesInWall, permitWallInteriorVertices = False, pointsLieInHole = None):
    """
    Take an initial mesh of the sheet and determine the connected triangle components
    that correspond to fused regions. Also remesh these regions so as not to have
    interior wall vertices (if requested).

    Parameters
    ----------
    m
        The initial sheet mesh
    fuseMarkers
        Fused vertices of the original sheet mesh
    fuseSegments
        Fused edges of the original sheet mesh
    pointSetLiesInWall
        Function for testing whether a given point set lies within a wall region
    permitWallInteriorVertices
        Whether to permit Triangle to add vertices inside the wall regions.
        (This should usually be `False`, since these vertices permit the walls to crumple.)
    pointsLieInHole
        If provided, this function is used to test whether a given mesh
        component is actually a hole.

    Returns
    -------
    remeshedSheet
        A `MeshFEM` triangle mesh of the top sheet ready for inflation simulation.
    isWallVtx
        Per-vertex boolean array specifying whether each vertex is part of the wall region
        (interior or boundary).
    isWallBdryVtx
        Per-vertex boolean array specifying whether each vertex is part of the wall boundary.
        If `permitWallInteriorVertices` is `False`, then this is the same as `isWallVtx`.
    """
    ncomponents, components = meshComponents(m, fuseSegments)
    ############################################################################
    # Determine which connected components are walls.
    ############################################################################
    triCenters = m.vertices()[m.triangles()].mean(axis=1)
    numWalls = 0
    # BUG FIX: `np.int`/`np.bool` aliases were removed in NumPy 1.24+; use the
    # builtin types throughout this function.
    wallLabels = -np.ones(m.numTris(), dtype=int)
    for c in range(ncomponents):
        component_tris = np.flatnonzero(components == c)
        centers = triCenters[component_tris]
        # Discard the entire component if it is actually a hole of the flattened input mesh.
        if (pointsLieInHole is not None):
            if (pointsLieInHole(centers)):
                wallLabels[component_tris] = -2 # only labels -1 (tube) and >= 0 (wall) are kept
        # NOTE(review): a component marked as a hole above can still be
        # relabeled as a wall here (no `continue`); confirm this is intended.
        if (pointSetLiesInWall(centers)):
            wallLabels[component_tris] = numWalls
            numWalls = numWalls + 1
    ############################################################################
    # Separately remesh each wall sub-mesh, preserving the non-wall component.
    ############################################################################
    origV = m.vertices()
    origF = m.triangles()
    meshes = [(origV, origF[wallLabels == -1])]
    remeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
    for wall in range(numWalls):
        # Extract the original wall mesh.
        wallMesh = mesh.Mesh(*mesh_operations.submesh(origV, origF, wallLabels == wall))
        # Perform the initial remeshing of the wall's boundary segments.
        wallMesh, wmFuseMarkers, wmFusedEdges = wall_generation.triangulate_channel_walls(*mesh_operations.removeDanglingVertices(wallMesh.vertices()[:, 0:2], wallMesh.boundaryElements()), triArea=float('inf'), flags=remeshFlags)
        # Note: if the wall mesh encloses holes, Triangle will also have triangulated the holes;
        # we must detect these and remove them from the output.
        # We decompose the remeshed wall into connected components (after
        # cutting away the original wall boundary segments) and keep only the one
        # inside the wall region. Exactly one component should remain after this
        # process.
        # (Local renamed from `wallMeshComponents` to avoid shadowing the
        # module-level function of that name.)
        nc, wallCompLabels = meshComponents(wallMesh, wmFusedEdges)
        if (nc != 1):
            wmV = wallMesh.vertices()
            wmF = wallMesh.triangles()
            triCenters = wmV[wmF].mean(axis=1)
            keepTri = np.zeros(wallMesh.numTris(), dtype=bool)
            keptComponents = 0
            for c in range(nc):
                component_tris = np.flatnonzero(wallCompLabels == c)
                if (pointSetLiesInWall(triCenters[component_tris])):
                    keepTri[component_tris] = True
                    keptComponents += 1
            if (keptComponents != 1): raise Exception('Should have kept exactly one component of the remeshed wall')
            # Extract only the kept component.
            wallMesh = mesh.Mesh(*mesh_operations.removeDanglingVertices(wmV, wmF[keepTri]))
        meshes.append(wallMesh)
    mergedV, mergedF = mesh_operations.mergedMesh(meshes)
    remeshedSheet = mesh.Mesh(mergedV, mergedF)
    ############################################################################
    # Determine wall vertices and wall boundary vertices.
    ############################################################################
    # Vertices are matched by exact coordinates (tuples), which is valid here
    # because the merged mesh reuses the wall meshes' vertex positions verbatim.
    wallVertices = set()
    wallBoundaryVertices = set()
    def addVertices(vtxSet, V):
        for v in V: vtxSet.add(tuple(v))
    rsV = remeshedSheet.vertices()
    addVertices(wallBoundaryVertices, rsV[remeshedSheet.boundaryVertices()])
    for wallmesh in meshes[1:]:
        wmV = wallmesh.vertices()
        addVertices(wallBoundaryVertices, wmV[wallmesh.boundaryVertices()])
        addVertices(wallVertices, wmV)
    wallVertices = wallVertices | wallBoundaryVertices
    isWallVtx = np.array([tuple(v) in wallVertices for v in rsV])
    isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in rsV])
    return remeshedSheet, isWallVtx, isWallBdryVtx
# Consider a point set to lie within a hole if over half of its points are within a hole (to a given tolerance)
class HoleTester:
    """Tests whether query points fall inside holes of a sampled mesh.

    A point "lies in a hole" when its distance to the mesh exceeds a tiny
    bounding-box-relative tolerance; a point *set* lies in a hole when at
    least half of its points do.
    """
    def __init__(self, meshVertices, meshSampler):
        self.V = meshVertices
        self.sampler = meshSampler
        # Tolerance scales with the mesh size so the test is scale-invariant.
        self.eps = 1e-12 * utils.bbox_dims(meshVertices).max()

    def dist(self, X):
        """Distance from each point of `X` to the sampled mesh."""
        # (Could be more efficient if SurfaceSampler exposed a direct
        # distance query.)  The sampler works on 3D points; promote 2D
        # queries by appending z = 0.
        if (X.shape[1] == 2):
            X = np.pad(X, [(0, 0), (0, 1)])
        nearest = self.sampler.sample(X, self.V)
        return np.linalg.norm(X - nearest, axis=1)

    def pointWithinHole(self, X):
        """Per-point boolean mask: True where the point lies inside a hole."""
        return self.dist(X) > self.eps

    def __call__(self, X):
        """True when at least half of the points in `X` lie within a hole."""
        numInHole = np.count_nonzero(self.pointWithinHole(X))
        return numInHole >= (X.shape[0] / 2)
# Note: if `targetEdgeSpacing` is set too low relative to `triArea`, `triangle`
# will insert new boundary points that fall in the interior of the flattened
# target surface (in strictly convex regions) when refining the triangulation.
#
# If the parametrization is subsequently used to lift the boundary points to
# 3D, these lifted points will not lie on the target surface's boundary. E.g.,
# they may lift off the ground plane even if all boundary vertices of the
# target surface lie on the ground plane.
def generateSheetMesh(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75):
    """
    Extract the channel walls described by a signed distance function and use
    them to generate a high-quality triangle mesh of the inflatable sheet.

    Parameters
    ----------
    sdfVertices, sdfTris
        Domain mesh over which the wall SDF is defined.
    sdf
        Per-vertex signed distances to the channel walls (negative inside walls).
    triArea
        Maximum triangle area (passed to Triangle).
    permitWallInteriorVertices
        Whether Triangle may add vertices inside the wall regions
        (usually `False`, since interior vertices let the walls crumple).
    targetEdgeSpacing
        Approximate resampling resolution for the extracted SDF contours.
    minContourLen
        Contours shorter than this are discarded.

    Returns
    -------
    remeshedSheet
        `MeshFEM` triangle mesh of the top sheet, ready for inflation simulation.
    isWallVtx
        Per-vertex flags: vertex belongs to a wall region (interior or boundary).
    isWallBdryVtx
        Per-vertex flags: vertex lies on a wall boundary (identical to
        `isWallVtx` when `permitWallInteriorVertices` is `False`).
    """
    contourPts, contourEdges = wall_generation.extract_contours(
            sdfVertices, sdfTris, sdf,
            targetEdgeSpacing=targetEdgeSpacing,
            minContourLen=minContourLen)
    initialMesh, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(
            contourPts[:, 0:2], contourEdges, triArea)

    sdfSampler = SurfaceSampler(sdfVertices, sdfTris)

    # Contour extraction/resampling has limited precision, so sample points of
    # one component may disagree on inside/outside; decide by the mean SDF value.
    def pointsAreInWall(X):
        return np.mean(sdfSampler.sample(X, sdf)) < 0

    holeTest = HoleTester(sdfVertices, sdfSampler)
    return remeshWallRegions(initialMesh, fuseMarkers, fuseSegments, pointsAreInWall,
                             permitWallInteriorVertices, pointsLieInHole=holeTest)
def generateSheetMeshCustomEdges(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False):
    """
    Like `generateSheetMesh`, but triangulates caller-supplied fusing curves
    (`customPts`/`customEdges`) instead of extracting contours from the SDF.
    """
    initialMesh, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(
            customPts[:, 0:2], customEdges, triArea)
    sdfSampler = SurfaceSampler(sdfVertices, sdfTris)

    # Sample points of one component may disagree on inside/outside due to
    # limited contour precision; decide by the mean SDF value.
    def pointsAreInWall(X):
        return np.mean(sdfSampler.sample(X, sdf)) < 0

    holeTest = HoleTester(sdfVertices, sdfSampler)
    return remeshWallRegions(initialMesh, fuseMarkers, fuseSegments, pointsAreInWall,
                             permitWallInteriorVertices, pointsLieInHole=holeTest)
def meshWallsAndTubes(fusing_V, fusing_E, m, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles):
    """
    Create a high quality mesh of the wall and tube regions enclosed by given fusing curves.

    Parameters
    ----------
    fusing_V, fusing_E
        PSLG to be triangulated representing the fusing curves.
    m
        An initial mesh of the sheet region (with any hole triangles removed!) used to obtain the intersection of the wall regions with the sheet boundary.
    isWallTri
        Boolean array holding whether each wall of `m` is a wall triangle
    holePoints, tubePoints, wallPoints
        Lists of points within the hole, tube, and wall regions.
    triArea
        Maximum triangle area for the triangulation
    permitWallInteriorVertices
        Whether wall regions get interior vertices.
    avoidSpuriousFusedTriangles
        Whether to retriangulate the tube mesh to avoid fully-fused triangles.

    Returns
    -------
    remeshedSheet, isWallVtx, isWallBdryVtx
    """
    ############################################################################
    # 1. Create a quality mesh of the air tubes.
    ############################################################################
    mTubes, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(fusing_V[:, 0:2], fusing_E, holePoints=wallPoints + holePoints, triArea=triArea, omitQualityFlag=False, flags="j") # jettison vertices that got eaten by holes...
    #utils.save((mTubes, fuseMarkers, fuseSegments), 'tubes_and_markers.pkl.gz')
    if avoidSpuriousFusedTriangles:
        try:
            mTubes = tubeRemesh(mTubes, fuseMarkers, fuseSegments, minRelEdgeLen=0.3) # retriangulate where necessary to avoid spurious fused triangles in the tubes
        except:
            # Dump the inputs for post-mortem debugging, then re-raise.
            utils.save((mTubes, fuseMarkers, fuseSegments), utils.freshPath('tubeRemeshFailure', suffix='.pkl.gz'))
            raise
        # tubeRemesh may add vertices; pad the marker array to match.
        fuseMarkers += [0 for i in range(mTubes.numVertices() - len(fuseMarkers))]
    #mTubes.save('remeshedTubes.msh')
    # For meshes without finite-thickness wall regions, we are done
    # (and all vertices in fusing_V are wall/wall boundary vertices.)
    if not np.any(isWallTri):
        # BUG FIX: `np.bool` was removed in NumPy 1.24+; use the builtin.
        isWallVtx = np.array(fuseMarkers, dtype=bool)
        return mTubes, isWallVtx, isWallVtx
    ############################################################################
    # 2. Triangulate the wall meshes without inserting any Steiner points.
    ############################################################################
    # We need to triangulate the new collection of boundary segments, which consists of the
    # boundary segments from the new tube mesh along with the original mesh boundary segments
    # that border wall regions.
    boundarySegments = [(mTubes.vertices(), mTubes.boundaryElements())]
    wallBoundaryElements = m.boundaryElements()[isWallTri[m.elementsAdjacentBoundary()]]
    if len(wallBoundaryElements) > 0:
        boundarySegments.append((m.vertices(), wallBoundaryElements))
    newPts, newEdges = mesh_operations.mergedMesh(boundarySegments)
    wallmeshFlags = 'Y' + ('S0' if not permitWallInteriorVertices else '')
    mWall, _, _ = wall_generation.triangulate_channel_walls(newPts[:,0:2], newEdges, holePoints=tubePoints + holePoints, triArea=triArea if permitWallInteriorVertices else float('inf'), omitQualityFlag=False, flags="j" + wallmeshFlags) # jettison vertices that got eaten by holes...
    ############################################################################
    # 3. Merge the tube and wall meshes
    ############################################################################
    mFinal = mesh.Mesh(*mesh_operations.mergedMesh([mTubes, mWall]), embeddingDimension=3)
    ############################################################################
    # 4. Determine wall vertices and wall boundary vertices.
    ############################################################################
    # Vertices are matched by exact coordinates; valid because the merged mesh
    # reuses the constituent meshes' vertex positions verbatim.
    wallVertices = set()
    wallBoundaryVertices = set()
    def addVertices(vtxSet, V):
        for v in V: vtxSet.add(tuple(v))
    finalV = mFinal.vertices()
    addVertices(wallBoundaryVertices, finalV[mFinal.boundaryVertices()])
    wmV = mWall.vertices()
    addVertices(wallBoundaryVertices, wmV[mWall.boundaryVertices()])
    addVertices(wallVertices, wmV)
    # Also include fused vertices marked inside the tube mesh (i.e., those
    # fused by zero-width curves)
    addVertices(wallBoundaryVertices, mTubes.vertices()[np.array(fuseMarkers, dtype=bool)])
    wallVertices = wallVertices | wallBoundaryVertices
    isWallVtx = np.array([tuple(v) in wallVertices for v in finalV])
    isWallBdryVtx = np.array([tuple(v) in wallBoundaryVertices for v in finalV])
    return mFinal, isWallVtx, isWallBdryVtx
def newMeshingAlgorithm(sdfVertices, sdfTris, sdf, customPts, customEdges, triArea, permitWallInteriorVertices = False, avoidSpuriousFusedTriangles = True):
    """
    Segment the design domain into hole/tube/wall regions using a coarse
    triangulation of the fusing curves, then mesh the walls and tubes with
    `meshWallsAndTubes`.

    Returns
    -------
    remeshedSheet, isWallVtx, isWallBdryVtx
    """
    ############################################################################
    # 1. Perform an initial, low quality triangulation used only to segment the
    #    design domain into tube and wall regions.
    ############################################################################
    m, fuseMarkers, fuseSegments = wall_generation.triangulate_channel_walls(customPts[:, 0:2], customEdges, triArea=float('inf'), omitQualityFlag=True, flags="YY")
    ############################################################################
    # 2. Determine the wall components/hole points.
    ############################################################################
    triCenters = m.vertices()[m.triangles()].mean(axis=1)
    sdfSampler = SurfaceSampler(sdfVertices, sdfTris)
    numWalls = 0
    wallPoints = []
    tubePoints = []
    holePoints = []
    ncomponents, components = meshComponents(m, fuseSegments)
    # First detect and remove holes
    pointsLieInHole = HoleTester(sdfVertices, sdfSampler)
    for c in range(ncomponents):
        component_tris = components == c
        centers = triCenters[component_tris]
        p = centers[0, 0:2] # Todo: pick center of largest area triangle?
        if (pointsLieInHole(centers)):
            components[component_tris] = -1 #mark for deletion
            holePoints.append(p)
            continue
    if len(holePoints) > 0:
        print(f'Detected {len(holePoints)} holes')
        # Note: there shouldn't be any dangling vertices since no new vertices
        # are inserted inside the holes.
        m = mesh.Mesh(m.vertices(), m.elements()[components >= 0])
        ncomponents, components = meshComponents(m, fuseSegments)
        triCenters = m.vertices()[m.triangles()].mean(axis=1)
    # BUG FIX: `np.int` was removed in NumPy 1.24+; use the builtin `int`.
    wallLabels = -np.ones(m.numTris(), dtype=int) # assign -1 to tubes
    # Next, pick a point within each air tube
    for c in range(ncomponents):
        component_tris = components == c
        centers = triCenters[component_tris]
        p = centers[0, 0:2] # Todo: pick center of largest area triangle?
        # Note: the inside/outside test for some sample points of a connected component may disagree due to the limited
        # precision at which we extracted the contours (and the contour resampling), so we take a vote.
        triCentersAreInWall = np.mean(sdfSampler.sample(centers, sdf)) < 0
        if (triCentersAreInWall):
            wallPoints.append(p)
            wallLabels[component_tris] = numWalls
            numWalls = numWalls + 1
        else:
            tubePoints.append(p)
    return meshWallsAndTubes(customPts, customEdges, m, wallLabels >= 0, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles)
def generateSheetMeshNewAlgorithm(sdfVertices, sdfTris, sdf, triArea, permitWallInteriorVertices = False, targetEdgeSpacing = 0.5, minContourLen = 0.75, avoidSpuriousFusedTriangles=True):
    """
    Convenience wrapper: extract the fusing-curve contours from the signed
    distance field and hand them to `newMeshingAlgorithm`.
    """
    contourPts, contourEdges = wall_generation.extract_contours(
        sdfVertices, sdfTris, sdf,
        targetEdgeSpacing=targetEdgeSpacing,
        minContourLen=minContourLen)
    return newMeshingAlgorithm(sdfVertices, sdfTris, sdf,
                               contourPts, contourEdges, triArea,
                               permitWallInteriorVertices,
                               avoidSpuriousFusedTriangles)
def remeshSheet(isheet, triArea, permitWallInteriorVertices = False, omitWallsContainingPoints=()):
    """
    Remesh an inflatable sheet design with a high quality triangulation
    (leaving the fusing curves unchanged).
    We can omit certain walls by passing a nonempty point set for the `omitWallsContainingPoints` argument.

    Parameters
    ----------
    isheet : inflatable sheet object exposing `mesh()`
    triArea : target triangle area for the remeshing
    permitWallInteriorVertices : whether Steiner vertices may appear inside walls
    omitWallsContainingPoints : points selecting wall components to convert into tubes
        (default changed from a mutable `[]` to an immutable `()`; behavior is identical
        since the argument is only read, never mutated)

    Returns
    -------
    remeshedSheet, isWallVtx, isWallBdryVtx
    """
    nwmc, wmc = wallMeshComponents(isheet, distinctTubeComponents=True)
    im = isheet.mesh()
    imV = im.vertices()
    imF = im.triangles()
    # Convert walls specified by `omitWallsContainingPoints` into tube regions.
    ssampler = SurfaceSampler(imV, imF)
    omittedWallComponents = []
    if (len(omitWallsContainingPoints) > 0):
        tris, _ = ssampler.closestTriAndBaryCoords(np.array(omitWallsContainingPoints))
        omittedWallComponents = np.unique(wmc[tris])
        if (np.any(omittedWallComponents < 0)): raise Exception("omitWallsContainingPoints contains non-wall points.")
        wmc[tris] = -1 # Reassign omitted walls to the first tube region.
    # Generate tube and wall points (one in each tube/wall component)
    tubePoints = []
    wallPoints = []
    triCenters = imV[:, 0:2][imF].mean(axis=1)
    for c in range(np.min(wmc), nwmc):
        if c in omittedWallComponents: continue
        # Representative point: center of the first triangle of the component.
        p = triCenters[np.where(wmc == c)[0][0]]
        if (c < 0): tubePoints.append(p)
        else      : wallPoints.append(p)
    # Generate hole points inside each internal boundary loop; this
    # requires a low-quality triangulation of the boundary loops.
    sheetBoundary = mesh_operations.removeDanglingVertices(imV, im.boundaryElements())
    mHoleDetect, mFuseMarkers, mFuseSegments = wall_generation.triangulate_channel_walls(sheetBoundary[0][:, 0:2], sheetBoundary[1], triArea=float('inf'), omitQualityFlag=True, flags="YY")
    nholeDetectComponents, holeDetectComponents = meshComponents(mHoleDetect, mFuseSegments)
    holeTest = HoleTester(imV, ssampler)
    # NOTE(review): `holeDetectComponents` indexes triangles of `mHoleDetect`, but
    # `triCenters` belongs to the sheet mesh — confirm the two triangulations are
    # index-aligned for the interior-loop triangles.
    holePoints = np.array([triCenters[np.where(holeDetectComponents == c)[0][0]] for c in range(nholeDetectComponents)])
    holePoints = list(holePoints[holeTest.pointWithinHole(holePoints)])
    # Get all design curves for remeshing. These consist of the union of two disjoint sets of curves:
    #   - Boundaries of the wall regions.
    #   - The intersection of the tube and sheet boundaries.
    wm = mesh.Mesh(*mesh_operations.mergedMesh([(imV, imF[wmc == i]) for i in range(nwmc)])) # Mesh of walls only.
    sheetBoundaryIntersectTubes = mesh_operations.removeDanglingVertices(imV, im.boundaryElements()[wmc[im.elementsAdjacentBoundary()] < 0])
    fusing_V, fusing_E = mesh_operations.mergedMesh([(wm.vertices(), wm.boundaryElements()), sheetBoundaryIntersectTubes])
    isWallTri = (wmc >= 0)
    return meshWallsAndTubes(fusing_V, fusing_E, im, isWallTri, holePoints, tubePoints, wallPoints, triArea, permitWallInteriorVertices, avoidSpuriousFusedTriangles=True)
import triangulation, field_sampler
def forward_design_mesh(V, E, fusedPts, holePts, triArea):
    """
    Create an inflatable sheet mesh from a collection of curves and points indicating
    whether the closed curve containing them should be considered a wall or a hole
    (instead of a tube/pillow).

    :param V: curve vertex positions (only the first two columns are used)
    :param E: curve edge index pairs
    :param fusedPts: points marking components to be treated as fused walls
    :param holePts: points marking components to be treated as holes
    :param triArea: target triangle area passed on to the meshing algorithm
    """
    sdfV, sdfF, pointMarkers, edgeMarkers = triangulation.triangulate(V[:, 0:2], E, holePts=holePts, triArea=1e8, omitQualityFlag=True, outputPointMarkers=True, outputEdgeMarkers=True)
    minit = mesh.Mesh(sdfV[:, 0:2], sdfF)
    # Create a SDF field indicating the wall regions (the data needed by newMeshingAlgorithm)
    nc, c = meshComponents(minit, edgeMarkers)
    sdf = c
    fs = field_sampler.FieldSampler(minit)
    if len(fusedPts) > 0:
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `int` is the equivalent dtype.
        fusedComponents = np.array(np.unique(fs.sample(fusedPts, c)), dtype=int)
        # Bug fix: the previous `sdf[c == fusedComponents] = -1` performed an
        # elementwise comparison that was only correct when exactly one
        # component was fused. `np.isin` marks membership in all of them.
        sdf[np.isin(c, fusedComponents)] = -1
    return newMeshingAlgorithm(sdfV, sdfF, sdf, V, E, triArea=triArea)
| 2.359375 | 2 |
days/day24.py | Kurocon/AdventOfCode2020 | 0 | 12765891 | <gh_stars>0
from days import AOCDay, day
import re
from collections import defaultdict
@day(24)
class Day24(AOCDay):
    """AoC 2020 day 24: flip tiles on a hexagonal grid (axial coordinates)."""
    test_input = """sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew""".split("\n")
    # Two-letter directions listed before "e"/"w" so the alternation never
    # splits "se" into "s" + "e".
    direction_regex = re.compile(r"(se|sw|ne|nw|e|w)")
    tiles = []
    # Maps axial (x, y) -> colour; True is white (the starting colour),
    # False is black.
    grid = defaultdict(lambda: True)
    # Axial-coordinate offsets of the six hexagonal neighbours.
    NEIGHBOURS = [(1, 0), (1, -1), (0, 1), (-1, 0), (0, -1), (-1, 1)]
    def east(self, x, y):
        """Step one tile east."""
        return x+1, y
    def southeast(self, x, y):
        """Step one tile southeast."""
        return x+1, y-1
    def northeast(self, x, y):
        """Step one tile northeast."""
        return x, y+1
    def west(self, x, y):
        """Step one tile west."""
        return x-1, y
    def southwest(self, x, y):
        """Step one tile southwest."""
        return x, y-1
    def northwest(self, x, y):
        """Step one tile northwest."""
        return x-1, y+1
    # Dispatch table from direction token to the step function above.
    DIRECTION_MAP = {
        "e": east,
        "se": southeast,
        "ne": northeast,
        "w": west,
        "sw": southwest,
        "nw": northwest
    }
    def get_tile(self, direction):
        """Walk a list of direction tokens from the origin; return the final (x, y)."""
        position = (0, 0)
        for step in direction:
            # Unbound functions stored in DIRECTION_MAP, so pass self explicitly.
            position = self.DIRECTION_MAP[step](self, *position)
        return position
    def flip(self, direction):
        """Flip the colour of the tile reached by following `direction`."""
        pos = self.get_tile(direction)
        self.grid[pos] = not self.grid[pos]
    def count_black_neighbours(self, x, y):
        """Count black (False) tiles among the six neighbours of (x, y)."""
        black = 0
        for dx, dy in self.NEIGHBOURS:
            try:
                if not self.grid[(x + dx, y + dy)]:
                    black += 1
            except KeyError:
                # Missing tiles (after grid becomes a plain dict) are white.
                pass
        return black
    @property
    def min_x(self):
        """Smallest x coordinate present in the grid."""
        return min(self.grid.keys(), key=lambda x: x[0])[0]
    @property
    def max_x(self):
        """Largest x coordinate present in the grid."""
        return max(self.grid.keys(), key=lambda x: x[0])[0]
    @property
    def min_y(self):
        """Smallest y coordinate present in the grid."""
        return min(self.grid.keys(), key=lambda x: x[1])[1]
    @property
    def max_y(self):
        """Largest y coordinate present in the grid."""
        return max(self.grid.keys(), key=lambda x: x[1])[1]
    def flip_day(self):
        """Apply one day of the cellular-automaton flipping rules."""
        new_grid = {}
        # Only known tiles and their direct neighbours can change colour.
        to_check = set()
        for x, y in self.grid.keys():
            to_check.add((x, y))
            for dx, dy in self.NEIGHBOURS:
                to_check.add((x + dx, y + dy))
        for x, y in to_check:
            try:
                tile = self.grid[(x, y)]
            except KeyError:
                tile = True
            black = self.count_black_neighbours(x, y)
            # Precedence: (not tile and black == 0) or (black > 2) — a black
            # tile with zero black neighbours, or any tile with more than two,
            # ends up white. A white tile with >2 black neighbours stays white,
            # matching the puzzle rule.
            if not tile and black == 0 or black > 2:
                new_grid[(x, y)] = True
            elif tile and black == 2:
                new_grid[(x, y)] = False
            else:
                new_grid[(x, y)] = tile
        self.grid = new_grid
    def common(self, input_data):
        """Parse the input into direction-token lists and reset the grid."""
        # input_data = self.test_input
        self.grid = defaultdict(lambda: True)
        self.tiles = []
        for line in input_data:
            self.tiles.append(self.direction_regex.findall(line))
    def part1(self, input_data):
        """Flip every instructed tile; yield the number of black tiles."""
        for tile in self.tiles:
            self.flip(tile)
        yield sum(not x for x in self.grid.values())
    def part2(self, input_data):
        """After the initial flips, simulate 100 days; yield the black count."""
        # common() is presumably re-run before each part, so flipping again
        # here starts from a fresh grid — TODO confirm AOCDay's contract.
        for tile in self.tiles:
            self.flip(tile)
        # Freeze into a plain dict so lookups no longer create entries.
        self.grid = dict(self.grid)
        self.debug(f"Day 0: {sum(not x for x in self.grid.values())}")
        for i in range(100):
            self.flip_day()
            if i < 10 or (i+1) % 10 == 0:
                self.debug(f"Day {i+1}: {sum(not x for x in self.grid.values())}")
        yield sum(not x for x in self.grid.values())
| 3.03125 | 3 |
mse.py | BrightLamp/PyLearningCodes | 0 | 12765892 | import numpy as np
from sklearn import linear_model
np.random.seed(123)
np.set_printoptions(suppress=True, linewidth=120)
X = np.random.random([10, 5]).astype(np.float)
y = np.random.random(10).astype(np.float)
# sklearn
linear = linear_model.LinearRegression()
linear.fit(X, y)
# Pure Python
X = np.hstack([np.ones([10, 1]), X])
IX = np.linalg.inv(np.matmul(X.T, X))
XIX = np.matmul(X, IX)
w = np.matmul(y, XIX)
print("----- Code Output -----")
print("sklearn coef", linear.coef_)
print("sklearn intercept", linear.intercept_)
print("numpy coef", w[1:])
print("numpy intercept", w[0])
"""
----- Code Output -----
sklearn coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
sklearn intercept 0.767935574124093
numpy coef [ 0.49571807 -0.4013861 0.67121452 -0.4458699 -0.68057386]
numpy intercept 0.7679355741241028
"""
| 3.515625 | 4 |
RareVariants/ResultCollector.py | afif-elghraoui/CorrelatedVariants | 0 | 12765893 | <filename>RareVariants/ResultCollector.py
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Pacific Biosciences nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#################################################################################
# Author: <NAME>, <NAME>
import cProfile, logging, os.path
from multiprocessing import Process
from threading import Thread
from .options import options
class ResultCollector(object):
    """
    Gathers results and writes to a file.

    Drains the shared results queue until every worker has sent its `None`
    sentinel, forwarding each real result to the overridable hooks below.
    """
    def __init__(self, resultsQueue):
        self._resultsQueue = resultsQueue

    def _run(self):
        self.onStart()
        done_workers = 0
        while done_workers < options.numWorkers:
            item = self._resultsQueue.get()
            if item is None:
                # Each worker posts exactly one None when it finishes.
                done_workers += 1
            else:
                self.onResult(item)
        self.onFinish()

    def run(self):
        if not options.doProfiling:
            self._run()
            return
        profilePath = os.path.join(options.temporaryDirectory,
                                   "profile-%s.out" % (self.name))
        cProfile.runctx("self._run()",
                        globals=globals(),
                        locals=locals(),
                        filename=profilePath)

    # ==================================
    # Overridable interface begins here.
    #
    def onStart(self):
        pass

    def onResult(self, result):
        pass

    def onFinish(self):
        pass
class ResultCollectorProcess(ResultCollector, Process):
    """ResultCollector that runs in its own daemon process."""
    def __init__(self, *args):
        # Initialize Process explicitly before the cooperative chain so the
        # process machinery is set up regardless of MRO.
        Process.__init__(self)
        # Daemonize: an abandoned collector must not keep the program alive.
        self.daemon = True
        super(ResultCollectorProcess,self).__init__(*args)
class ResultCollectorThread(ResultCollector, Thread):
    """ResultCollector that runs in a daemon thread."""
    def __init__(self, *args):
        Thread.__init__(self)
        self.daemon = True
        # Threads have no exit code; expose one to mirror the Process API.
        self.exitcode = 0
        super(ResultCollectorThread,self).__init__(*args)
| 0.960938 | 1 |
tests/test_oauth2.py | amehta1/t1-python | 24 | 12765894 | <reponame>amehta1/t1-python<filename>tests/test_oauth2.py
"""Test OAuth2 methods."""
import unittest
from terminalone import T1
import responses
# Static OAuth2 resource-owner credentials fed to T1 in every test below.
mock_credentials = {
    'client_id': 'client_id',
    'client_secret': 'secret',
    'username': 'username',
    'password': '<PASSWORD>'
}
class TestOauth2ResourceOwnerLogin(unittest.TestCase):
    """Tests for OAuth2 Resource Owner Login."""
    @responses.activate
    def setUp(self):
        """Create a T1 session against a mocked MediaMath token endpoint."""
        # Canned access-token response served in place of the real endpoint.
        with open('tests/fixtures/json/access_token.json') as f:
            token = f.read()
        responses.add(responses.POST, 'https://auth.mediamath.com/oauth/token',
                      body=token,
                      adding_headers={
                          'Set-Cookie': 'adama_session=1',
                      },
                      content_type='application/json')
        self.t1 = T1(auth_method='oauth2-resourceowner',
                     **mock_credentials)
    @responses.activate
    def test_minimum_creds_for_oauth2(self):
        """The session built in setUp records the auth method it used."""
        self.assertEqual(self.t1.auth_params['method'], 'oauth2-resourceowner')
| 3.078125 | 3 |
test_lzhw.py | MNoorFawi/lzhw | 4 | 12765895 | import lzhw
from sys import getsizeof
from random import sample, choices
import pandas as pd
def test_weather():
    """Compress a categorical sequence; check it shrinks and round-trips."""
    observations = ["Sunny", "Sunny", "Overcast", "Rain", "Rain", "Rain", "Overcast", "Sunny", "Sunny",
                    "Rain", "Sunny", "Overcast", "Overcast", "Rain", "Rain", "Sunny", "Sunny"]
    default_comp = lzhw.LZHW(observations)
    windowed_comp = lzhw.LZHW(observations, sliding_window = 5)
    assert default_comp.size() < getsizeof(observations)
    assert all(default_comp.decompress() == observations)
    assert all(windowed_comp.decompress() == observations)
def test_num():
    """Integer sequences should round-trip through compression."""
    sequence = choices(sample(range(0, 5), 5), k=20)
    compressed = lzhw.LZHW(sequence)
    assert compressed.size() < getsizeof(sequence)
    assert [int(v) for v in compressed.decompress()] == sequence
def test_read_write():
    """Saving to disk and loading back must preserve the data."""
    observations = ["Sunny", "Sunny", "Overcast", "Rain", "Rain", "Rain", "Overcast", "Sunny", "Sunny",
                    "Rain", "Sunny", "Overcast", "Overcast", "Rain", "Rain", "Sunny", "Sunny"]
    lzhw.LZHW(observations).save_to_file("test.pkl")
    restored = lzhw.decompress_from_file("test.pkl")
    assert all(restored == observations)
def test_comp_df():
    """Column-wise DataFrame compression should round-trip each column."""
    frame = pd.DataFrame({"a": [1, 1, 2, 2, 1, 3, 4, 4],
                          "b": ["A", "A", "B", "B", "A", "C,D", "D C", "D C"]})
    parallel_comp = lzhw.CompressedDF(frame, parallel=True)
    windowed_comp = lzhw.CompressedDF(frame, sliding_window = 10)
    assert all(parallel_comp.compressed[1].decompress() == frame.b)
    assert all(windowed_comp.compressed[0].decompress() == frame.a)
def test_comp_chunks():
    """Chunked CSV compression: chunk count, file round-trip, and content."""
    frame = pd.DataFrame({"a": [1, 1, 2, 2, 1, 3, 4, 4],
                          "b": ["A", "A", "B", "B", "A", "C,D", "D C", "D C"]})
    frame.to_csv("example.csv", index=False)
    chunk_size = 4
    from_csv = lzhw.CompressedFromCSV("example.csv", chunksize=chunk_size)
    expected_chunks = frame.shape[0] / chunk_size
    assert expected_chunks == len(from_csv.all_comp.keys())
    from_csv.save_to_file("comp_ex.txt")
    restored = lzhw.decompress_df_from_file("comp_ex.txt")
    assert len(restored) == expected_chunks
    assert all(restored[0].a == frame.a[:4])
    assert all(from_csv.all_comp[1].compressed[1].decompress() == frame.b[4:8])
| 2.84375 | 3 |
homeassistant/components/switch/netio.py | smilepc/Home-assistant | 0 | 12765896 | """
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_USERNAME, \
CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP, STATE_ON
from homeassistant.helpers import validate_config
from homeassistant.components.switch import SwitchDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
REQUIREMENTS = ['pynetio==0.1.6']
DEFAULT_USERNAME = 'admin'
DEFAULT_PORT = 1234
# REST endpoint the Netio device pushes its telemetry updates to.
URL_API_NETIO_EP = "/api/netio/<host>"
CONF_OUTLETS = "outlets"
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
# State-attribute keys exposed by NetioSwitch.
ATTR_TODAY_MWH = "today_mwh"
ATTR_TOTAL_CONSUMPTION_KWH = "total_energy_kwh"
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
ATTR_CURRENT_POWER_W = "current_power_w"
# Per-host record: the pynetio connection plus the switch entities it feeds.
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
ATTR_START_DATE = 'start_date'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Configure the netio platform."""
    from pynetio import Netio
    if validate_config({"conf": config}, {"conf": [CONF_OUTLETS,
                                                   CONF_HOST]}, _LOGGER):
        # Register the push-telemetry HTTP view once, for the first device.
        if len(DEVICES) == 0:
            hass.wsgi.register_view(NetioApiView)
        dev = Netio(config[CONF_HOST],
                    config.get(CONF_PORT, DEFAULT_PORT),
                    config.get(CONF_USERNAME, DEFAULT_USERNAME),
                    config.get(CONF_PASSWORD, DEFAULT_USERNAME))
        DEVICES[config[CONF_HOST]] = Device(dev, [])
        # Throttle the update for all NetioSwitches of one Netio
        dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
        for key in config[CONF_OUTLETS]:
            switch = NetioSwitch(DEVICES[config[CONF_HOST]].netio, key,
                                 config[CONF_OUTLETS][key])
            DEVICES[config[CONF_HOST]].entities.append(switch)
        add_devices_callback(DEVICES[config[CONF_HOST]].entities)
    # The stop-listener is registered regardless of config validity so any
    # previously opened connections are always closed on shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
    return True
def dispose(event):
    """Close connections to Netio Devices.

    :param event: Home Assistant stop event (unused; required by the bus API).
    """
    # Iterate values directly — the previous `.items()` loop discarded the key.
    for device in DEVICES.values():
        device.netio.stop()
class NetioApiView(HomeAssistantView):
    """WSGI handler receiving telemetry pushed by the Netio device."""
    url = URL_API_NETIO_EP
    name = "api:netio"
    def get(self, request, host):
        """Parse pushed per-outlet state/consumption and refresh entities."""
        data = request.args
        states, consumptions, cumulated_consumptions, start_dates = \
            [], [], [], []
        # The device reports four outlets: output1 .. output4.
        for i in range(1, 5):
            out = 'output%d' % i
            states.append(data.get('%s_state' % out) == STATE_ON)
            consumptions.append(float(data.get('%s_consumption' % out, 0)))
            # Device reports Wh; convert to kWh.
            cumulated_consumptions.append(
                float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
            start_dates.append(data.get('%s_consumptionStart' % out, ""))
        _LOGGER.debug('%s: %s, %s, %s since %s', host, states,
                      consumptions, cumulated_consumptions, start_dates)
        # Cache the fresh values on the shared pynetio object and notify
        # every switch entity for this host.
        ndev = DEVICES[host].netio
        ndev.consumptions = consumptions
        ndev.cumulated_consumptions = cumulated_consumptions
        ndev.states = states
        ndev.start_dates = start_dates
        for dev in DEVICES[host].entities:
            dev.update_ha_state()
        return self.json(True)
class NetioSwitch(SwitchDevice):
    """Provide a netio linked switch."""
    def __init__(self, netio, outlet, name):
        """Store the shared (throttled) pynetio connection and outlet number."""
        self._name = name
        self.outlet = outlet
        self.netio = netio
    @property
    def name(self):
        """Netio device's name."""
        return self._name
    @property
    def available(self):
        """Return True if entity is available."""
        # NOTE(review): availability is inferred from the absence of a
        # 'telnet' attribute on this entity — confirm what sets it.
        return not hasattr(self, 'telnet')
    def turn_on(self):
        """Turn switch on."""
        self._set(True)
    def turn_off(self):
        """Turn switch off."""
        self._set(False)
    def _set(self, value):
        # Build a 4-character outlet mask; presumably 'u' means "leave this
        # outlet unchanged" in the Netio KSHELL protocol — TODO confirm.
        val = list('uuuu')
        val[self.outlet - 1] = "1" if value else "0"
        self.netio.get('port list %s' % ''.join(val))
        # Optimistically update the cached state and push it to HA.
        self.netio.states[self.outlet - 1] = value
        self.update_ha_state()
    @property
    def is_on(self):
        """Return switch's status."""
        return self.netio.states[self.outlet - 1]
    def update(self):
        """Called by HA; delegates to the throttled pynetio update."""
        self.netio.update()
    @property
    def state_attributes(self):
        """Return optional state attributes."""
        return {ATTR_CURRENT_POWER_W: self.current_power_w,
                ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
                ATTR_START_DATE: self.start_date.split('|')[0]}
    @property
    def current_power_w(self):
        """Return actual power."""
        return self.netio.consumptions[self.outlet - 1]
    @property
    def cumulated_consumption_kwh(self):
        """Total energy consumption since start_date."""
        return self.netio.cumulated_consumptions[self.outlet - 1]
    @property
    def start_date(self):
        """Point in time when the energy accumulation started."""
        return self.netio.start_dates[self.outlet - 1]
| 2.296875 | 2 |
split_check.py | joicejoseph3198/Hard-Hat-Detection | 1 | 12765897 | import os
image_dir ="H:/Python Space/Hard_Hat _Detection/images"
label_dir= "H:/Python Space/Hard_Hat _Detection/labels"
print("No. of Training images", len(os.listdir(image_dir + "/train")))
print("No. of Training labels", len(os.listdir(label_dir + "/train")))
print("No. of valid images", len(os.listdir(image_dir + "/val")))
print("No. of valid labels", len(os.listdir(label_dir + "/val")))
| 2.703125 | 3 |
open_spiel/python/algorithms/psro_v2/ars_ray/workers.py | wyz2368/open_spiel3 | 0 | 12765898 | import numpy as np
import ray
import pyspiel
from open_spiel.python.algorithms.psro_v2.ars_ray.shared_noise import *
from open_spiel.python.algorithms.psro_v2.ars_ray.utils import rewards_combinator
from open_spiel.python.algorithms.psro_v2 import rl_policy
from open_spiel.python import rl_environment
import tensorflow.compat.v1 as tf
import random
# Function that loads the game.
# @ray.remote
# def worker(env_name):
# game = pyspiel.load_game_as_turn_based(env_name,
# {"players": pyspiel.GameParameter(
# 2)})
# env = rl_environment.Environment(game)
# return env.name
#
# SB worker
# @ray.remote
# class Worker(object):
# def __init__(self,
# env_name,
# env_seed=2,
# deltas=None,
# slow_oracle_kargs=None,
# fast_oracle_kargs=None
# ):
# pass
#
# def output(self):
# import sys
# return sys.path
@ray.remote
class Worker(object):
    """
    Object class for parallel rollout generation.

    Each Ray actor owns its own OpenSpiel environment, a view of the shared
    ARS noise table, and per-player policy lists it samples opponents from.
    """
    def __init__(self,
                 env_name,
                 env_seed=2,
                 deltas=None,
                 slow_oracle_kargs=None,
                 fast_oracle_kargs=None
                 ):
        # initialize rl environment.
        # Imports are local so they happen inside the Ray worker process.
        from open_spiel.python import rl_environment
        import pyspiel
        self._num_players = 2
        game = pyspiel.load_game_as_turn_based(env_name,
                                               {"players": pyspiel.GameParameter(
                                                   self._num_players)})
        self._env = rl_environment.Environment(game)
        # Each worker gets access to the shared noise table
        # with independent random streams for sampling
        # from the shared noise table.
        self.deltas = SharedNoiseTable(deltas, env_seed + 7)
        # One list of frozen policies per player.
        self._policies = [[] for _ in range(self._num_players)]
        self._slow_oracle_kargs = slow_oracle_kargs
        self._fast_oracle_kargs = fast_oracle_kargs
        # Std-dev of the ARS perturbation noise.
        self._delta_std = self._fast_oracle_kargs['noise']
        self._sess = tf.get_default_session()
        if self._sess is None:
            self._sess = tf.Session()
        if self._slow_oracle_kargs is not None:
            self._slow_oracle_kargs['session'] = self._sess
    def sample_episode(self,
                       unused_time_step,
                       agents,
                       is_evaluation=False,
                       noise=None,
                       chosen_player=None):
        """
        Sample an episode and get the cumulative rewards. Notice that we do not
        update the agents during this sampling.
        :param unused_time_step: placeholder for openspiel.
        :param agents: a list of policies, one per player.
        :param is_evaluation: evaluation flag.
        :param noise: noise to be added to the chosen player's policy.
        :param chosen_player: id of the agent being trained (receives `noise`).
        :return: a list of returns, one per player.
        """
        time_step = self._env.reset()
        cumulative_rewards = 0.0
        while not time_step.last():
            if time_step.is_simultaneous_move():
                action_list = []
                for i, agent in enumerate(agents):
                    if i == chosen_player:
                        # Only the trained player acts under perturbed weights.
                        output = agent.step(time_step,
                                            is_evaluation=is_evaluation,
                                            noise=noise)
                    else:
                        output = agent.step(time_step, is_evaluation=is_evaluation)
                    action_list.append(output.action)
                time_step = self._env.step(action_list)
                cumulative_rewards += np.array(time_step.rewards)
            else:
                player_id = time_step.observations["current_player"]
                agent_output = agents[player_id].step(
                    time_step, is_evaluation=is_evaluation)
                action_list = [agent_output.action]
                time_step = self._env.step(action_list)
                cumulative_rewards += np.array(time_step.rewards)
        # No agents update at this step. This step may not be necessary.
        if not is_evaluation:
            for agent in agents:
                agent.step(time_step)
        return cumulative_rewards
    def do_sample_episode(self,
                          probabilities_of_playing_policies,
                          chosen_player,
                          num_rollouts = 1,
                          is_evaluation = False):
        """
        Generate multiple rollouts using noisy policies.

        Returns a dict with the noise-table indices used and, per player,
        the (evaluation) rewards or [pos, neg] perturbation reward pairs.
        """
        # NOTE(review): `with self._sess:` installs the default session but
        # tf.Session's context manager also *closes* it on exit — confirm
        # repeated calls on the same actor are intended to work.
        with self._sess:
            rollout_rewards = [[] for _ in range(self._num_players)]
            deltas_idx = []
            for _ in range(num_rollouts):
                agents = self.sample_agents(probabilities_of_playing_policies, chosen_player)
                if is_evaluation:
                    # Evaluation: single unperturbed rollout, sentinel index -1.
                    deltas_idx.append(-1)
                    reward = self.sample_episode(None, agents, is_evaluation)
                    for i, rew in enumerate(reward):
                        rollout_rewards[i].append(rew)
                else:
                    # The idx marks the beginning of a sequence of noise with length dim.
                    # Refer to shared_noise.py
                    idx, delta = self.deltas.get_delta(agents[chosen_player].get_weights().size)
                    delta = (self._delta_std * delta).reshape(agents[chosen_player].get_weights().shape)
                    deltas_idx.append(idx)
                    # compute reward used for positive perturbation rollout. List, one reward per player.
                    pos_reward = self.sample_episode(None, agents, is_evaluation, delta, chosen_player)
                    # compute reward used for negative pertubation rollout. List, one reward per player.
                    neg_reward = self.sample_episode(None, agents, is_evaluation, -delta, chosen_player)
                    # a list of lists, one per player. For each player, a list contains the positive
                    # rewards and negative rewards in a format [[pos rew, neg rew],
                    # [pos rew, neg rew]], one row per noise.
                    rollout_rewards = rewards_combinator(rollout_rewards, pos_reward, neg_reward)
        return {'deltas_idx': deltas_idx, 'rollout_rewards': rollout_rewards}
    def freeze_all(self):
        """Freeze every policy held by this worker (all players)."""
        for policies in self._policies:
            for pol in policies:
                pol.freeze()
    # def sync_total_policies(self, extra_policies_weights, policies_types, chosen_player):
    #     with self._sess:
    #         if chosen_player is not None:
    #             self._policies[chosen_player][-1].set_weights(extra_policies_weights[chosen_player][-1])
    #         else:
    #             for player in range(self._num_players):
    #                 for i, policy_type in enumerate(policies_types[player]):
    #                     new_pol = self.best_responder(policy_type, player)
    #                     new_pol.set_weights(extra_policies_weights[player][i])
    #                     self._policies[player].append(new_pol)
    def get_num_policies(self):
        """Number of policies per player (both players hold the same count)."""
        return len(self._policies[0])
    # def best_responder(self, policy_type, player):
    #     if policy_type == "DQN":
    #         agent_class = rl_policy.DQNPolicy
    #         assert self._slow_oracle_kargs is not None
    #         new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
    #     elif policy_type == "PG":
    #         agent_class = rl_policy.PGPolicy
    #         assert self._slow_oracle_kargs is not None
    #         new_pol = agent_class(self._env, player, **self._slow_oracle_kargs)
    #     elif policy_type == "ARS_parallel":
    #         agent_class = rl_policy.ARSPolicy_parallel
    #         new_pol = agent_class(self._env, player, **self._fast_oracle_kargs)
    #     else:
    #         raise ValueError("Agent class not supported in workers")
    #
    #     return new_pol
    def sample_agents(self, probabilities_of_playing_policies, chosen_player):
        """Sample opponents from the meta-strategy; the chosen player always
        plays its newest (currently trained) policy."""
        agents = self.sample_strategy_marginal(self._policies, probabilities_of_playing_policies)
        agents[chosen_player] = self._policies[chosen_player][-1]
        return agents
    def sample_strategy_marginal(self, total_policies, probabilities_of_playing_policies):
        """Samples strategies given marginal probabilities.

        Samples independently per player.
        Args:
          total_policies: A list, each element a list of each player's policies.
          probabilities_of_playing_policies: This is a list, with the k-th element
            also a list specifying the play probabilities of the k-th player's
            policies.
        Returns:
          sampled_policies: A list specifying a single sampled joint strategy.
        """
        num_players = len(total_policies)
        sampled_policies = []
        for k in range(num_players):
            current_policies = total_policies[k]
            current_probabilities = probabilities_of_playing_policies[k]
            sampled_policy_k = self.random_choice(current_policies, current_probabilities)
            sampled_policies.append(sampled_policy_k)
        return sampled_policies
    def random_choice(self, outcomes, probabilities):
        """Samples from discrete probability distribution.

        `numpy.choice` does not seem optimized for repeated calls, this code
        had higher performance.
        Args:
          outcomes: List of categorical outcomes.
          probabilities: Discrete probability distribtuion as list of floats.
        Returns:
          Entry of `outcomes` sampled according to the distribution.
        """
        # Inverse-CDF sampling; dividing by cumsum[-1] normalizes unnormalized
        # probability vectors.
        cumsum = np.cumsum(probabilities)
        return outcomes[np.searchsorted(cumsum / cumsum[-1], random.random())]
    def output(self):
        # Presumably a liveness/debug probe for the Ray actor — TODO confirm.
        return "asdf"
| 2.15625 | 2 |
microbial_deep_learning/butterfree/analysis/classification.py | kellylab/Metagenomic-Disease-Classification-With-Deep-Learning | 7 | 12765899 | import clusters as c
import tensorflow as tf
import pandas as pd
import re
def one_hot(i, n):
    """ Makes a one-hot vector of length n with 1 in position i. """
    vector = [0] * n
    vector[i] = 1
    return vector
datasets = c.datasets
data_type = 'metaphlan_bugs_list'
body_site = 'stool'
df, dataframes, key_sets = c.__load_data(datasets, data_type, body_site)
# Sanitize column names ('|' -> '0', '_' -> '1') so they are valid
# TensorFlow feature-column names.
cols = [re.sub('_','1',re.sub('\|', '0', x)) for x in df.columns]
df.columns = cols
labels = c.get_labels(dataframes, body_site, key_sets, df)
# Build a one-hot encoding per label key.
one_hots = {}
n = len(key_sets)
for i, key in zip(range(n), key_sets):
    one_hots[key] = one_hot(i, n)
lala = [one_hots[label] for label in labels]
lala = pd.Series(lala, index=df.index)
# NOTE(review): both assignments above are immediately overwritten below,
# reducing the task to a binary "contains 'Ascniar'" label — confirm this is
# intended (and whether 'Ascniar' is a typo for a real label substring).
lala = labels.apply(lambda x: "Ascniar" in x).astype(int)
train_fn = tf.estimator.inputs.pandas_input_fn(x=df.iloc[0:4000], y=lala[0:4000], batch_size = 20, shuffle=True)
test_fn = tf.estimator.inputs.pandas_input_fn(x=df.iloc[4000:], y = lala[4000:], batch_size = 20, shuffle=True)
# Drops the last three columns when building the feature spec — presumably
# metadata columns appended by get_labels; TODO confirm.
features = [tf.feature_column.numeric_column(x) for x in df.columns[0:-3]]
nn = tf.estimator.DNNClassifier(
    feature_columns=features,
    hidden_units = [100, 100],
    activation_fn = tf.nn.relu,
    n_classes = n,
    dropout = .5,
)
nn.train(input_fn=train_fn)
nn.evaluate(input_fn=test_fn)
| 2.609375 | 3 |
hybrid.py | peractio/hybrid_ml_model | 1 | 12765900 | # credit card default dataset: https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients
# kaggle link: https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset
import pandas as pd
from fim import fpgrowth#,fim
import numpy as np
#import math
#from itertools import chain, combinations
import itertools
from numpy.random import random
#from scipy import sparse
from bisect import bisect_left
from random import sample
#from scipy.stats.distributions import poisson, gamma, beta, bernoulli, binom
from time import time
#import scipy
#from sklearn.preprocessing import binarize
import operator
#from collections import Counter, defaultdict
from scipy.sparse import csc_matrix
from sklearn.ensemble import RandomForestClassifier#, AdaBoostClassifier
class hyb(object):
    def __init__(self, binary_data,Y,Yb):
        """
        :param binary_data: binarized feature DataFrame (one column per binary feature)
        :param Y: ground-truth labels for the training examples
        :param Yb: the black-box model's predictions for the same examples
        :return: None
        """
        self.df = binary_data
        self.Y = Y
        # no. of training examples
        self.N = float(len(Y))
        self.Yb = Yb
    def set_parameters(self, alpha = 1, beta = 0.1):
        """
        initialise weights in objective function
        :param alpha: scalar weight of interpretability (no. of rules) in objective function
        :param beta: scalar weight of transparency (proportion of rules predicted by interpretable model) in objective function
        :return: None
        """
        # (A stale comment previously claimed "input al and bl are lists";
        # both parameters are scalars.)
        self.alpha = alpha
        self.beta = beta
    def generate_rulespace(self,supp,maxlen,N, need_negcode = False,njobs = 5, method = 'fpgrowth',criteria = 'IG',add_rules = []):
        """
        generates initial candidate rules (positive and negative), then
        screens them via self.screen_rules
        :param supp: minimum support (int, passed through to fpgrowth)
        :param maxlen: maximum rule length (int)
        :param N: forwarded to screen_rules (int)
        :param need_negcode: also mine over negated features (fpgrowth path)
        :param method: 'fpgrowth' or anything else for the random-forest path
        :param add_rules: currently unused — TODO confirm before removing
        """
        print('generating rulespace...')
        if method == 'fpgrowth':
            if need_negcode:
                # Append negated copies of every feature column ("<name>neg").
                df = 1-self.df
                df.columns = [name.strip() + 'neg' for name in self.df.columns]
                df = pd.concat([self.df,df],axis = 1)
            else:
                # NOTE(review): without negcode the itemsets are mined from the
                # *negated* dataframe only — confirm this is intended.
                df = 1 - self.df
            # [0] needed to get first dimension (others empty)
            pindex = np.where(self.Y==1)[0]
            nindex = np.where(self.Y!=1)[0]
            # Transaction list: the feature names active in each row.
            itemMatrix = [[item for item in df.columns if row[item] ==1] for i,row in df.iterrows() ]
            # Mine frequent itemsets separately on positive and negative examples.
            prules= fpgrowth([itemMatrix[i] for i in pindex],supp = supp,zmin = 1,zmax = maxlen)
            prules = [np.sort(x[0]).tolist() for x in prules]
            nrules= fpgrowth([itemMatrix[i] for i in nindex],supp = supp,zmin = 1,zmax = maxlen)
            nrules = [np.sort(x[0]).tolist() for x in nrules]
        else:
            print('Using random forest to generate rules ...')
            prules = []
            for length in range(2,maxlen+1,1):
                n_estimators = 250*length  # min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
                clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
                clf.fit(self.df,self.Y)
                for n in range(n_estimators):
                    prules.extend(extract_rules(clf.estimators_[n],self.df.columns))
            # Deduplicate (order-insensitively) via sorted tuples.
            prules = [list(x) for x in set(tuple(np.sort(x)) for x in prules)]
            nrules = []
            for length in range(2,maxlen+1,1):
                n_estimators = 250*length  # min(5000,int(min(comb(df.shape[1], length, exact=True),10000/maxlen)))
                clf = RandomForestClassifier(n_estimators = n_estimators,max_depth = length)
                # Negative rules come from forests fit on the inverted labels.
                clf.fit(self.df,1-self.Y)
                for n in range(n_estimators):
                    nrules.extend(extract_rules(clf.estimators_[n],self.df.columns))
            nrules = [list(x) for x in set(tuple(np.sort(x)) for x in nrules)]
            # Screening always needs the feature-plus-negated-feature frame.
            df = 1-self.df
            df.columns = [name.strip() + 'neg' for name in self.df.columns]
            df = pd.concat([self.df,df],axis = 1)
        print('unpruned prules (' + str(len(prules)) + '):\n' + str(prules))
        print()
        print('unpruned nrules (' + str(len(nrules)) + '):\n' + str(nrules))
        self.prules, self.pRMatrix, self.psupp, self.pprecision, self.perror = self.screen_rules(prules,df,self.Y,N,supp)
        self.nrules, self.nRMatrix, self.nsupp, self.nprecision, self.nerror = self.screen_rules(nrules,df,1-self.Y,N,supp)
        print('rulespace generated')
        # print '\tTook %0.3fs to generate %d rules' % (self.screen_time, len(self.rules))
def screen_rules(self,rules,df,y,N,supp,criteria = 'precision',njobs = 5,add_rules = []):
"""
screens rules??? how????
helper, used by self.generate_rulespace
"""
print ('screening rules')
start_time = time() #removed time. and changed import statement above
itemInd = {}
# create a dictionary of col name : index -- why??
for i,name in enumerate(df.columns):
itemInd[name] = int(i)
len_rules = [len(rule) for rule in rules]
# chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
# array of indices corresponding to the features in the rules e.g. [r1a r1b r2a r3a]
indices = np.array(list(itertools.chain.from_iterable([[itemInd[x] for x in rule] for rule in rules])))
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
indptr =list(accumulate(len_rules))
indptr.insert(0,0)
indptr = np.array(indptr)
data = np.ones(len(indices))
# standard CSC representation where the row indices for column i are stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored in data[indptr[i]:indptr[i+1]]
# csc_matrix helps expand the compressed representation (data, indices, indptr), which ignores many of the zeros in the expanded matrix
ruleMatrix = csc_matrix((data,indices,indptr),shape = (len(df.columns),len(rules)))
# mat = sparse.csr_matrix.dot(df,ruleMatrix)
mat = np.matrix(df)*ruleMatrix
lenMatrix = np.matrix([len_rules for i in range(df.shape[0])])
Z = (mat ==lenMatrix).astype(int)
Zpos = [Z[i] for i in np.where(y>0)][0]
TP = np.array(np.sum(Zpos,axis=0).tolist()[0])
supp_select = np.where(TP>=supp*sum(y)/100)[0]
# if len(supp_select)<=N:
# rules = [rules[i] for i in supp_select]
# RMatrix = np.array(Z[:,supp_select])
# rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
# supp = np.array(np.sum(Z,axis=0).tolist()[0])[supp_select]
# else:
FP = np.array(np.sum(Z,axis = 0))[0] - TP
# TN = len(y) - np.sum(self.Y) - FP
# FN = np.sum(y) - TP
p1 = TP.astype(float)/(TP+FP)
# p2 = FN.astype(float)/(FN+TN)
# pp = (TP+FP).astype(float)/(TP+FP+TN+FN)
supp_select = np.array([i for i in supp_select if p1[i]>np.mean(y)])
select = np.argsort(p1[supp_select])[::-1][:N].tolist()
ind = list(supp_select[select])
rules = [rules[i] for i in ind]
RMatrix = np.array(Z[:,ind])
rules_len = [len(set([name.split('_')[0] for name in rule])) for rule in rules]
supp = np.array(np.sum(Z,axis=0).tolist()[0])[ind]
return rules, RMatrix, supp, p1[ind], FP[ind]
    def train(self, Niteration = 5000, print_message=True, interpretability = 'size'):
        """
        Search for a good pair of positive/negative rule sets via simulated
        annealing. Called at module level after generate_rulespace.

        At each iteration a neighboring solution is proposed (propose_rs),
        scored by an objective combining misclassification rate, model size
        (or feature count) weighted by self.alpha, and uncovered fraction
        weighted by self.beta; worse solutions are accepted with probability
        exp(-delta/T) under a cooling temperature schedule.

        :param Niteration: number of annealing iterations
        :param print_message: if True, log diagnostics each iteration
        :param interpretability: 'size' penalizes rule count, anything else
            penalizes distinct feature count
        :return: (self.maps, accuracy_min, covered_min) — the trace of
            improving solutions, training accuracy of the best solution, and
            its coverage mask
        """
        print('training hybrid...')
        self.maps = []
        int_flag = int(interpretability =='size')
        T0 = 0.01
        nprules = len(self.prules)
        pnrules = len(self.nrules)
        # Random initial solution: 3 positive and 3 negative rules.
        prs_curr = sample(list(range(nprules)),3)
        nrs_curr = sample(list(range(pnrules)),3)
        obj_curr = 1000000000
        obj_min = obj_curr
        self.maps.append([-1,obj_curr,prs_curr,nrs_curr,[]])
        p = np.sum(self.pRMatrix[:,prs_curr],axis = 1)>0
        n = np.sum(self.nRMatrix[:,nrs_curr],axis = 1)>0
        # Examples claimed by both rule sets.
        overlap_curr = np.multiply(p,n)
        pcovered_curr = p ^ overlap_curr
        ncovered_curr = n ^ overlap_curr
        # NOTE(review): initial 'covered' is xor(p, n), which EXCLUDES the
        # overlap, while propose_rs returns xor(p, n) + overlap — confirm
        # whether the discrepancy is intended.
        covered_curr = np.logical_xor(p,n)
        Yhat_curr,TP,FP,TN,FN = self.compute_obj(pcovered_curr,covered_curr)
        print(Yhat_curr,TP,FP,TN,FN)
        # Distinct feature count across both rule sets (item names are
        # '<feature>_<suffix>', so split on '_').
        nfeatures = len(np.unique([con.split('_')[0] for i in prs_curr for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_curr for con in self.nrules[i]]))
        obj_curr = ( FN + FP)/self.N +self.alpha*(int_flag *(len(prs_curr) + len(nrs_curr))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_curr)/self.N
        self.actions = []
        # NOTE: the loop variable shadows the builtin iter().
        for iter in range(Niteration):
            # Last 25% of iterations: restart from the best solution found.
            if iter >0.75 * Niteration:
                prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_opt[:],nrs_opt[:],pcovered_opt[:],ncovered_opt[:],overlap_opt[:],covered_opt[:], Yhat_opt[:]
            prs_new,nrs_new , pcovered_new,ncovered_new,overlap_new,covered_new= self.propose_rs(prs_curr,nrs_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr, obj_min,print_message)
            self.covered1 = covered_new[:]
            self.Yhat_curr = Yhat_curr
            # if sum(covered_new)<len(self.Y):
            #     # bbmodel.fit(self.df.iloc[~covered_new],self.Y[~covered_new])
            #     bbmodel.fit(self.df,self.Y)
            Yhat_new,TP,FP,TN,FN = self.compute_obj(pcovered_new,covered_new)
            self.Yhat_new = Yhat_new
            nfeatures = len(np.unique([con.split('_')[0] for i in prs_new for con in self.prules[i]])) + len(np.unique([con.split('_')[0] for i in nrs_new for con in self.nrules[i]]))
            obj_new = (FP + FN)/self.N +self.alpha*(int_flag *(len(prs_new) + len(nrs_new))+(1-int_flag)*nfeatures)+ self.beta * sum(~covered_new)/self.N
            # Geometric cooling: T goes from 1 down to T0 over the run.
            T = T0**(iter/Niteration)
            alpha = np.exp(float(-obj_new +obj_curr)/T) # minimize
            # New best-so-far solution (self.maps[-1][1] holds the best objective).
            if obj_new < self.maps[-1][1]:
                prs_opt,nrs_opt,obj_opt,pcovered_opt,ncovered_opt,overlap_opt,covered_opt, Yhat_opt = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
                perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
                accuracy_min = float(TP+TN)/self.N
                explainability_min = sum(covered_new)/self.N
                covered_min = covered_new
                print('\n** max at iter = {} ** \n {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N,nfeatures,perror,nerror,oerror,berror ))
                self.maps.append([iter,obj_new,prs_new,nrs_new])
            if print_message:
                perror, nerror, oerror, berror = self.diagnose(pcovered_new,ncovered_new,overlap_new,covered_new,Yhat_new)
                if print_message:
                    print('\niter = {}, alpha = {}, {}(obj) = {}(error) + {}(nrules) + {}(exp)\n accuracy = {}, explainability = {}, nfeatures = {}\n perror = {}, nerror = {}, oerror = {}, berror = {}\n '.format(iter,round(alpha,2),round(obj_new,3),(FP+FN)/self.N, self.alpha*(len(prs_new) + len(nrs_new)), self.beta*sum(~covered_new)/self.N, (TP+TN+0.0)/self.N,sum(covered_new)/self.N, nfeatures,perror,nerror,oerror,berror ))
                    print('prs = {}, nrs = {}'.format(prs_new, nrs_new))
            # Metropolis acceptance: always accept improvements, sometimes
            # accept worse solutions.
            if random() <= alpha:
                prs_curr,nrs_curr,obj_curr,pcovered_curr,ncovered_curr,overlap_curr,covered_curr, Yhat_curr = prs_new[:],nrs_new[:],obj_new,pcovered_new[:],ncovered_new[:],overlap_new[:],covered_new[:], Yhat_new[:]
        self.prs_min = prs_opt
        self.nrs_min = nrs_opt
        print('training complete')
        return self.maps,accuracy_min,covered_min
def diagnose(self, pcovered, ncovered, overlapped, covered, Yhat):
"""
returns sums of the misclassification errors
helper, used in self.train
what is "~"???? invert/complement function, https://stackoverflow.com/questions/8305199/the-tilde-operator-in-python
integers ~x become (-x) - 1
"""
perror = sum(self.Y[pcovered]!=Yhat[pcovered])
nerror = sum(self.Y[ncovered]!=Yhat[ncovered])
oerror = sum(self.Y[overlapped]!=Yhat[overlapped])
# does it work as expected?
berror = sum(self.Y[~covered]!=Yhat[~covered])
return perror, nerror, oerror, berror
def compute_obj(self,pcovered,covered):
"""
helper, used in self.train
"""
Yhat = np.zeros(int(self.N))
Yhat[pcovered] = 1
Yhat[~covered] = self.Yb[~covered] #self.Y[~covered]#
TP,FP,TN,FN = getConfusion(Yhat,self.Y)
return Yhat,TP,FP,TN,FN
    def propose_rs(self, prs,nrs,pcovered,ncovered,overlapped, covered,Yhat, vt,print_message = False):
        """
        Propose a neighboring solution for the annealing search.

        Helper, used in self.train. Depending on the current state (full
        coverage, error-free, or otherwise) a move is chosen — 'cut' a rule,
        'add' a rule for a sampled misclassified example, or 'expand' with a
        new rule — and applied to the positive (sign 1) or negative (sign 0)
        rule set via self.action.

        :param prs: current positive rule indices
        :param nrs: current negative rule indices
        :param pcovered/ncovered/overlapped/covered: boolean coverage masks
        :param Yhat: current predictions
        :param vt: best objective so far (only used by commented-out logic)
        :param print_message: if True, log which move branch was taken
        :return: (prs, nrs, p, n_only, overlap, covered) for the new solution
        """
        # Misclassified examples among covered / uncovered (black-box) regions.
        incorr = np.where(Yhat[covered]!=self.Y[covered])[0]
        incorrb = np.where(Yhat[~covered]!=self.Y[~covered])[0]
        overlapped_ind = np.where(overlapped)[0]
        p = np.sum(self.pRMatrix[:,prs],axis = 1)
        n = np.sum(self.nRMatrix[:,nrs],axis = 1)
        # ex = -1 means "no specific example drives this move".
        ex = -1
        if sum(covered) ==self.N: # covering all examples.
            if print_message:
                print('===== already covering all examples ===== ')
            # print('0')
            move = ['cut']
            self.actions.append(0)
            # Cut from whichever side still has rules; pick randomly otherwise.
            if len(prs)==0:
                sign = [0]
            elif len(nrs)==0:
                sign = [1]
            else:
                sign = [int(random()<0.5)]
        elif len(incorr) ==0 and (len(incorrb)==0 or len(overlapped) ==self.N) or sum(overlapped) > sum(covered):
            if print_message:
                print(' ===== 1 ===== ')
            self.actions.append(1)
            # print('1')
            move = ['cut']
            sign = [int(random()<0.5)]
        # elif (len(incorr) == 0 and (sum(covered)>0)) or len(incorr)/sum(covered) >= len(incorrb)/sum(~covered):
        #     if print_message:
        #         print(' ===== 2 ===== ')
        #     self.actions.append(2)
        #     ex = sample(list(np.where(~covered)[0]) + list(np.where(overlapped)[0]),1)[0]
        #     if overlapped[ex] or len(prs) + len(nrs) >= (vt + self.beta)/self.alpha:
        #         # print('2')
        #         move = ['cut']
        #         sign = [int(random()<0.5)]
        #     else:
        #         # print('3')
        #         move = ['expand']
        #         sign = [int(random()<0.5)]
        else:
            # if sum(overlapped)/sum(pcovered)>.5 or sum(overlapped)/sum(ncovered)>.5:
            #     if print_message:
            #         print(' ===== 3 ===== ')
            #     # print('4')
            #     move = ['cut']
            #     sign = [int(len(prs)>len(nrs))]
            # else:
            # Otherwise pick one of three move types uniformly at random.
            t = random()
            if t< 1./3: # try to decrease errors
                self.actions.append(3)
                if print_message:
                    print(' ===== decrease error ===== ')
                # Sample a misclassified example and target a move at it.
                ex = sample(list(incorr) + list(incorrb),1)[0]
                if ex in incorr: # incorrectly classified by the interpretable model
                    rs_indicator = (pcovered[ex]).astype(int) # covered by prules
                    if random()<0.5:
                        # print('7')
                        move = ['cut']
                        sign = [rs_indicator]
                    else:
                        # print('8')
                        move = ['cut','add']
                        sign = [rs_indicator,rs_indicator]
                # elif overlapped[ex]:
                #     if random()<0.5 :
                #         # print('5')
                #         move = ['cut']
                #         sign = [1 - self.Y[ex]]
                #     else:
                #         # print('6')
                #         move = ['cut','add']
                #         sign = [1 - self.Y[ex],1 - self.Y[ex]]
                else: # incorrectly classified by the black box model
                    # print('9')
                    move = ['add']
                    sign = [int(self.Y[ex]==1)]
            elif t<2./3: # decrease coverage
                self.actions.append(4)
                if print_message:
                    print(' ===== decrease size ===== ')
                move = ['cut']
                sign = [round(random())]
            else: # increase coverage
                self.actions.append(5)
                if print_message:
                    print(' ===== increase coverage ===== ')
                move = ['expand']
                sign = [round(random())]
                # if random()<0.5:
                #     move.append('add')
                #     sign.append(1-rs_indicator)
                # else:
                #     move.extend(['cut','add'])
                #     sign.extend([1-rs_indicator,1-rs_indicator])
        # Apply each (move, sign) pair: sign 1 edits the positive rule set,
        # sign 0 the negative one.
        for j in range(len(move)):
            if sign[j]==1:
                prs = self.action(move[j],sign[j],ex,prs,Yhat,pcovered)
            else:
                nrs = self.action(move[j],sign[j],ex,nrs,Yhat,ncovered)
        # Recompute coverage masks for the edited rule sets.
        p = np.sum(self.pRMatrix[:,prs],axis = 1)>0
        n = np.sum(self.nRMatrix[:,nrs],axis = 1)>0
        o = np.multiply(p,n)
        return prs, nrs,p,n^o,o, np.logical_xor(p,n) + o
    def action(self,move, rs_indicator, ex, rules,Yhat,covered):
        """
        Apply one move ('cut', 'add' or 'expand') to a rule set.

        Helper, used in self.propose_rs. NOTE: ``rules`` is mutated in place
        (remove/append) and also returned.

        :param move: 'cut' removes a rule; 'add' adds a rule targeting example
            ``ex``; anything else expands with a new rule chosen globally
        :param rs_indicator: 1 to edit the positive rule set, 0 the negative
        :param ex: index of the example driving the move, or -1 for none
        :param rules: current rule indices for the chosen side
        :param Yhat: current predictions
        :param covered: coverage mask for the chosen side
        :return: the updated ``rules`` list
        """
        # Pick the rule matrix / support stats for the requested side.
        if rs_indicator==1:
            RMatrix = self.pRMatrix
            # error = self.perror
            supp = self.psupp
        else:
            RMatrix = self.nRMatrix
            # error = self.nerror
            supp = self.nsupp
        # Negative rules predict the complement class.
        Y = self.Y if rs_indicator else 1- self.Y
        if move=='cut' and len(rules)>0:
            # print('======= cut =======')
            """ cut """
            # 25% of the time: cut a rule covering example ex (if any).
            if random()<0.25 and ex >=0:
                candidate = list(set(np.where(RMatrix[ex,:]==1)[0]).intersection(rules))
                if len(candidate)==0:
                    candidate = rules
                cut_rule = sample(candidate,1)[0]
            else:
                # Otherwise cut with probability proportional to the precision
                # of the rule set with each rule left out (softmax sampling).
                p = []
                all_sum = np.sum(RMatrix[:,rules],axis = 1)
                for index,rule in enumerate(rules):
                    Yhat= ((all_sum - np.array(RMatrix[:,rule]))>0).astype(int)
                    TP,FP,TN,FN = getConfusion(Yhat,Y)
                    p.append(TP.astype(float)/(TP+FP+1))
                    # p.append(log_betabin(TP,TP+FP,self.alpha_1,self.beta_1) + log_betabin(FN,FN+TN,self.alpha_2,self.beta_2))
                p = [x - min(p) for x in p]
                p = np.exp(p)
                p = np.insert(p,0,0)
                p = np.array(list(accumulate(p)))
                if p[-1]==0:
                    cut_rule = sample(rules,1)[0]
                else:
                    # Inverse-CDF sampling over the cumulative weights.
                    p = p/p[-1]
                    index = find_lt(p,random())
                    cut_rule = rules[index]
            rules.remove(cut_rule)
        elif move == 'add' and ex>=0:
            # print('======= add =======')
            """ add """
            score_max = -self.N *10000000
            # Candidate rules: those that (don't) cover ex, depending on its label.
            if self.Y[ex]*rs_indicator + (1 - self.Y[ex])*(1 - rs_indicator)==1:
                # select = list(np.where(RMatrix[ex] & (error +self.alpha*self.N < self.beta * supp))[0]) # fix
                select = list(np.where(RMatrix[ex])[0])
            else:
                # select = list(np.where( ~RMatrix[ex]& (error +self.alpha*self.N < self.beta * supp))[0])
                select = list(np.where( ~RMatrix[ex])[0])
            self.select = select
            if len(select)>0:
                if random()<0.25:
                    add_rule = sample(select,1)[0]
                else:
                    # cover = np.sum(RMatrix[(~covered)&(~covered2), select],axis = 0)
                    # =============== Use precision as a criteria ===============
                    # Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
                    # mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
                    # TP = np.sum(mat,axis = 1)
                    # FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
                    # TN = np.sum(Y[Yhat_neg_index]==0)-FP
                    # FN = sum(Y[Yhat_neg_index]) - TP
                    # p = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select]
                    # add_rule = select[sample(list(np.where(p==max(p))[0]),1)[0]]
                    # =============== Use objective function as a criteria ===============
                    # Greedy: pick the candidate with the best objective score.
                    # NOTE(review): 'score > score_max' keeps the HIGHEST
                    # FP+FN-based score — confirm the sign is intended.
                    for ind in select:
                        z = np.logical_or(RMatrix[:,ind],Yhat)
                        TP,FP,TN,FN = getConfusion(z,self.Y)
                        score = FP+FN -self.beta * sum(RMatrix[~covered ,ind])
                        if score > score_max:
                            score_max = score
                            add_rule = ind
                if add_rule not in rules:
                    rules.append(add_rule)
        else: # expand
            # print(['======= expand =======', len(rules)])
            # candidates = np.where(error < self.beta * supp-self.alpha*self.N)[0] # fix
            candidates = [x for x in range(RMatrix.shape[1])]
            # Both branches are identical; kept as in the original.
            if rs_indicator:
                select = list(set(candidates).difference(rules))
            else:
                select = list(set(candidates).difference(rules))
            # self.error = error
            self.supp = supp
            self.select = select
            self.candidates = candidates
            self.rules = rules
            # NOTE(review): if every rule is already selected, ``select`` is
            # empty and sample() below raises — presumably never hit in practice.
            if random()<0.25:
                add_rule = sample(select, 1)[0]
            else:
                # Yhat_neg_index = np.where(np.sum(RMatrix[:,rules],axis = 1)<1)[0]
                # Score candidates only on currently uncovered examples.
                Yhat_neg_index = np.where(~covered)[0]
                mat = np.multiply(RMatrix[Yhat_neg_index.reshape(-1,1),select].transpose(),Y[Yhat_neg_index])
                # TP = np.array(np.sum(mat,axis = 0).tolist()[0])
                TP = np.sum(mat,axis = 1)
                FP = np.array(np.sum(RMatrix[Yhat_neg_index.reshape(-1,1),select],axis = 0) - TP)
                TN = np.sum(Y[Yhat_neg_index]==0)-FP
                FN = sum(Y[Yhat_neg_index]) - TP
                score = (FP + FN)+ self.beta * (TN + FN)
                # score = (TP.astype(float)/(TP+FP+1)) + self.alpha * supp[select] # using precision as the criteria
                add_rule = select[sample(list(np.where(score==min(score))[0]),1)[0]]
            if add_rule not in rules:
                rules.append(add_rule)
        return rules
def print_rules(self, rules_max):
"""
unused
"""
for rule_index in rules_max:
print(self.rules[rule_index])
def predict_text(self,df,Y,Yb):
"""
unused
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = np.array((np.sum(df[:,list(rule)],axis=1)==len(rule)).flatten().tolist()[0]).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = Yb
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def predict(self, df, Y,Yb ):
"""
unused
"""
prules = [self.prules[i] for i in self.prs_min]
nrules = [self.nrules[i] for i in self.nrs_min]
# if isinstance(self.df, scipy.sparse.csc.csc_matrix)==False:
dfn = 1-df #df has negative associations
dfn.columns = [name.strip() + 'neg' for name in df.columns]
df_test = pd.concat([df,dfn],axis = 1)
if len(prules):
p = [[] for rule in prules]
for i,rule in enumerate(prules):
p[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
p = (np.sum(p,axis=0)>0).astype(int)
else:
p = np.zeros(len(Y))
if len(nrules):
n = [[] for rule in nrules]
for i,rule in enumerate(nrules):
n[i] = (np.sum(df_test[list(rule)],axis=1)==len(rule)).astype(int)
n = (np.sum(n,axis=0)>0).astype(int)
else:
n = np.zeros(len(Y))
pind = list(np.where(p)[0])
nind = list(np.where(n)[0])
covered = [x for x in range(len(Y)) if x in pind or x in nind]
Yhat = np.array([i for i in Yb])
Yhat[nind] = 0
Yhat[pind] = 1
return Yhat,covered,Yb
def accumulate(iterable, func=operator.add):
    """Yield running totals of *iterable* combined with *func*.

    helper, used in hyb.action, hyb.screen_rules

    accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120

    Yields nothing for an empty iterable, mirroring itertools.accumulate.
    BUG FIX: the original called ``next(it)`` unguarded; on an empty
    iterable the resulting StopIteration escapes the generator and is
    converted to RuntimeError under PEP 479 (Python 3.7+).
    """
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = func(total, element)
        yield total
def find_lt(a, x):
    """Return the index of the rightmost element of sorted *a* strictly
    less than *x*; returns 0 when no such element exists.

    helper, used in hyb.action (inverse-CDF sampling over cumulative
    weights, where index 0 is a safe fallback).
    """
    pos = bisect_left(a, x)
    return int(pos - 1) if pos else 0
def getConfusion(Yhat,Y):
    """Compute binary confusion-matrix counts from 0/1 vectors.

    helper, used in hyb.compute_obj, hyb.action

    :param Yhat: array-like of 0/1 predictions
    :param Y: array-like of 0/1 ground-truth labels
    :return: (TP, FP, TN, FN)
    :raises ValueError: if the two vectors differ in length
        (BUG FIX: the original raised NameError, which signals a missing
        identifier, not a bad argument; no caller catches it)
    """
    if len(Yhat)!=len(Y):
        raise ValueError('Yhat has different length')
    # Dot product of 0/1 vectors counts positions where both are 1.
    TP = np.dot(np.array(Y),np.array(Yhat))
    FP = np.sum(Yhat) - TP
    TN = len(Y) - np.sum(Y)-FP
    FN = len(Yhat) - np.sum(Yhat) - TN
    return TP,FP,TN,FN
def extract_rules(tree, feature_names):
    """
    Extract one rule per leaf of a fitted sklearn decision tree.

    helper, used in hyb.generate_rulespace when using random forest to
    generate the rule space. Each rule is the list of feature names on the
    root-to-leaf path; a feature reached through a LEFT child (the
    '<= threshold' branch) gets a 'neg' suffix — presumably because features
    are binarized 0/1, so the left branch means the feature is 0 (TODO
    confirm against the binarization scheme).

    :param tree: fitted sklearn estimator exposing ``tree_``
    :param feature_names: column names indexed by ``tree_.feature``
    :return: list of rules, each a list of (possibly 'neg'-suffixed) names
    """
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    features = [feature_names[i] for i in tree.tree_.feature]
    # get ids of leaf nodes (children_left == -1 marks a leaf in sklearn)
    idx = np.argwhere(left == -1)[:,0]
    def recurse(left, right, child, lineage=None):
        # Walk upward from `child` to the root, collecting feature names.
        if lineage is None:
            lineage = []
        if child in left:
            parent = np.where(left == child)[0].item()
            suffix = 'neg'
        else:
            parent = np.where(right == child)[0].item()
            suffix = ''
        # lineage.append((parent, split, threshold[parent], features[parent]))
        lineage.append((features[parent].strip()+suffix))
        if parent == 0:
            # Reached the root: return the path in root-to-leaf order.
            lineage.reverse()
            return lineage
        else:
            return recurse(left, right, parent, lineage)
    rules = []
    for child in idx:
        rule = []
        for node in recurse(left, right, child):
            rule.append(node)
        rules.append(rule)
    return rules
def binary_code(df, collist, Nlevel, length):
    """
    Binarize continuous columns into quantile-threshold indicator columns.

    For each column in *collist*, adds ``Nlevel - 1`` new columns named
    ``<col>_geq_<q>q`` holding 1.0 where the value is at least the q-th
    Nlevel-quantile, then drops the original columns. Modifies *df* in place.

    :param df: dataframe to be modified in place
    :param collist: names of continuous columns to binarize
    :param Nlevel: number of quantile levels (creates Nlevel - 1 columns each)
    :param length: expected row count, used as a row-loss sanity check
    :return: None
    :raises Exception: if more than 500 rows appear to have been lost
    """
    for column in collist:
        for level in range(1, Nlevel):
            cutoff = df[column].quantile(float(level) / Nlevel)
            df[column + '_geq_' + str(int(level)) + 'q'] = (df[column] >= cutoff).astype(float)
        print(column)
        # Sanity check: processing must not silently drop many rows.
        if length - len(df) > 500:
            raise Exception('exit')
        length = len(df)
        print('length: ' + str(length))
    df.drop(collist, axis=1, inplace=True)
# =============================================================================
#
# =============================================================================
import os
# =============================================================================
# preprocessing
# =============================================================================
# Load the UCI "default of credit card clients" dataset (header row is the
# second row of the sheet).
df = pd.read_excel('default of credit card clients.xls', sheet_name='Data', header=1)
length = len(df)
print('length: ' + str(length))
# drop duplicates (compare every column except the ID)
df.drop_duplicates(subset=[x for x in df.columns if x != 'ID'], inplace=True)
print('drop_duplicates')
# Sanity check: deduplication must not remove too many rows.
if length - len(df) > 500:
    raise Exception('exit')
length = len(df)
print('length: ' + str(length))
# drop weird values: anything outside the documented category codes is
# first recoded to 0 and then the row is dropped.
dict_correct_vals = {'EDUCATION': [1, 2, 3, 4],
                     'MARRIAGE': [1, 2, 3]}
# Payment-history columns are PAY_0, PAY_2 .. PAY_6 (there is no PAY_1).
cols_payment_hist = ['PAY_'+str(x) for x in range(7) if x!= 1]
# removed this part because it somehow drops too many rows (half the samples have 0 in PAY_0, and so is unspecified in the paper)
# =============================================================================
# for col in cols_payment_hist:
#     dict_correct_vals[col] = [x for x in range(-1,10,1) if x!=0]
# =============================================================================
for col in dict_correct_vals.keys():
    df[col] = df[col].apply(lambda x: x if x in dict_correct_vals[col] else 0)
    df = df[df[col] != 0]
    print(col)
    if length - len(df) > 500:
        raise Exception('exit')
    length = len(df)
    print('length: ' + str(length))
cols_bill_amt = ['BILL_AMT' + str(x) for x in range(1,7)]
cols_past_payment = ['PAY_AMT'+str(x) for x in range(1,7)]
# Binarize every continuous column into quartile indicators.
binary_code(df, ['LIMIT_BAL', 'EDUCATION', 'AGE'] + cols_payment_hist + cols_bill_amt + cols_past_payment, 4, length=length)
# One-hot encode the categorical MARRIAGE column.
df = pd.get_dummies(df, columns=['MARRIAGE'])
# Recode SEX to 0/1 (raw data uses 1 = male, 2 = female; map 2 -> 0).
# BUG FIX: the original lambda was ``0 if 2 else x`` — the condition is the
# constant 2, which is always truthy, so EVERY row was mapped to 0.
df['SEX'] = df['SEX'].apply(lambda x: 0 if x == 2 else x)
# =============================================================================
# black box models
# =============================================================================
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import f1_score, roc_auc_score, average_precision_score, precision_recall_curve
import matplotlib.pyplot as plt
# Features / target split; the target column name comes from the UCI sheet.
X, y = df.drop(labels=['ID', 'default payment next month'], axis='columns'), df.loc[:,'default payment next month']
# Stratified 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify=y)
from time import time
from scipy.stats import randint as sp_randint
# =============================================================================
# # build a classifier
# clf = RandomForestClassifier(n_estimators=20)
#
#
# # Utility function to report best scores
# def report(results, n_top=3):
#     for i in range(1, n_top + 1):
#         candidates = np.flatnonzero(results['rank_test_score'] == i)
#         for candidate in candidates:
#             print("Model with rank: {0}".format(i))
#             print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
#                   results['mean_test_score'][candidate],
#                   results['std_test_score'][candidate]))
#             print("Parameters: {0}".format(results['params'][candidate]))
#             print("")
#
#
# # specify parameters and distributions to sample from
# param_dist = {"n_estimators":sp_randint(10, 1000), # added this argument, removed the n_estimators argument above
#               "max_depth": [3, None],
#               "max_features": sp_randint(1, 11),
#               "min_samples_split": sp_randint(2, 11),
#               "bootstrap": [True, False],
#               "criterion": ["gini", "entropy"]}
#
# # run randomized search
# n_iter_search = 20
# random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
#                                    n_iter=n_iter_search, cv=10, iid=False)
#
# start = time()
# random_search.fit(X_train, y_train)
# print("RandomizedSearchCV took %.2f seconds for %d candidates"
#       " parameter settings." % ((time() - start), n_iter_search))
# report(random_search.cv_results_)
# =============================================================================
# Black-box model: random forest with hyperparameters found by the
# (commented-out) randomized search above.
clf = RandomForestClassifier(n_estimators=20, random_state=0, **{'bootstrap': True, 'criterion': 'entropy', 'max_depth': None, 'max_features': 9, 'min_samples_split': 10})
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print('rf accuracy: ' + str(acc))
yb = clf.predict(X_test)
# Hybrid interpretable model wrapping the black box.
# NOTE(review): yb holds TEST-set predictions while hyb is constructed on the
# TRAIN set — confirm the intended pairing.
model = hyb(X_train, y_train, yb)
# NOTE: the first set_parameters call is immediately overridden by the second.
model.set_parameters(alpha=1, beta=0.1)
model.set_parameters(alpha=0.01, beta=0.95) # added after running
model.generate_rulespace(supp=30, maxlen=10, N=model.N, method='rf')
maps,accuracy_min,covered_min = model.train()
Yhat,covered,Yb = model.predict(X_test, y_test, yb)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_true=y_test, y_pred=Yhat)
# Fraction of test examples handled by interpretable rules (vs. black box).
coverage = len(covered)/len(Yhat)
print('accuracy: ' + str(accuracy))
print('coverage: ' + str(coverage))
| 2.640625 | 3 |
leetcode-algorithms/146. LRU Cache/lrucache.py | rongpenl/algorithms-and-oj | 0 | 12765901 | <reponame>rongpenl/algorithms-and-oj
class Node:
    """Doubly-linked-list node holding one cache entry (key/value pair)."""

    def __init__(self, k, v):
        self.key = k
        self.val = v
        # Neighbor links; populated when the node is spliced into the list.
        self.pre = None
        self.nxt = None


class LRUCache:
    """Least-recently-used cache with O(1) get/put.

    ``self.store`` maps key -> Node for O(1) lookup; the doubly linked list
    orders nodes from most recently used (right after ``head``) to least
    recently used (right before ``tail``). Sentinel head/tail nodes remove
    edge cases when splicing at either end.
    """

    def __init__(self, capacity: int):
        self.store = {}       # key -> Node
        self.curCap = 0       # number of entries currently stored
        self.cap = capacity   # maximum number of entries
        self.tail = Node(0, 0)
        self.head = Node(0, 0)
        self.head.nxt = self.tail
        self.tail.pre = self.head

    def get(self, key: int) -> int:
        """Return the value for *key*, marking it most recently used; -1 if absent."""
        if key not in self.store:
            return -1
        val = self.store[key].val
        # Move the entry to the front (most recently used position).
        self._remove(self.store[key])
        self._add(Node(key, val))
        return val

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*; evict the least recently used entry if full."""
        # Remove any existing entry first so the insert below refreshes it.
        if key in self.store:
            self._remove(self.store[key])
        if self.curCap == self.cap:
            # Evict the LRU entry (the node just before the tail sentinel).
            self._remove(self.tail.pre)
        self._add(Node(key, value))

    # IDIOM FIX: the parameter below was named ``Node`` in the original,
    # shadowing the Node class; renamed to ``node``.
    def _remove(self, node):
        """Unlink *node* from the list and drop it from the store."""
        p, n = node.pre, node.nxt
        p.nxt, n.pre = n, p
        assert(node.key in self.store)
        del self.store[node.key]
        self.curCap -= 1

    def _add(self, node):
        """Link *node* right after the head sentinel and record it in the store."""
        tmp = self.head.nxt
        self.head.nxt = node
        node.nxt = tmp
        tmp.pre = node
        node.pre = self.head
        assert(node.key not in self.store)
        self.store[node.key] = node
        self.curCap += 1
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| 3.640625 | 4 |
AI/home/views.py | Phong940253/math-word-problem | 1 | 12765902 | <filename>AI/home/views.py<gh_stars>1-10
from django.shortcuts import render
from django.http import HttpResponse
from .EngineCKB import Engine
def home(request):
    """Render the landing/search page."""
    template_name = 'page/page.html'
    return render(request, template_name)
def thread(request):
    """Render the thread page."""
    template_name = 'page/thread.html'
    return render(request, template_name)
def about(request):
    """Render the About Us page."""
    template_name = 'page/aboutus.html'
    return render(request, template_name)
def result(request):
    """Run the math-word-problem engine on the submitted query and show the result.

    Expects a POST with a ``search`` field. ROBUSTNESS FIX: the original
    returned ``None`` for non-POST requests (Django then raises a 500) and
    crashed with a MultiValueDictKeyError when ``search`` was absent; both
    cases now fall back to re-rendering the search page.
    """
    if request.method == 'POST':
        query = request.POST.get("search", "")
        engine = Engine(query)
        print(engine.res)
        return render(request, 'page/result.html', {'engine': engine.res})
    # Fallback for GET (or other) requests: show the search page again.
    return render(request, 'page/page.html')
| 2.21875 | 2 |
tests/sett/test_strategy_flow_unitrenbtc.py | mushroomsforest/badger-system | 0 | 12765903 | <reponame>mushroomsforest/badger-system
from helpers.time_utils import days, hours
import brownie
import pytest
from brownie import *
from helpers.constants import *
from helpers.sett.SnapshotManager import SnapshotManager
from tests.conftest import badger_single_sett, settTestConfig
from tests.helpers import distribute_from_whales, getTokenMetadata
from tests.test_recorder import EventRecord, TestRecorder
from rich.console import Console
console = Console()
# @pytest.mark.skip()
@pytest.mark.parametrize(
    "settConfig", settTestConfig,
)
def test_single_user_harvest_flow(settConfig):
    """End-to-end integration test of a single user's deposit -> earn ->
    harvest -> withdraw flow on the Unit/renBTC strategy, run against a
    forked chain for each configured sett."""
    badger = badger_single_sett(settConfig)
    controller = badger.getController(settConfig["id"])
    sett = badger.getSett(settConfig["id"])
    strategy = badger.getStrategy(settConfig["id"])
    want = badger.getStrategyWant(settConfig["id"])
    settKeeper = accounts.at(sett.keeper(), force=True)
    strategyKeeper = accounts.at(strategy.keeper(), force=True)
    snap = SnapshotManager(badger, settConfig["id"])
    deployer = badger.deployer
    governance = strategy.governance()
    # NOTE(review): `tendable` is fetched but never used in this test.
    tendable = strategy.isTendable()
    # Fund the deployer with want tokens from whale accounts.
    distribute_from_whales(deployer);
    startingBalance = want.balanceOf(deployer)
    depositAmount = 50 * 1e8 # renBTC decimal is 8
    assert startingBalance >= depositAmount
    assert startingBalance >= 0
    # Deposit
    want.approve(sett, MaxUint256, {"from": deployer})
    snap.settDeposit(depositAmount, {"from": deployer})
    assert want.balanceOf(sett) > 0
    print("want.balanceOf(sett)", want.balanceOf(sett))
    # Earn
    snap.settEarn({"from": settKeeper})
    # Harvest (advance chain time so rewards accrue)
    chain.sleep(hours(0.1))
    chain.mine()
    snap.settHarvest({"from": strategyKeeper})
    # Withdraw half
    snap.settWithdraw(depositAmount // 2, {"from": deployer})
    # KeepMinRatio to maintain collateralization safe enough from liquidation
    currentRatio = strategy.currentRatio()
    safeRatio = currentRatio + 20
    strategy.setMinRatio(safeRatio, {"from": governance})
    strategy.keepMinRatio({"from": governance})
    assert strategy.currentRatio() > safeRatio
    # sugar-daddy usdp discrepancy due to accrued interest in Unit Protocol:
    # top up the strategy with USDP so its debt can be fully repaid below.
    debtTotal = strategy.getDebtBalance()
    curveGauge = interface.ICurveGauge("0x055be5DDB7A925BfEF3417FC157f53CA77cA7222")
    usdp3crvInGauge = curveGauge.balanceOf(strategy)
    curvePool = interface.ICurveFi("0x42d7025938bEc20B69cBae5A77421082407f053A")
    usdpOfPool = curvePool.calc_withdraw_one_coin(usdp3crvInGauge,0)
    sugar = (debtTotal - usdpOfPool) * 2
    if(sugar > 0):
        usdpToken = interface.IERC20("0x1456688345527bE1f37E9e627DA0837D6f08C925")
        usdpToken.transfer(strategy, sugar, {'from':deployer})
        print("sugar debt=", sugar)
    # Harvest again
    chain.sleep(hours(0.1))
    chain.mine()
    snap.settHarvest({"from": strategyKeeper})
    # Withdraw all: the user's redeemable want must not exceed what the
    # sett holds according to price-per-full-share accounting.
    wantInSettBalance = sett.getPricePerFullShare() * sett.totalSupply() / 1e18
    print("wantInSett=", wantInSettBalance)
    print("wantInStrategy=", strategy.balanceOfPool())
    print("pricePerFullShare=", sett.getPricePerFullShare())
    wantToWithdraw = sett.balanceOf(deployer) * sett.getPricePerFullShare() / 1e18
    print("wantToWithdraw=", wantToWithdraw)
    assert wantToWithdraw <= wantInSettBalance
    renbtcToken = interface.IERC20("0xEB4C2781e4ebA804CE9a9803C67d0893436bB27D")
    controller.withdrawAll(renbtcToken, {"from": deployer})
    snap.settWithdrawAll({"from": deployer})
    assert True
| 2.21875 | 2 |
spillway/mixins.py | timgates42/django-spillway | 62 | 12765904 | <filename>spillway/mixins.py<gh_stars>10-100
from rest_framework import exceptions
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.settings import api_settings
class ModelSerializerMixin(object):
    """Provides generic model serializer classes to views.

    Views set ``model_serializer_class`` to a serializer base class; when a
    view declares no explicit ``serializer_class``, a default serializer
    bound to the view's queryset model is generated on the fly.
    """
    # Base class for the generated default serializer; concrete views or
    # sibling mixins are expected to override this.
    model_serializer_class = None

    def get_serializer_class(self):
        """Return the view's serializer class, generating a default if unset."""
        explicit = self.serializer_class
        if explicit:
            return explicit

        # Synthesize a serializer exposing every field of the queryset model.
        class DefaultSerializer(self.model_serializer_class):
            class Meta:
                model = self.queryset.model
                fields = '__all__'

        return DefaultSerializer
class ResponseExceptionMixin(object):
    """Handle response exceptions by negotiating among default renderers.

    The default exception handler passes error message dicts to the renderer
    which should be avoided with GDAL and Mapnik renderers, e.g. we wouldn't
    want a 404 returned as image/png.
    """
    def handle_exception(self, exc):
        """Build the error response, then swap in a default renderer when the
        currently accepted renderer is not one of the framework defaults."""
        response = super(ResponseExceptionMixin, self).handle_exception(exc)
        renderers = tuple(api_settings.DEFAULT_RENDERER_CLASSES)
        accepted = getattr(self.request, 'accepted_renderer', None)
        # Only intervene for error responses rendered by a non-default
        # (e.g. image/tile) renderer.
        if (response.exception and accepted
                and not isinstance(accepted, renderers)):
            conneg = self.get_content_negotiator()
            try:
                render_cls, mtype = conneg.select_renderer(
                    self.request, renderers, self.format_kwarg)
            except exceptions.NotAcceptable:
                # No default renderer matches the Accept header: fall back to HTML.
                render_cls = TemplateHTMLRenderer
                mtype = render_cls.media_type
            self.request.accepted_renderer = render_cls()
            self.request.accepted_media_type = mtype
        return response
| 2.15625 | 2 |
addons/website_forum_visibility/models/ir_http.py | marionumza/vocal_v12 | 0 | 12765905 | # © 2017 <NAME> <<EMAIL>>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import re
from odoo.http import request
from odoo import models
class IRHttp(models.AbstractModel):
    _inherit = 'ir.http'

    @classmethod
    def _serve_404(cls):
        """Serve a forum-specific "access denied" page for forum URLs.

        When the requested path belongs to a forum whose id can be parsed
        from the URL slug (``/forum/<slug>-<id>/...``) and that forum
        configures a custom denied page, render it; otherwise fall back to
        the standard 404 handling.
        """
        req_page = request.httprequest.path
        if '/forum/' in req_page:
            match = re.search(r'\/forum\/[a-zA-Z0-9-]+([0-9]+)', req_page)
            # ROBUSTNESS FIX: the original called ``.group(1)`` directly, so a
            # path containing '/forum/' that does not match the slug pattern
            # crashed with AttributeError on None.
            bid = match.group(1) if match else None
            if bid:
                custom_404 = request.env['forum.forum'].sudo().browse(
                    int(bid)
                ).forum_denied_page_id
                if custom_404:
                    return request.render(custom_404.key, {'path': req_page[1:]})
        return super(IRHttp, cls)._serve_404()
| 2.078125 | 2 |
XcomLAN/thingsboard/thingsboard_client.py | Mustafa-Abu-Ghazy/XcomLAN | 0 | 12765906 | <reponame>Mustafa-Abu-Ghazy/XcomLAN
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
This file contains definition/implementation of a ThingsBoardClient Class
that Manages dealing with / pushing data to ThingsBoard server.
"""
import csv
import logging
import re
import threading
from datetime import datetime
from queue import Queue
from time import sleep
from time import time
from tb_device_mqtt import TBPublishInfo
from tb_gateway_mqtt import TBGatewayMqttClient
# -------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------- ThingsBoardClient Class -------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------- #
class ThingsBoardClient(TBGatewayMqttClient):
    """MQTT gateway client that pushes Studer Xcom-LAN node data to ThingsBoard.

    Telemetry is enqueued via :meth:`node_telemetry_enqueue` and drained by a
    background daemon thread; sends that fail are re-queued and retried every
    30 seconds. Attributes and CSV log imports are sent synchronously.
    """

    def __init__(self, host, token=None, port=1883,
                 gateway=None, quality_of_service=1,
                 node_profile='Studer Xcom-LAN Node'):
        """
        :param host: ThingsBoard server host name or IP.
        :param token: gateway access token. (The original source carried an
            unparseable ``token=<PASSWORD>`` redaction placeholder; ``None``
            restores a valid default — confirm the intended credential source.)
        :param port: MQTT port, default 1883.
        :param gateway: forwarded to TBGatewayMqttClient.
        :param quality_of_service: MQTT QoS level, default 1.
        :param node_profile: ThingsBoard device profile used when auto-creating
            devices for previously unseen nodes.
        """
        super(ThingsBoardClient, self).__init__(host, token, port, gateway, quality_of_service)
        self._node_profile = node_profile
        self.log = logging.getLogger(__name__ + ":" + self._node_profile)
        # Pending telemetry, plus a second queue for messages whose send failed
        # and must be retried before fresh ones.
        self._telemetry_queue = Queue()
        self._telemetry_restored_queue = Queue()
        # Daemon thread that drains the queues for the lifetime of the client.
        self._thread = threading.Thread(target=self._send_nodes_telemetry_loop)
        self._thread.name = self.__class__.__name__ + self._thread.name
        self._thread.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
        self._thread.start()

    def __del__(self):
        # Best-effort MQTT disconnect when the client is garbage collected.
        self.disconnect()

    @property
    def node_profile(self):
        """Return the ThingsBoard device profile used for created nodes."""
        return self._node_profile

    def node_telemetry_enqueue(self, node_name, timestamp, telemetry_values):
        """Queue one telemetry record for asynchronous delivery.

        Invalid records (wrong types or an empty values dict) are logged and
        dropped rather than queued.
        """
        valid_node_name = type(node_name) is str
        valid_timestamp = type(timestamp) is int
        valid_telemetry_values = type(telemetry_values) is dict and len(telemetry_values)
        if not (valid_node_name and valid_timestamp and valid_telemetry_values):
            self.log.error("Invalid telemetry information to be added to the queue. (" +
                           "node_name= " + str(node_name) + ", " +
                           "timestamp= " + str(timestamp) + ", " +
                           "telemetry_values= " + str(telemetry_values) + ")"
                           )
            return
        self._telemetry_queue.put({
            "node_name": node_name,
            "timestamp": timestamp,
            "telemetry_values": telemetry_values
        })

    def _send_nodes_telemetry_loop(self):
        """Daemon-thread loop: drain the retry queue first, then fresh telemetry.

        On any failure the record is pushed to the retry queue and the loop
        sleeps 30 seconds before continuing.
        """
        log = logging.getLogger(__name__ + ":" + "send_nodes_telemetry_loop")
        while True:
            # Retry previously failed records before accepting new ones.
            if self._telemetry_restored_queue.qsize():
                telemetry = self._telemetry_restored_queue.get()
            else:
                telemetry = self._telemetry_queue.get()
            try:
                # Try to connect to the server if not connected yet.
                if not self.is_connected():
                    self.connect()
                self.send_node_telemetry(
                    node_name=telemetry["node_name"],
                    timestamp=telemetry["timestamp"],
                    telemetry_values=telemetry["telemetry_values"]
                )
            except Exception as e:
                self._telemetry_restored_queue.put(telemetry)  # restore for retry
                log.exception(e)
                sleep(30)  # back off before the next attempt

    # telemetry === UserInfo
    def send_node_telemetry(self, node_name, timestamp, telemetry_values):
        """Send one timestamped telemetry record for *node_name* to the server."""
        log = logging.getLogger(__name__ + ":" + self.node_profile + ":" + node_name)
        # Connect the device so ThingsBoard routes updates through this gateway;
        # device_type is only used when the device does not exist yet.
        self.gw_connect_device(node_name, device_type=self.node_profile)
        telemetry = {
            'ts': timestamp,
            'values': telemetry_values
        }
        result = self.gw_send_telemetry(node_name, telemetry)
        result_status = result.get() == TBPublishInfo.TB_ERR_SUCCESS
        log.info('Send to Node: ' + node_name + ' Telemetry Content: ' + str(telemetry) +
                 ' Sending Telemetry Result Status: ' + str(result_status))
        self.gw_disconnect_device(node_name)

    # attribute === Parameter
    def send_node_attributes(self, node_name, attributes_values):
        """Send a dict of device attributes for *node_name* to the server."""
        log = logging.getLogger(__name__ + ":" + self.node_profile + ":" + node_name)
        # device_type is only used when the device does not exist yet.
        self.gw_connect_device(node_name, device_type=self.node_profile)
        attributes = attributes_values
        result = self.gw_send_attributes(node_name, attributes)
        result_status = result.get() == TBPublishInfo.TB_ERR_SUCCESS
        log.info('Send to Node: ' + node_name + ' Attributes Content: ' + str(attributes) +
                 ' Sending Attributes Result Status: ' + str(result_status))
        self.gw_disconnect_device(node_name)

    @classmethod
    def _generate_dict_header_for_csv_log_file(cls, csv_log_file_path):
        """Build DictReader field names from the first three CSV header rows.

        Supports only nodes with a single Xtender device. Device columns are
        renamed to compact identifiers (e.g. 'XT1<param>', 'BSP<param>').
        """
        # Use a context manager so the file handle is not leaked.
        with open(csv_log_file_path, 'r') as csv_file:
            first_row, second_row, third_row = list(csv.reader(csv_file))[0:3]
        header = ['ts'] + [''] * (len(first_row) - 2)
        for idx in range(1, len(first_row) - 1):
            # Forward-fill merged header cells.
            if first_row[idx] == '':
                first_row[idx] = first_row[idx - 1]
            if first_row[idx].startswith('XT') or first_row[idx].startswith('DEV XT'):
                header[idx] = 'XT' + '1' + second_row[idx]
            elif first_row[idx].startswith('VS') or first_row[idx].startswith('DEV VS'):
                header[idx] = 'VS' + third_row[idx] + second_row[idx]
            elif first_row[idx].startswith('VT') or first_row[idx].startswith('DEV VT'):
                header[idx] = 'VT' + third_row[idx] + second_row[idx]
            elif first_row[idx].startswith('BSP'):
                header[idx] = 'BSP' + second_row[idx]
            elif first_row[idx].startswith('DEV'):
                header[idx] = 'DEV' + second_row[idx]
            elif first_row[idx] == 'Solar power (ALL) [kW]':
                header[idx] = 'SolarPowerALL' + second_row[idx]
            else:
                header[idx] = re.sub('[^0-9a-zA-Z]+', '', first_row[idx]) + third_row[idx] + second_row[idx]
        return header

    @classmethod
    def _parse_value_from_csv_log_file(cls, value):
        """Parse a CSV cell into int or float where possible, else return the string."""
        if value.isdigit():
            return int(value)
        elif len(value) > 1 and value[0] in ('-', '+') and value[1:].isdigit():
            return int(value)
        elif value.replace('.', '', 1).isdigit():
            return float(value)
        elif len(value) > 1 and value[0] in ('-', '+') and value.replace('.', '', 1)[1:].isdigit():
            return float(value)
        else:
            return value

    def push_csv_input_to_tb(self, node_name, csv_log_file_path, feed_as_realtime=False):
        """Read telemetry/attributes from a node's CSV log and push them to TB.

        :param feed_as_realtime: if set, telemetry is stamped with the current
            time instead of the stored timestamp, and the method sleeps 60s at
            the end (to pace repeated realtime feeds).
        """
        log = logging.getLogger(__name__ + ":" + self.node_profile + ":" + node_name)
        # device_type is only used when the device does not exist yet.
        self.gw_connect_device(node_name, device_type=self.node_profile)
        telemetry = {}
        attributes = {}
        fieldnames = self._generate_dict_header_for_csv_log_file(csv_log_file_path)
        # Materialize the rows inside a context manager so the handle is closed.
        with open(csv_log_file_path, 'r') as csv_file:
            csv_dict_reader = list(csv.DictReader(csv_file, fieldnames=fieldnames))
        # Rows 4..1443 of the file hold telemetry (skip the 3 header rows).
        # NOTE(review): the 1443 cutoff matches the fixed Studer log layout —
        # confirm for other log formats.
        for index, row in enumerate(csv_dict_reader[3:1443]):
            try:
                logging.info('CSV ROW Number: ' + str(index + 4))
                date_time_str = row['ts']
                date_time_obj = datetime.strptime(date_time_str, '%d.%m.%Y %H:%M')
                timestamp = date_time_obj.timestamp()
                telemetry['ts'] = int(timestamp * 1000) if not feed_as_realtime else int(round(time() * 1000))
                telemetry['values'] = {i: self._parse_value_from_csv_log_file(row[i]) for i in fieldnames[1:]}
                result = self.gw_send_telemetry(node_name, telemetry)
                result_status = result.get() == TBPublishInfo.TB_ERR_SUCCESS
                log.info('Node: ' + node_name + ' Telemetry Content: ' + str(telemetry))
                log.info('Node: ' + node_name + ' File: ' + csv_log_file_path +
                         ' Send Telemetry Result Status: ' + str(result_status))
            except Exception as e:
                logging.exception(e)
        # Remaining rows hold parameters ("P123"/"I123") sent as attributes.
        for index, row in enumerate(csv_dict_reader[1443:]):
            if re.match(r'[PI][0-9]+', row[fieldnames[0]]):
                attributes[row[fieldnames[0]]] = self._parse_value_from_csv_log_file(row[fieldnames[1]])
        result = self.gw_send_attributes(node_name, attributes)
        result_status = result.get() == TBPublishInfo.TB_ERR_SUCCESS
        log.info('Node: ' + node_name + ' File: ' + csv_log_file_path + ' Attributes Content: ' + str(attributes))
        log.info('Node: ' + node_name + ' Send Attributes Result Status: ' + str(result_status))
        self.gw_disconnect_device(node_name)
        if feed_as_realtime:
            sleep(60)
# -------------------------------------------------------------------------------------------------------------------- #
# ----------------------------------------- End of ThingsBoardClient Class ----------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------- #
| 1.9375 | 2 |
maskrcnn_benchmark/data/collate_batch.py | worldlife123/maskrcnn-benchmark | 0 | 12765907 | <reponame>worldlife123/maskrcnn-benchmark
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.structures.image_list import to_image_list
import torch
class BatchCollator(object):
    """
    From a list of samples from the dataset,
    returns the batched images and targets.
    This should be passed to the DataLoader
    """

    def __init__(self, size_divisible=0):
        # Pad batched image sizes up to a multiple of this value (0 = no padding).
        self.size_divisible = size_divisible

    def __call__(self, batch):
        """Collate (images, targets, img_ids) samples into batched structures.

        Samples may provide plain tensors or dicts of tensors keyed by name;
        dict samples are grouped per key before batching.
        """
        transposed_batch = list(zip(*batch))
        images = self._collate_images(transposed_batch[0])
        targets = self._collate_targets(transposed_batch[1])
        img_ids = transposed_batch[2]
        return images, targets, img_ids

    def _collate_images(self, image_column):
        # Dict samples: group the values per key, then batch each group.
        if isinstance(image_column[0], dict):
            grouped = {}
            for image_dict in image_column:
                for k, v in image_dict.items():
                    # setdefault replaces the original `if not get(k)` double
                    # lookup (which would also reset an existing falsy entry).
                    grouped.setdefault(k, []).append(v)
            # "img_info" (when present) is metadata forwarded to to_image_list,
            # not a tensor group of its own. Whenever the key exists its list is
            # non-empty, so pop-with-default matches the original behavior.
            img_info = grouped.pop("img_info", None)
            return {k: to_image_list(v, self.size_divisible, img_info)
                    for k, v in grouped.items()}
        return to_image_list(image_column, self.size_divisible)

    def _collate_targets(self, target_column):
        # Dict samples: group target values per key; otherwise pass through.
        if isinstance(target_column[0], dict):
            grouped = {}
            for target_dict in target_column:
                for k, v in target_dict.items():
                    grouped.setdefault(k, []).append(v)
            return grouped
        return target_column
class BatchCollator2Input(object):
    """Collator for datasets that yield two image/target pairs per sample.

    Each sample is (image1, image2, target1, target2, img_id); both image
    columns are batched with to_image_list, targets pass through untouched.
    """

    def __init__(self, size_divisible=0):
        # Pad batched image sizes up to a multiple of this value (0 = no padding).
        self.size_divisible = size_divisible

    def __call__(self, batch):
        """Transpose the batch and return batched images plus raw targets/ids."""
        images_a, images_b, targets_a, targets_b, img_ids = zip(*batch)
        batched_a = to_image_list(images_a, self.size_divisible)
        batched_b = to_image_list(images_b, self.size_divisible)
        return batched_a, batched_b, targets_a, targets_b, img_ids
class BatchCollatorSelfAdapt(object):
    """Generic collator: batches any tensor column, passes others through.

    Each transposed column whose first element is a torch.Tensor is batched
    with to_image_list; all other columns are returned as tuples unchanged.
    """

    def __init__(self, size_divisible=0):
        # Pad batched image sizes up to a multiple of this value (0 = no padding).
        self.size_divisible = size_divisible

    def __call__(self, batch):
        """Transpose the batch and adaptively collate each column."""
        columns = list(zip(*batch))
        collated = []
        for column in columns:
            if isinstance(column[0], torch.Tensor):
                collated.append(to_image_list(column, self.size_divisible))
            else:
                collated.append(column)
        return tuple(collated)
class BBoxAugCollator(object):
    """
    From a list of samples from the dataset,
    returns the images and targets.
    Images should be converted to batched images in `im_detect_bbox_aug`
    """

    def __call__(self, batch):
        # Just transpose; batching of images happens later in the bbox-aug path.
        transposed = zip(*batch)
        return list(transposed)
| 2.390625 | 2 |
multistep_utils.py | UtkarshMishra04/pixel-representations-RL | 2 | 12765908 | <gh_stars>1-10
import torch
from torch import nn
from torch import distributions as pyd
import torch.nn.functional as F
import gym
import os
from collections import deque
import random
import math
import time
from gym import spaces
def get_params(models):
    """Yield every parameter of every module in *models*, in order."""
    for model in models:
        yield from model.parameters()
def weight_init(m):
    """Custom weight init for Conv2D and Linear layers.

    Linear: orthogonal weights, zero bias. Conv2d/ConvTranspose2d: zero
    everywhere except an orthogonal center tap (delta-orthogonal init).
    """
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf
        assert m.weight.size(2) == m.weight.size(3)  # square kernels only
        m.weight.data.fill_(0.0)
        m.bias.data.fill_(0.0)
        center = m.weight.size(2) // 2
        relu_gain = nn.init.calculate_gain('relu')
        # Indexing with ints is basic indexing, so this initializes in place.
        nn.init.orthogonal_(m.weight.data[:, :, center, center], relu_gain)
class MLP(nn.Module):
    """Multi-layer perceptron module wrapping the `mlp` builder.

    `output_mod` may be an nn.Module appended after the last layer, or the
    string 'tanh' (any other string is rejected). Weights are initialized
    with `weight_init`.
    """

    def __init__(self,
                 input_dim,
                 output_dim,
                 hidden_dim,
                 hidden_depth,
                 output_mod=None):
        super().__init__()
        if isinstance(output_mod, str):
            # Only the 'tanh' shorthand is supported.
            assert output_mod == 'tanh'
            output_mod = torch.nn.Tanh()
        self.trunk = mlp(input_dim, hidden_dim, output_dim, hidden_depth,
                         output_mod)
        self.apply(weight_init)

    def forward(self, x):
        """Run the input through the sequential trunk."""
        return self.trunk(x)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
    """Build an MLP as nn.Sequential.

    hidden_depth == 0 gives a single Linear layer; otherwise `hidden_depth`
    hidden Linear+ReLU blocks followed by an output Linear. An optional
    `output_mod` module is appended last.
    """
    if hidden_depth == 0:
        layers = [nn.Linear(input_dim, output_dim)]
    else:
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
        for _ in range(hidden_depth - 1):
            layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)])
        layers.append(nn.Linear(hidden_dim, output_dim))
    if output_mod is not None:
        layers.append(output_mod)
    return nn.Sequential(*layers)
def conv_mlp_encoder(input_shape, output_dim, hidden_depth, output_mod=None):
    """Build a stack of 3x3 stride-1 Conv2d layers (32 channels) as nn.Sequential.

    NOTE(review): `output_dim` is accepted but never used — presumably kept
    for API symmetry with `mlp`; confirm before relying on it.
    """
    if hidden_depth == 0:
        layers = [nn.Conv2d(input_shape[0], 32, 3, stride=1)]
    else:
        layers = [nn.Conv2d(input_shape[0], 32, 3, stride=1), nn.ReLU(inplace=True)]
        for _ in range(hidden_depth - 1):
            layers.extend([nn.Conv2d(32, 32, 3, stride=1), nn.ReLU(inplace=True)])
    if output_mod is not None:
        layers.append(output_mod)
    return nn.Sequential(*layers)
def conv_mlp_decoder(output_shape, feature_dim, hidden_depth, output_mod=None):
    """Build a stack of 3x3 stride-2 ConvTranspose2d layers as nn.Sequential.

    Expects 32-channel input; the final layer maps to output_shape[0]
    channels. hidden_depth == 0 is unsupported (asserts). Supports at most
    hidden_depth == 4 because the fixed output-padding table has 3 entries.
    NOTE(review): `feature_dim` is accepted but never used — confirm intent.
    """
    if hidden_depth == 0:
        assert False
    # Per-layer output paddings for the intermediate upsampling stages.
    paddings = [0, 1, 0]
    layers = [nn.ConvTranspose2d(32, 32, 3, stride=2, output_padding=1),
              nn.ReLU(inplace=True)]
    for i in range(hidden_depth - 1):
        layers.extend([
            nn.ConvTranspose2d(32, 32, 3, stride=2, output_padding=paddings[i]),
            nn.ReLU(inplace=True),
        ])
    layers.append(nn.ConvTranspose2d(32, output_shape[0], 3, stride=2, output_padding=1))
    if output_mod is not None:
        layers.append(output_mod)
    return nn.Sequential(*layers)
import numpy as np
# https://pswww.slac.stanford.edu/svn-readonly/psdmrepo/RunSummary/trunk/src/welford.py
class Welford(object):
    """Knuth/Welford streaming statistics (shifted-data variant).

    Accumulates count, mean, variance, element-wise min and max of a stream
    of equally-shaped numpy arrays (or scalars) without storing the samples.
    ``_K`` is the first sample, used as a numerical shift; ``_Ex``/``_Ex2``
    hold sums of (x - K) and (x - K)^2.

    Fixes over the previous revision:
      * the update step mixed two Welford variants (``/ n``, ``x - _Ex`` and
        re-binding ``_K``), which made mean() and var() numerically wrong for
        every stream with more than one sample;
      * ``mean(axis=0)`` was ignored because of a truthiness test on ``axis``.
    """

    def __init__(self, x=None):
        self._K = np.float64(0.)    # shift anchor (set to the first sample)
        self.n = np.float64(0.)     # number of samples seen
        self._Ex = np.float64(0.)   # sum of (x - K)
        self._Ex2 = np.float64(0.)  # sum of (x - K)**2
        self.shape = None
        self._min = None
        self._max = None
        self._init = False
        self.__call__(x)

    def add_data(self, x):
        """Fold one sample into the running statistics (None is ignored)."""
        if x is None:
            return

        x = np.array(x)
        self.n += 1.
        if not self._init:
            self._init = True
            self._K = x
            self._min = x
            self._max = x
            self.shape = x.shape
        else:
            self._min = np.minimum(self._min, x)
            self._max = np.maximum(self._max, x)
            # Shifted-data accumulation; the (x1 - K) term of the first sample
            # is zero, so starting from the second sample is correct.
            delta = x - self._K
            self._Ex += delta
            self._Ex2 += delta * delta

    def __call__(self, x):
        self.add_data(x)

    def max(self):
        """Max value for each element in array."""
        return self._max

    def min(self):
        """Min value for each element in array."""
        return self._min

    def mean(self, axis=None):
        """Compute the mean of accumulated data.

        Parameters
        ----------
        axis: None or int or tuple of ints, optional
            Axis or axes along which the means are computed. The default is to
            compute the mean of the flattened array.
        """
        if self.n < 1:
            return None

        # mean = K + sum(x - K)/n
        val = np.array(self._K + self._Ex / np.float64(self.n))
        # `axis is not None` (not truthiness) so axis=0 is honored.
        if axis is not None:
            return val.mean(axis=axis)
        else:
            return val

    def sum(self, axis=None):
        """Compute the sum of accumulated data."""
        return self.mean(axis=axis) * self.n

    def var(self):
        """Compute the (sample, ddof=1) variance of accumulated data."""
        if self.n <= 1:
            # No spread with fewer than two samples; None before any data
            # (np.zeros(None) would raise).
            return None if self.shape is None else np.zeros(self.shape)

        # var = (sum((x-K)^2) - sum(x-K)^2/n) / (n - 1)
        val = np.array((self._Ex2 - (self._Ex * self._Ex) / np.float64(self.n)) / np.float64(self.n - 1.))
        return val

    def std(self):
        """Compute the standard deviation of accumulated data."""
        return np.sqrt(self.var())

    def __str__(self):
        if self._init:
            return "{} +- {}".format(self.mean(), self.std())
        else:
            return "{}".format(self.shape)

    def __repr__(self):
        return "< Welford: {:} >".format(str(self))
| 2.296875 | 2 |
bii_webapp/apps/accounts/models.py | ISA-tools/bii-webapp | 2 | 12765909 | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
# class B(models.Model):
# b1=models.CharField(max_length=200)
#
class UserProfile(models.Model):
    """Extra profile data attached one-to-one to a Django auth User."""
    # NOTE(review): ForeignKey(unique=True) is effectively a one-to-one link;
    # newer Django versions warn and recommend OneToOneField, and Django 2+
    # also requires an explicit on_delete — confirm the target Django version
    # before migrating.
    user = models.ForeignKey(User, unique=True)
    website = models.URLField("Website", blank=True)
    company = models.CharField(max_length=50, blank=True)
| 2.265625 | 2 |
BiblioPixelAnimations/circle/hyperspace.py | jessicaengel451/BiblioPixelAnimations | 36 | 12765910 | <gh_stars>10-100
from bibliopixel.animation.circle import Circle
from bibliopixel.colors import COLORS
from bibliopixel.colors.arithmetic import color_scale
import random
class Hyperspace(Circle):
    """Starfield-style animation: colored streaks fly outward from the center.

    Each streak is a (ring, color) pair stored per degree; every step spawns
    new streaks at the center and advances existing ones outward, drawing a
    fading tail behind each head.
    """
    # Default palette used when no colors are supplied.
    COLOR_DEFAULTS = ('colors', [COLORS.Green]),

    def __init__(self, layout, tail=4, growthRate=4, angleDiff=6, **kwds):
        """tail: tail length in rings; growthRate: streaks spawned per step;
        angleDiff: angular quantization (degrees) for spawn positions."""
        super().__init__(layout, **kwds)
        self._tail = tail
        # One list of (ring, color) streaks per degree of the circle.
        self._tails = [[] for x in range(360)]
        self._growthRate = growthRate
        self._angleDiff = angleDiff

    def pre_run(self):
        self._step = 0

    def _drawTail(self, angle, ring, color):
        """Draw the streak head at `ring` plus a linearly fading tail inward."""
        for i in range(self._tail):
            if ring - i >= 0 and ring - i <= self.lastRing:
                # Brightness falls off linearly along the tail.
                level = 255 - ((255 // self._tail) * i)
                self.layout.set(ring - i, angle, color_scale(color, level))

    def step(self, amt=1):
        self.layout.all_off()
        # Spawn new streaks at the center on quantized angles.
        for i in range(self._growthRate):
            newTail = random.randrange(0, 360, self._angleDiff)
            color = random.choice(self.palette)
            self._tails[newTail].append((0, color))
        # Advance every streak; drop those whose whole tail has left the circle.
        for a in range(360):
            angle = self._tails[a]
            if len(angle) > 0:
                removals = []
                for r in range(len(angle)):
                    tail = angle[r]
                    if tail[0] <= self.lastRing:
                        self._drawTail(a, tail[0], tail[1])
                    if tail[0] - (self._tail - 1) <= self.lastRing:
                        # Still (partly) visible: move the head outward.
                        tail = (tail[0] + amt, tail[1])
                        self._tails[a][r] = tail
                    else:
                        removals.append(tail)
                for r in removals:
                    self._tails[a].remove(r)
        # NOTE(review): _step is reset every frame, mirroring pre_run; the
        # counter appears unused beyond that — confirm against the base class.
        self._step = 0
class HyperspaceRainbow(Circle):
    """Hyperspace variant where each streak's color comes from the palette,
    indexed by the streak's current ring (rainbow by distance) instead of
    being fixed at spawn time; streaks are stored as bare ring numbers.
    """

    def __init__(self, layout, tail=4, growthRate=4, angleDiff=6, **kwds):
        """tail: tail length in rings; growthRate: streaks spawned per step;
        angleDiff: angular quantization (degrees) for spawn positions."""
        super().__init__(layout, **kwds)
        self._tail = tail
        # One list of ring positions (one int per streak) per degree.
        self._tails = [[] for x in range(360)]
        self._growthRate = growthRate
        self._angleDiff = angleDiff

    def pre_run(self):
        self._step = 0

    def _drawTail(self, angle, ring, color):
        """Draw the streak head at `ring` plus a linearly fading tail inward."""
        for i in range(self._tail):
            if ring - i >= 0 and ring - i <= self.lastRing:
                # Brightness falls off linearly along the tail.
                level = 255 - ((255 // self._tail) * i)
                self.layout.set(ring - i, angle, color_scale(color, level))

    def step(self, amt=1):
        self.layout.all_off()
        # Spawn new streaks at the center on quantized angles.
        for i in range(self._growthRate):
            newTail = random.randrange(0, 360, self._angleDiff)
            self._tails[newTail].append(0)
        # Advance every streak; drop those whose whole tail has left the circle.
        for a in range(360):
            angle = self._tails[a]
            if len(angle) > 0:
                removals = []
                for r in range(len(angle)):
                    tail = angle[r]
                    if tail <= self.lastRing:
                        # Palette index scales with distance from the center.
                        c = self.palette.get(tail * (255 // self.lastRing))
                        self._drawTail(a, tail, c)
                    if tail - (self._tail - 1) <= self.lastRing:
                        # Still (partly) visible: move the head outward.
                        tail = tail + amt
                        self._tails[a][r] = tail
                    else:
                        removals.append(tail)
                for r in removals:
                    self._tails[a].remove(r)
        # NOTE(review): _step is reset every frame, mirroring pre_run; the
        # counter appears unused beyond that — confirm against the base class.
        self._step = 0
| 3.0625 | 3 |
parallel/__init__.py | santiagobasulto/parallel | 22 | 12765911 | import os
import enum
import functools
import itertools
import collections
import concurrent.futures as cf
from . import errors
from . import utils
from . import exceptions
from .models import (
ParallelJob,
ParallelArg,
ParallelStatus,
FailedTask,
SequentialMapResult,
NamedMapResult,
)
# __all__ = ["decorate", "arg", "future", "map", "async_map", "par", "async_par"]
__all__ = ["map", "async_map", "par", "async_par"]
__version__ = "0.9.1"
__author__ = "<NAME> <<EMAIL>>"
class ExecutorStrategy(enum.Enum):
    """Selects the concurrency backend: OS threads or separate processes."""
    THREAD_EXECUTOR = "thread"
    PROCESS_EXECUTOR = "process"


# Module-level shortcuts so callers can write parallel.THREAD_EXECUTOR.
THREAD_EXECUTOR = ExecutorStrategy.THREAD_EXECUTOR
PROCESS_EXECUTOR = ExecutorStrategy.PROCESS_EXECUTOR
class BaseParallelExecutor:
    """Wraps a concurrent.futures executor around a list of ParallelJob objects.

    Lifecycle: NOT_STARTED -> start() submits every job -> results() collects
    them (caching the result object) -> shutdown() releases the pool. Usable
    as a context manager. Subclasses choose the pool via _get_executor_class().
    """

    def __init__(
        self,
        jobs,
        max_workers=None,
        timeout=None,
        silent=False,
        ResultClass=SequentialMapResult,
    ):
        """silent=True stores FailedTask entries instead of re-raising job
        exceptions; timeout is the default per-job wait used by results()."""
        self.jobs = jobs
        self.max_workers = max_workers
        self.timeout = timeout
        self.silent = silent
        self.ResultClass = ResultClass
        self.__status = ParallelStatus.NOT_STARTED
        self.__executor = None
        self.__results = None

    def _get_executor_class(self):  # pragma: no cover
        """Return the concurrent.futures pool class; subclasses must override."""
        raise NotImplementedError()

    @property
    def status(self):
        """Current ParallelStatus of this executor."""
        return self.__status

    def start(self):
        """Create the pool and submit every job; attaches a future to each job.

        Raises ParallelStatusException if already started.
        """
        if self.__status == ParallelStatus.STARTED:
            raise exceptions.ParallelStatusException(errors.STATUS_EXECUTOR_RUNNING)
        self.__status = ParallelStatus.STARTED
        ExecutorClass = self._get_executor_class()
        self.__executor = ExecutorClass(max_workers=self.max_workers)
        for job in self.jobs:
            future = self.__executor.submit(job.fn, *job.args, **(job.kwargs))
            # TODO: Check status for ParallelJob
            job.future = future

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args, **kwargs):
        self.shutdown()

    def results(self, timeout=None):
        """Collect and cache every job's result in submission order.

        Raises TimeoutException when a job exceeds the timeout; re-raises job
        exceptions unless silent, in which case a FailedTask is recorded.
        """
        if self.__results:
            return self.__results

        if self.__status == ParallelStatus.NOT_STARTED:
            raise exceptions.ParallelStatusException(errors.STATUS_EXECUTOR_NOT_STARTED)

        ResultClass = self.ResultClass
        self.__results = ResultClass()
        for job in self.jobs:
            try:
                # Per-call timeout overrides the instance default.
                result = job.future.result(timeout=(timeout or self.timeout))
            except cf.TimeoutError as e:
                raise exceptions.TimeoutException() from e
            except Exception as exc:
                if not self.silent:
                    self.__status = ParallelStatus.FAILED
                    raise exc
                self.__results.new_result(job.name, FailedTask(job, exc))
            else:
                self.__results.new_result(job.name, result)
        self.__status = ParallelStatus.DONE
        return self.__results

    def shutdown(self):
        """Shut down the underlying pool (blocks until workers finish)."""
        self.__executor.shutdown()
class ThreadExecutor(BaseParallelExecutor):
    """Executor backed by a concurrent.futures.ThreadPoolExecutor."""

    def _get_executor_class(self):
        return cf.ThreadPoolExecutor
class ProcessExecutor(BaseParallelExecutor):
    """Executor backed by a concurrent.futures.ProcessPoolExecutor."""

    def _get_executor_class(self):
        return cf.ProcessPoolExecutor
# Maps each ExecutorStrategy member to its concrete executor implementation.
EXECUTOR_MAPPING = {
    ExecutorStrategy.THREAD_EXECUTOR: ThreadExecutor,
    ExecutorStrategy.PROCESS_EXECUTOR: ProcessExecutor,
}
class ParallelHelper:
    """Facade over the executor classes, bound to one concurrency backend.

    Accepts either an ExecutorStrategy member (resolved through
    EXECUTOR_MAPPING) or an executor class directly.
    """

    def __init__(self, executor=ExecutorStrategy.THREAD_EXECUTOR):
        if isinstance(executor, ExecutorStrategy):
            executor = EXECUTOR_MAPPING[executor]
        self.ExecutorClass = executor

    def get_result_class(self, params):
        """Pick the result container: sequence params map by position, mapping
        params (e.g. dict) map by name."""
        if isinstance(params, collections.abc.Sequence):
            return SequentialMapResult
        return NamedMapResult

    def map(
        self,
        fn,
        params,
        extras=None,
        unpack_arguments=True,
        max_workers=None,
        timeout=None,
        silent=False,
    ):
        """Run `fn` once per entry in `params`; block and return the results."""
        jobs = ParallelJob.build_for_callable_from_params(
            fn, params, extras=extras, unpack_arguments=unpack_arguments
        )
        ResultClass = self.get_result_class(params)
        with self.ExecutorClass(
            jobs,
            max_workers=max_workers,
            timeout=timeout,
            silent=silent,
            ResultClass=ResultClass,
        ) as ex:
            return ex.results()

    def async_map(
        self,
        fn,
        params,
        extras=None,
        unpack_arguments=True,
        max_workers=None,
        timeout=None,
        silent=False,
    ):
        """Like map(), but return the (not yet started) executor so the caller
        controls start()/results()/shutdown()."""
        jobs = ParallelJob.build_for_callable_from_params(
            fn, params, extras=extras, unpack_arguments=unpack_arguments
        )
        ResultClass = self.get_result_class(params)
        ex = self.ExecutorClass(
            jobs,
            max_workers=max_workers,
            timeout=timeout,
            silent=silent,
            ResultClass=ResultClass,
        )
        return ex

    def par(
        self,
        params,
        extras=None,
        unpack_arguments=True,
        max_workers=None,
        timeout=None,
        silent=False,
    ):
        """Run heterogeneous jobs described by `params` (each entry names its
        own callable); block and return the results."""
        jobs = ParallelJob.build_jobs_from_params(
            params, extras=extras, unpack_arguments=unpack_arguments
        )
        ResultClass = self.get_result_class(params)
        with self.ExecutorClass(
            jobs,
            max_workers=max_workers,
            timeout=timeout,
            silent=silent,
            ResultClass=ResultClass,
        ) as ex:
            return ex.results()

    def split(
        self,
        collection,
        fn,
        executor=ExecutorStrategy.THREAD_EXECUTOR,
        workers=None,
        timeout=None,
        extras=None,
    ):
        """Chunk `collection` across workers, apply `fn` per chunk, and return
        the flattened, order-preserving concatenation of the chunk results.

        NOTE(review): the `executor` parameter is accepted but unused — the
        helper's own ExecutorClass is used; confirm intent.
        """
        # Default worker count mirrors ThreadPoolExecutor's heuristic.
        workers = workers or min(32, (os.cpu_count() or 1) + 4)
        chunks = utils.split_collection(collection, workers)
        jobs = [
            ParallelJob(fn, None, [chunk], (extras or {}).copy())
            for chunk in chunks
        ]
        with self.ExecutorClass(
            jobs,
            max_workers=workers,
            timeout=timeout,
            ResultClass=SequentialMapResult,
        ) as ex:
            results = ex.results()
            return list(itertools.chain.from_iterable(results))
#[item for sublist in l for item in sublist]
def map(
    fn,
    params,
    executor=ExecutorStrategy.THREAD_EXECUTOR,
    max_workers=None,
    timeout=None,
    extras=None,
    silent=False,
    unpack_arguments=True,
):
    """Run `fn` once per entry of `params` in parallel; block for the results.

    Note: intentionally shadows the builtin `map` within this module's API.
    """
    helper = ParallelHelper(executor)
    options = dict(
        extras=extras,
        unpack_arguments=unpack_arguments,
        max_workers=max_workers,
        timeout=timeout,
        silent=silent,
    )
    return helper.map(fn, params, **options)
def async_map(
    fn,
    params,
    executor=ExecutorStrategy.THREAD_EXECUTOR,
    max_workers=None,
    timeout=None,
    extras=None,
    silent=False,
    unpack_arguments=True,
):
    """Build (but do not start) a parallel map of `fn` over `params`.

    Returns the executor so the caller drives start()/results()/shutdown().
    """
    helper = ParallelHelper(executor)
    options = dict(
        extras=extras,
        unpack_arguments=unpack_arguments,
        max_workers=max_workers,
        timeout=timeout,
        silent=silent,
    )
    return helper.async_map(fn, params, **options)
def par(
    params,
    executor=ExecutorStrategy.THREAD_EXECUTOR,
    max_workers=None,
    timeout=None,
    extras=None,
    silent=False,
    unpack_arguments=True,
):
    """Run heterogeneous jobs described by `params` in parallel; block for results."""
    helper = ParallelHelper(executor)
    options = dict(
        extras=extras,
        unpack_arguments=unpack_arguments,
        max_workers=max_workers,
        timeout=timeout,
        silent=silent,
    )
    return helper.par(params, **options)
def split(
    collection,
    fn,
    executor=ExecutorStrategy.THREAD_EXECUTOR,
    workers=None,
    timeout=None,
    extras=None,
):
    """Chunk `collection` across workers, apply `fn` per chunk, flatten results."""
    helper = ParallelHelper(executor)
    return helper.split(collection, fn, extras=extras, workers=workers, timeout=timeout)
class ParallelCallable:
    """A callable bundled with default parallel-execution settings.

    Instances behave like the wrapped function when called directly, and
    expose map()/async_map() that fall back to the stored executor, timeout
    and max_workers whenever the caller does not override them.
    """

    def __init__(self, fn, executor, timeout, max_workers):
        self.fn = fn
        self.executor = executor
        self.timeout = timeout
        self.max_workers = max_workers

    def map(
        self,
        params,
        executor=None,
        max_workers=None,
        timeout=None,
        extras=None,
        silent=False,
        unpack_arguments=True,
    ):
        """Blocking parallel map of the wrapped callable over `params`."""
        helper = ParallelHelper(executor or self.executor)
        return helper.map(
            self.fn,
            params,
            extras=extras,
            unpack_arguments=unpack_arguments,
            max_workers=(max_workers or self.max_workers),
            timeout=(timeout or self.timeout),
            silent=silent,
        )

    def async_map(
        self,
        params,
        executor=None,
        max_workers=None,
        timeout=None,
        extras=None,
        silent=False,
        unpack_arguments=True,
    ):
        """Non-blocking parallel map; returns the executor to the caller."""
        helper = ParallelHelper(executor or self.executor)
        return helper.async_map(
            self.fn,
            params,
            extras=extras,
            unpack_arguments=unpack_arguments,
            max_workers=(max_workers or self.max_workers),
            timeout=(timeout or self.timeout),
            silent=silent,
        )

    def __call__(self, *args, **kwargs):
        """Invoke the wrapped callable synchronously."""
        return self.fn(*args, **kwargs)
class ParallelDecorator:
    """Wrapper installed by the `decorate` decorator.

    Exposes `.thread` / `.process` ParallelCallable views plus `.map`,
    `.async_map` and `.future` on whichever backend was chosen as the
    default. Calling the instance invokes the original function unchanged.
    """

    def __init__(
        self,
        fn,
        executor=ExecutorStrategy.THREAD_EXECUTOR,
        timeout=None,
        max_workers=None,
    ):
        self.fn = fn
        self.thread = ParallelCallable(
            fn,
            ExecutorStrategy.THREAD_EXECUTOR,
            timeout=timeout,
            max_workers=max_workers,
        )
        self.process = ParallelCallable(
            fn,
            ExecutorStrategy.PROCESS_EXECUTOR,
            timeout=timeout,
            max_workers=max_workers,
        )
        if executor == ExecutorStrategy.THREAD_EXECUTOR:
            self.default_executor = self.thread
        else:
            self.default_executor = self.process

    # Proper methods instead of the original lambda assignments (PEP 8 E731);
    # behavior is identical.
    def map(self, *args, **kwargs):
        """Delegate to the default backend's blocking map()."""
        return self.default_executor.map(*args, **kwargs)

    def async_map(self, *args, **kwargs):
        """Delegate to the default backend's non-blocking async_map()."""
        return self.default_executor.async_map(*args, **kwargs)

    def future(self, *args, **kwargs):
        """Build a deferred ParallelJob for the wrapped function."""
        return job(self.fn, *args, **kwargs)

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
def decorate(*args, **kwargs):
    """Decorator factory: usable bare (@decorate) or configured (@decorate(...)).

    Bare usage receives the function as the single positional argument and
    wraps it immediately; otherwise the arguments are stored and applied when
    the returned wrapper decorates the function.
    """
    if len(args) == 1 and callable(args[0]):
        # @decorate with no arguments
        return ParallelDecorator(args[0])

    def wrapper(fn):
        # @decorate(executor=..., timeout=..., ...)
        return ParallelDecorator(fn, *args, **kwargs)

    return wrapper
# Ready-made helpers bound to each backend, e.g. parallel.thread.map(...).
thread = ParallelHelper(ThreadExecutor)
process = ParallelHelper(ProcessExecutor)
def arg(*args, **kwargs):
    """Create a ParallelArg placeholder for the job-building helpers.

    All arguments are forwarded to ParallelArg verbatim. Replaces the
    original lambda assignment (PEP 8 E731) and its "TODO" docstring;
    behavior is unchanged.
    """
    return ParallelArg(*args, **kwargs)
def job(*args, **kwargs):
    """Build a ParallelJob from a callable plus its call arguments.

    The first positional argument is the callable; the remaining positional
    and keyword arguments are stored for later execution. (Replaces the
    placeholder "TODO" docstring.)
    """
    fn, *call_args = args
    return ParallelJob(fn, None, call_args, kwargs)
# Re-export the job status values at module level for caller convenience.
NOT_STARTED = ParallelStatus.NOT_STARTED
STARTED = ParallelStatus.STARTED
DONE = ParallelStatus.DONE
FAILED = ParallelStatus.FAILED
twominutejournal/journal.py | tjmcginnis/tmj | 2 | 12765912 | """
twominutejournal.journal
~~~~~~~~~~~~~~~~~~~~~~~~
A daily gratitude journal library.
"""
import uuid
import datetime
from .errors import EntryAlreadyExistsError
def create_prompt(question: str, responses: int) -> dict:
    '''Create a new journal prompt.

    question: the prompt text shown to the user.
    responses: how many responses the prompt expects.
    Raises TypeError when either argument has the wrong type.
    '''
    checks = (('question', question, str), ('responses', responses, int))
    for name, value, expected in checks:
        if not isinstance(value, expected):
            raise TypeError('{} must be of type {}'.format(name, expected.__name__))
    return {'question': question, 'responses': responses}
def save_prompt(prompt: dict, storage_adapter: object):
    '''Persist a journal prompt via the storage adapter.

    Raises TypeError when prompt is not a dict.
    '''
    if isinstance(prompt, dict):
        storage_adapter.store_prompt(prompt)
    else:
        raise TypeError('prompt must be of type dict')
def get_todays_prompts(storage_adapter: object) -> list:
    '''Get today's journal prompts.

    Raises EntryAlreadyExistsError when an entry has already been written
    on the current calendar day.
    '''
    now = datetime.datetime.today()
    latest = storage_adapter.get_last_entry()
    # One entry per day: compare calendar dates, not full timestamps.
    if latest.get('created').date() == now.date():
        raise EntryAlreadyExistsError(
            'An entry has already been written today')
    return storage_adapter.get_prompts()
def create_entry() -> dict:
    '''Create a new journal entry.

    The entry carries a random UUID4 string id and the current local datetime.
    '''
    entry_id = str(uuid.uuid4())
    created_at = datetime.datetime.today()
    return {'id': entry_id, 'created': created_at}
def view_all_entries(storage_adapter: object) -> list:
    '''View all journal entries.

    Delegates directly to the storage adapter.
    '''
    entries = storage_adapter.get_all_entries()
    return entries
def create_response(prompt: str, body: str) -> dict:
    '''Create a new journal response.

    prompt: the prompt text this response answers.
    body: the response text itself.
    Raises TypeError when either argument is not a string.
    '''
    for name, value in (('prompt', prompt), ('body', body)):
        if not isinstance(value, str):
            raise TypeError('{} must be of type str.'.format(name))
    return {'id': str(uuid.uuid4()), 'prompt': prompt, 'body': body}
def submit_responses(entry: dict, responses: list, storage_adapter: object):
    '''Persist an entry together with its responses.

    Validation happens before any write: TypeError is raised when entry is
    not a dict or responses is not a list, and nothing is stored.
    '''
    if not isinstance(entry, dict):
        raise TypeError('entry must be of type dict')
    if not isinstance(responses, list):
        raise TypeError('responses must be of type list')
    storage_adapter.store_entry(entry)
    entry_id = entry['id']
    for resp in responses:
        storage_adapter.store_response(resp, entry_id)
def view_entry_responses(entry_id: str, storage_adapter: object) -> list:
    '''Return the responses recorded for the entry with *entry_id*.

    Raises TypeError when entry_id is not a string.
    '''
    if not isinstance(entry_id, str):
        raise TypeError('entry_id must be of type str')
    return storage_adapter.get_entry_responses(entry_id)
| 2.953125 | 3 |
herbieapp/models/message_models_and_serializers.py | merltron-pa/herbie-test | 0 | 12765913 | <reponame>merltron-pa/herbie-test
from rest_framework import serializers
from rest_framework.fields import JSONField
from herbieapp.constants import MessageActionConstants as Constants
class EntityUpdateMessage:
    """Message payload emitted when an entity is created or updated."""

    def __init__(self, _type, key, version, payload, created, modified, tags):
        self.tags = tags
        # The action is fixed: this message always represents an update.
        self.action = Constants.UPDATE
        self.type = _type
        self.key = key
        self.version = version
        self.payload = payload
        self.created = created
        self.modified = modified

    def get_serializer(self):
        """Return a DRF serializer bound to this message instance."""
        return EntityUpdateMessageSerializer(self)
class EntityDeleteMessage:
    """Message payload emitted when an entity is deleted.

    version is optional: None signals that every version is affected —
    TODO confirm against the consumer.
    """

    def __init__(self, _type, key, version=None):
        # The action is fixed: this message always represents a deletion.
        self.action = Constants.DELETE
        self.type = _type
        self.key = key
        self.version = version

    def get_serializer(self):
        """Return a DRF serializer bound to this message instance."""
        return EntityDeleteMessageMessageSerializer(self)
class EntityUpdateMessageSerializer(serializers.Serializer):
    """DRF serializer mirroring EntityUpdateMessage's attributes field-by-field."""
    tags = serializers.ListField()
    action = serializers.CharField()
    type = serializers.CharField()
    key = serializers.CharField()
    version = serializers.CharField()
    payload = JSONField()
    created = serializers.CharField()
    modified = serializers.CharField()
class EntityDeleteMessageMessageSerializer(serializers.Serializer):
    """DRF serializer mirroring EntityDeleteMessage's attributes field-by-field."""
    action = serializers.CharField()
    type = serializers.CharField()
    key = serializers.CharField()
    version = serializers.CharField()
| 2.1875 | 2 |
lab5/graph.py | casseyshao/CSC190 | 0 | 12765914 | <reponame>casseyshao/CSC190<gh_stars>0
# Lab 5
'''
G=(V,E)
Let V be the set v0,v1,v2,...
(i.e., with ascending non-negative indices)
and E be the set e0,e1,e2,...
(i.e., with ascending non-negative indices)
'''
class stack:
    """LIFO stack used by the depth-first graph traversal.

    Interface matches `queue`: push/pop/isEmpty. pop() returns False when
    empty, so a stored value of False is indistinguishable from "empty" —
    callers should check isEmpty() first.
    """

    def __init__(self):
        self.lifo = []

    def push(self, ele):
        """Push one element; always returns True."""
        # In-place append is O(1); the original rebuilt the whole list
        # (self.lifo = self.lifo + [ele]) on every push, which is O(n).
        self.lifo.append(ele)
        return True

    def pop(self):
        """Remove and return the most recently pushed element, or False if empty."""
        if not self.lifo:
            return False
        # O(1) removal from the end instead of slicing a copy of the list.
        return self.lifo.pop()

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return not self.lifo
class queue:
    """FIFO queue used by the breadth-first graph traversal.

    Interface matches `stack`: push/pop/isEmpty. pop() returns False when
    empty, so a stored value of False is indistinguishable from "empty" —
    callers should check isEmpty() first.
    """

    def __init__(self):
        # Kept as a plain list to preserve the public `fifo` attribute;
        # collections.deque would make the left-pop O(1) if that
        # compatibility is not required.
        self.fifo = []

    def push(self, ele):
        """Enqueue one element; always returns True."""
        # In-place append is O(1); the original rebuilt the whole list
        # (self.fifo = self.fifo + [ele]) on every push, which is O(n).
        self.fifo.append(ele)
        return True

    def pop(self):
        """Remove and return the oldest element, or False if empty."""
        if not self.fifo:
            return False
        # list.pop(0) is O(n) (see __init__ note), but avoids the original's
        # extra full-list slice copy.
        return self.fifo.pop(0)

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return not self.fifo
class graph:
    """Weighted directed/undirected graph stored as an adjacency list.

    ``self.store[i]`` is a list of ``[to_idx, weight]`` pairs for vertex i.
    Traversal depends on the module-level ``stack`` and ``queue`` classes.
    """

    def __init__(self):
        # Adjacency list: one sub-list of [to_idx, weight] pairs per vertex.
        self.store = []

    def addVertex(self, n):
        """Add ``n`` vertices; return the new vertex count, or -1 if n <= 0."""
        if (n <= 0):
            return -1
        for i in range(0, n):
            self.store = self.store + [[]]
        return len(self.store)

    def addEdge(self, from_idx, to_idx, directed, weight):
        """Add an edge from from_idx to to_idx with the given weight.

        When ``directed`` is False the reverse edge is added as well.
        Returns False on an out-of-range index, a non-bool ``directed``
        flag or a zero weight; otherwise True.
        """
        if (from_idx not in range(0, len(self.store))) or (to_idx not in range(0, len(self.store))) or type(directed) != bool or weight == 0:
            return False
        self.store[from_idx] = self.store[from_idx] + [[to_idx, weight]]
        if directed == False:
            self.store[to_idx] = self.store[to_idx] + [[from_idx, weight]]
        return True

    def traverse(self, start, typeBreadth):
        """Breadth (typeBreadth=True, queue) or depth (False, stack) traversal.

        Returns a flat list of visited vertices when ``start`` is a valid
        vertex index, a list of lists (one per connected component) when
        ``start`` is None, or [] on bad arguments.
        """
        # Pick the container that defines the traversal order.
        if typeBreadth == True:      # breadth traversal
            C = queue()
        elif typeBreadth == False:   # depth traversal
            C = stack()
        else:
            return []
        # BUGFIX: the original wrote `while C.isEmpty == False`, comparing the
        # bound method object itself to False -- always False, so the drain
        # loop never ran.  Call the method instead.  C is freshly constructed
        # above, so this remains a purely defensive drain.
        while C.isEmpty() == False:
            C.pop()
        # One Discovered/Processed flag per vertex.
        Discovered = []
        Processed = []
        for i in range(0, len(self.store)):
            Discovered += [False]
            Processed += [False]
        rlist = []
        V = []
        # start == None -> traverse the whole graph, component by component.
        if start == None:
            V = list(range(0, len(self.store)))
        # otherwise traverse only vertices reachable from `start`
        elif type(start) == int:
            if start not in range(0, len(self.store)):
                return []
            V = [start]
        else:
            return []
        for v in V:
            sublist = []
            if Discovered[v] == False:
                C.push(v)
                Discovered[v] = True
                w = C.pop()
                while (type(w) == int):
                    if (Processed[w] == False):
                        sublist = sublist + [w]
                        Processed[w] = True
                    # enqueue/push every undiscovered vertex adjacent to w
                    for x in self.store[w]:
                        if Discovered[x[0]] == False:
                            C.push(x[0])
                            Discovered[x[0]] = True
                    w = C.pop()
            if start == None:
                rlist = rlist + [sublist]
            else:
                rlist = rlist + sublist
        return rlist

    def connectivity(self, vx, vy):
        """Return a 2-list: [vx->vy reachable?, vy->vx reachable?]."""
        rlist = [False, False]
        for i in self.traverse(vx, False):
            if (i == vy):
                rlist[0] = True
        for i in self.traverse(vy, False):
            if (i == vx):
                rlist[1] = True
        return rlist

    def path(self, vx, vy):
        """Return [path vx->vy, path vy->vx]; [] for a missing direction.

        Paths include both endpoints.
        """
        rlist = [[], []]
        if (self.connectivity(vx, vy)[0] == True):
            rlist[0] = self.helper_path(vx, vy, [])
        if (self.connectivity(vx, vy)[1] == True):
            rlist[1] = self.helper_path(vy, vx, [])
        return rlist

    def helper_path(self, vx, vy, temp_list):
        """Build one vx->vy path by walking the depth-first ordering.

        NOTE(review): this reconstructs a path from repeated depth-first
        traversals rather than parent pointers; it is expensive (O(V^2)
        traversals) and the result depends on traversal order -- confirm
        it matches the intended semantics before optimising.
        """
        found = False
        if (vx == vy):
            temp_list += [vx]
            return temp_list
        # locate vy's position in the depth-first ordering from vx
        vy_idx = -1
        cnt = 0
        for i in self.traverse(vx, False):
            if (i == vy):
                found = True
                vy_idx = cnt
                break
            cnt += 1
        # find the last directly connected vertex to vx on path to vy
        last_idx = -1
        for i in range(0, len(self.traverse(vx, False))):
            for j in range(0, len(self.store[vx])):
                if (self.traverse(vx, False)[i] == self.store[vx][j]) and (i < vy_idx):
                    if (i > last_idx):
                        last_idx = i
                else:
                    break
        # add all elements in depth search from that vertex up to vy
        for i in range(last_idx + 1, vy_idx):
            if (self.connectivity(self.traverse(vx, False)[i], vy)[0] == True):
                temp_list += [self.traverse(vx, False)[i]]
        temp_list += [self.traverse(vx, False)[vy_idx]]
        return temp_list
| 3.40625 | 3 |
codewars/omnibool.py | TimothyDJones/learn-python | 0 | 12765915 | # omnibool.py
# https://www.codewars.com/kata/5a5f9f80f5dc3f942b002309/train/python
class Omnibool():
    """A value that compares equal to absolutely everything."""

    def __eq__(self, other):
        # Equality always succeeds, so `omnibool == x` is True for any x.
        return True
if __name__ == "__main__":
omnibool = Omnibool()
print(omnibool == True and omnibool == False)
| 2.921875 | 3 |
refresh_resources.py | Modosmart/modosmart-api | 0 | 12765916 | <gh_stars>0
import requests
import time
import json
# Poll loop: every 30 minutes, fetch the platform's public resources and
# send a keep-alive for every known sensor/actuator.
while True:
    time.sleep(1800)  # wait for 30 minutes before each refresh cycle
    # Fetch the public resource listing from the symbIoTe web endpoint.
    public_resources = requests.get(
        'https://symbioteweb.eu.ngrok.io/innkeeper/public_resources')
    resources = json.loads(public_resources.text)
    for resource in resources:
        resource_obj = resource['resource']
        resource_name = resource_obj['name']
        resource_type = resource_obj['@c']
        mac_address = ''
        ssp_id = ''
        # Resource names embed "<prefix><MAC>_<SSP id>"; strip the type
        # prefix, then split into MAC address and SSP id.
        if (resource_type == '.StationarySensor'):
            mac_address = resource_name.replace('SM006_', '')
            strings_slices = mac_address.split("_")
            mac_address = strings_slices[0]
            ssp_id = strings_slices[1]
        elif (resource_type == '.Actuator'):
            mac_address = resource_name.replace('AC_SWITCH_', '')
            strings_slices = mac_address.split("_")
            mac_address = strings_slices[0]
            ssp_id = strings_slices[1]
        print(mac_address)
        print(ssp_id)
        if (mac_address != ''):
            # NOTE(review): only ssp_id is transmitted; mac_address is parsed
            # but never sent -- confirm this is intentional.
            body = {}
            body['ssp_id'] = ssp_id
            json_data = json.dumps(body)
            response = requests.post(
                'http://localhost:3030/api/keep_alive', json=json_data)
            # BUGFIX: `print response` was Python 2 statement syntax and is a
            # SyntaxError under Python 3, which the rest of this script uses.
            print(response)
| 2.90625 | 3 |
djangobmf/contrib/task/workflows.py | dmatthes/django-bmf | 1 | 12765917 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from djangobmf.workflows import Workflow, State, Transition
from djangobmf.settings import CONTRIB_TIMESHEET
from djangobmf.utils.model_from_name import model_from_name
def cancel_condition(object, user):
    """Workflow guard: only the goal's referee (if one is set) may cancel.

    Returns True when no referee is assigned or when *user* is that
    referee; False for any other user.
    """
    referee = getattr(object, 'referee_id', False)
    if not referee:
        # No referee assigned -> anybody may cancel.
        return True
    if referee != user.pk:
        return False
    return True
class GoalWorkflow(Workflow):
    """Workflow for Goal objects: open -> completed / cancelled -> open."""

    class States:
        # `default=True` makes "open" the initial state of a new goal.
        open = State(_(u"Open"), default=True, delete=False)
        completed = State(_(u"Completed"), update=False, delete=True)
        cancelled = State(_(u"Cancelled"), update=False, delete=True)

    class Transitions:
        complete = Transition(_("Complete this Goal"), "open", "completed")
        # Cancelling is restricted to the goal's referee (cancel_condition).
        cancel = Transition(_("Cancel this Goal"), "open", "cancelled", condition=cancel_condition)
        reopen = Transition(_("Reopen this Goal"), ["completed", "cancelled"], "open")

    def complete(self):
        # A goal that still has open tasks must not be completed.
        if self.instance.task_set.filter(completed=False).count() > 0:  # TODO untested
            raise ValidationError(_('You can not complete a goal which has open tasks'))
        self.instance.completed = True

    def cancel(self):
        # TODO autoclose all tasks - needs testing / exception catching
        # for obj in self.instance.task_set.filter(completed=False):
        #     obj.bmfworkflow_transition('cancel', self.user)
        # NOTE(review): the error message below says "complete" although this
        # is the cancel transition -- likely copy-paste; confirm before fixing
        # (translations may depend on the exact string).
        if self.instance.task_set.filter(completed=False).count() > 0:  # TODO untested
            raise ValidationError(_('You can not complete a goal which has open tasks'))
        self.instance.completed = True

    def reopen(self):
        self.instance.completed = False
def start_condition(object, user):
    """Workflow guard: only the assigned employee (if set) may start work."""
    assigned = getattr(object, 'employee_id', False)  # TODO: untested
    if not assigned:
        # No employee assigned -> anybody may start.
        return True
    if assigned != user.pk:
        return False
    return True
def finish_condition(object, user):
    """Workflow guard: when the task's goal has a referee, only that
    referee may finish or cancel the task."""
    goal = object.goal
    if not goal or not goal.referee_id:
        # No goal, or a goal without referee: no restriction.
        return True
    if user.pk != goal.referee_id:  # TODO: untested
        return False
    return True
class TaskWorkflow(Workflow):
    """Workflow for Task objects.

    States model a kanban-style lifecycle (new/open/hold/todo/started/
    review/finished/cancelled).  Several transitions also close any
    auto-created timesheet entries via the optional CONTRIB_TIMESHEET
    model (looked up lazily with ``model_from_name``).
    """

    class States:
        # Positional second argument True marks "new" as the default state.
        new = State(_(u"New"), True, delete=False)
        open = State(_(u"Open"), delete=False)
        hold = State(_(u"Hold"), delete=False)
        todo = State(_(u"Todo"), delete=False)
        started = State(_(u"Started"), delete=False)
        review = State(_(u"Review"), delete=False, update=False)
        finished = State(_(u"Finished"), update=False, delete=True)
        cancelled = State(_(u"Cancelled"), update=False, delete=True)

    class Transitions:
        # Starting is restricted to the assigned employee (start_condition);
        # finishing/cancelling to the goal's referee (finish_condition).
        start = Transition(
            _("Work on this task"),
            ["new", "hold", "open", "todo"],
            "started",
            condition=start_condition,
        )
        todo = Transition(
            _("Mark as todo"),
            ["new", "open", "started", "hold"],
            "todo",
        )
        hold = Transition(
            _("Set this task on hold"),
            ["new", "open", "started", "todo"],
            "hold",
        )
        stop = Transition(
            _("Stop working on this task"),
            "started",
            "open",
        )
        finish = Transition(
            _("Finish this task"),
            ["started", "open", "hold", "new", "review", "todo"],
            "finished",
            condition=finish_condition,
        )
        review = Transition(
            _("Set to review"),
            ["started", "open", "hold", "new", "todo"],
            "review",
        )
        reopen = Transition(
            _("Reopen this task"),
            ['finished', 'cancelled'],
            'open',
        )
        unreview = Transition(
            _("Reopen this task"),
            ['review'],
            'open',
        )
        cancel = Transition(
            _("Cancel this task"),
            ('new', 'hold', 'open', 'review'),
            'cancelled',
            condition=finish_condition,
        )

    def start(self):
        # Take ownership of the task and open an automatic timesheet entry
        # (if the timesheet contrib module is installed).
        self.instance.in_charge = self.user.djangobmf_employee
        self.instance.employee = self.user.djangobmf_employee

        # Resolve the project either directly or via the task's goal.
        if self.instance.project:
            project = self.instance.project
        elif self.instance.goal:
            project = self.instance.goal.project
        else:
            project = None

        timesheet = model_from_name(CONTRIB_TIMESHEET)
        if timesheet is not None:
            obj = timesheet(
                task=self.instance,
                employee=self.user.djangobmf_employee,
                auto=True,
                project=project,
                summary=self.instance.summary
            )
            obj.save()

    def todo(self):
        self.instance.in_charge = self.user.djangobmf_employee
        self.instance.employee = self.user.djangobmf_employee
        self.stop()

    def stop(self):
        # Closes any still-open auto timesheet entries for this task.
        if not self.instance.in_charge and self.instance.employee:
            self.instance.in_charge = self.instance.employee

        timesheet = model_from_name(CONTRIB_TIMESHEET)
        if timesheet is not None:
            for obj in timesheet.objects.filter(
                task=self.instance,
                employee__in=[self.instance.in_charge, self.user.djangobmf_employee],
                end=None,
                auto=True,
            ):
                obj.bmfworkflow_transition('finish', self.user)

    def hold(self):
        self.stop()

    def unreview(self):
        # Hand the task back to whoever was in charge before the review.
        self.instance.employee = self.instance.in_charge

    def reopen(self):
        self.instance.employee = self.instance.in_charge
        self.instance.completed = False

    def review(self):
        if not self.instance.in_charge and self.instance.employee:
            self.instance.in_charge = self.instance.employee
        # Reviews are assigned to the goal's referee, when there is a goal.
        if self.instance.goal:
            self.instance.employee = self.instance.goal.referee
        self.stop()

    def finish(self):
        self.stop()
        self.instance.due_date = None
        self.instance.completed = True

    def cancel(self):
        self.finish()
| 2.15625 | 2 |
sagar/io/vasp.py | unkcpz/sagar | 2 | 12765918 | # -*- coding: utf-8 -*-
import numpy
import warnings
import operator
import collections
from sagar.crystal.structure import Cell
from sagar.element.base import get_symbol
def read_vasp(filename='POSCAR'):
    """
    Import a VASP POSCAR/CONTCAR file (or any filename, e.g. *.vasp).

    parameter:
    filename: string, path of the structure file to read

    return: Cell object.
    """
    # TODO: read velocities, now not supported. or not needed?
    with open(filename, "r") as f:
        # _read_string parses the whole text and returns a Cell object.
        return _read_string(f.read())
def _read_string(data):
    """
    Parse POSCAR-format text into a Cell (split out from read_vasp so the
    parsing is easy to test without touching the filesystem).

    parameter: string of vasp input
    return: Cell object
    """
    # Drop blank lines but keep the original relative ordering.
    lines = [l for l in data.split('\n') if l.rstrip()]
    name = lines[0]  # NOTE(review): the comment line is read but never used
    lattice_scale = float(lines[1].split()[0])
    # lattice vectors (lines 3-5 of the file)
    lattice = []
    for i in [2, 3, 4]:
        s = lines[i].split()
        vec = float(s[0]), float(s[1]), float(s[2])
        lattice.append(vec)
    lattice = numpy.array(lattice)
    if lattice_scale < 0:
        # In vasp , a negative scale factor is treated as a volume.
        # http://pymatgen.org/_modules/pymatgen/io/vasp/inputs.html#POSCAR
        vol = abs(numpy.linalg.det(lattice))
        lattice *= (-lattice_scale / vol) ** (1 / 3)
    else:
        lattice *= lattice_scale
    # atoms
    vasp5 = False
    _fifth_line = lines[5].split()
    # VASP 5.x use the fifth line to represent atomic symbols;
    # if every token parses as an int this is a VASP 4 file instead.
    try:
        for i in _fifth_line:
            int(i)
        numofatoms = _fifth_line
    except ValueError:
        vasp5 = True
        atomtypes = _fifth_line
        numofatoms = lines[6].split()  # list of string here
    if not vasp5:
        # VASP 4 has no symbol line: fabricate NaN_0, NaN_1, ... placeholders.
        warnings.warn("symbols of elements in fifth line are missing, "
                      "all atoms are init to NaN_i (i=0,1,2...)", UserWarning, stacklevel=2)
        atomtypes = [str("NaN_{:}".format(i)) for i in range(len(numofatoms))]
    atoms = []
    for i, num in enumerate(numofatoms):
        # https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py
        numofatoms[i] = int(num)
        [atoms.append(atomtypes[i]) for na in range(numofatoms[i])]
    if not vasp5:
        line_coortype = 6
    else:
        line_coortype = 7
    # TODO: Supporting Cartesian coordinates vasp input
    coortype = lines[line_coortype].split()[0]
    if coortype[0] in "sS":
        # "Selective dynamics" adds an extra line before the coordinate type.
        warnings.warn("Sorry! Selective dynamics "
                      "are not supported now", FutureWarning, stacklevel=2)
        line_coortype += 1
        coortype = lines[line_coortype].split()[0]
    if coortype[0] in "cCkK":
        line_first_pos = line_coortype + 1
        iscart=True
    else:
        iscart =False
    # NOTE(review): if the coordinate-type token starts with neither
    # c/C/k/K nor d/D, line_first_pos stays unbound and the loop below
    # raises UnboundLocalError -- confirm whether malformed input should
    # fail more gracefully.
    if coortype[0] in "dD":
        line_first_pos = line_coortype + 1
    positions = []
    total_atoms = sum(numofatoms)
    for i in range(line_first_pos, line_first_pos + total_atoms):
        s = lines[i].split()
        vec = float(s[0]), float(s[1]), float(s[2])
        positions.append(vec)
    if iscart:
        # Convert Cartesian coordinates to fractional (direct) coordinates.
        positions = numpy.dot(numpy.array(positions),numpy.linalg.inv(lattice))
    return Cell(lattice, positions, atoms)
def write_vasp(cell, filename='POSCAR', suffix='.vasp', long_format=True):
    """
    write vasp POSCAR type into file, vasp5 format only.
    always write atoms sorted POSCAR.

    parameters:

    cell: Cell object, the Cell that you wanna write into vasp POSCAR.
    filename: string, filename of output file, default='POSCAR'
    suffix: string, suffix of filename, default='.vasp'
    long_format: bool, if True format %.16f will be write, else %.6f
    ref: https://gitlab.com/ase/ase/blob/master/ase/io/vasp.py

    if optional parameters (filename and suffix) are not set,
    the filename will be 'POSCAR.vasp'
    """
    # TODO: write Cartesian coor POSCAR
    filname_suffix = ''.join([filename, suffix])
    with open(filname_suffix, "w") as f:
        # _write_string builds the complete POSCAR text.
        f.write(_write_string(cell, long_format))
def _write_string(cell, long_format, print_vacc=False):
    """
    Build the POSCAR text for *cell* (split out from write_vasp so the
    formatting is easy to test without touching the filesystem).

    return: string representing the POSCAR content
    """
    # Merge and sort the atom species; used to build the system name, the
    # per-species atom counts and a correctly ordered coordinate block.
    # sorted_symbols is a list of tuple(atom, na)
    atoms_dict = collections.Counter(cell.atoms)
    if not print_vacc:
        # Atom number 0 denotes a vacancy; skip it unless requested.
        del atoms_dict[0]

    sorted_symbols = sorted(atoms_dict.items(), key=operator.itemgetter(0))
    list_symbols = ["{:}{:}".format(get_symbol(atom), na)
                    for atom, na in sorted_symbols]
    comment = ' '.join(list_symbols)
    comment += '\n'

    # Universal scaling factor; always written as 1.0 here.
    scale = '{:9.6f}'.format(1.0)
    scale += '\n'

    lattice_string = ""
    if long_format:
        latt_form = '21.16f'
    else:
        latt_form = '11.6f'

    for vec in cell.lattice:
        lattice_string += ' '
        for v in vec:
            lattice_string += '{:{form}}'.format(v, form=latt_form)
        lattice_string += '\n'

    # atom types and their numbers
    atom_types = ' '.join([get_symbol(i[0]) for i in sorted_symbols])
    atom_types += '\n'
    atom_numbers = ' '.join([str(i[1]) for i in sorted_symbols])
    atom_numbers += '\n'

    # TODO: write Cartesian coor
    coor_type = 'Direct\n'

    # argsort atoms and resort coordinates so they match the species order
    idx = numpy.argsort(cell.atoms)
    coord = cell.positions[idx]
    atoms = cell.atoms[idx]

    positions_string = ""
    if long_format:
        pos_form = '19.16f'
    else:
        pos_form = '9.6f'

    for i, vec in enumerate(coord):
        if atoms[i] == 0:
            # Skip vacancies in the coordinate block as well.
            continue
        positions_string += ' '
        for v in vec:
            positions_string += '{:{form}}'.format(v, form=pos_form)
        positions_string += ' ' + get_symbol(atoms[i])
        positions_string += '\n'

    poscar_string = ''.join([comment,
                             scale,
                             lattice_string,
                             atom_types,
                             atom_numbers,
                             coor_type,
                             positions_string])
    return poscar_string
| 2.515625 | 3 |
tests/test_data/test_accounts.py | TomVollerthun1337/logsmith | 19 | 12765919 | def get_test_accounts() -> dict:
return {
'development': {
'color': '#388E3C',
'team': 'awesome-team',
'region': 'us-east-1',
'profiles': [
{
'profile': 'developer',
'account': '123495678901',
'role': 'developer',
'default': 'true',
},
{
'profile': 'readonly',
'account': '012349567890',
'role': 'readonly',
}
]
},
'live': {
'color': '#388E3C',
'team': 'awesome-team',
'region': 'us-east-1',
'profiles': [
{
'profile': 'developer',
'account': '123456789012',
'role': 'developer',
'default': 'true',
},
{
'profile': 'readonly',
'account': '012345678901',
'role': 'readonly',
}
]
}
}
def get_test_group():
    """Fixture: one account group whose 'readonly' profile is the default."""
    return {
        'color': '#388E3C',
        'team': 'awesome-team',
        'region': 'us-east-1',
        'profiles': [
            {
                'profile': 'developer',
                'account': '123456789012',
                'role': 'developer',
            },
            {
                'profile': 'readonly',
                'account': '012345678901',
                'role': 'readonly',
                'default': 'true',
            }
        ]
    }
def get_test_group_no_default():
    """Fixture: one account group where no profile is marked as default."""
    return {
        'color': '#388E3C',
        'team': 'awesome-team',
        'region': 'us-east-1',
        'profiles': [
            {
                'profile': 'developer',
                'account': '123456789012',
                'role': 'developer',
            },
            {
                'profile': 'readonly',
                'account': '012345678901',
                'role': 'readonly'
            }
        ]
    }
def get_test_group_chain_assume():
    """Fixture: a group where the 'service' profile chain-assumes its role
    via the 'developer' profile (the 'source' key)."""
    return {
        'color': '#388E3C',
        'team': 'awesome-team',
        'region': 'us-east-1',
        'profiles': [
            {
                'profile': 'developer',
                'account': '123456789012',
                'role': 'developer',
            },
            {
                'profile': 'service',
                'account': '012345678901',
                'role': 'service',
                'source': 'developer'
            }
        ]
    }
def get_test_profile():
    """Fixture: a single profile entry that is marked as the default."""
    profile = {}
    profile['profile'] = 'readonly'
    profile['account'] = '123456789012'
    profile['role'] = 'readonly-role'
    profile['default'] = 'true'
    return profile
def get_test_profile_no_default():
    """Fixture: a single profile entry without the 'default' flag."""
    return dict(
        profile='readonly',
        account='123456789012',
        role='readonly-role',
    )
| 2.125 | 2 |
deepext_with_lightning/models/classification/__init__.py | pei223/deepext_with_lightning | 1 | 12765920 | from .mobilenet_v3.mobilenet_v3 import MobileNetV3
from .efficientnet.efficientnet import EfficientNet
from .abn.attention_branch_network import AttentionBranchNetwork
from .customnet.custom_classification import CustomClassificationNetwork
from . import functions
| 1.140625 | 1 |
install.py | ktiwary2/nerf_pl | 0 | 12765921 | import os
from os.path import exists, join, basename
project_name = "pyrender"
# Clone pyrender on first run.  BUGFIX: the original used IPython "!"
# shell-escape syntax (valid only inside a notebook cell), which is a
# SyntaxError in a plain Python script; use subprocess instead.
if not exists(project_name):
    import subprocess
    subprocess.run(
        ["git", "clone", "-q", "https://github.com/mmatl/pyrender.git"],
        check=True,
    )
    # The requirements file pins the wrong pyglet, so it is deliberately
    # NOT installed here:
    # ERROR: pyrender 0.1.23 has requirement pyglet==1.4.0b1, but you'll
    # have pyglet 1.4.0a1 which is incompatible.
    # (was: !cd $project_name && pip install -q -r requirements.txt)
import sys
sys.path.append(project_name)
api/api/de_ws.py | weng-lab/SCREEN | 5 | 12765922 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 <NAME>, <NAME>, <NAME>, <NAME>
import sys
import os
from models.de import DE
sys.path.append(os.path.join(os.path.dirname(__file__), '../common/'))
from common.pg_search import PGsearch
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from config import Config
class DeWebServiceWrapper:
    """Dispatches differential-expression requests to a per-assembly service."""

    def __init__(self, args, ps, cacheW, staticDir):
        self.assemblies = Config.assemblies
        # One DeWebService per genome assembly, keyed by assembly name.
        self.wss = {
            assembly: DeWebService(args, ps, cacheW[assembly], staticDir, assembly)
            for assembly in self.assemblies
        }

    def process(self, j, args, kwargs):
        """Validate the request's assembly and forward to its service."""
        if "assembly" not in j:
            raise Exception("assembly not defined")
        assembly = j["assembly"]
        if assembly not in self.assemblies:
            raise Exception("invalid assembly")
        return self.wss[assembly].process(j, args, kwargs)
class DeWebService(object):
    """Handles differential-expression queries for a single assembly."""

    def __init__(self, args, ps, cache, staticDir, assembly):
        self.args = args
        self.ps = ps
        self.cache = cache
        self.staticDir = staticDir
        self.assembly = assembly
        # Action name -> handler; extend this dict to add endpoints.
        self.actions = {"search": self.search}

    def process(self, j, args, kwargs):
        """Dispatch to the handler named by the first URL segment.

        Raises KeyError for an unknown action -- identical to the original
        behaviour; the previous ``try/except: raise`` wrapper was a no-op
        and has been removed.
        """
        action = args[0]
        return self.actions[action](j, args[1:])

    def search(self, j, args):
        """Run a DE search for a gene between two cell types (ct1 vs ct2).

        Returns a dict keyed by the gene name with coordinates, nearby DEs
        and differential cREs, plus the request echo fields.
        """
        gene = j["gene"]  # TODO: check for valid gene
        ct1 = j["ct1"]
        ct2 = j["ct2"]
        if not ct1 or not ct2:
            raise Exception("ct1 and/or ct2 empty!")

        # (The original wrapped this in try/except that only re-raised, and
        # had an unreachable `return {}` afterwards; both removed.)
        de = DE(self.cache, self.ps, self.assembly, gene, ct1, ct2)
        nearbyDEs = de.nearbyDEs()

        # Only look up differential cREs when there are nearby DEs at all.
        diffCREs = {"data": None}
        if nearbyDEs["data"]:
            diffCREs = de.diffCREs(nearbyDEs["xdomain"])

        return {gene: {"xdomain": nearbyDEs["xdomain"],
                       "coord": de.coord().toDict(),
                       "diffCREs": diffCREs,
                       "nearbyDEs": nearbyDEs},
                "assembly": self.assembly,
                "gene": gene,
                "ct1": ct1,
                "ct2": ct2}
| 2.109375 | 2 |
league_bot.py | a-camarillo/league-bot | 1 | 12765923 | import discord
from discord.ext import commands
import league
import matplotlib.pyplot as plt
import seaborn as sns
import os
# Bot with "?" as the command prefix; the token comes from the environment.
client = commands.Bot(command_prefix='?')
token = os.environ.get('DISCORD_TOKEN')
@client.event
async def on_ready():
    # Fired once the bot has finished connecting to Discord.
    print('Bot is ready.')
@client.command()
async def rank(ctx, name, region='na1'):
    """?rank <name> [region] -- report the summoner's ranked tier, division and LP."""
    summoner = league.Summoner(name=name,region=region)
    tier, rank, lp = summoner.get_rank()
    await ctx.send(f'{summoner.name} is {tier} {rank}, {lp} LP')
@client.command()
async def stats(ctx, name, region='na1'):
    """?stats <name> [region] -- report average kills/deaths/assists/gold."""
    summoner = league.Summoner(name=name,region=region)
    avg_kills, avg_deaths, avg_assists, avg_gold = summoner.get_stats()
    await ctx.send(f'Average Kills: {avg_kills}\nAverage Deaths: {avg_deaths}\nAverage Assists: {avg_assists}\nGold Average: {avg_gold}')
@client.command()
async def champs(ctx, name, region='na1'):
    """?champs <name> [region] -- post a per-champion win/loss count plot."""
    summoner = league.Summoner(name=name,region=region)
    wins, champions = summoner.get_champs()
    sns.set()
    graph = sns.countplot(y=champions,hue=wins)
    plt.legend(('Loss','Win'))
    # Render to a temporary file, send it, then clean it up.
    graph.figure.savefig('graph.png',bbox_inches='tight')
    await ctx.send(file=discord.File('graph.png'))
    os.remove('graph.png')
os.remove('graph.png')
client.run(token)
| 2.90625 | 3 |
src/com/python/demo/tuple_Demo.py | Leeo1124/pythonDemo | 0 | 12765924 | '''
Created on 2016年7月27日
@author: Administrator
元组:tuple。tuple和list非常类似,但是tuple一旦初始化就不能修改
'''
classmates = ('Michael', 'Bob', 'Tracy')
print(classmates)
print(len(classmates))
print(classmates[0])
print(classmates[-1])
#只有1个元素的tuple定义时必须加一个逗号,,来消除歧义
t = (1,)
print(t)
t = ('a', 'b', ['A', 'B'])
t[2][0] = 'X'
t[2][1] = 'Y'
print(t) | 4.21875 | 4 |
parlai/tasks/taskmaster3/agents.py | twstewart42/ParlAI | 1 | 12765925 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Taskmaster-3 implementation for ParlAI.
No official train/valid/test splits are available as of 2020-05-18, so we make our own
splits.
"""
from parlai.core.params import ParlaiParser
import os
import pandas as pd
from parlai.core.opt import Opt
import parlai.core.tod.tod_core as tod
import json
from typing import Optional
from parlai.utils.data import DatatypeHelper
from parlai.utils.io import PathManager
import parlai.tasks.taskmaster3.build as build_
import parlai.core.tod.tod_agents as tod_agents
# Placeholder utterance used when one side of a conversational round has
# nothing to say.  BUGFIX: this line was corrupted by an automated
# secret-redaction pass (it read `SILENCE_TOKEN = <PASSWORD>"`, a
# SyntaxError); restored to the ParlAI TOD convention.
SILENCE_TOKEN = "__SILENCE__"
class Taskmaster3Parser(tod_agents.TodStructuredDataParser):
    """
    Abstract data loader for Taskmaster-3.

    Downloads the dataset via ``build_``, splits it 80/10/10 with a fixed
    random seed (no official splits exist), and converts each raw
    conversation into a ``TodStructuredEpisode``.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        return super().add_cmdline_args(parser, partial_opt)

    def __init__(self, opt: Opt, shared=None):
        self.fold = DatatypeHelper.fold(opt["datatype"])
        opt["datafile"] = self.fold
        self.dpath = os.path.join(opt["datapath"], "taskmaster-3")
        # Ensure the raw data is downloaded before parsing.
        build_.build(opt)
        super().__init__(opt, shared)

    def _load_data(self, fold):
        """Return (conversation DataFrame for *fold*, raw API ontology)."""
        # load up the ontology
        fn = os.path.join(self.dpath, "apis.json")
        with PathManager.open(fn, "r") as f:
            api_schema_raw = json.load(f)

        # The dataset ships as 20 JSON shards: data_00.json .. data_19.json.
        chunks = []
        for section in range(20):
            section = f"{section:02d}"
            with PathManager.open(
                os.path.join(self.dpath, f"data_{section}.json")
            ) as f:
                subset = pd.read_json(f)
            chunks.append(subset)
        chunks = pd.concat(chunks, axis=0)
        # deterministic shuffle data for splits (fixed seed -> stable folds)
        chunks = chunks.sample(frac=1.0, random_state=42)
        split_size = len(chunks) // 10
        if fold == "train":
            chunks = chunks[: split_size * 8]
        elif fold == "valid":
            chunks = chunks[split_size * 8 : split_size * 9]
        elif fold == "test":
            chunks = chunks[split_size * 9 :]
        return chunks, api_schema_raw

    def _parse_apis_to_slots(self, apis_blob):
        """Split one utterance's API records into (call dicts, response dicts),
        tagging each with the standard API-name slot."""
        calls = []
        responses = []
        for api in apis_blob:
            call = api["args"]
            call[tod.STANDARD_API_NAME_SLOT] = api["name"]
            calls.append(call)
            response = api["response"]
            response[tod.STANDARD_API_NAME_SLOT] = api["name"]
            responses.append(response)
        return calls, responses

    def _get_utterance_and_apis_for_speaker(self, speaker, utterances, idx):
        """Consume consecutive utterances by *speaker* starting at *idx*.

        Returns (next index, joined utterance text, api calls, api responses).
        """
        utts = []
        calls = []
        responses = []
        while idx < len(utterances):
            here = utterances[idx]
            if here["speaker"] != speaker:
                break
            utts.append(here["text"])
            here_calls, here_responses = self._parse_apis_to_slots(here.get("apis", []))
            calls += here_calls
            responses += here_responses
            idx += 1
        return idx, "\n".join(utts), calls, responses

    def _parse_to_api_schema(self, raw):
        """Convert the raw apis.json ontology into the machine schema format
        (api name plus required/optional argument lists)."""
        result = []
        for key, val in raw.items():
            here = {}
            here[tod.STANDARD_API_NAME_SLOT] = key
            req = val.get("args", {}).get("all_of", [])
            opt = val.get("args", {}).get("any_of", [])
            if len(req) > 0:
                here[tod.STANDARD_REQUIRED_KEY] = req
            if len(opt) > 0:
                here[tod.STANDARD_OPTIONAL_KEY] = opt
            result.append(here)
        return result

    def _get_turns_from_parsed(self, user_utt, api_calls, api_resps, sys_utt):
        """Expand one user/system exchange into one round per API call.

        The user utterance goes on the first round and the system utterance
        on the last; intermediate rounds are padded with SILENCE_TOKEN so
        each round carries exactly one call/response pair.
        """
        assert len(api_calls) == len(api_resps)
        if len(api_calls) == 0:
            api_calls = [{}]
            api_resps = [{}]
        turns = len(api_calls)
        user_utts = [SILENCE_TOKEN] * turns
        user_utts[0] = user_utt
        sys_utts = [SILENCE_TOKEN] * turns
        sys_utts[turns - 1] = sys_utt
        result = []
        for i in range(turns):
            result.append(
                tod.TodStructuredRound(
                    sys_utt=sys_utts[i],
                    api_call_machine=api_calls[i],
                    api_resp_machine=api_resps[i],
                    user_utt=user_utts[i],
                )
            )
        return result

    def setup_episodes(self, fold):
        """
        Parses into TodStructuredEpisode.
        """
        chunks, api_schema_raw = self._load_data(fold)
        api_schemas_machine = self._parse_to_api_schema(api_schema_raw)
        episodes = []
        for _, row in chunks.iterrows():
            utterances = row["utterances"][:]
            idx = 0
            rounds = []
            goal_calls = []
            # Some conversations open with the assistant; pair that opener
            # with a silent user turn so rounds stay user->system shaped.
            if len(utterances) > 0 and utterances[0]["speaker"] == "assistant":
                (
                    idx,
                    sys_utt,
                    api_call,
                    api_resp,
                ) = self._get_utterance_and_apis_for_speaker(
                    "assistant", utterances, idx
                )
                turns = self._get_turns_from_parsed(
                    SILENCE_TOKEN, api_call, api_resp, sys_utt
                )
                for t in turns:
                    rounds.append(t)
            while idx < len(utterances):
                (
                    idx,
                    user_utt,
                    calls_user,
                    responses_user,
                ) = self._get_utterance_and_apis_for_speaker("user", utterances, idx)
                (
                    idx,
                    sys_utt,
                    calls_system,
                    responses_system,
                ) = self._get_utterance_and_apis_for_speaker(
                    "assistant", utterances, idx
                )
                api_calls = calls_user + calls_system
                api_resps = responses_user + responses_system
                goal_calls += api_calls
                turns = self._get_turns_from_parsed(
                    user_utt, api_calls, api_resps, sys_utt
                )
                for t in turns:
                    rounds.append(t)
            episode = tod.TodStructuredEpisode(
                api_schemas_machine=api_schemas_machine,
                goal_calls_machine=goal_calls,
                rounds=rounds,
                delex=self.opt.get("delex", False),
            )
            episodes.append(episode)
        return episodes

    def get_id_task_prefix(self):
        return "Taskmaster3"

    def _label_fold(self, chunks):
        # NOTE(review): `self._h` is not defined in this class or its visible
        # bases -- this method looks dead or broken; confirm before relying
        # on it.
        return chunks.conversation_id.apply(self._h)
class SystemTeacher(Taskmaster3Parser, tod_agents.TodSystemTeacher):
    """Teacher for the system (assistant) side of the dialogue."""
    pass
class UserSimulatorTeacher(Taskmaster3Parser, tod_agents.TodUserSimulatorTeacher):
    """Teacher for the user-simulator side of the dialogue."""
    pass
class DefaultTeacher(SystemTeacher):
    """Default teacher alias: the system-side teacher."""
    pass
| 1.789063 | 2 |
create_db.py | nalbarr/hello-neo4j-gdsl | 0 | 12765926 | <reponame>nalbarr/hello-neo4j-gdsl
from neo4j import GraphDatabase
from neo4j.exceptions import ClientError
import os
import altair
# Connection settings, overridable via environment variables.
bolt_url = os.getenv("NEO4J_BOLT_URL", "bolt://localhost")
user = os.getenv("NEO4J_USER", "neo4j")
password = os.getenv("NEO4J_PASSWORD", "<PASSWORD>")
# BUGFIX: honour NEO4J_BOLT_URL -- the driver previously hard-coded
# "bolt://localhost", silently ignoring bolt_url.  The default value is
# unchanged, so behaviour without the env var is identical.
driver = GraphDatabase.driver(bolt_url, auth=(user, password))
def create_db():
    """Create the Place uniqueness constraint and load the E-road CSV
    dataset into Neo4j (Places, Countries, and weighted EROAD edges)."""
    with driver.session() as session:
        try:
            session.run(
                """
                CREATE CONSTRAINT ON (p:Place) ASSERT p.name IS UNIQUE;
                """
            )
            print('database created.')
        except ClientError as ex:
            # Typically raised when the constraint already exists.
            print(ex)

        result = session.run(
            """
            USING PERIODIC COMMIT 1000
            LOAD CSV WITH HEADERS FROM "https://github.com/neo4j-examples/graph-embeddings/raw/main/data/roads.csv"
            AS row
            MERGE (origin:Place {name: row.origin_reference_place})
            MERGE (originCountry:Country {code: row.origin_country_code})
            MERGE (destination:Place {name: row.destination_reference_place})
            MERGE (destinationCountry:Country {code: row.destination_country_code})
            MERGE (origin)-[:IN_COUNTRY]->(originCountry)
            MERGE (destination)-[:IN_COUNTRY]->(destinationCountry)
            MERGE (origin)-[eroad:EROAD {number: row.road_number}]->(destination)
            SET eroad.distance = toInteger(row.distance), eroad.watercrossing = row.watercrossing;
            """
        )
        # display(result.consume().counters)
def main():
    """Entry point: (re)create the constraint and load the roads dataset."""
    create_db()


if __name__ == "__main__":
    main()
topnum/scores/calinski_harabasz.py | machine-intelligence-laboratory/OptimalNumberOfTopics | 5 | 12765927 | import logging
import dill
from sklearn.metrics import calinski_harabasz_score
from topicnet.cooking_machine import Dataset
from topicnet.cooking_machine.models import (
BaseScore as BaseTopicNetScore,
TopicModel
)
from .base_custom_score import BaseCustomScore
_Logger = logging.getLogger()
class CalinskiHarabaszScore(BaseCustomScore):
    """
    Uses of Calinski-Harabasz:
    https://link.springer.com/article/10.1007/s40815-017-0327-9
    """

    def __init__(
            self,
            name: str,
            validation_dataset: Dataset
    ):
        super().__init__(name)
        # Delegate the actual computation to the internal TopicNet score.
        self._score = _CalinskiHarabaszScore(validation_dataset)
class _CalinskiHarabaszScore(BaseTopicNetScore):
    """TopicNet score computing the Calinski-Harabasz index over the hard
    document clustering induced by a model's theta matrix."""

    def __init__(self, validation_dataset):
        super().__init__()

        self._dataset = validation_dataset
        # Remember how to rebuild the dataset after (de)serialization: the
        # Dataset object itself is dropped in save() because it may be large
        # and is reconstructable from these paths.
        self._keep_dataset_in_memory = validation_dataset._small_data
        self._dataset_internals_folder_path = validation_dataset._internals_folder_path
        self._dataset_file_path = validation_dataset._data_path

    def call(self, model: TopicModel):
        """Return the Calinski-Harabasz index for *model*'s clustering,
        or NaN when all documents fall into a single cluster."""
        theta = model.get_theta(dataset=self._dataset)
        theta.columns = range(len(theta.columns))
        # Hard-assign every document to its most probable topic.
        objects_clusters = theta.values.argmax(axis=0)

        # TODO: or return some numeric?
        if len(set(objects_clusters)) == 1:
            # BUGFIX: the original message claimed "Returning None" although
            # NaN is returned.
            _Logger.warning(
                'Only one unique cluster! Returning NaN as score value'
            )

            return float('nan')

        return calinski_harabasz_score(theta.T.values, objects_clusters)

    # TODO: this piece is copy-pasted among three different scores
    def save(self, path: str) -> None:
        """Pickle this score with dill, temporarily dropping the dataset."""
        dataset = self._dataset
        self._dataset = None

        with open(path, 'wb') as f:
            dill.dump(self, f)

        self._dataset = dataset

    @classmethod
    def load(cls, path: str):
        """
        Parameters
        ----------
        path
            file previously written by :meth:`save`

        Returns
        -------
        an instance of this class, with its dataset re-created from the
        stored paths
        """
        with open(path, 'rb') as f:
            score = dill.load(f)

        score._dataset = Dataset(
            score._dataset_file_path,
            internals_folder_path=score._dataset_internals_folder_path,
            keep_in_memory=score._keep_dataset_in_memory,
        )

        return score
| 2.25 | 2 |
app.py | ikhichar/toplay | 2 | 12765928 | <reponame>ikhichar/toplay
from flask import Flask
app = Flask("toplay")
import random
choices=['Hilary','Trump','Jill','Gary']
@app.route('/')
def is_nuts():
    """Root route: declare a randomly chosen candidate nuts."""
    return "{} is nuts!".format(random.choice(choices))
@app.route('/<person>')
def is_nuts_person(person):
    """Declare whoever is named in the URL path nuts."""
    message = "{} is nuts!".format(person)
    return message
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
| 2.828125 | 3 |
setup.py | kreneskyp/ixian | 0 | 12765929 | <reponame>kreneskyp/ixian
from setuptools import setup

# All project metadata lives in pbr's setup.cfg; setup() only wires pbr in.
setup(setup_requires=["pbr"], pbr=True, long_description_content_type="text/markdown")
| 0.910156 | 1 |
apps/quotas/views.py | ecognize-hub/ecognize | 1 | 12765930 | from crum import get_current_user
from django.views.generic import TemplateView
from apps.quotas.models import UsageLimitations, Quota, Plans
class PlanOverview(TemplateView):
    """Renders the plan/quota overview page for the current user's
    primary organisation."""
    template_name = "plans_overview.html"

    def get_context_data(self, **kwargs):
        context = super(PlanOverview, self).get_context_data()
        current_user = get_current_user()
        current_org = current_user.profile.primary_org
        context['limitations'] = UsageLimitations.limitations
        quota_obj = Quota.objects.get(pk=current_org.pk)
        context['plan_name'] = quota_obj.get_plan_display()
        # File-sharing limits appear to be stored in bytes and exposed here
        # in MiB -- confirm the unit against UsageLimitations.
        context['limitations_filesharing_F'] = UsageLimitations.limitations['filesharing']['F'] / (1024 * 1024)
        context['limitations_filesharing_S'] = UsageLimitations.limitations['filesharing']['S'] / (1024 * 1024)
        if quota_obj.plan == Plans.FREE.value:
            # The free plan never expires.
            context['plan_validity_date'] = "unlimited"
        else:
            context['plan_validity_date'] = quota_obj.paid_until
        return context
| 1.898438 | 2 |
backend/utils/models/fields.py | xingxingzaixian/python-django-online-exam | 47 | 12765931 | <filename>backend/utils/models/fields.py
from django.db.models import CharField
| 1.195313 | 1 |
oui.py | ejohnfel/ouisearch | 0 | 12765932 | #!/usr/bin/env python3.8
# You might want to change the line above to generic python, however, it does require python 3.8 or above to run correctly.
# You MIGHT get away with older versions... but... no warranty here.
import os, sys, io, re
import argparse
import json, csv
import requests
from datetime import datetime,timedelta
import time
import py_helper as ph
from py_helper import CmdLineMode, DebugMode, DbgMsg, Msg, ErrMsg, DbgAuto
from py_helper import DownloadContent
#
# Purpose : Given a string (MAC Address, OUI Code or "other") search through the IEEE MA-L, MA-M, MA-L and CID OUI Code listings.
#
# This code does not dynamically search the online database, it downloads the CSV's and caches them. (See Usage Notes)
#
# Usage Notes:
#
# This module can be used as a command line tool, a plugin using the "run(**kwargs)/arguments/args Pattern" or just as a module
# for some other python program.
#
# There are "output" calls in this code, Msg/DbgMsg, these are controlled by the DebugMode and CmdLineMode functions of the
# py_helper module. Accordingly, when using as a Module or plugin, you want CmdLineMode(False) somewhere in your code prior to
# calling anything in here to avoid output. DebugMode(True) [or False] controls the debug output. The default is CmdLineMode(False).
#
# There are two dependencies in this module...
#
# 1. My py_helper library needs to be installed for this thing to work.
# 2. The variable "StorageLocation" has to be set to someplace writable by the caller. It can be "/tmp" if need be.
# The caches do not have to be stored permanantly, as they will be dynamically pulled down everytime there is a
# query when they are not present, however, it's not a good idea to keep pulling these down from IEEE when a
# cache will do. These files don't change much.
#
# So, please consider this dependency when implementing or customizing this code. Don't abuse the download.
#
#
# Global Variables and Constants
#
#
# Originally, this was designed as plugin for another cmdline tool. But I felt it meritted it's own module
# accordingly, there are some plugin artifacts (like the output Msg calls and other plugin infrastructure)
# still in the code. Also, IT CAN still be used as a plugin.
#
VERSION=(0,0,4)
Version = __version__ = ".".join([ str(x) for x in VERSION ])
# Plugin name (used by the plugin framework and in log/debug messages)
Name = "oui"
# Short description shown by the plugin framework
ShortDescription = "Get OUI Info from IEEE"
# Longer description of what this plugin does
Description = "Given a MAC Address or OUI Code, retrieve the registration from IEEE"
# Help / usage string
Help = f"{Name} [mac-address|oui-code|search-str]"
# Last result returned (plugin framework convention; not updated here)
LastResult = None
# Functions the plugin framework is allowed to call
InternalFunctions = list(['run'])
# Plugin tags
Tags = [ "plugins", "command" ]
#
# Non-Plugin Functional Bits
#
# Temp space: %TEMP% on Windows, $tmp (or /tmp) elsewhere
__tempspace__ = os.environ["TEMP"] if sys.platform == "win32" else os.environ.get("tmp","/tmp")
# Where the downloaded IEEE CSVs are cached (user may want to change this location)
StorageLocation = f"{__tempspace__}/ieee_oui_cache"
# Maximum cache age in days before a CSV is re-downloaded
RefreshInterval = 30
# Set by the -r command line flag to force a re-download
__Refresh__ = False
# Cache table: name -> [ filename, download URL, local path (filled in later),
# loaded CSV rows (filled in later) ]
Caches = {
    "oui-ma-l" : [ "oui.csv","http://standards-oui.ieee.org/oui/oui.csv",None,None ],
    "oui-ma-m" : [ "mam.csv","http://standards-oui.ieee.org/oui28/mam.csv",None,None ],
    "oui-ma-s" : [ "oui36.csv","http://standards-oui.ieee.org/oui36/oui36.csv",None,None ],
    "cid" : [ "cid.csv","http://standards-oui.ieee.org/cid/cid.csv",None,None ]
}
# argparse parser, built once by BuildParser()
__Parser__ = None
# Parsed args / unknown leftovers from the last ParseArgs() call
__Args__ = None
__Unknowns__ = None
# Expressions and RegEx Objects
# Raw strings so regex escapes such as \d, \s, \: are not (mis)interpreted as
# Python string escapes — non-raw forms raise DeprecationWarning on CPython 3.6+
# and will eventually become errors. The resulting patterns are unchanged.
MACExpr = r"^([a-fA-F\d]{2}[\:\-\s]{0,1}){6}$"
OUIExpr = r"^([a-fA-F\d]{2}[\:\-\s]{0,1}){3}$"
MACAddress_re = re.compile(MACExpr)
OUICode_re = re.compile(OUIExpr)
#
# Internal functions
#
# Local Cache Loader
def LoadCache(datafile):
    """Read an IEEE OUI CSV cache file into a list of row dictionaries.

    The CSV dialect is sniffed from the first KiB of the file. When the
    file is missing, the problem is reported and an empty list returned.
    """
    rows = list()

    if not os.path.exists(datafile):
        Msg(f"Can't find local cache, {datafile}")
        return rows

    with open(datafile, "r", newline="") as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)

        for record in csv.DictReader(csvfile, dialect=dialect):
            rows.append(record)

    return rows
# Clean strings
def Strip(mac_oui):
    """Strip separator characters (colon, dash, space) out of a MAC/OUI."""
    cleaned = mac_oui
    for separator in (":", "-", " "):
        cleaned = cleaned.replace(separator, "")
    return cleaned
# Get All IEEE Files
def GetCaches(storage_location=None, caches=None):
    """Ensure the IEEE CSV caches are present on disk and loaded in memory.

    Downloads any cache file that is missing, older than ``RefreshInterval``
    days, or when a refresh was forced via the -r flag, then loads each CSV
    into the cache table (slot 3 of each ``Caches`` entry).

    :param storage_location: directory for the cached CSVs (defaults to
        the module-level ``StorageLocation``)
    :param caches: cache table to populate (defaults to ``Caches``)
    :return: list of (cache-name, path) tuples that were (re)downloaded
    """
    global StorageLocation, Caches, RefreshInterval, __Refresh__

    caches = caches if caches != None else Caches
    storage_location = storage_location if storage_location != None else StorageLocation

    if not os.path.exists(storage_location):
        os.makedirs(storage_location, mode=0o777, exist_ok=True)

    dl_files = list()

    for key, value in caches.items():
        fname, url, cache_location, cache = value

        # First use: compute and remember the on-disk path for this cache.
        if cache_location == None:
            cache_location = os.path.join(storage_location, f"{fname}")
            value[2] = cache_location

        # Prep td in case the cache does not exist yet (forces a download).
        td = timedelta(days=RefreshInterval + 1)

        if os.path.exists(cache_location):
            # If the file on disk is > RefreshInterval days old, redownload.
            modtime = datetime.fromtimestamp(os.path.getmtime(cache_location))
            localtm = datetime.fromtimestamp(time.mktime(time.localtime()))
            td = localtm - modtime

        if not os.path.exists(cache_location) or td.days > RefreshInterval or __Refresh__:
            Msg(f"{key} does not exist or is out of date, downloading...")

            if DownloadContent(url, cache_location):
                dl_files.append((key, cache_location))
                value[3] = LoadCache(cache_location)

        # Still not loaded (e.g. pre-existing file that was fresh): load now.
        if value[3] == None:
            value[3] = LoadCache(value[2])

    return dl_files
# Search (The bidness end of this thing)
def Search(search_str):
    """Search the caches for a matching OUI code or free-form string.

    If *search_str* looks like a MAC address or an OUI code, the search is
    done on the 6-hex-digit "Assignment" prefix; otherwise every column of
    every cached row is scanned for the substring.

    :param search_str: MAC address, OUI code, or free-form search text
    :return: list of matching CSV rows (dicts)
    """
    global MACAddress_re, OUICode_re, Name

    found = list()
    code = None

    if MACAddress_re.search(search_str):
        # MAC Address: the OUI is the first 6 hex digits.
        DbgMsg("MAC Address detected")
        code = Strip(search_str)[0:6]
    elif OUICode_re.search(search_str):
        # Bare OUI code.
        DbgMsg("OUI Code detected")
        code = Strip(search_str)

    # Normalize only when a code was extracted: the IEEE "Assignment" column
    # is upper-case hex. Applying .upper() unconditionally would crash on
    # free-form searches (code is None), and applying it only in the OUI
    # branch would make lower-case MAC addresses never match.
    if code is not None:
        code = code.upper()

    DbgMsg(f"{Name} : Beginning Search {search_str} / {code}")

    for cache_name, cache_list in Caches.items():
        fname, url, cache_file, cache = cache_list

        DbgMsg(f"Checking {cache_name}")

        # Lazily load the cache on first use.
        if cache == None:
            cache_list[3] = cache = LoadCache(cache_file)

        for row in cache:
            if code and code == row["Assignment"]:
                DbgMsg(f"Found row by {code}")
                found.append(row)
            elif code == None:
                # Free-form: substring match over all column values.
                for value in row.values():
                    if search_str in value:
                        DbgMsg(f"Found row by {search_str}")
                        found.append(row)

    return found
# Build Parser
def BuildParser():
    """Build the module-level argparse parser (idempotent).

    The parser is stored in the module global ``__Parser__`` and only
    constructed on the first call.
    """
    global __Parser__

    if __Parser__ == None:
        parser = __Parser__ = argparse.ArgumentParser(prog="oui", description="IEEE OUI Code Lookup")

        parser.add_argument("-s", help="Storage Location for Cache files")
        parser.add_argument("-r", action="store_true", help="Force refresh")
        parser.add_argument("-d", "--debug", action="store_true", help="Enter debug mode")
        parser.add_argument("-t", "--test", action="store_true", help="Execute Test Stub")
        parser.add_argument("searchfor", nargs="*", help="OUI Codes to look up")
# Parse Args
def ParseArgs(arguments=None):
    """Parse command-line arguments and apply their side effects.

    :param arguments: optional explicit argument list; when None, sys.argv
        is parsed (the normal command-line case)
    :return: (parsed namespace, list of unknown arguments)
    """
    global __Parser__, __Args__, __Unknowns__
    global StorageLocation, __Refresh__

    if arguments:
        # BUGFIX: the supplied list was previously ignored — both branches
        # called parse_known_args() with no argument, always parsing sys.argv.
        args, unknowns = __Parser__.parse_known_args(arguments)
    else:
        args, unknowns = __Parser__.parse_known_args()

    __Args__ = args
    __Unknowns__ = unknowns

    # Check Debug Mode Flag
    if args.debug:
        DebugMode(True)
        DbgMsg("Debug Mode Enabled")

    # Set Cache Location if supplied
    if args.s:
        StorageLocation = args.s

    __Refresh__ = args.r

    return args, unknowns
# Init Pattern
def Initialize():
    """Initialize the module: build the argparse parser (safe to call twice)."""
    BuildParser()
#
# Run Pattern entry point (for plugin model)
#
# Plugin Starting Point
def run(**kwargs):
    """Required plugin entry point.

    Keyword arguments (plugin "run pattern"):
      * ``arguments`` — raw command-line argument list to parse, or
      * ``args`` — a pre-parsed argparse namespace.

    Ensures the IEEE caches are present, searches them for every requested
    term and, in command-line mode, prints the matches as CSV.

    :return: list of matching CSV rows (dicts) for machine processing
    """
    global Name, StorageLocation, Caches

    DbgMsg(f"Entering {Name}")

    # If arguments, a list of cmdline args was supplied
    arguments = kwargs.get("arguments", None)
    # If args, an argparse namespace was supplied (i.e. pre-processed cmdline args)
    args = kwargs.get("args", None)

    data = list()  # Any returned data, for machine processing (i.e. csv rows)

    if arguments:
        args, unknowns = ParseArgs(arguments)

    if not args:
        args, unknowns = ParseArgs()

    # Make sure the caches are available (or get them)
    downloaded = GetCaches(StorageLocation, Caches)

    if args.test:
        Test()
        return data

    # Alrighty then, let's git down to bidness
    for arg in args.searchfor:
        found = Search(arg)

        if len(found) > 0:
            data.extend(found)

    if len(data) == 0:
        Msg("Nothing found")
    elif CmdLineMode():
        # Command-line mode: print a CSV header then one line per match.
        keys = data[0].keys()
        Msg(",".join(keys))

        for row in data:
            values = row.values()
            line = ",".join(values)
            Msg(line)

    return data
#
# Test Stub
#
# Test Stub
def Test():
    """Test stub, executed via the -t/--test flag (currently a no-op)."""
    pass
#
# Pre Execute Inits
#
Initialize()  # build the argument parser at import time

#
# Main Loop
#
if __name__ == "__main__":
    CmdLineMode(True)  # enable console output when run as a script
    run()
| 2.171875 | 2 |
tests/test_shape.py | wesselb/matrix | 3 | 12765933 | <gh_stars>1-10
import lab as B
import pytest
from matrix.shape import (
compatible,
assert_compatible,
assert_vector,
assert_matrix,
assert_square,
broadcast,
expand_and_broadcast,
)
# noinspection PyUnresolvedReferences
from .util import dense1
def _shapes(*xs):
    """Return a tuple containing the shape of every argument."""
    return tuple(map(B.shape, xs))
def test_assert_vector():
    # Rank >= 1 passes; scalars are rejected.
    assert_vector(B.ones(3), "test")
    assert_vector(B.ones(3, 4), "test")
    with pytest.raises(AssertionError):
        assert_vector(1, "test")
def test_assert_matrix():
    # Rank >= 2 passes (trailing two dims form the matrix); rank 1 is rejected.
    assert_matrix(B.ones(4, 5), "test")
    assert_matrix(B.ones(3, 4, 5), "test")
    with pytest.raises(AssertionError):
        assert_matrix(B.ones(3), "test")
def test_assert_square():
    # The trailing two dimensions must be equal; anything else is rejected.
    assert_square(B.ones(3, 3), "test")
    assert_square(B.ones(4, 3, 3), "test")
    with pytest.raises(AssertionError):
        assert_square(B.ones(3), "test")
    with pytest.raises(AssertionError):
        assert_square(B.ones(3, 4), "test")
    with pytest.raises(AssertionError):
        assert_square(B.ones(3, 4, 5), "test")
def test_compatible():
    # Shapes are compatible when they broadcast against each other.
    assert compatible(*_shapes(B.ones(5, 5), B.ones(5, 5)))
    assert compatible(*_shapes(B.ones(5, 5), B.ones(5, 1)))
    assert compatible(*_shapes(B.ones(4), B.ones(5, 4)))
    assert not compatible(*_shapes(B.ones(5, 3), B.ones(5)))
    assert not compatible(*_shapes(B.ones(5, 5), B.ones(5, 4)))
    assert not compatible(*_shapes(B.ones(5), B.ones(5, 4)))
    with pytest.raises(AssertionError):
        assert_compatible(*_shapes(B.ones(5), B.ones(4)))
def test_broadcast():
    # Assertions are tested in `compatible`.

    # Must give at least one shape.
    with pytest.raises(ValueError):
        broadcast()

    # If one thing is given, this is directly given back.
    # BUGFIX: `broadcast(1) is 1` compared identity against a literal, which
    # is a SyntaxWarning on CPython >= 3.8 and only works through small-int
    # interning. Bind the value to a name to test the pass-through honestly.
    value = 1
    assert broadcast(value) is value

    # Shapes must have the same lengths, because this function does not expand.
    with pytest.raises(RuntimeError):
        broadcast(*_shapes(B.ones(5, 4), B.ones(5)))

    assert broadcast(*_shapes(B.ones(5, 4), B.ones(5, 4))) == (5, 4)
    assert broadcast(*_shapes(B.ones(5, 4), B.ones(5, 1))) == (5, 4)

    # Test more than two shapes.
    assert broadcast(*_shapes(B.ones(5, 4), B.ones(1, 1), B.ones(1, 4))) == (5, 4)
def test_expand_and_broadcast():
    # Must give at least one shape.
    with pytest.raises(ValueError):
        expand_and_broadcast()

    # If one thing is given, this is directly given back.
    # BUGFIX: identity comparison against the literal `1` (SyntaxWarning on
    # CPython >= 3.8); bind to a name instead.
    value = 1
    assert expand_and_broadcast(value) is value

    assert expand_and_broadcast(*_shapes(B.ones(4), B.ones(5, 4))) == (5, 4)
    assert expand_and_broadcast(*_shapes(B.ones(5, 4), B.ones(4))) == (5, 4)
    assert expand_and_broadcast(*_shapes(B.ones(5, 4), B.ones(5, 4))) == (5, 4)

    # Test more than two shapes.
    shape = expand_and_broadcast(*_shapes(B.ones(4), B.ones(5, 1), B.ones(5, 4)))
    assert shape == (5, 4)
| 2.3125 | 2 |
acs/acs/Core/CatalogParser/CatalogParser.py | wangji1/test-framework-and-suites-for-android | 0 | 12765934 | """
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This module implements Interface for parsing catalog files
:since: 03/12/13
:author: ssavrim
"""
import os
import yaml
from lxml import etree
from acs.ErrorHandling.AcsConfigException import AcsConfigException
class CatalogParser(object):

    """
    This class implements the Catalog Parser interface.
    It is an entry point to parse xml files and return them as a dictionary.

    A catalog is a folder (i.e.: _Catalogs/UseCase) composed of
    a list of .xml files validated by a .xsd schema
    i.e.: _Catalogs/UseCase/usecase.xsd (MANDATORY to check the xml file)
                           /telephony.xml (MUST respect defined rules in usecase.xsd)
                           /cws/cws.xml (MUST respect defined rules in usecase.xsd)
    """

    # XML attribute / element names shared by all catalog types.
    ID_ATTRIB = "Id"
    DOMAIN_ATTRIB = "Domain"
    SUBDOMAIN_ATTRIB = "SubDomain"
    FEATURE_ATTRIB = "Feature"
    CLASSNAME_ELEM = "ClassName"
    DESCRIPTION_ELEM = "Description"
    PARAMETERS_ELEM = "Parameters"
    PARAMETER_ELEM = "Parameter"

    # Subclasses point these at their schema / logic configuration files.
    XML_SCHEMA_FILE = ""
    YAML_CONFIG_FILE = ""
    CATALOG_EXTENTION = ".xml"

    def __init__(self, catalog_paths):
        """
        Validate optional XML schema (xsd) & YAML logic files
        & load their contents.

        :type catalog_paths: list
        :param catalog_paths: catalog paths list.
            Catalogs can come from different locations (acs, acs_test_scripts)
        """
        self._xml_schema = None
        self._yaml_config = None
        self._catalog_paths = catalog_paths

        if self.XML_SCHEMA_FILE and os.path.isfile(self.XML_SCHEMA_FILE):
            self._xml_schema = self.__load_xml_schema(self.XML_SCHEMA_FILE)

        if self.YAML_CONFIG_FILE and os.path.isfile(self.YAML_CONFIG_FILE):
            self._yaml_config = self.__load_yaml_config(self.YAML_CONFIG_FILE)

    def __load_xml_schema(self, xml_schema):
        """
        Load xml schema to validate the xml catalog.

        :type xml_schema: string
        :param xml_schema: Xml schema to load

        :rtype: etree.XMLSchema
        :return: XML schema to validate the xml catalog file
        """
        try:
            return etree.XMLSchema(etree.parse(xml_schema))
        except etree.XMLSchemaParseError as xml_schema_error:
            raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
                                     "'%s' schema is invalid ! (%s)"
                                     % (xml_schema, str(xml_schema_error)))

    def __load_yaml_config(self, yaml_config):
        """
        Load YAML configuration to validate
        the xml catalog domain, subdomain & feature.

        :type yaml_config: string
        :param yaml_config: YAML config to load

        :rtype: dict
        :return: dict of complete YAML file
        """
        try:
            with open(yaml_config) as f:
                # BUGFIX: use safe_load — yaml.load without an explicit Loader
                # is deprecated and allows arbitrary object construction; the
                # logic config is only read as plain mappings/lists here.
                return yaml.safe_load(f)
        except yaml.scanner.ScannerError as yaml_config_error:
            raise AcsConfigException(AcsConfigException.YAML_PARSING_ERROR,
                                     "'%s' is invalid ! (%s)"
                                     % (yaml_config, str(yaml_config_error)))

    def __check_xml_schema(self, catalog_file):
        """
        Validate catalog file regarding loaded xml schema (if any).

        :type catalog_file: string
        :param catalog_file: Catalog file to parse

        :rtype: etree._ElementTree
        :return: parsed element tree of the catalog file
        """
        try:
            # Parse the xml file
            catalog_etree = etree.parse(catalog_file)

            if catalog_etree and self._xml_schema:
                # Apply xml schema on the xml file
                self._xml_schema.assertValid(catalog_etree)
        except etree.Error as xml_parsing_error:
            raise AcsConfigException(AcsConfigException.XML_PARSING_ERROR,
                                     "'%s' catalog is invalid ! (%s)"
                                     % (catalog_file, str(xml_parsing_error)))

        return catalog_etree

    def _check_xml_logic(self, catalog_file):
        """
        Validate catalog file regarding loaded YAML logic
        (Domains, SubDomains & Features consistency).

        :type catalog_file: string
        :param catalog_file: Catalog file to parse

        :rtype: none
        :return: none
        """
        # Parse the xml file
        catalog_etree = etree.parse(catalog_file)

        if catalog_etree and self._yaml_config:
            domains = self._yaml_config.get("DOMAINS")

            if not domains:
                # BUGFIX: previously referenced the undefined attribute
                # self._yaml_config_file, which raised AttributeError instead
                # of the intended AcsConfigException.
                raise AcsConfigException(
                    AcsConfigException.YAML_PARSING_ERROR,
                    "'%s' file is invalid ! (DOMAINS section does not exists!)"
                    % self.YAML_CONFIG_FILE)

            for node in catalog_etree.getroot():
                item_id = node.attrib[CatalogParser.ID_ATTRIB]
                item_domain = node.attrib[CatalogParser.DOMAIN_ATTRIB]
                item_subdomain = node.attrib[CatalogParser.SUBDOMAIN_ATTRIB]
                item_feature = node.attrib[CatalogParser.FEATURE_ATTRIB]

                # Check that logic between Domain, SubDomain & Feature is respected
                item_features = self._check_subdomain(catalog_file,
                                                      item_id,
                                                      item_domain,
                                                      item_subdomain,
                                                      domains.keys(),
                                                      domains.get(item_domain))

                self._check_feature(catalog_file,
                                    item_id,
                                    item_subdomain,
                                    item_feature,
                                    item_features)

    def _check_subdomain(self,
                         catalog_file,
                         item_id,
                         item_domain,
                         item_subdomain,
                         possible_domains,
                         possible_subdomains):
        """
        Validate SubDomain attributes using YAML config.

        :type catalog_file: string
        :param catalog_file: Catalog file
        :type item_id: string
        :param item_id: Id of the item (UseCase or Test Step)
        :type item_domain: string
        :param item_domain: Domain to be checked
        :type item_subdomain: string
        :param item_subdomain: SubDomain to be checked
        :type possible_domains: list
        :param possible_domains: List of possible domains
        :type possible_subdomains: list
        :param possible_subdomains: List of possible subdomains

        :rtype: list
        :return: List of associated features when item_subdomain is validated
        """
        subdomains = self._yaml_config.get("SUB_DOMAINS")

        if not subdomains:
            # BUGFIX: self._yaml_config_file did not exist; use the class
            # constant that actually names the YAML logic file.
            raise AcsConfigException(
                AcsConfigException.YAML_PARSING_ERROR,
                "'%s' file is invalid ! (SUB_DOMAINS section does not exists!)"
                % self.YAML_CONFIG_FILE)
        elif possible_subdomains is None:
            raise AcsConfigException(
                AcsConfigException.XML_PARSING_ERROR,
                "'%s' is invalid ! (Domain %s is not valid for item %s; Expected values are %s)"
                % (catalog_file, str(item_domain),
                   str(item_id), str(possible_domains)))
        elif len(possible_subdomains) == 0:
            raise AcsConfigException(
                AcsConfigException.YAML_PARSING_ERROR,
                "'%s' file is invalid ! (no SubDomains exist for Domain %s)"
                % (self.YAML_CONFIG_FILE, str(item_domain)))
        elif item_subdomain not in possible_subdomains:
            raise AcsConfigException(
                AcsConfigException.XML_PARSING_ERROR,
                "'%s' catalog is invalid ! (SubDomain %s is not valid for item %s; Expected values are %s)"
                % (catalog_file, str(item_subdomain),
                   str(item_id), str(possible_subdomains)))
        else:
            return subdomains.get(item_subdomain)

    def _check_feature(self, catalog_file, item_id, item_subdomain,
                       item_feature, possible_features):
        """
        Validate Feature attributes using YAML config.

        :type catalog_file: string
        :param catalog_file: Catalog file
        :type item_id: string
        :param item_id: Id of the item (UseCase or Test Step)
        :type item_subdomain: string
        :param item_subdomain: SubDomain to be checked
        :type item_feature: string
        :param item_feature: Feature to be checked
        :type possible_features: list
        :param possible_features: List of possible features

        :rtype: none
        :return: none
        """
        if possible_features is None:
            # BUGFIX: self._yaml_config_file did not exist (AttributeError).
            raise AcsConfigException(
                AcsConfigException.YAML_PARSING_ERROR,
                "'%s' is invalid ! (SubDomain %s does not exist)"
                % (self.YAML_CONFIG_FILE, str(item_subdomain)))
        elif len(possible_features) > 0 and item_feature not in possible_features:
            raise AcsConfigException(
                AcsConfigException.XML_PARSING_ERROR,
                "'%s' catalog is invalid ! (Feature %s is not valid for item %s; Expected values are %s)"
                % (catalog_file, str(item_feature),
                   str(item_id), str(possible_features)))
        elif len(possible_features) == 0 and len(item_feature) > 0:
            raise AcsConfigException(
                AcsConfigException.XML_PARSING_ERROR,
                "'%s' catalog is invalid ! (Feature %s is not valid for item %s; No features expected)"
                % (catalog_file, str(item_feature), str(item_id)))

    def validate_catalog_file(self, catalog_file):
        """
        Validate catalog file regarding xml schema & logic (if any).

        :type catalog_file: string
        :param catalog_file: Catalog file to parse

        :rtype: etree._ElementTree
        :return: parsed element tree of the catalog file
        """
        catalog_etree = self.__check_xml_schema(catalog_file)

        if catalog_etree:
            self._check_xml_logic(catalog_file)

        return catalog_etree

    def parse_catalog_file(self, catalog_file):
        """
        Parse catalog and validate regarding loaded xml schema (if any).
        Must be implemented by subclasses.

        :type catalog_file: string
        :param catalog_file: Catalog file to parse

        :rtype: dict
        :return: Dictionary containing catalog elements
        """
        raise AcsConfigException(AcsConfigException.FEATURE_NOT_IMPLEMENTED,
                                 "'parse_catalog_file' is not implemented !")

    def parse_catalog_folder(self):
        """
        Parse folder(s) which are known to contain catalogs (usecases, test steps, parameters).
        If multiple catalogs are in the folder, it will return a concatenated dictionary.
        If a key is already defined in the dictionary, raise an AcsConfigException.

        :rtype: dict
        :return: Dictionary containing catalog elements
        """
        concatenated_dictionary = {}

        for catalog_path in self._catalog_paths:
            for root, _, catalog_list in os.walk(catalog_path):
                for catalog_file in catalog_list:
                    if catalog_file.lower().endswith(self.CATALOG_EXTENTION):
                        temp_dictionary = self.parse_catalog_file(os.path.join(root, catalog_file))

                        # BUGFIX: dict.iterkeys() is Python 2 only; keys()
                        # behaves identically here on both Python 2 and 3.
                        for item_name in temp_dictionary.keys():
                            if item_name in concatenated_dictionary:
                                raise AcsConfigException(AcsConfigException.PROHIBITIVE_BEHAVIOR,
                                                         "item '%s' is defined more than one time !" % item_name)

                        concatenated_dictionary.update(temp_dictionary)

        return concatenated_dictionary
| 1.375 | 1 |
sdk/python/test/test_v1alpha3_file_system_path.py | PoornimaDevii/katib | 0 | 12765935 | <reponame>PoornimaDevii/katib<filename>sdk/python/test/test_v1alpha3_file_system_path.py
# coding: utf-8
"""
katib
swagger description for katib # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import katib
from katib.models.v1alpha3_file_system_path import V1alpha3FileSystemPath # noqa: E501
from katib.rest import ApiException
class TestV1alpha3FileSystemPath(unittest.TestCase):
    """V1alpha3FileSystemPath unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1alpha3FileSystemPath(self):
        """Test V1alpha3FileSystemPath"""
        # FIXME: construct object with mandatory attributes with example values
        # model = katib.models.v1alpha3_file_system_path.V1alpha3FileSystemPath()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 2 | 2 |
api/serializers.py | backdev96/yamdb_final | 0 | 12765936 | from rest_framework import serializers
from .models import Category, Comment, Genre, Review, Title
class CategorySerializer(serializers.ModelSerializer):
    '''Serializer for the Category model (exposes name + slug; slug is the lookup key).'''

    class Meta:
        fields = ('name', 'slug')
        model = Category
        lookup_field = 'slug'
class GenreSerializer(serializers.ModelSerializer):
    '''Serializer for the Genre model (exposes name + slug; slug is the lookup key).'''

    class Meta:
        fields = ('name', 'slug')
        model = Genre
        lookup_field = 'slug'
class CategoryReprField(serializers.SlugRelatedField):
    '''Slug field for Category that serializes as a {name, slug} dict instead of the bare slug.'''

    def to_representation(self, value):
        return {'name': value.name, 'slug': value.slug}
class GenreReprField(serializers.SlugRelatedField):
    '''Slug field for Genre that serializes as a {name, slug} dict instead of the bare slug.'''

    def to_representation(self, value):
        return {'name': value.name, 'slug': value.slug}
class TitleSerializer(serializers.ModelSerializer):
    '''Serializer for the Title model.

    Category and genre(s) are written by slug but represented as
    {name, slug} dicts (see CategoryReprField / GenreReprField).
    '''

    category = CategoryReprField(slug_field='slug',
                                 queryset=Category.objects.all())
    genre = GenreReprField(slug_field='slug',
                           queryset=Genre.objects.all(),
                           many=True)

    class Meta:
        fields = (
            'id',
            'name',
            'year',
            'rating',
            'description',
            'genre',
            'category',
        )
        model = Title
class ReviewSerializer(serializers.ModelSerializer):
    '''Serializer for the Review model; author is a read-only username slug.'''

    author = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )

    def get_serializer_context(self):
        # NOTE(review): this looks like a DRF *view* method copied onto the
        # serializer — serializers define neither self.kwargs nor
        # self.request, so calling it would raise AttributeError. Confirm
        # whether this is dead code and can be removed.
        return {'title_id': self.kwargs['title_id'], 'request': self.request}

    def validate(self, data):
        '''Raise a ValidationError when the requesting user has already
        reviewed this title (enforced only on creation, i.e. POST).
        '''
        title_id = self.context.get('request').parser_context['kwargs']['title_id']
        if (Review.objects.filter(title_id=title_id, author=self.context['request'].user).exists()
                and self.context['request'].method == 'POST'):
            raise serializers.ValidationError('This user has already added review for this title')
        return data

    class Meta:
        fields = ('id', 'text', 'author', 'score', 'pub_date',)
        model = Review
class CommentSerializer(serializers.ModelSerializer):
    '''Serializer for the Comment model; author is a read-only username slug.'''

    author = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )

    class Meta:
        model = Comment
        fields = ('id', 'text', 'author', 'pub_date')
class EmailSerializer(serializers.Serializer):
    '''Plain serializer validating a single required email field.'''

    email = serializers.EmailField(required=True)
| 2.453125 | 2 |
questions/question#11.py | seunghk1206/1-Manhattan-FullStack-Development | 1 | 12765937 | def NoC():
paid = int(input())
moneyL = [500, 100, 50, 10, 5, 1]
count = 0
leftOver = 1000 - paid
for each in moneyL:
n = leftOver//each
if n > 0:
count += n
leftOver -= n * each
elif n == 0:
pass
return count
# Script entry: read the amount paid from stdin and print the coin count.
print(NoC())
test/data/test_AnyValue.py | pip-services-python/pip-services-commons-python | 0 | 12765938 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
tests.config.test_AnyValue
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) Conceptual Vision Consulting LLC 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pip_services3_commons.convert import TypeCode
from pip_services3_commons.data import AnyValue
class TestAnyValue:

    def test_get_and_set_any_value(self):
        # A fresh AnyValue holds no object until one is set.
        value = AnyValue()
        assert None == value.get_as_object()

        # Once set, the same value is retrievable as int, float and string.
        value.set_as_object(1)
        assert 1 == value.get_as_integer()
        assert 0.001 > 1.0 - value.get_as_float()
        assert "1" == value.get_as_string()

    def test_equal_any_value(self):
        # AnyValue compares equal across numeric and string representations.
        value = AnyValue(1)
        assert 1 == value
        assert 1.0 == value
        assert "1" == value

        assert value.equals_as_type(TypeCode.Float, "1")
| 2.1875 | 2 |
example/download_stories.py | arthus-leroy/novelai-api | 6 | 12765939 | <reponame>arthus-leroy/novelai-api
from sys import path
from os import environ as env
from os.path import join, abspath, dirname
path.insert(0, abspath(join(dirname(__file__), '..')))
from novelai_api import NovelAI_API
from novelai_api.utils import get_encryption_key, decrypt_user_data
from aiohttp import ClientSession
from logging import Logger, StreamHandler
from asyncio import run
# Fail fast if the NovelAI credentials are not provided via the environment.
if "NAI_USERNAME" not in env or "NAI_PASSWORD" not in env:
    raise RuntimeError("Please ensure that NAI_USERNAME and NAI_PASSWORD are set in your environment")

username = env["NAI_USERNAME"]
password = env["NAI_PASSWORD"]
async def main():
    """Log in to NovelAI, fetch the keystore and download + decrypt all user stories."""
    async with ClientSession() as session:
        api = NovelAI_API(session, logger = logger)

        login = await api.high_level.login(username, password)
        logger.info(login)

        # The encryption key is derived locally from the credentials and used
        # to unlock the keystore, which in turn decrypts the stories.
        key = get_encryption_key(username, password)
        keystore = await api.high_level.get_keystore(key)
        logger.info(keystore)

        stories = await api.high_level.download_user_stories()
        decrypt_user_data(stories, keystore)
        logger.info(stories)
# Script entry point: drive the async workflow to completion.
run(main())
core/modules/OTModule.py | UmSenhorQualquer/workflow-editor | 2 | 12765940 | from PyQt4 import uic
import uuid
import os
class OTModule(object):
    """Abstract base class for workflow modules.

    Pairs a human-readable name with a UUID that uniquely identifies the
    instance, and defines the save/load/close lifecycle hooks.
    """

    def __init__(self, name):
        self._unique_id = uuid.uuid1()  # unique identifier for this instance
        self._name = name               # human-readable module name

    def save(self, saver):
        """
        Store this module's state into *saver*.

        @param saver: Dictionary used to store the module information
        @type saver: Dict
        """
        saver['unique_id'] = self._unique_id
        saver['name'] = self.name
        saver['class'] = self.__class__

    def load(self, loader):
        """
        Restore this module's state from *loader*.

        @param loader: Dictionary with the stored module information
        @type loader: Dict
        """
        self.name = loader['name']
        self._unique_id = loader['unique_id']

    def close(self):
        """Close the module (no-op in the base class)."""
        pass

    # ------------------------------------------------------------------
    # Properties
    # ------------------------------------------------------------------

    @property
    def name(self):
        """Human-readable module name."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def uid(self):
        """Unique identifier assigned at construction time (read-only)."""
        return self._unique_id
| 2.859375 | 3 |
src/models/random_forest.py | mgarzon/BOERGER-CSI4900-VPN-Detection | 0 | 12765941 | from datetime import datetime
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
import src.config as cfg
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
def main():
    """Train and evaluate random forests on four feature sets.

    The four experiments drop different column groups from the processed
    netflow CSV: baseline (no window features), time-window only,
    connection-window only, and both combined.
    """
    connection_rw_size = cfg.CONNECTION_RW_SIZE
    timw_rw_size_min = cfg.TIME_RW_SIZE
    features_csv = f'../../data/processed/full_ft_netflow_crw_{connection_rw_size}_trw_{timw_rw_size_min}_2.csv'
    features_df = pd.read_csv(features_csv)
    # 'VPN' column is the binary classification target.
    label = np.array(features_df['VPN'])

    # Identifier columns (and the label) are dropped from every experiment.
    baseline_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN']
    comb_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'VPN', 'Tot Pkts', 'TotLen']
    time_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']
    conn_drop_table = ['Flow ID', 'Src IP', 'Dst IP', 'Timestamp', 'Tot Pkts', 'TotLen', 'VPN']

    # Column ranges holding the time-window and connection-window features.
    time_features = features_df.loc[:, 'Time Flow count': 'Time-Rev Pkt Len Tot'].columns
    conn_features = features_df.loc[:, 'Con Flow count': 'Conn-Rev Pkt Len Tot'].columns

    # Baseline drops both window groups; the time experiment drops the
    # connection group (and vice versa); combined keeps both groups.
    for tft in time_features:
        baseline_drop_table.append(tft)
        conn_drop_table.append(tft)
    for ctf in conn_features:
        baseline_drop_table.append(ctf)
        time_drop_table.append(ctf)

    rf_baseline_ft = features_df.drop(baseline_drop_table, axis=1)
    rf_time_ft = features_df.drop(time_drop_table, axis=1)
    rf_conn_ft = features_df.drop(conn_drop_table, axis=1)
    rf_comb_ft = features_df.drop(comb_drop_table, axis=1)

    baseline_ft_name = list(rf_baseline_ft.columns)
    time_ft_name = list(rf_time_ft.columns)
    conn_ft_name = list(rf_conn_ft.columns)
    comb_ft_name = list(rf_comb_ft.columns)

    rf_baseline_ft_array = np.array(rf_baseline_ft)
    rf_time_ft_array = np.array(rf_time_ft)
    rf_conn_ft_array = np.array(rf_conn_ft)
    rf_comb_ft_array = np.array(rf_comb_ft)

    baseline_predictions, baseline_y_test, baseline_model = random_forest_classifyer(rf_baseline_ft_array, label)
    time_predictions, time_y_test, time_model = random_forest_classifyer(rf_time_ft_array, label)
    conn_predictions, conn_y_test, conn_model = random_forest_classifyer(rf_conn_ft_array, label)
    comb_predictions, comb_y_test, comb_model = random_forest_classifyer(rf_comb_ft_array, label)

    print('///////////////// Baseline /////////////////')
    print('\n')
    evaluate_rf_results(baseline_predictions, baseline_y_test, baseline_model, rf_baseline_ft_array, label)
    feature_importance(baseline_model, baseline_ft_name)

    print('///////////////// Time /////////////////')
    print('\n')
    evaluate_rf_results(time_predictions, time_y_test, time_model, rf_time_ft_array, label)

    print('///////////////// Connection /////////////////')
    print('\n')
    evaluate_rf_results(conn_predictions, conn_y_test, conn_model, rf_conn_ft_array, label)

    print('///////////////// Combined /////////////////')
    print('\n')
    evaluate_rf_results(comb_predictions, comb_y_test, comb_model, rf_comb_ft_array, label)
    feature_importance(comb_model, comb_ft_name)
def random_forest_classifyer(feature_data, label, test_size=0.3, random_state=None):
    """Train a random forest on a hold-out split and return its test predictions.

    :param feature_data: feature matrix (array-like, one row per sample).
    :param label: target labels aligned with feature_data.
    :param test_size: fraction of the data reserved for the test split.
    :param random_state: seed forwarded to both the split and the forest.
    :return: tuple (predictions, y_test, fitted_model).

    Fit and predict wall-clock durations are printed for rough profiling.
    """
    x_train, x_test, y_train, y_test = train_test_split(
        feature_data, label, test_size=test_size, random_state=random_state
    )
    model = RandomForestClassifier(n_jobs=2, random_state=random_state)
    fit_started = datetime.now()
    model.fit(x_train, y_train)
    print(datetime.now() - fit_started)
    predict_started = datetime.now()
    predictions = model.predict(x_test)
    print(datetime.now() - predict_started)
    return predictions, y_test, model
def evaluate_rf_results(predictions, y_test, model, feature_data, label):
    """Print evaluation metrics for a fitted random forest.

    Computes 10-fold cross-validated ROC-AUC and accuracy over the full data
    set, plus a confusion matrix and classification report on the hold-out
    predictions. Each expensive step's wall-clock duration is printed.

    :param predictions: model predictions on the hold-out test set.
    :param y_test: true labels for the hold-out test set.
    :param model: the fitted classifier (re-fit internally by cross_val_score).
    :param feature_data: full feature matrix used for cross-validation.
    :param label: full label vector used for cross-validation.
    :return: tuple (confusion matrix, classification report, AUC scores, mean AUC).
    """
    start_time = datetime.now()
    # 10-fold cross-validated ROC-AUC over the full data set.
    rfc_cv_score = cross_val_score(model, feature_data, label, cv=10, scoring='roc_auc')
    print(datetime.now() - start_time)
    start_time = datetime.now()
    # 10-fold cross-validated accuracy over the full data set.
    rfc_cv_score_acc = cross_val_score(model, feature_data, label, cv=10, scoring='accuracy')
    print(datetime.now() - start_time)
    print("=== Confusion Matrix ===")
    start_time = datetime.now()
    cf_matrix = confusion_matrix(y_test, predictions)
    print(datetime.now() - start_time)
    print(cf_matrix)
    print('\n')
    print("=== Classification Report ===")
    start_time = datetime.now()
    class_report = classification_report(y_test, predictions)
    print(datetime.now() - start_time)
    print(class_report)
    print('\n')
    print("=== All AUC Scores ===")
    print(rfc_cv_score)
    print('\n')
    print("=== Mean AUC Score ===")
    print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
    print('\n')
    print("=== All ACC Scores ===")
    print(rfc_cv_score_acc)
    print('\n')
    print("=== Mean ACC Score ===")
    print("Mean ACC Score - Random Forest: ", rfc_cv_score_acc.mean())
    # NOTE(review): the accuracy scores are printed but not returned.
    return cf_matrix, class_report, rfc_cv_score, rfc_cv_score.mean()
def feature_importance(model, feature_names):
    """Print and return the model's feature importances, sorted descending.

    :param model: a fitted estimator exposing ``feature_importances_``.
    :param feature_names: feature names aligned with the importances.
    :return: a DataFrame with 'feature' and 'importance' columns.
    """
    table = pd.DataFrame(
        {"feature": feature_names, "importance": model.feature_importances_}
    )
    table = table.sort_values("importance", ascending=False)
    print(table)
    return table
# Run the experiment only when executed as a script (not on import).
if __name__ == '__main__':
    main()
# print(features_connection)
# connection_train_ft, connection_test_ft, connection_train_labels, conection_test_labels=train_test_split(features_connection, label, test_size=0.3, random_state=42)
#
# print(f'Training Features Shape: {connection_train_ft.shape}' )
# print(f'Training Labels Shape:{connection_test_ft.shape}' )
# print(f'Testing Features Shape:{connection_train_labels.shape}')
# print(f'Testing Labels Shape:{conection_test_labels.shape}')
#
# accuracy_list=[]
#
# random_forest= RandomForestClassifier(n_jobs=2)
# # t = tqdm(total=1000)
# # for xx in range(0,1000):
#
# random_forest.fit(connection_train_ft,connection_train_labels)
#
# predictions=random_forest.predict(connection_test_ft)
#
#
#
# matches=0
#
# # print(int(predictions[20]))
# # print(int(conection_test_labels[20]))
#
#
# for x in range(0, predictions.shape[0]):
# if int(predictions[x])==int(conection_test_labels[x]):
# matches+=1
#
# # print(matches/predictions.shape[0])
# accuracy_list.append(matches/predictions.shape[0])
# # t.update(1)
#
# print('///////////////////////////////////')
# print(max(accuracy_list))
# print(min(accuracy_list))
# print(sum(accuracy_list)/len(accuracy_list))
#
# rfc_cv_score = cross_val_score(random_forest, features_connection, label, cv=10, scoring='roc_auc')
# print("=== Confusion Matrix ===")
# print(confusion_matrix(conection_test_labels, predictions))
# print('\n')
# print("=== Classification Report ===")
# print(classification_report(conection_test_labels, predictions))
# print('\n')
# print("=== All AUC Scores ===")
# print(rfc_cv_score)
# print('\n')
# print("=== Mean AUC Score ===")
# print("Mean AUC Score - Random Forest: ", rfc_cv_score.mean())
#
# fi=pd.DataFrame({'feature': features_connection_names,
# 'importance': random_forest.feature_importances_}).\
# sort_values('importance', ascending = False)
#
# print(fi)
# print("/////////////")
# print(fi.head()) | 2.125 | 2 |
testscripts/RDKB/component/TAD/TS_TAD_IPPingTest_InvalidInterface_CheckSuccessAndResponse.py | rdkcmf/rdkb-tools-tdkb | 0 | 12765942 | <reponame>rdkcmf/rdkb-tools-tdkb<gh_stars>0
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2019 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>2</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the version as 1 -->
<name>TS_TAD_IPPingTest_InvalidInterface_CheckSuccessAndResponse</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id> </primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>TADstub_Get</primitive_test_name>
<!-- -->
<primitive_test_version>3</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>Check if the ping test done using Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run with an invalid interface is success or not</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>2</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
<box_type>Emulator</box_type>
<!-- -->
<box_type>RPI</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_TAD_60</test_case_id>
<test_objective>Check if the ping test done using Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run with an invalid interface is success or not</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband,Emulator,RPI.</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>TADstub_Get, TADstub_Set</api_or_interface_used>
<input_parameters>Device.IP.Diagnostics.IPPing.Host
Device.IP.Diagnostics.IPPing.Interface
Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run
Device.IP.Diagnostics.IPPing.SuccessCount
Device.IP.Diagnostics.IPPing.AverageResponseTime
Device.IP.Diagnostics.IPPing.MinimumResponseTime
Device.IP.Diagnostics.IPPing.MaximumResponseTime</input_parameters>
<automation_approch>1. Load TAD module
2. Get the value of Device.IP.Diagnostics.TraceRoute.Host
3. Get the hostname from tdkb config file
4. If the current hostname is not same as the one in config file, set it as new host name
5.Set Device.IP.Diagnostics.IPPing.Interface as an invalid interface
6. Set Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run as true to start the ping test
7. After ping test, check if the success count, average response time, minimum response time, maximum response time is greater than zero
8. Unload the TAD module</automation_approch>
<except_output>The ping test done using Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run with an invalid interface should be success</except_output>
<priority>High</priority>
<test_stub_interface>tad</test_stub_interface>
<test_script>TS_TAD_IPPingTest_InvalidInterface_CheckSuccessAndResponse</test_script>
<skipped>No</skipped>
<release_version>M64</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
import time;
import tdkutility;
from tdkutility import *
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("tad","1");
#IP and port of the box. No need to change;
#these will be replaced with the corresponding box IP and port while executing the script.
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_TAD_IPPingTest_InvalidInterface_CheckSuccessAndResponse');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
# Main flow: runs only if the "tad" TDK module loaded successfully.
if "SUCCESS" in loadmodulestatus.upper():
    #Set the result status of execution
    obj.setLoadModuleStatus("SUCCESS");
    # Expected ping host name, read from the tdkb config file.
    host = tdkutility.readtdkbConfigFile(obj);
    tdkTestObj = obj.createTestStep('TADstub_Get');
    tdkTestObj.addParameter("paramName","Device.IP.Diagnostics.IPPing.Host");
    expectedresult="SUCCESS";
    tdkTestObj.executeTestCase(expectedresult);
    actualresult = tdkTestObj.getResult();
    orgHost = tdkTestObj.getResultDetails();
    if expectedresult in actualresult:
        if host == "NULL":
            tdkTestObj.setResultStatus("FAILURE");
            print "Host name not available in tdkb config file"
        else:
            # TEST STEP 2: align the DUT's ping host with the configured
            # host, but only if they differ.
            if orgHost != host :
                tdkTestObj = obj.createTestStep('TADstub_Set');
                tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.IPPing.Host");
                tdkTestObj.addParameter("ParamValue",host);
                tdkTestObj.addParameter("Type","string");
                tdkTestObj.executeTestCase(expectedresult);
                actualresult = tdkTestObj.getResult();
                details = tdkTestObj.getResultDetails();
                if expectedresult in actualresult:
                    #Set the result status of execution
                    tdkTestObj.setResultStatus("SUCCESS");
                    print "TEST STEP 2: Set the host of IPPing";
                    print "EXPECTED RESULT 2: Should set the host of IPPing";
                    print "ACTUAL RESULT 2: %s" %details;
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : SUCCESS";
                else:
                    #Set the result status of execution
                    tdkTestObj.setResultStatus("FAILURE");
                    print "TEST STEP 2: Set the host of IPPing";
                    print "EXPECTED RESULT 2: Should set the host of IPPing";
                    print "ACTUAL RESULT 2: %s" %details;
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : FAILURE";
                    # Abort: host could not be configured, the ping test
                    # cannot proceed.
                    obj.unloadModule("tad");
                    exit();
            # TEST STEP 3: set an invalid interface name for the ping test.
            tdkTestObj = obj.createTestStep('TADstub_Set');
            tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.IPPing.Interface")
            tdkTestObj.addParameter("ParamValue","Interface");
            tdkTestObj.addParameter("Type","string");
            tdkTestObj.executeTestCase(expectedresult);
            actualresult = tdkTestObj.getResult();
            details = tdkTestObj.getResultDetails();
            if expectedresult in actualresult:
                #Set the result status of execution
                tdkTestObj.setResultStatus("SUCCESS");
                print "TEST STEP 3: Set Ping interface as an invalid value"
                print "EXPECTED RESULT 3: Should set Ping interface as an invalid value"
                print "ACTUAL RESULT 3: %s" %details;
                #Get the result of execution
                print "[TEST EXECUTION RESULT] : SUCCESS";
                # TEST STEP 4:
                #start the ping test after setting host value
                tdkTestObj = obj.createTestStep('TADstub_Set');
                tdkTestObj.addParameter("ParamName","Device.IP.Diagnostics.X_RDKCENTRAL-COM_PingTest.Run")
                tdkTestObj.addParameter("ParamValue","true");
                tdkTestObj.addParameter("Type","boolean");
                tdkTestObj.executeTestCase(expectedresult);
                actualresult = tdkTestObj.getResult();
                details = tdkTestObj.getResultDetails();
                if expectedresult in actualresult:
                    #Set the result status of execution
                    tdkTestObj.setResultStatus("SUCCESS");
                    print "TEST STEP 4: Set PingTest.Run of IPPing as true";
                    print "EXPECTED RESULT 4: Should set PingTest.Run of IPPing as true";
                    print "ACTUAL RESULT 4: %s" %details;
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : SUCCESS";
                    # Allow the asynchronous ping test to finish before
                    # reading its result parameters.
                    time.sleep(40);
                    #Check if the ping test was success, by checking the ping's success count and response time values
                    paramList=["Device.IP.Diagnostics.IPPing.SuccessCount", "Device.IP.Diagnostics.IPPing.AverageResponseTime", "Device.IP.Diagnostics.IPPing.MinimumResponseTime", "Device.IP.Diagnostics.IPPing.MaximumResponseTime"]
                    print "TEST STEP 5: Get the successcount, AverageResponseTime, MinimumResponseTime, MaximumResponseTime of ping test"
                    print "EXPECTED RESULT 5: successcount, AverageResponseTime, MinimumResponseTime, MaximumResponseTime of ping test should be greater than 0"
                    tdkTestObj,status,orgValue = getMultipleParameterValues(obj,paramList)
                    # All four metrics must be positive for the ping test to
                    # count as successful.
                    if expectedresult in status and orgValue[0]>0 and orgValue[1]>0 and orgValue[2]>0 and orgValue[3]>0:
                        tdkTestObj.setResultStatus("SUCCESS");
                        print "ACTUAL RESULT 5: %s" %orgValue;
                        #Get the result of execution
                        print "[TEST EXECUTION RESULT] : SUCCESS";
                    else:
                        tdkTestObj.setResultStatus("FAILURE");
                        print "ACTUAL RESULT 5: %s" %orgValue;
                        #Get the result of execution
                        print "[TEST EXECUTION RESULT] : FAILURE"
                else:
                    tdkTestObj.setResultStatus("FAILURE");
                    print "TEST STEP 4: Set PingTest.Run of IPPing as true";
                    print "EXPECTED RESULT 4: Should set PingTest.Run of IPPing as true";
                    print "ACTUAL RESULT 4: %s" %details;
                    #Get the result of execution
                    print "[TEST EXECUTION RESULT] : FAILURE";
            else:
                tdkTestObj.setResultStatus("FAILURE");
                print "TEST STEP 3: Set Ping interface as an invalid value"
                print "EXPECTED RESULT 3: Should set Ping interface as an invalid value"
                print "ACTUAL RESULT 3: %s" %details;
                #Get the result of execution
                print "[TEST EXECUTION RESULT] : FAILURE";
    else:
        tdkTestObj.setResultStatus("FAILURE");
        print "Failed to get Device.IP.Diagnostics.IPPing.Host"
        print "[TEST EXECUTION RESULT] : FAILURE";
    obj.unloadModule("tad");
else:
    print "Failed to load tad module";
    obj.setLoadModuleStatus("FAILURE");
    print "Module loading failed";
aea/configurations/base.py | cyenyxe/agents-aea | 0 | 12765943 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Classes to handle AEA configurations."""
import re
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Generic, List, Optional, Set, Tuple, TypeVar, Union, cast
# from aea.helpers.base import generate_fingerprint
DEFAULT_AEA_CONFIG_FILE = "aea-config.yaml"
DEFAULT_SKILL_CONFIG_FILE = "skill.yaml"
DEFAULT_CONNECTION_CONFIG_FILE = "connection.yaml"
DEFAULT_PROTOCOL_CONFIG_FILE = "protocol.yaml"
DEFAULT_PRIVATE_KEY_PATHS = {"fetchai": "", "ethereum": ""}
T = TypeVar("T")
"""
A dependency is a dictionary with the following (optional) keys:
- version: a version specifier(s) (e.g. '==0.1.0').
- index: the PyPI index where to download the package from (default: https://pypi.org)
- git: the URL to the Git repository (e.g. https://github.com/fetchai/agents-aea.git)
- ref: either the branch name, the tag, the commit number or a Git reference (default: 'master'.)
If the 'git' field is set, the 'version' field will be ignored.
They are supposed to be forwarded to the 'pip' command.
"""
Dependency = dict
"""
A dictionary from package name to dependency data structure (see above).
The package name must satisfy the constraints on Python packages names.
For details, see https://www.python.org/dev/peps/pep-0426/#name.
The main advantage of having a dictionary is that we implicitly filter out dependency duplicates.
We cannot have two items with the same package name since the keys of a YAML object form a set.
"""
Dependencies = Dict[str, Dependency]
class ConfigurationType(Enum):
    """Configuration types, one per AEA package kind."""

    AGENT = "agent"  # configured by aea-config.yaml
    PROTOCOL = "protocol"  # configured by protocol.yaml
    CONNECTION = "connection"  # configured by connection.yaml
    SKILL = "skill"  # configured by skill.yaml
def _get_default_configuration_file_name_from_type(
    item_type: Union[str, ConfigurationType]
) -> str:
    """Get the default configuration file name from item type.

    :param item_type: the package type, as a string or ConfigurationType.
    :return: the default YAML file name for that package type.
    :raises ValueError: if the item type is not a valid configuration type.
    """
    item_type = ConfigurationType(item_type)
    default_names = {
        ConfigurationType.AGENT: DEFAULT_AEA_CONFIG_FILE,
        ConfigurationType.PROTOCOL: DEFAULT_PROTOCOL_CONFIG_FILE,
        ConfigurationType.CONNECTION: DEFAULT_CONNECTION_CONFIG_FILE,
        ConfigurationType.SKILL: DEFAULT_SKILL_CONFIG_FILE,
    }
    try:
        return default_names[item_type]
    except KeyError:
        raise ValueError("Item type not valid: {}".format(str(item_type)))
class ProtocolSpecificationParseError(Exception):
    """Exception raised when parsing a protocol specification file fails."""
class JSONSerializable(ABC):
    """Interface for JSON-serializable objects."""

    @property
    @abstractmethod
    def json(self) -> Dict:
        """Compute the JSON representation."""

    @classmethod
    def from_json(cls, obj: Dict):
        """Build from a JSON object.

        NOTE(review): not marked @abstractmethod, so subclasses are not
        forced to override it; this base implementation returns None.
        """
class Configuration(JSONSerializable, ABC):
    """Configuration class: abstract marker base over JSONSerializable."""
class CRUDCollection(Generic[T]):
    """In-memory CRUD (create/read/update/delete) collection of items keyed by id."""

    def __init__(self):
        """Instantiate an empty CRUD collection."""
        self._items_by_id = {}  # type: Dict[str, T]

    def create(self, item_id: str, item: T) -> None:
        """
        Add an item.

        :param item_id: the item id.
        :param item: the item to be added.
        :return: None
        :raises ValueError: if the item with the same id is already in the collection.
        """
        if item_id in self._items_by_id:
            raise ValueError("Item with name {} already present!".format(item_id))
        self._items_by_id[item_id] = item

    def read(self, item_id: str) -> Optional[T]:
        """
        Get an item by its name.

        :param item_id: the item id.
        :return: the associated item, or None if the item id is not present.
        """
        return self._items_by_id.get(item_id, None)

    def update(self, item_id: str, item: T) -> None:
        """
        Update an existing item (insert it if missing).

        :param item_id: the item id.
        :param item: the item to be added.
        :return: None
        """
        self._items_by_id[item_id] = item

    def delete(self, item_id: str) -> None:
        """Delete an item; a missing id is silently ignored."""
        # Idiom: membership test on the dict itself (no ``.keys()``).
        if item_id in self._items_by_id:
            del self._items_by_id[item_id]

    def read_all(self) -> List[Tuple[str, T]]:
        """Read all the (id, item) pairs."""
        # ``list(dict.items())`` replaces the redundant pair comprehension.
        return list(self._items_by_id.items())
class PublicId(JSONSerializable):
    """This class implement a public identifier.

    A public identifier is composed of three elements:
    - author
    - name
    - version

    The concatenation of those three elements gives the public identifier:

        author/name:version

    >>> public_id = PublicId("author", "my_package", "0.1.0")
    >>> assert public_id.author == "author"
    >>> assert public_id.name == "my_package"
    >>> assert public_id.version == "0.1.0"
    >>> another_public_id = PublicId("author", "my_package", "0.1.0")
    >>> assert hash(public_id) == hash(another_public_id)
    >>> assert public_id == another_public_id
    """

    AUTHOR_REGEX = r"[a-zA-Z_][a-zA-Z0-9_]*"
    PACKAGE_NAME_REGEX = r"[a-zA-Z_][a-zA-Z0-9_]*"
    # Semantic-versioning pattern (major.minor.patch with optional
    # pre-release and build-metadata suffixes); contains capture groups.
    VERSION_REGEX = r"(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?"
    # Groups 1-3 capture author, name and the full version string.
    PUBLIC_ID_REGEX = r"^({})/({}):({})$".format(
        AUTHOR_REGEX, PACKAGE_NAME_REGEX, VERSION_REGEX
    )

    def __init__(self, author: str, name: str, version: str):
        """Initialize the public identifier.

        :param author: the author name.
        :param name: the package name.
        :param version: the package version (semantic-versioning string).
        """
        self._author = author
        self._name = name
        self._version = version

    @property
    def author(self):
        """Get the author."""
        return self._author

    @property
    def name(self):
        """Get the name."""
        return self._name

    @property
    def version(self):
        """Get the version."""
        return self._version

    @classmethod
    def from_str(cls, public_id_string: str) -> "PublicId":
        """
        Initialize the public id from the string.

        >>> str(PublicId.from_str("author/package_name:0.1.0"))
        'author/package_name:0.1.0'

        A bad formatted input raises value error:
        >>> PublicId.from_str("bad/formatted:input")
        Traceback (most recent call last):
        ...
        ValueError: Input 'bad/formatted:input' is not well formatted.

        :param public_id_string: the public id in string format.
        :return: the public id object.
        :raises ValueError: if the string in input is not well formatted.
        """
        # Run the regex once and reuse the match object, instead of a
        # re.match followed by a second re.findall pass.
        match = re.match(cls.PUBLIC_ID_REGEX, public_id_string)
        if match is None:
            raise ValueError(
                "Input '{}' is not well formatted.".format(public_id_string)
            )
        # Groups 1-3 are author, name and full version; the remaining groups
        # come from the version sub-pattern and are ignored.
        username, package_name, version = match.group(1), match.group(2), match.group(3)
        return PublicId(username, package_name, version)

    @property
    def json(self) -> Dict:
        """Compute the JSON representation."""
        return {"author": self.author, "name": self.name, "version": self.version}

    @classmethod
    def from_json(cls, obj: Dict):
        """Build from a JSON object."""
        return PublicId(obj["author"], obj["name"], obj["version"])

    def __hash__(self):
        """Get the hash."""
        return hash((self.author, self.name, self.version))

    def __str__(self):
        """Get the string representation."""
        return "{author}/{name}:{version}".format(
            author=self.author, name=self.name, version=self.version
        )

    def __eq__(self, other):
        """Compare with another object."""
        return (
            isinstance(other, PublicId)
            and self.author == other.author
            and self.name == other.name
            and self.version == other.version
        )

    def __lt__(self, other):
        """Compare two public ids by their string form."""
        return str(self) < str(other)
# Type aliases: protocol and skill identifiers share the PublicId structure.
ProtocolId = PublicId
SkillId = PublicId
class PackageConfiguration(Configuration, ABC):
    """Base class for a package (agent/protocol/connection/skill) configuration."""

    def __init__(self, name: str, author: str, version: str):
        """Initialize a package configuration.

        :param name: the package name.
        :param author: the package author.
        :param version: the package version.
        """
        self.name = name
        self.author = author
        self.version = version

    @property
    def public_id(self) -> PublicId:
        """Get the public id built from author, name and version."""
        return PublicId(self.author, self.name, self.version)
class ConnectionConfig(PackageConfiguration):
    """Handle connection configuration."""

    def __init__(
        self,
        name: str = "",
        author: str = "",
        version: str = "",
        license: str = "",
        class_name: str = "",
        protocols: Optional[Set[PublicId]] = None,
        restricted_to_protocols: Optional[Set[PublicId]] = None,
        excluded_protocols: Optional[Set[PublicId]] = None,
        dependencies: Optional[Dependencies] = None,
        description: str = "",
        **config
    ):
        """
        Initialize a connection configuration object.

        :param name: the connection name.
        :param author: the connection author.
        :param version: the connection version.
        :param license: the connection license.
        :param class_name: the name of the connection class.
        :param protocols: the ids of the protocols supported by the connection.
        :param restricted_to_protocols: ids of protocols this connection is restricted to.
        :param excluded_protocols: ids of protocols this connection must not use.
        :param dependencies: the PyPI dependencies of the connection package.
        :param description: a human-readable description.
        :param config: extra connection-specific configuration key/values.
        """
        super().__init__(name, author, version)
        self.license = license
        self.fingerprint = ""
        self.class_name = class_name
        # BUGFIX: default to an empty set, matching the Optional[Set[PublicId]]
        # annotation and the sibling protocol fields (was an empty list).
        self.protocols = protocols if protocols is not None else set()
        self.restricted_to_protocols = (
            restricted_to_protocols if restricted_to_protocols is not None else set()
        )
        self.excluded_protocols = (
            excluded_protocols if excluded_protocols is not None else set()
        )
        self.dependencies = dependencies if dependencies is not None else {}
        self.description = description
        self.config = config

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {
            "name": self.name,
            "author": self.author,
            "version": self.version,
            "license": self.license,
            "fingerprint": self.fingerprint,
            "class_name": self.class_name,
            # Public ids are serialized as sorted strings for stable output.
            "protocols": sorted(map(str, self.protocols)),
            "restricted_to_protocols": sorted(map(str, self.restricted_to_protocols)),
            "excluded_protocols": sorted(map(str, self.excluded_protocols)),
            "dependencies": self.dependencies,
            "description": self.description,
            "config": self.config,
        }

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        restricted_to_protocols = obj.get("restricted_to_protocols", set())
        restricted_to_protocols = {
            PublicId.from_str(id_) for id_ in restricted_to_protocols
        }
        excluded_protocols = obj.get("excluded_protocols", set())
        excluded_protocols = {PublicId.from_str(id_) for id_ in excluded_protocols}
        dependencies = obj.get("dependencies", {})
        protocols = {PublicId.from_str(id_) for id_ in obj.get("protocols", set())}
        return ConnectionConfig(
            name=cast(str, obj.get("name")),
            author=cast(str, obj.get("author")),
            version=cast(str, obj.get("version")),
            license=cast(str, obj.get("license")),
            class_name=cast(str, obj.get("class_name")),
            protocols=cast(Set[PublicId], protocols),
            restricted_to_protocols=cast(Set[PublicId], restricted_to_protocols),
            excluded_protocols=cast(Set[PublicId], excluded_protocols),
            dependencies=cast(Dependencies, dependencies),
            description=cast(str, obj.get("description", "")),
            # ROBUSTNESS: default to {} so a missing "config" section does not
            # raise TypeError on ``**None``.
            **cast(dict, obj.get("config", {}))
        )
class ProtocolConfig(PackageConfiguration):
    """Handle protocol configuration."""

    def __init__(
        self,
        name: str = "",
        author: str = "",
        version: str = "",
        license: str = "",
        dependencies: Optional[Dependencies] = None,
        description: str = "",
    ):
        """Initialize a protocol configuration object.

        :param name: the protocol name.
        :param author: the protocol author.
        :param version: the protocol version.
        :param license: the protocol license.
        :param dependencies: the PyPI dependencies of the protocol package.
        :param description: a human-readable description.
        """
        super().__init__(name, author, version)
        self.license = license
        self.fingerprint = ""
        self.dependencies = {} if dependencies is None else dependencies
        self.description = description

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        representation = {
            "name": self.name,
            "author": self.author,
            "version": self.version,
            "license": self.license,
            "fingerprint": self.fingerprint,
            "dependencies": self.dependencies,
            "description": self.description,
        }
        return representation

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        return ProtocolConfig(
            name=cast(str, obj.get("name")),
            author=cast(str, obj.get("author")),
            version=cast(str, obj.get("version")),
            license=cast(str, obj.get("license")),
            dependencies=cast(Dependencies, obj.get("dependencies", {})),
            description=cast(str, obj.get("description", "")),
        )
class HandlerConfig(Configuration):
    """Handle a skill handler configuration."""

    def __init__(self, class_name: str = "", **args):
        """Initialize the handler configuration.

        :param class_name: the name of the handler class.
        :param args: keyword arguments forwarded to the handler.
        """
        self.class_name = class_name
        self.args = args

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {"class_name": self.class_name, "args": self.args}

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        args = obj.get("args", {})
        return HandlerConfig(class_name=cast(str, obj.get("class_name")), **args)
class BehaviourConfig(Configuration):
    """Handle a skill behaviour configuration."""

    def __init__(self, class_name: str = "", **args):
        """Initialize the behaviour configuration.

        :param class_name: the name of the behaviour class.
        :param args: keyword arguments forwarded to the behaviour.
        """
        self.class_name = class_name
        self.args = args

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {"class_name": self.class_name, "args": self.args}

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        args = obj.get("args", {})
        return BehaviourConfig(class_name=cast(str, obj.get("class_name")), **args)
class ModelConfig(Configuration):
    """Handle a skill model configuration."""

    def __init__(self, class_name: str = "", **args):
        """Initialize the model configuration.

        :param class_name: the name of the model class.
        :param args: keyword arguments forwarded to the model.
        """
        self.class_name = class_name
        self.args = args

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {"class_name": self.class_name, "args": self.args}

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        args = obj.get("args", {})
        return ModelConfig(class_name=cast(str, obj.get("class_name")), **args)
class SkillConfig(PackageConfiguration):
    """Class to represent a skill configuration file."""

    def __init__(
        self,
        name: str = "",
        author: str = "",
        version: str = "",
        license: str = "",
        protocols: Optional[List[PublicId]] = None,  # FIX: annotation was List[PublicId] with a None default
        dependencies: Optional[Dependencies] = None,
        description: str = "",
    ):
        """Initialize a skill configuration.

        :param name: the skill name.
        :param author: the skill author.
        :param version: the skill version.
        :param license: the skill license.
        :param protocols: the ids of the protocols used by the skill.
        :param dependencies: the PyPI dependencies of the skill package.
        :param description: a human-readable description.
        """
        super().__init__(name, author, version)
        self.license = license
        self.fingerprint = ""
        self.protocols = (
            protocols if protocols is not None else []
        )  # type: List[PublicId]
        self.dependencies = dependencies if dependencies is not None else {}
        self.description = description
        # Skill sub-configurations, keyed by their id in the YAML file.
        self.handlers = CRUDCollection[HandlerConfig]()
        self.behaviours = CRUDCollection[BehaviourConfig]()
        self.models = CRUDCollection[ModelConfig]()

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {
            "name": self.name,
            "author": self.author,
            "version": self.version,
            "license": self.license,
            "fingerprint": self.fingerprint,
            "protocols": sorted(map(str, self.protocols)),
            "dependencies": self.dependencies,
            "handlers": {key: h.json for key, h in self.handlers.read_all()},
            "behaviours": {key: b.json for key, b in self.behaviours.read_all()},
            "models": {key: m.json for key, m in self.models.read_all()},
            "description": self.description,
        }

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object."""
        name = cast(str, obj.get("name"))
        author = cast(str, obj.get("author"))
        version = cast(str, obj.get("version"))
        license = cast(str, obj.get("license"))
        protocols = cast(
            List[PublicId],
            [PublicId.from_str(id_) for id_ in obj.get("protocols", [])],
        )
        dependencies = cast(Dependencies, obj.get("dependencies", {}))
        description = cast(str, obj.get("description", ""))
        skill_config = SkillConfig(
            name=name,
            author=author,
            version=version,
            license=license,
            protocols=protocols,
            dependencies=dependencies,
            description=description,
        )
        # Populate the sub-configuration collections from their JSON sections.
        for behaviour_id, behaviour_data in obj.get("behaviours", {}).items():  # type: ignore
            behaviour_config = BehaviourConfig.from_json(behaviour_data)
            skill_config.behaviours.create(behaviour_id, behaviour_config)

        for handler_id, handler_data in obj.get("handlers", {}).items():  # type: ignore
            handler_config = HandlerConfig.from_json(handler_data)
            skill_config.handlers.create(handler_id, handler_config)

        for model_id, model_data in obj.get("models", {}).items():  # type: ignore
            model_config = ModelConfig.from_json(model_data)
            skill_config.models.create(model_id, model_config)

        return skill_config
class AgentConfig(PackageConfiguration):
"""Class to represent the agent configuration file."""
    def __init__(
        self,
        agent_name: str = "",
        aea_version: str = "",
        author: str = "",
        version: str = "",
        license: str = "",
        fingerprint: str = "",
        registry_path: str = "",
        description: str = "",
        logging_config: Optional[Dict] = None,
    ):
        """Instantiate the agent configuration object.

        :param agent_name: the agent name (also used as the package name).
        :param aea_version: the AEA framework version the agent targets.
        :param author: the agent author.
        :param version: the agent version.
        :param license: the agent license.
        :param fingerprint: the agent fingerprint.
        :param registry_path: path to the local package registry.
        :param description: a human-readable description.
        :param logging_config: a logging dictConfig-style configuration.
        """
        super().__init__(agent_name, author, version)
        self.agent_name = agent_name
        self.aea_version = aea_version
        self.license = license
        self.fingerprint = fingerprint
        self.registry_path = registry_path
        self.description = description
        # Per-ledger private key paths and ledger API configurations.
        self.private_key_paths = CRUDCollection[str]()
        self.ledger_apis = CRUDCollection[Dict]()
        self.logging_config = logging_config if logging_config is not None else {}
        self._default_ledger = None  # type: Optional[str]
        self._default_connection = None  # type: Optional[PublicId]
        self.connections = set()  # type: Set[PublicId]
        self.protocols = set()  # type: Set[PublicId]
        self.skills = set()  # type: Set[PublicId]
        if self.logging_config == {}:
            # Provide a minimal default logging configuration.
            self.logging_config["version"] = 1
            self.logging_config["disable_existing_loggers"] = False
    @property
    def default_connection(self) -> str:
        """Get the default connection as an "author/name:version" string.

        :raises AssertionError: if the default connection has not been set yet.
        """
        assert self._default_connection is not None, "Default connection not set yet."
        return str(self._default_connection)

    @default_connection.setter
    def default_connection(self, connection_id: Optional[Union[str, PublicId]]):
        """
        Set the default connection.

        Accepts None, an "author/name:version" string, or a PublicId;
        strings are normalized to PublicId.

        :param connection_id: the name of the default connection.
        :return: None
        """
        if connection_id is None:
            self._default_connection = None
        elif isinstance(connection_id, str):
            self._default_connection = PublicId.from_str(connection_id)
        else:
            self._default_connection = connection_id
    @property
    def default_ledger(self) -> str:
        """Get the default ledger identifier.

        :raises AssertionError: if the default ledger has not been set yet.
        """
        assert self._default_ledger is not None, "Default ledger not set yet."
        return self._default_ledger

    @default_ledger.setter
    def default_ledger(self, ledger_id: str):
        """
        Set the default ledger.

        :param ledger_id: the id of the default ledger.
        :return: None
        """
        self._default_ledger = ledger_id
    @property
    def json(self) -> Dict:
        """Return the JSON representation.

        NOTE(review): accessing ``default_ledger`` / ``default_connection``
        here raises AssertionError if they have not been set yet.
        """
        return {
            "agent_name": self.agent_name,
            "aea_version": self.aea_version,
            "author": self.author,
            "version": self.version,
            "license": self.license,
            "fingerprint": self.fingerprint,
            "registry_path": self.registry_path,
            "description": self.description,
            "private_key_paths": {
                key: path for key, path in self.private_key_paths.read_all()
            },
            "ledger_apis": {key: config for key, config in self.ledger_apis.read_all()},
            "logging_config": self.logging_config,
            "default_ledger": self.default_ledger,
            "default_connection": self.default_connection,
            # Public ids are serialized as sorted strings for stable output.
            "connections": sorted(map(str, self.connections)),
            "protocols": sorted(map(str, self.protocols)),
            "skills": sorted(map(str, self.skills)),
        }
@classmethod
def from_json(cls, obj: Dict):
"""Initialize from a JSON object."""
agent_config = AgentConfig(
agent_name=cast(str, obj.get("agent_name")),
aea_version=cast(str, obj.get("aea_version")),
author=cast(str, obj.get("author")),
version=cast(str, obj.get("version")),
license=cast(str, obj.get("license")),
registry_path=cast(str, obj.get("registry_path")),
description=cast(str, obj.get("description", "")),
logging_config=cast(Dict, obj.get("logging_config", {})),
)
for crypto_id, path in obj.get("private_key_paths", {}).items(): # type: ignore
agent_config.private_key_paths.create(crypto_id, path)
for ledger_id, ledger_data in obj.get("ledger_apis", {}).items(): # type: ignore
agent_config.ledger_apis.create(ledger_id, ledger_data)
# parse connection public ids
connections = set(
map(lambda x: PublicId.from_str(x), obj.get("connections", []))
)
agent_config.connections = cast(Set[PublicId], connections)
# parse protocol public ids
protocols = set(map(lambda x: PublicId.from_str(x), obj.get("protocols", [])))
agent_config.protocols = cast(Set[PublicId], protocols)
# parse skills public ids
skills = set(map(lambda x: PublicId.from_str(x), obj.get("skills", [])))
agent_config.skills = cast(Set[PublicId], skills)
# set default connection
default_connection_name = obj.get("default_connection", None)
agent_config.default_connection = default_connection_name
default_ledger_id = obj.get("default_ledger", None)
agent_config.default_ledger = default_ledger_id
return agent_config
class SpeechActContentConfig(Configuration):
    """Handle a speech_act content configuration."""

    def __init__(self, **args):
        """Initialize a speech_act content configuration.

        :param args: mapping from content name to content type (both strings).
        :raises ProtocolSpecificationParseError: if a name or a type is not a
            non-empty string.
        """
        self.args = args  # type: Dict[str, str]
        self._check_consistency()

    def _check_consistency(self):
        """Check that every content name and type is a non-empty string."""
        for content_name, content_type in self.args.items():
            # isinstance (rather than an exact type() comparison) also accepts
            # str subclasses, which behave like strings everywhere downstream.
            if not isinstance(content_name, str) or not isinstance(content_type, str):
                raise ProtocolSpecificationParseError(
                    "Contents' names and types must be string."
                )
            # Check each content definition key/value (i.e. content name/type) is not empty
            if content_name == "" or content_type == "":
                raise ProtocolSpecificationParseError(
                    "Contents' names and types cannot be empty."
                )

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return self.args

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object.

        Uses 'cls' so that subclasses deserialize to their own type
        (the previous version hard-coded SpeechActContentConfig).
        """
        return cls(**obj)
class ProtocolSpecification(ProtocolConfig):
    """Handle a protocol specification."""

    def __init__(
        self,
        name: str = "",
        author: str = "",
        version: str = "",
        license: str = "",
        description: str = "",
    ):
        """Initialize a protocol specification configuration object.

        :param name: the protocol name.
        :param author: the author of the protocol.
        :param version: the protocol version.
        :param license: the license of the protocol.
        :param description: a human-readable description.
        """
        super().__init__(name, author, version, license, description=description)
        # Mapping from performative name to its SpeechActContentConfig.
        self.speech_acts = CRUDCollection[SpeechActContentConfig]()

    @property
    def json(self) -> Dict:
        """Return the JSON representation."""
        return {
            "name": self.name,
            "author": self.author,
            "version": self.version,
            "license": self.license,
            "description": self.description,
            "speech_acts": {
                key: speech_act.json for key, speech_act in self.speech_acts.read_all()
            },
        }

    @classmethod
    def from_json(cls, obj: Dict):
        """Initialize from a JSON object.

        :param obj: the JSON-like dictionary of the specification.
        :return: the protocol specification instance.
        :raises ProtocolSpecificationParseError: if the specification is invalid.
        """
        # Use 'cls' (instead of the hard-coded class name) so that subclasses
        # deserialize to their own type.
        protocol_specification = cls(
            name=cast(str, obj.get("name")),
            author=cast(str, obj.get("author")),
            version=cast(str, obj.get("version")),
            license=cast(str, obj.get("license")),
            description=cast(str, obj.get("description", "")),
        )
        for speech_act, speech_act_content in obj.get("speech_acts", {}).items():  # type: ignore
            speech_act_content_config = SpeechActContentConfig.from_json(
                speech_act_content
            )
            protocol_specification.speech_acts.create(
                speech_act, speech_act_content_config
            )
        protocol_specification._check_consistency()
        return protocol_specification

    def _check_consistency(self):
        """Validate the correctness of the speech_acts.

        :raises ProtocolSpecificationParseError: if there is no performative,
            a performative is not a non-empty string, or the same content name
            appears with different types across performatives.
        """
        if len(self.speech_acts.read_all()) == 0:
            raise ProtocolSpecificationParseError(
                "There should be at least one performative defined in the speech_acts."
            )
        # Track each content name's type to detect cross-performative conflicts.
        content_dict = {}
        for performative, speech_act_content_config in self.speech_acts.read_all():
            if not isinstance(performative, str):
                raise ProtocolSpecificationParseError(
                    "A 'performative' is not specified as a string."
                )
            if performative == "":
                raise ProtocolSpecificationParseError(
                    "A 'performative' cannot be an empty string."
                )
            for content_name, content_type in speech_act_content_config.args.items():
                if content_name in content_dict:
                    if content_type != content_dict[content_name]:
                        raise ProtocolSpecificationParseError(
                            "The content '{}' appears more than once with different types in speech_acts.".format(
                                content_name
                            )
                        )
                content_dict[content_name] = content_type
| 1.703125 | 2 |
python_scripts/01_tabular_data_exploration.py | mehrdad-dev/scikit-learn-mooc | 0 | 12765944 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # First look at our dataset
#
# In this notebook, we will look at the necessary steps required before any
# machine learning takes place. It involves:
#
# * loading the data;
# * looking at the variables in the dataset, in particular, differentiate
# between numerical and categorical variables, which need different
# preprocessing in most machine learning workflows;
# * visualizing the distribution of the variables to gain some insights into
# the dataset.
# %% [markdown]
# ## Loading the adult census dataset
#
# We will use data from the "Current Population Survey" from 1994 that we
# downloaded from [OpenML](http://openml.org/).
#
# We use pandas to read this dataset.
#
# ```{note}
# [Pandas](https://pandas.pydata.org/) is a Python library used for
# manipulating 1 and 2 dimensional structured data.
# ```
# %%
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
# %% [markdown]
# We can look at the OpenML webpage to learn more about this dataset:
# <http://www.openml.org/d/1590>
#
# The goal with this data is to predict whether a person earns over 50K a year
# from heterogeneous data such as age, employment, education, family
# information, etc.
# %% [markdown]
# ## The variables (columns) in the dataset
#
# The data are stored in a pandas dataframe. A dataframe is a type of
# structured data composed of 2 dimensions. This type of data is also
# referred to as tabular data.
#
# Each row represents a record. In the field of machine learning or
# descriptive statistics, the terms commonly used to refer to rows are
# "sample", "instance", or "observation".
#
# Each column represents a type of information collected. In the field of
# machine learning and descriptive statistics, the terms commonly used to
# refer to columns are "feature", "variable", "attribute", or "covariate".
# %%
adult_census.head() # Print the first few lines of our dataframe
# %% [markdown]
# The column named **class** is our target variable (i.e., the variable which
# we want to predict). The two possible classes are `<=50K` (low-revenue) and
# `>50K` (high-revenue). The resulting prediction problem is therefore a
# binary classification problem, while we will use the other columns as input
# variables for our model.
# %%
target_column = 'class'
adult_census[target_column].value_counts()
# %% [markdown]
# ```{note}
# Classes are slightly imbalanced, meaning there are more samples of one or
# more classes compared to others. Class imbalance happens often in practice
# and may need special techniques when building a predictive model.
#
# For example in a medical setting, if we are trying to predict whether
# subjects will develop a rare disease, there will be a lot more healthy
# subjects than ill subjects in the dataset.
# ```
# %% [markdown]
# The dataset contains both numerical and categorical data. Numerical values
# take continuous values, for example `age`. Categorical values can have a
# finite number of values, for example `native-country`.
# %%
numerical_columns = [
'age', 'education-num', 'capital-gain', 'capital-loss',
'hours-per-week']
categorical_columns = [
'workclass', 'education', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'native-country']
all_columns = numerical_columns + categorical_columns + [
target_column]
adult_census = adult_census[all_columns]
# %% [markdown]
# Note that for simplicity, we have ignored the "fnlwgt" (final weight) column
# that was crafted by the creators of the dataset when sampling the dataset to
# be representative of the full census database.
# %% [markdown]
# We can check the number of samples and the number of columns available in
# the dataset:
# %%
print(f"The dataset contains {adult_census.shape[0]} samples and "
f"{adult_census.shape[1]} columns")
# %% [markdown]
# We can compute the number of features by counting the number of columns and
# subtracting 1, since one of the columns is the target.
# %%
print(f"The dataset contains {adult_census.shape[1] - 1} features.")
# %% [markdown]
# ## Visual inspection of the data
# Before building a predictive model, it is a good idea to look at the data:
#
# * maybe the task you are trying to achieve can be solved without machine
# learning;
# * you need to check that the information you need for your task is actually
# present in the dataset;
# * inspecting the data is a good way to find peculiarities. These can
# arise during data collection (for example, malfunctioning sensor or missing
# values), or from the way the data is processed afterwards (for example
# capped values).
# %% [markdown]
# Let's look at the distribution of individual features, to get some insights
# about the data. We can start by plotting histograms, note that this only
# works for features containing numerical values:
# %%
_ = adult_census.hist(figsize=(20, 14))
# %% [markdown]
# ```{tip}
# In the cell, we are calling the following pattern: `_ = func()`. It assigns
# the output of `func()` into the variable called `_`. By convention, in Python
# `_` serves as a "garbage" variable to store results that we are not
# interested in.
# ```
#
# We can already make a few comments about some of the variables:
#
# * `age`: there are not that many points for 'age > 70'. The dataset
# description does indicate that retired people have been filtered out
# (`hours-per-week > 0`);
# * `education-num`: peak at 10 and 13, hard to tell what it corresponds to
# without looking much further. We'll do that later in this notebook;
# * `hours-per-week` peaks at 40, this was very likely the standard number of
# working hours at the time of the data collection;
# * most values of `capital-gain` and `capital-loss` are close to zero.
# %% [markdown]
# For categorical variables, we can look at the distribution of values:
# %%
adult_census['sex'].value_counts()
# %%
adult_census['education'].value_counts()
# %% [markdown]
# As noted above, `education-num` distribution has two clear peaks around 10
# and 13. It would be reasonable to expect that `education-num` is the number
# of years of education.
#
# Let's look at the relationship between `education` and `education-num`.
# %%
pd.crosstab(index=adult_census['education'],
columns=adult_census['education-num'])
# %% [markdown]
# This shows that `education` and `education-num` gives you the same
# information. For example, `education-num=2` is equivalent to
# `education='1st-4th'`. In practice that means we can remove `education-num`
# without losing information. Note that having redundant (or highly correlated)
# columns can be a problem for machine learning algorithms.
# %% [markdown]
# ```{note}
# In the upcoming notebooks, we will only keep the `education` variable,
# excluding the `education-num` variable.
# ```
# %% [markdown]
# Another way to inspect the data is to do a `pairplot` and show how each
# variable differs according to our target, `class`. Plots along the diagonal
# show the distribution of individual variables for each `class`. The plots on
# the off-diagonal can reveal interesting interactions between variables.
# %%
import seaborn as sns
n_samples_to_plot = 5000
columns = ['age', 'education-num', 'hours-per-week']
_ = sns.pairplot(data=adult_census[:n_samples_to_plot], vars=columns,
hue=target_column, plot_kws={'alpha': 0.2},
height=3, diag_kind='hist', diag_kws={'bins': 30})
# %% [markdown]
#
# By looking at the data you could infer some hand-written rules to predict the
# class:
#
# * if you are young (less than 25 year-old roughly), you are in the
# `<=50K` class;
# * if you are old (more than 70 year-old roughly), you are in the
# `<=50K` class;
# * if you work part-time (less than 40 hours roughly) you are in the
# `<=50K` class.
#
# These hand-written rules could work reasonably well without the need for any
# machine learning. Note however that it is not very easy to create rules for
# the region `40 < hours-per-week < 60` and `30 < age < 70`. We can hope that
# machine learning can help in this region. Also note that visualization can
# help creating hand-written rules but is limited to 2 dimensions (maybe 3
# dimensions), whereas machine learning models can build models in
# high-dimensional spaces.
#
# Another thing worth mentioning in this plot: if you are young (less than 25
# year-old roughly) or old (more than 70 year-old roughly) you tend to work
# less. This is a non-linear relationship between age and hours per week.
# Linear machine learning models can only capture linear interactions, so this
# may be a factor when deciding which model to chose.
#
# In a machine-learning setting, an algorithm automatically creates the
# "rules" in order to make predictions on new data.
# %% [markdown]
# The plot below shows the rules of a simple model, called decision tree.
# We will explain how this model works in a latter notebook, for now let us
# just consider the model predictions when trained on this dataset:
#
# 
#
# The background color in each area represents the probability of the class
# `high-income` as estimated by the model. Values towards 0 (dark blue)
# indicates that the model predicts `low-income` with a high probability.
# Values towards 1 (dark orange) indicates that the model predicts
# `high-income` with a high probability. Values towards 0.5 (white) indicates
# that the model is not very sure about its prediction.
#
# Looking at the plot here is what we can gather:
#
# * In the region `age < 28.5` (left region) the prediction is `low-income`.
# The dark blue color indicates that the model is quite sure about its
# prediction.
# * In the region `age > 28.5 AND hours-per-week < 40.5`
# (bottom-right region), the prediction is `low-income`. Note that the blue
# is a bit lighter that for the left region which means that the algorithm is
# not as certain in this region.
# * In the region `age > 28.5 AND hours-per-week > 40.5` (top-right region),
# the prediction is `low-income`. However the probability of the class
# `low-income` is very close to 0.5 which means the model is not sure at all
# about its prediction.
#
# It is interesting to see that a simple model creates rules similar to the
# ones that we could have created by hand. Note that machine learning is
# really interesting when creating rules by hand is not straightforward, for
# example because we are in high dimension (many features) or because there
# are no simple and obvious rules that separate the two classes as in the
# top-right region.
# %% [markdown]
#
# In this notebook we have:
#
# * loaded the data from a CSV file using `pandas`;
# * looked at the different kind of variables to differentiate between
# categorical and numerical variables;
# * inspected the data with `pandas` and `seaborn`. Data inspection can allow
# you to decide whether using machine learning is appropriate for your data
# and to highlight potential peculiarities in your data.
#
# Ideas which will be discussed more in details later:
#
# * if your target variable is imbalanced (e.g., you have more samples from one
# target category than another), you may need special techniques for training
# and evaluating your machine learning model;
# * having redundant (or highly correlated) columns can be a problem for
# some machine learning algorithms;
# * contrary to decision tree, linear models can only capture linear
# interaction, so be aware of non-linear relationships in your data.
| 3.6875 | 4 |
pycalc/MAVProxy/modules/mavproxy_mmap/mmap_server.py | joakimzhang/python-electron | 0 | 12765945 | import BaseHTTPServer
import json
import os.path
import thread
import urlparse
DOC_DIR = os.path.join(os.path.dirname(__file__), 'mmap_app')
class Server(BaseHTTPServer.HTTPServer):
    """HTTP server exposing the mmap web app and the module's vehicle state."""

    # Must be a class attribute: HTTPServer.__init__ binds the socket during
    # construction, so assigning self.allow_reuse_address *after* __init__
    # (as the previous code did) happened too late and had no effect.
    allow_reuse_address = True

    def __init__(self, handler, address='', port=9999, module_state=None):
        BaseHTTPServer.HTTPServer.__init__(self, (address, port), handler)
        # Shared state object read by Handler.do_GET to serve '/data'.
        self.module_state = module_state
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves the static mmap web app and a JSON snapshot of vehicle state."""

    def do_GET(self):
        """Handle a GET request.

        '/data' returns the current telemetry as JSON; any other path serves
        a static file from DOC_DIR (directory components are stripped, so
        only flat filenames inside DOC_DIR can ever be read).
        """
        scheme, host, path, params, query, frag = urlparse.urlparse(self.path)
        if path == '/data':
            # Live telemetry snapshot shared by the MAVProxy module.
            state = self.server.module_state
            data = {'lat': state.lat,
                    'lon': state.lon,
                    'heading': state.heading,
                    'alt': state.alt,
                    'airspeed': state.airspeed,
                    'groundspeed': state.groundspeed}
            self.send_response(200)
            self.end_headers()
            self.wfile.write(json.dumps(data))
        else:
            # Remove leading '/'.
            path = path[1:]
            # Ignore all directories. E.g. for ../../bar/a.txt serve
            # DOC_DIR/a.txt.  This also prevents path traversal.
            unused_head, path = os.path.split(path)
            # for / serve index.html.
            if path == '':
                path = 'index.html'
            content = None
            error = None
            try:
                with open(os.path.join(DOC_DIR, path), 'rb') as f:
                    content = f.read()
            except IOError, e:
                error = str(e)
            if content:
                self.send_response(200)
                self.end_headers()
                self.wfile.write(content)
            else:
                # Missing or unreadable file: report the OS error to the client.
                self.send_response(404)
                self.end_headers()
                self.wfile.write('Error: %s' % (error,))
def start_server(address, port, module_state):
    """Create a Server on (address, port) and run it on a background thread.

    Returns the server instance so the caller can later shut it down.
    """
    httpd = Server(
        Handler, address=address, port=port, module_state=module_state)
    thread.start_new_thread(httpd.serve_forever, ())
    return httpd
| 2.59375 | 3 |
Flo et toto/solo/project/constants.py | geverartsdev/TechnofuturTIC | 0 | 12765946 | import pygame
# Game constants
MAX_TIRS = 10  # maximum number of cannonballs on screen at once
MAX_ALIEN = 10  # maximum number of aliens on screen at once
PROBA_ALIEN = 22  # probability that a new alien appears
NOUVEL_ALIEN = 12  # screen refreshes between each new alien
ECRAN = pygame.Rect(0, 0, 1825, 900)  # screen bounds (x, y, width, height)
SONS = True  # set to True to enable sounds
| 2.21875 | 2 |
code/RNC/read_noise_calculation.py | claudiavr/AIS | 0 | 12765947 | # -*- coding: utf-8 -*-
"""
Read Noise Calculation Class
============================
This software has the ReadNoiseCalc class. This class calculates the read noise
of the SPARC4 EMCCDs as a function of their operation mode. The calculations
are done based on a series of characterization of the SPARC4 cameras. For the
conventional mode, it is read the respective value of the read noise in the
Tabelas_Valores_Ruido_Leitura spreadsheet. For the EM mode, it is done an
interpolation of the data presented by the respective spreadshhet, as a
function of the EM gain.
"""
# <NAME>.
# 08/10/2019.
import openpyxl
from scipy.interpolate import interp1d
class Read_Noise_Calculation:
"""Read Noise Calculation Class.
Parameters
----------
ccd_operation_mode: dictionary
A dictionary with the parameter of the CCD operation mode.
em_mode : [0, 1]
CCD Electron Multiplying Mode
em_gain : float
CCD Electron Multiplying gain
hss : [0.1, 1, 10, 20, 30]
Horizontal Shift Spedd of the pixels
preamp : [1, 2]
Pre-amplifer gain
binn : [1, 2]
Binning of the pixels
directory : string
Directory of the spreadsheet with the read noise of the CCD
"""
def __init__(self, ccd_operation_mode, directory):
"""Initialize the class."""
self.em_mode = ccd_operation_mode['em_mode']
self.em_gain = ccd_operation_mode['em_gain']
self.hss = ccd_operation_mode['hss']
self.preamp = ccd_operation_mode['preamp']
self.binn = ccd_operation_mode['binn']
self.directory = directory
def get_operation_mode(self):
"""Print the operation mode on the screen."""
print('em_mode = ', self.em_mode)
print('em_gain = ', self.em_gain)
print('hss = ', self.hss)
print('preamp = ', self.preamp)
print('binn = ', self.binn)
def calculate_read_noise(self):
"""Calculate the read noise of the CCD.
For the conventional mode, it is used the read noise values of the
Read_noise_and_gain_values spreadsheet
For the EM mode, the read noise is obtained through an interpolation of
the values presente by the respective spreadsheet, as a function of the
CCD EM gain.
"""
if self.em_mode == 0:
self._calculate_read_noise_conventional_mode()
if self.em_mode == 1:
self._calculate_read_noise_em_mode()
return self.read_noise
def _calculate_read_noise_conventional_mode(self):
"""Calculate the read noise for the conventional mode."""
indice_tab = 0
if self.hss == 1:
if self.preamp == 1:
indice_tab = 19
if self.preamp == 2:
indice_tab = 21
if self.hss == 0.1:
if self.preamp == 1:
indice_tab = 23
if self.preamp == 2:
indice_tab = 25
if self.binn == 2:
indice_tab += 1
path = r'code/RNC/spreadsheet' \
+ '/' + self.directory + '/' + 'Read_noise_and_gain_values.xlsx'
spreadsheet = openpyxl.load_workbook(path).active
self.read_noise = spreadsheet.cell(indice_tab, 6).value
def _calculate_read_noise_em_mode(self):
"""Calculate the read noise for the EM mode."""
tab_name = 'code/RNC/spreadsheet' + '/' + self.directory + '/' + 'RN_PA'\
+ str(int(self.preamp)) + 'B' + str(int(self.binn))\
+ 'HSS' + str(int(self.hss)) + '.xlsx'
spreadsheet = list(openpyxl.load_workbook(tab_name).active.values)
column_em_gain = [value[0] for value in spreadsheet[1:12]]
column_noise = [value[1] for value in spreadsheet[1:12]]
f = interp1d(column_em_gain, column_noise)
read_noise = f(self.em_gain)
self.read_noise = float(read_noise)
| 2.953125 | 3 |
src/ralph_scrooge/plugins/collect/ralph3_profit_center.py | ar4s/ralph_pricing | 0 | 12765948 | <reponame>ar4s/ralph_pricing
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from django.db import transaction
# TODO(xor-xor): BusinessLine should be renamed to BusinessSegment (this also
# applies to other occurrences of this name in this plugin).
from ralph_scrooge.models import BusinessLine, ProfitCenter
from ralph_scrooge.plugins import plugin_runner
from ralph_scrooge.plugins.collect.utils import get_from_ralph
logger = logging.getLogger(__name__)
@transaction.atomic
def update_profit_center(pc, default_business_line):
    """Create or update the local ProfitCenter for a Ralph payload.

    :param pc: profit-center dict fetched from the Ralph API.
    :param default_business_line: BusinessLine used when the payload has no
        business segment attached.
    :return: True if a new ProfitCenter row was created, False otherwise.
    """
    segment = pc['business_segment']
    if segment is None:
        business_line = default_business_line
    else:
        business_line = BusinessLine.objects.get(ralph3_id=segment['id'])
    profit_center, created = ProfitCenter.objects.get_or_create(
        ralph3_id=pc['id'],
        defaults={'name': pc['name']},
    )
    # Refresh the mutable fields on every run so renames propagate.
    profit_center.name = pc['name']
    profit_center.description = pc['description']
    profit_center.business_line = business_line
    profit_center.save()
    return created
@plugin_runner.register(chain='scrooge', requires=['ralph3_business_segment'])
def ralph3_profit_center(**kwargs):
    """Sync all profit centers from Ralph into Scrooge.

    :return: tuple of (success flag, human-readable summary).
    """
    created_total = 0
    processed_total = 0
    # pk=1 is the fallback business line for payloads without a segment.
    fallback_business_line = BusinessLine.objects.get(pk=1)
    for pc in get_from_ralph("profit-centers", logger):
        if update_profit_center(pc, fallback_business_line):
            created_total += 1
        processed_total += 1
    summary = '{} new profit center(s), {} updated, {} total'.format(
        created_total,
        processed_total - created_total,
        processed_total,
    )
    return True, summary
| 1.78125 | 2 |
lowercase.py | dash219/linux-pipe-spellcheck | 0 | 12765949 | #!/usr/bin/python3
import sys
# Stream stdin to stdout, lower-casing each line as it arrives.
# The previous version wrapped this in a redundant `while True ... break`
# (a single-pass loop) and bound each line to an unused temporary; that
# dead control flow is removed here — the I/O behavior is unchanged.
for line in sys.stdin:
    sys.stdout.write(line.lower())
# NOTE(review): this trailing newline is kept from the original; presumably
# it guarantees the output ends with a newline for the next pipe stage —
# confirm before removing.
sys.stdout.write("\n")
alipay/aop/api/domain/StageGroupInfoVO.py | antopen/alipay-sdk-python-all | 213 | 12765950 | <filename>alipay/aop/api/domain/StageGroupInfoVO.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.StageCateInfoVO import StageCateInfoVO
class StageGroupInfoVO(object):
    """Value object for an Alipay "stage group": a named group holding a list
    of stage category infos (auto-generated SDK model)."""

    def __init__(self):
        # Display name of the group.
        self._group_name = None
        # List of StageCateInfoVO items; populated through the setter below.
        self._stage_cate_infos = None

    @property
    def group_name(self):
        return self._group_name

    @group_name.setter
    def group_name(self, value):
        self._group_name = value

    @property
    def stage_cate_infos(self):
        return self._stage_cate_infos

    @stage_cate_infos.setter
    def stage_cate_infos(self, value):
        # Accepts a list of StageCateInfoVO instances or plain dicts; dicts
        # are converted so the stored list is homogeneous. Non-list values
        # are silently ignored (generated-SDK convention).
        if isinstance(value, list):
            self._stage_cate_infos = list()
            for i in value:
                if isinstance(i, StageCateInfoVO):
                    self._stage_cate_infos.append(i)
                else:
                    self._stage_cate_infos.append(StageCateInfoVO.from_alipay_dict(i))

    def to_alipay_dict(self):
        """Serialize to a plain dict in the Alipay wire format.

        NOTE(review): per the generated-SDK template, falsy values (None, "",
        []) are omitted entirely, and the elements of self.stage_cate_infos
        are converted to dicts *in place* as a side effect of serialization.
        """
        params = dict()
        if self.group_name:
            if hasattr(self.group_name, 'to_alipay_dict'):
                params['group_name'] = self.group_name.to_alipay_dict()
            else:
                params['group_name'] = self.group_name
        if self.stage_cate_infos:
            if isinstance(self.stage_cate_infos, list):
                # Convert each element in place if it can serialize itself.
                for i in range(0, len(self.stage_cate_infos)):
                    element = self.stage_cate_infos[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.stage_cate_infos[i] = element.to_alipay_dict()
            if hasattr(self.stage_cate_infos, 'to_alipay_dict'):
                params['stage_cate_infos'] = self.stage_cate_infos.to_alipay_dict()
            else:
                params['stage_cate_infos'] = self.stage_cate_infos
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a StageGroupInfoVO from a response dict.

        Returns None for an empty/missing dict. Values are assigned through
        the properties, so 'stage_cate_infos' entries are converted to
        StageCateInfoVO instances by the setter.
        """
        if not d:
            return None
        o = StageGroupInfoVO()
        if 'group_name' in d:
            o.group_name = d['group_name']
        if 'stage_cate_infos' in d:
            o.stage_cate_infos = d['stage_cate_infos']
        return o
| 1.90625 | 2 |