from rest_framework import viewsets, generics
from rest_framework.authentication import BasicAuthentication
from rest_framework.permissions import IsAuthenticated, DjangoModelPermissions
from escola.models import Aluno, Curso, Matricula
from escola.serializer import AlunoSerializer, AlunoSerializerV2, CursoSerializer, MatriculaSerializer, ListaMatriculasAlunoSerializer, ListaAlunosMatriculadosSerializer
class AlunosViewSet(viewsets.ModelViewSet):
"""Exibindo todos os alunos e alunas"""
queryset = Aluno.objects.all()
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
def get_serializer_class(self):
if self.request.version == 'v2':
return AlunoSerializerV2
else:
return AlunoSerializer
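# Hedged note (not part of the original project): request.version is only populated when a DRF
# versioning scheme is configured. A minimal settings.py sketch that makes the v2 branch reachable
# via ?version=v2 could look like this (setting names are DRF's documented ones):
# REST_FRAMEWORK = {
#     'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.QueryParameterVersioning',
#     'DEFAULT_VERSION': 'v1',
#     'ALLOWED_VERSIONS': ['v1', 'v2'],
# }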
class CursosViewSet(viewsets.ModelViewSet):
"""Exibindo todos os cursos"""
queryset = Curso.objects.all()
serializer_class = CursoSerializer
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
class MatriculasViewSet(viewsets.ModelViewSet):
"""Exibindo todas as matriculas"""
queryset = Matricula.objects.all()
serializer_class = MatriculaSerializer
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
class ListaMatriculasAluno(generics.ListAPIView):
"""Listando as matriculas de um aluno ou aluna"""
def get_queryset(self):
queryset = Matricula.objects.filter(aluno_id=self.kwargs['pk'])
return queryset
serializer_class = ListaMatriculasAlunoSerializer
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
class ListaAlunosMatriculados(generics.ListAPIView):
"""Listando alunos ou alunas matriculados em um curso."""
def get_queryset(self):
queryset = Matricula.objects.filter(curso_id=self.kwargs['pk'])
return queryset
serializer_class = ListaAlunosMatriculadosSerializer
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated]
|
import bbb
class IR_AIN(object):
#by default, travel is on ADC0, pos is on ADC1
def __init__(self, anum):
self.adc = bbb.ADC( anum )
self.ratio = 1 #normalization ratio
self.thresh = 1
def val(self):
# adcs are 12-bit, but report a millivolt value via SysFS
n = 5
samples = [ self.adc.volts for i in range(n) ]
average = (sum(samples) / n)
return average / self.ratio
def set_thresh(self, val):
self.thresh = val
def get_thresh(self):
return self.thresh
def is_white(self):
return (self.val() < self.thresh)
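# Minimal usage sketch (assumptions: BeagleBone-style hardware and the `bbb` ADC wrapper imported
# above; the channel number and threshold are illustrative only):
if __name__ == "__main__":
    sensor = IR_AIN(1)        # IR sensor wired to ADC1
    sensor.set_thresh(0.5)    # volts; tune for the surface/lighting in use
    print("value:", sensor.val())
    print("white surface:", sensor.is_white())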
|
import os
# rename files based on the actual file name, excluding the additional kobocat
# directories from name
files = len(os.listdir('.'))
i = 1
print('This directory has '+str(files)+' files.')
for filename_old in os.listdir('.'):
filename, file_extension = os.path.splitext(filename_old)
if file_extension == '.txt':
parts = filename.split('%2F')
# extract file name and remove trailing . added by kobocat
nameonly = parts[-1].split('.')[0]
os.rename(filename_old, nameonly+'.txt')
print(str(i)+'/'+str(files)+' done. renamed '+filename_old+' > '+nameonly+'.txt')
i+=1
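# Illustration of the renaming rule above on a hypothetical kobocat export name:
#   'attachments%2F42%2Fsurvey_photo.2019-05-01.txt'  ->  'survey_photo.txt'
# i.e. keep the segment after the last '%2F' and drop everything after its first '.'.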
|
import itertools  # needed by Solution_TLE.matrix_mult below (itertools.izip)
# Time: O(h * p^2), p is the number of patterns
# Space: O(p^2)
# bitmask, backtracking, dp
class Solution(object):
def buildWall(self, height, width, bricks):
"""
:type height: int
:type width: int
:type bricks: List[int]
:rtype: int
"""
MOD = 10**9+7
def backtracking(height, width, bricks, total, mask, lookup, patterns):
if mask in lookup:
return
lookup.add(mask)
if total >= width:
if total == width:
patterns.append(mask^(1<<width))
return
for x in bricks:
backtracking(height, width, bricks, total+x, mask|(1<<(total+x)), lookup, patterns)
patterns, lookup = [], set()
backtracking(height, width, bricks, 0, 0, lookup, patterns)
adj = [[j for j, r2 in enumerate(patterns) if not (r1 & r2)] for r1 in patterns]
dp = [[1]*len(patterns), [0]*len(patterns)]
for i in xrange(height-1):
dp[(i+1)%2] = [sum(dp[i%2][k] for k in adj[j]) % MOD for j in xrange(len(patterns))]
return sum(dp[(height-1)%2]) % MOD
# Time: O(p^3 * logh), p is the number of patterns, p may be up to 512
# Space: O(p^3)
# bitmask, backtracking, matrix exponentiation
class Solution_TLE(object):
def buildWall(self, height, width, bricks):
"""
:type height: int
:type width: int
:type bricks: List[int]
:rtype: int
"""
MOD = 10**9+7
def backtracking(height, width, bricks, total, mask, lookup, patterns):
if mask in lookup:
return
lookup.add(mask)
if total >= width:
if total == width:
patterns.append(mask^(1<<width))
return
for x in bricks:
backtracking(height, width, bricks, total+x, mask|(1<<(total+x)), lookup, patterns)
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b % MOD for a, b in itertools.izip(row, col)) % MOD for col in ZB] for row in A]
def matrix_expo(A, K):
result = [[int(i == j) for j in xrange(len(A))] for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
patterns, lookup = [], set()
backtracking(height, width, bricks, 0, 0, lookup, patterns)
return reduce(lambda x,y: (x+y)%MOD,
matrix_mult([[1]*len(patterns)],
matrix_expo([[int((mask1 & mask2) == 0)
for mask2 in patterns]
for mask1 in patterns], height-1))[0],
0) # Time: O(p^3 * logh), Space: O(p^2)
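# Quick check of the first solution (hedged sketch): for height=2, width=3, bricks=[1, 2] the row
# patterns are 1+1+1, 1+2 and 2+1; only the last two can sit on top of each other without aligned
# joints, so the expected answer is 2.
if __name__ == '__main__':
    print(Solution().buildWall(2, 3, [1, 2]))  # expected: 2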
|
from html.parser import HTMLParser
from urllib.request import urlopen
from urllib import parse
import time
from datetime import date
import re
# ResultsPrinter formats the filtered links into a dated HTML results file.
# Further below, LinkParser inherits from HTMLParser so it can hook into the
# start-tag/data/end-tag callbacks while crawling job listings.
class ResultsPrinter:
def format_body(self, links):
body = []
for link in links:
body.append('<li><a href="{}">{}</a></li>'.format(*link))
return body
def format_html(self, body):
template = '''
<!DOCTYPE html>
<html>
<head>
<title>{}</title>
</head>
<body>
<ol>
{}
</ol>
</body>
</html>
'''.format('-'.join(['Results ', self.today]),'\n'.join(body))
return template
def print_result(self, page):
file = ''.join([self.path, 'results-', self.today, self.extension])
with open(file, 'w') as f:
print(page, file=f)
def present_results(self, parsedLinks):
self.path = './results/'
self.extension = '.html'
self.today = date.today().isoformat()
i = 0
full_results = self.format_html(self.format_body(parsedLinks))
self.print_result(full_results)
class LinkParser(HTMLParser):
def handle_endtag(self, tag):
if tag == 'a':
self.flag_for_data = -1
def handle_data(self, data):
if (self.flag_for_data != -1):
self.links_inner_html.insert(self.flag_for_data, data)
def keyword_filter(self, words, link):
is_valid = True
quality = 0
if self.matcher.search(words):
formatted_words = words.lower().split()
for word in formatted_words:
if is_valid == True:
is_valid = (word not in self.not_keywords)
if word in self.must_keywords:
quality = quality + 1
if quality > 0:
return (link, words)
else:
return None
else:
return None
def handle_starttag(self, tag, attrs):
if tag == 'a':
for (key, value) in attrs:
if (key == 'href' and (value.find('job') != -1)):
newUrl = parse.urljoin(self.baseUrl, value)
# And add it to our colection of links:
self.links = self.links + [newUrl]
self.flag_for_data = len(self.links) - 1
def getLinks(self, url, yes_words, no_words):
self.must_keywords = yes_words
self.not_keywords = no_words
self.matcher = re.compile('develop(er|ment)|engineer', re.IGNORECASE)
self.links = []
self.flag_for_data = -1
self.links_inner_html = []
self.baseUrl = url
response = urlopen(url)
print(response.getheader('Content-Type').find('text/html'))
if response.getheader('Content-Type').find('text/html') != -1:
htmlBytes = response.read()
htmlString = htmlBytes.decode("utf-8")
self.feed(htmlString)
i = 0
result = []
while i < len(self.links):
new_link = self.keyword_filter(self.links_inner_html[i], self.links[i])
if new_link:
result = result + [new_link]
i += 1
return result
else:
return None
def main():
search_terms = {'front-end', 'frontend', 'javascript', 'associate', 'junior', 'software', 'python', 'ruby', 'rails', 'back-end', 'backend', 'web', 'apprentice'}
not_terms = {'senior', 'architect', 'lead', 'sales'}
# pagesToVisit = [put it hereee]
links = []
#INDEED
#pages = [""]
#front_door_url = https://www.indeed.com/recommendedjobs?
#rootDomain = ""
#BUILT IN
pages = ["1","2","3","4","5","6","7"]
front_door_url = "http://www.builtinchicago.org/jobs?page="
rootDomain = "http://www.builtinchicago.org"
for page in pages:
current_url = "".join([front_door_url, page])
try:
parser = LinkParser()
new_links = parser.getLinks(current_url, search_terms, not_terms)
links = links + (new_links or [])  # getLinks returns None for non-HTML responses
print(" **Success!**")
except Exception as exc:
print(" **Failed!**", exc)
printer = ResultsPrinter()
printer.present_results(links)
print('woot')
if __name__ == '__main__':
main()
|
"""
Migration script
"""
from pymongo import MongoClient
from tqdm import tqdm
def migrate(target_db, target_coll, new_coll_name='disp_entry', client=None):
"""
Migrate the database to the ODM style
"""
if not client:
client = MongoClient()
database = client[target_db]
col = database[target_coll]
# Add class identifier
col.update_many({'document_type': 'res'},
{'$set': {
'_cls': 'DispEntry.ResFile'
}})
col.update_many({'document_type': 'seed'},
{'$set': {
'_cls': 'DispEntry.SeedFile'
}})
col.update_many({'document_type': 'param'},
{'$set': {
'_cls': 'DispEntry.ParmaFile'
}})
col.update_many({'document_type': 'initial_structure'},
{'$set': {
'_cls': 'DispEntry.InitialStructureFile'
}})
print('Updated class identifier')
# Remove the document type field
col.update_many({'document_type': {
'$exists': 1
}}, {'$unset': {
'document_type': ''
}})
print('Removed old identifier')
# Remove the unused param_hash field
col.update_many({'param_hash': {
'$exists': 1
}}, {'$unset': {
'param_hash': ''
}})
col.update_many({'param_content': {
'$exists': 1
}}, {'$rename': {
'param_content': 'content'
}})
col.update_many({'seed_content': {
'$exists': 1
}}, {'$rename': {
'seed_content': 'content'
}})
col.update_many({'struct_content': {
'$exists': 1
}}, {'$rename': {
'struct_content': 'content'
}})
col.update_many({'res_content': {
'$exists': 1
}}, {'$rename': {
'res_content': 'content'
}})
print('Renamed xx_content fields')
# Create index for md5hash
col.create_index('md5hash', background=False)
col.create_index('_cls', background=False)
print('Linking SEED to ResFile and InitialStructureFile')
query = {
'_cls': {
'$in': ['DispEntry.ResFile', 'DispEntry.InitialStructureFile']
}
}
tot = col.count_documents(query)
for entry in tqdm(list(col.find(query, projection=['seed_hash'])),
total=tot):
# Link seed_hash field
if entry.get('seed_hash'):
seed = col.find_one({'md5hash': entry['seed_hash']})
if seed:
res = col.find_one_and_update(
{'_id': entry['_id']},
{'$set': {
'seed_file': seed['_id']
}})
if not res:
print('Warning: cannot find the seed for {}'.format(
entry['struct_name']))
col.update_many(query, {'$unset': {'seed_hash': ''}})
print('Linked seed to the ResFile and InitialStructureFile')
print('Linking InitialStructureFile to ResFile')
# Link the initial structures to the res files
query = {'_cls': 'DispEntry.InitialStructureFile'}
tot = col.count_documents(query)
for entry in tqdm(list(
col.find(query,
projection=['project_name', 'seed_name', 'struct_name'])),
total=tot):
res_entry = col.find_one({
'_cls': 'DispEntry.ResFile',
'project_name': entry['project_name'],
'seed_name': entry['seed_name'],
'struct_name': entry['struct_name']
})
# Update the matching ResFile with the link; not every initial structure has one
if res_entry:
col.find_one_and_update(
{'_id': res_entry['_id']},
{'$set': {
'init_structure_file': entry['_id']
}})
print('Linked InitialStructureFile to the ResFile')
print('Fixing the Creator Embedded document')
query = {'creator': {'$exists': 1}}
tot = col.count_documents(query)
for entry in tqdm(list(col.find(query, projection=['creator'])),
total=tot):
# Migrate the creator field
if entry.get('creator'):
creator = entry['creator']
creator['uuid'] = creator['uuid'].hex
creator['ip_address'] = creator.pop('ip', None)
creator['hostname'] = creator.pop('host', None)
col.find_one_and_update({'_id': entry['_id']},
{'$set': {
'creator': creator
}})
print('Creator Embedded document migrated')
col.rename(new_coll_name)
col = database[new_coll_name]
print('Renamed collection to `{}`.'.format(new_coll_name))
if __name__ == '__main__':
BASE_DB = 'disp-archive'
DB = 'disp_migrate_test'
COLLECTION = 'disp-entries'
# Reset the migration test collection
client = MongoClient()
base_coll = client[BASE_DB]['disp-entries']
migrate_coll = client[DB]['disp-entries']
migrate_coll.drop()
print('Preparing migration test database')
migrate_coll.insert_many(base_coll.find())
for idx in ['seed_name', 'project_name', 'struct_name']:
migrate_coll.create_index(idx, background=False)
# Run the migration against the test database
migrate(DB, COLLECTION)
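# Post-migration sanity check (a sketch; reuses the client/DB defined above and migrate()'s
# default new_coll_name 'disp_entry'):
#   migrated_coll = client[DB]['disp_entry']
#   print('ResFile docs:', migrated_coll.count_documents({'_cls': 'DispEntry.ResFile'}))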
|
tagcomponent = "disagg"
target_capacity = 2
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 21:50:26 2016
save chessboard pattern and image corners to a file for later calibration (so
as not to redo the detection for every calibration model)
@author: lilian+sebalander
"""
# %%
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
# %%
hayquegraficar = True
# camera can be one of ['vca', 'vcaWide', 'ptz']; these are the available data sets
camera = 'vcaWide'
# input
imagesFolder = "./resources/intrinsicCalib/" + camera + "/"
# output
cornersFile = imagesFolder + camera + "Corners.npy"
patternFile = imagesFolder + camera + "ChessPattern.npy"
imgShapeFile = imagesFolder + camera + "Shape.npy"
images = glob.glob(imagesFolder+'*.png')
# %%
# number of inner corners of the board:
# (squares along each side of the chessboard minus 1)
pattW = 9 # width
pattH = 6 # height
patternSize = (pattW, pattH)
# termination criterion for cornerSubPix
subpixCriteria = (cv2.TERM_CRITERIA_EPS, # + cv2.TERM_CRITERIA_MAX_ITER, # termination criteria type
1000000, # max number of iterations
0.001) # min accuracy
# %%
# Arrays for object points and image points from all the images.
objpoints = [] #3d points in real world
imgpoints = [] #2d points in image plane
# Build a vector identifying each square of the board.
# Its dimensions differ from the ones used for the PTZ calibration;
# an extra dimension is needed for calibrate to run.
chessboardModel = np.zeros((1,pattH*pattW,3), np.float32)
chessboardModel[0, :, :2] = np.mgrid[0:pattW, 0:pattH].T.reshape(-1, 2) # fills the first two columns
# %%
noencuentra = []
for picture in images:
img = cv2.imread(picture, cv2.IMREAD_GRAYSCALE)
found, corners = cv2.findChessboardCorners(img, patternSize)
if found:
corners = cv2.cornerSubPix(img, corners, (11, 11), (1, 1), subpixCriteria)
imgpoints.append(corners.reshape(1, -1, 2))
if hayquegraficar:
plt.figure()
plt.imshow(cv2.imread(picture)[:,:,::-1])
plt.scatter(corners[:,0,0], corners[:,0,1])
else:
print('No corners found in ' + picture)
noencuentra.append(picture)
# %% SAVE DATA POINTS
np.save(cornersFile, imgpoints)
np.save(patternFile, chessboardModel)
# the image shape has to be reversed because (width, height) is the required order
np.save(imgShapeFile, img.shape[::-1])
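# %% LATER CALIBRATION (added sketch, not part of the original capture script): shows how the
# saved .npy files could be fed to OpenCV's standard calibrateCamera; a wide-angle lens may need
# the fisheye/omnidirectional models instead.
imgp = [c.astype(np.float32) for c in np.load(cornersFile)]
objp = [np.load(patternFile).astype(np.float32)] * len(imgp)
imageSize = tuple(int(v) for v in np.load(imgShapeFile))  # (width, height)
rms, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(objp, imgp, imageSize, None, None)
print("RMS reprojection error:", rms)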
|
import os
import os.path as osp
import time
import tabulate
import joblib
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.utils import cat_entropy
from baselines.a2c.utils import discount_with_dones
from baselines.common import set_global_seeds, explained_variance
from pysc2.lib import actions as sc2_actions
from common import common, nsml
import re
_CONTROL_GROUP_RECALL = 0
_NOT_QUEUED = 0
def mse(pred, target):
return tf.square(pred - target) / 2.
def extract_number(f):
s = re.findall("\d+$", f)
return int(s[0]) if s else -1, f
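# Illustration (hypothetical checkpoint names): extract_number keys a max() by the trailing
# number, falling back to lexicographic order when there is none:
#   max(["checkpoint-2", "checkpoint-10"], key=extract_number)  ->  "checkpoint-10"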
class Model(object):
def __init__(self,
policy,
ob_space,
ac_space,
nenvs,
total_timesteps,
nprocs=32,
nscripts=16,
nsteps=20,
nstack=4,
ent_coef=0.1,
vf_coef=0.5,
vf_fisher_coef=1.0,
lr=0.25,
max_grad_norm=0.001,
kfac_clip=0.001,
lrschedule='linear',
alpha=0.99,
epsilon=1e-5):
config = tf.ConfigProto(
allow_soft_placement=True,
intra_op_parallelism_threads=nprocs,
inter_op_parallelism_threads=nprocs)
config.gpu_options.allow_growth = True
self.sess = sess = tf.Session(config=config)
nsml.bind(sess=sess)
# nact = ac_space.n
nbatch = nenvs * nsteps
A = tf.placeholder(tf.int32, [nbatch])
XY0 = tf.placeholder(tf.int32, [nbatch])
XY1 = tf.placeholder(tf.int32, [nbatch])
# ADV == TD_TARGET - values
ADV = tf.placeholder(tf.float32, [nbatch])
TD_TARGET = tf.placeholder(tf.float32, [nbatch])
PG_LR = tf.placeholder(tf.float32, [])
VF_LR = tf.placeholder(tf.float32, [])
self.model = step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False)
self.model2 = train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True)
# Policy 1 : Base Action : train_model.pi label = A
script_mask = tf.concat(
[
tf.zeros([nscripts * nsteps, 1]),
tf.ones([(nprocs - nscripts) * nsteps, 1])
],
axis=0)
pi = train_model.pi
pac_weight = script_mask * (tf.nn.softmax(pi) - 1.0) + 1.0
pac_weight = tf.reduce_sum(pac_weight * tf.one_hot(A, depth=3), axis=1)
neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi, labels=A)
neglogpac *= tf.stop_gradient(pac_weight)
inv_A = 1.0 - tf.cast(A, tf.float32)
xy0_mask = tf.cast(A, tf.float32)
xy1_mask = tf.cast(A, tf.float32)
condition0 = tf.equal(xy0_mask, 2)
xy0_mask = tf.where(condition0, tf.ones(tf.shape(xy0_mask)), xy0_mask)
xy0_mask = 1.0 - xy0_mask
condition1 = tf.equal(xy1_mask, 2)
xy1_mask = tf.where(condition1, tf.zeros(tf.shape(xy1_mask)), xy1_mask)
# One hot representation of chosen marine.
# [batch_size, 2]
pi_xy0 = train_model.pi_xy0
pac_weight = script_mask * (tf.nn.softmax(pi_xy0) - 1.0) + 1.0
pac_weight = tf.reduce_sum(
pac_weight * tf.one_hot(XY0, depth=1024), axis=1)
logpac_xy0 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi_xy0, labels=XY0)
logpac_xy0 *= tf.stop_gradient(pac_weight)
logpac_xy0 *= tf.cast(xy0_mask, tf.float32)
pi_xy1 = train_model.pi_xy1
pac_weight = script_mask * (tf.nn.softmax(pi_xy1) - 1.0) + 1.0
pac_weight = tf.reduce_sum(
pac_weight * tf.one_hot(XY0, depth=1024), axis=1)
# 1D? 2D?
logpac_xy1 = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=pi_xy1, labels=XY1)
logpac_xy1 *= tf.stop_gradient(pac_weight)
logpac_xy1 *= tf.cast(xy1_mask, tf.float32)
pg_loss = tf.reduce_mean(ADV * neglogpac)
pg_loss_xy0 = tf.reduce_mean(ADV * logpac_xy0)
pg_loss_xy1 = tf.reduce_mean(ADV * logpac_xy1)
vf_ = tf.squeeze(train_model.vf)
vf_r = tf.concat(
[
tf.ones([nscripts * nsteps, 1]),
tf.zeros([(nprocs - nscripts) * nsteps, 1])
],
axis=0) * TD_TARGET
vf_masked = vf_ * script_mask + vf_r
# vf_mask[0:nscripts * nsteps] = R[0:nscripts * nsteps]
vf_loss = tf.reduce_mean(mse(vf_masked, TD_TARGET))
entropy_a = tf.reduce_mean(cat_entropy(train_model.pi))
entropy_xy0 = tf.reduce_mean(cat_entropy(train_model.pi_xy0))
entropy_xy1 = tf.reduce_mean(cat_entropy(train_model.pi_xy1))
entropy = entropy_a + entropy_xy0 + entropy_xy1
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
params = find_trainable_variables("model")
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
# Summarize all gradients
for grad, var in grads:
tf.summary.histogram(var.name + '/gradient', grad)
trainer = tf.train.RMSPropOptimizer(learning_rate=lr, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
self.logits = logits = train_model.pi
# xy0
self.params_common = params_common = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='model/common')
self.params_xy0 = params_xy0 = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='model/xy0') + params_common
train_loss_xy0 = pg_loss_xy0 - entropy * ent_coef + vf_coef * vf_loss
self.grads_check_xy0 = grads_xy0 = tf.gradients(
train_loss_xy0, params_xy0)
if max_grad_norm is not None:
grads_xy0, _ = tf.clip_by_global_norm(grads_xy0, max_grad_norm)
grads_xy0 = list(zip(grads_xy0, params_xy0))
trainer_xy0 = tf.train.RMSPropOptimizer(
learning_rate=lr, decay=alpha, epsilon=epsilon)
_train_xy0 = trainer_xy0.apply_gradients(grads_xy0)
# xy1
self.params_xy1 = params_xy1 = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope='model/xy1') + params_common
train_loss_xy1 = pg_loss_xy1 - entropy * ent_coef + vf_coef * vf_loss
self.grads_check_xy1 = grads_xy1 = tf.gradients(
train_loss_xy1, params_xy1)
if max_grad_norm is not None:
grads_xy1, _ = tf.clip_by_global_norm(grads_xy1, max_grad_norm)
grads_xy1 = list(zip(grads_xy1, params_xy1))
trainer_xy1 = tf.train.RMSPropOptimizer(
learning_rate=lr, decay=alpha, epsilon=epsilon)
_train_xy1 = trainer_xy1.apply_gradients(grads_xy1)
self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, td_targets, masks, actions, xy0, xy1, values):
advs = td_targets - values
for step in range(len(obs)):
cur_lr = self.lr.value()
td_map = {
train_model.X: obs,
A: actions,
XY0: xy0,
XY1: xy1,
ADV: advs,
TD_TARGET: td_targets,
PG_LR: cur_lr
}
if states != []:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _, \
policy_loss_xy0, policy_entropy_xy0, _, \
policy_loss_xy1, policy_entropy_xy1, _ = sess.run(
[pg_loss, vf_loss, entropy, _train,
pg_loss_xy0, entropy_xy0, _train_xy0,
pg_loss_xy1, entropy_xy1, _train_xy1],
td_map)
return policy_loss, value_loss, policy_entropy, \
policy_loss_xy0, policy_entropy_xy0, \
policy_loss_xy1, policy_entropy_xy1
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
print(">> model updated at " + save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
print(">> model loaded from " + load_path)
self.train = train
self.save = save
self.load = load
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
print(">> global_variables_initializer start")
tf.global_variables_initializer().run(session=sess)
print(">> global_variables_initializer complete")
self.saver = tf.train.Saver(max_to_keep=1)
# merged = tf.summary.merge_all()
class Runner(object):
def __init__(self,
env,
model,
nsteps,
nscripts,
nstack,
gamma,
callback=None):
self.env = env
self.model = model
nh, nw, nc = (32, 32, 3)
self.nsteps = nsteps
self.nscripts = nscripts
self.nenv = nenv = env.num_envs
self.batch_ob_shape = (nenv * nsteps, nh, nw, nc * nstack)
self.batch_coord_shape = (nenv * nsteps, 32)
self.obs = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8)
self.available_actions = None
self.base_act_mask = np.full((self.nenv, 2), 0, dtype=np.uint8)
obs, rewards, dones, available_actions, army_counts, control_groups, selected, xy_per_marine = env.reset()
self.xy_per_marine = [{"0": [0, 0], "1": [0, 0]} for _ in range(nenv)]
for env_num, data in enumerate(xy_per_marine):
self.xy_per_marine[env_num] = data
self.army_counts = army_counts
self.control_groups = control_groups
self.selected = selected
self.update_obs(obs) # (2,13,32,32)
self.update_available(available_actions)
self.gamma = gamma
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
self.total_reward = [0.0 for _ in range(nenv)]
self.episode_rewards = []
self.episode_rewards_script = []
self.episode_rewards_a2c = []
self.episodes = 0
self.steps = 0
self.callback = callback
self.action_queue = [[] for _ in range(nenv)]
self.group_list = [[] for _ in range(nenv)]
self.agent_state = ["IDLE" for _ in range(nenv)]
self.dest_per_marine = [{} for _ in range(nenv)]
self.group_id = [0 for _ in range(nenv)]
def update_obs(self, obs): # (self.nenv, 32, 32, 2)
# obs = np.asarray(obs, dtype=np.int32).swapaxes(1, 2).swapaxes(2, 3)
self.obs = np.roll(self.obs, shift=-3, axis=3)
new_map = np.zeros((self.nenv, 32, 32, 3))
new_map[:, :, :, -1] = obs[:, 0, :, :]
for env_num in range(self.nenv):
# print("xy_per_marine: ", self.xy_per_marine)
if "0" not in self.xy_per_marine[env_num]:
self.xy_per_marine[env_num]["0"] = [0, 0]
if "1" not in self.xy_per_marine[env_num]:
self.xy_per_marine[env_num]["1"] = [0, 0]
marine0 = self.xy_per_marine[env_num]["0"]
marine1 = self.xy_per_marine[env_num]["1"]
new_map[env_num, marine0[0], marine0[1], -3] = 1
new_map[env_num, marine1[0], marine1[1], -2] = 1
self.obs[:, :, :, -3:] = new_map
# could not broadcast input array from shape (4,1,32,32) into shape (4,4,32)
def update_available(self, _available_actions):
# print("update_available : ", _available_actions)
self.available_actions = _available_actions
# avail = np.array([[0,1,2,3,4,7], [0,1,2,3,4,7]])
self.base_act_mask = np.full((self.nenv, 3), 0, dtype=np.uint8)
for env_num, action_list in enumerate(_available_actions):
# print("env_num :", env_num, " action_list :", action_list)
for action_num in action_list:
# print("action_num :", action_num)
if (action_num == 4):
self.base_act_mask[env_num][0] = 1
self.base_act_mask[env_num][1] = 1
elif action_num == 0:
self.base_act_mask[env_num][2] = 1
# elif(action_num == 331):
# self.base_act_mask[env_num][2] = 1
def valid_base_action(self, base_actions):
for env_num, action_list in enumerate(self.available_actions):
avail = []
for action_num in action_list:
if (action_num == 4):
avail.append(0)
avail.append(1)
elif action_num == 0:
avail.append(2)
# elif(action_num == 331):
# avail.append(2)
if base_actions[env_num] not in avail:
print("env_num", env_num, " argmax is not valid. random pick ",
avail)
base_actions[env_num] = np.random.choice(avail)
return base_actions
def trans_base_actions(self, base_actions):
new_base_actions = np.copy(base_actions)
for env_num, ba in enumerate(new_base_actions):
if (ba == 0):
new_base_actions[env_num] = 4 # move marine control group 0
elif (ba == 1):
new_base_actions[env_num] = 4 # move marine control group 1
elif (ba == 2):
new_base_actions[env_num] = 0 # no_op
# elif(ba==2):
# new_base_actions[env_num] = 331 # move marine xy0
return new_base_actions
def construct_action(self, base_actions, base_action_spec, x0, y0, x1, y1):
actions = []
for env_num, spec in enumerate(base_action_spec):
# print("spec", spec.args)
args = []
# for arg_idx, arg in enumerate(spec.args):
# #print("arg", arg)
# #print("arg.id", arg.id)
# if(arg.id==0): # screen (32,32) x0, y0
# args.append([int(x0[env_num]), int(y0[env_num])])
# # elif(arg.id==1): # minimap (32,32) x1, y1
# # args.append([int(x1[env_num]), int(y1[env_num])])
# # elif(arg.id==2): # screen2 (32,32) x2, y2
# # args.append([int(x2[env_num]), y2[env_num]])
# elif(arg.id==3): # pi3 queued (2)
# args.append([int(0)])
# elif(arg.id==4): # pi4 control_group_act (5)
# args.append([_CONTROL_GROUP_RECALL])
# elif(arg.id==5): # pi5 control_group_id 10
# args.append([int(base_actions[env_num])]) # 0 => cg 0 / 1 => cg 1
# # elif(arg.id==6): # pi6 select_point_act 4
# # args.append([int(sub6[env_num])])
# # elif(arg.id==7): # pi7 select_add 2
# # args.append([int(sub7[env_num])])
# # elif(arg.id==8): # pi8 select_unit_act 4
# # args.append([int(sub8[env_num])])
# # elif(arg.id==9): # pi9 select_unit_id 500
# # args.append([int(sub9[env_num])])
# # elif(arg.id==10): # pi10 select_worker 4
# # args.append([int(sub10[env_num])])
# # elif(arg.id==11): # pi11 build_queue_id 10
# # args.append([int(sub11[env_num])])
# # elif(arg.id==12): # pi12 unload_id 500
# # args.append([int(sub12[env_num])])
# else:
# raise NotImplementedError("cannot construct this arg", spec.args)
two_action = []
if base_actions[env_num] == 0:
two_action.append(
sc2_actions.FunctionCall(
4,
[[_CONTROL_GROUP_RECALL], [0]]
))
two_action.append(
sc2_actions.FunctionCall(
331,
[[_NOT_QUEUED], [int(x0[env_num]), y0[env_num]]]))
elif base_actions[env_num] == 1:
two_action.append(
sc2_actions.FunctionCall(4, [[_CONTROL_GROUP_RECALL], [1]]))
two_action.append(
sc2_actions.FunctionCall(
331, [[_NOT_QUEUED], [int(x1[env_num]), y1[env_num]]]))
elif base_actions[env_num] == 2:
two_action.append(
sc2_actions.FunctionCall(0, []))
two_action.append(
sc2_actions.FunctionCall(0, []))
# action = sc2_actions.FunctionCall(a, args)
actions.append(two_action)
return actions
def run(self):
mb_obs, mb_td_targets, mb_base_actions, \
mb_xy0, mb_xy1, \
mb_values, mb_dones \
= [], [], [], [], [], [], []
mb_states = self.states
for n in range(self.nsteps):
# pi, pi2, x1, y1, x2, y2, v0
pi1, pi_xy0, pi_xy1, values, states = self.model.step(self.obs, self.states, self.dones)
pi1_noise = np.random.random_sample((self.nenv, 3)) * 0.3
base_actions = np.argmax(
pi1 * self.base_act_mask + pi1_noise, axis=1)
xy0 = np.argmax(pi_xy0, axis=1)
x0 = (xy0 % 32).astype(int)
y0 = (xy0 / 32).astype(int)
xy1 = np.argmax(pi_xy1, axis=1)
x1 = (xy1 % 32).astype(int)
y1 = (xy1 / 32).astype(int)
# Scripted Agent Hacking
for env_num in range(self.nenv):
if env_num >= self.nscripts: # only for scripted agents
continue
ob = self.obs[env_num, :, :, :]
player_relative = ob[:, :, -1]
self.group_list[env_num] = common.update_group_list2(
self.control_groups[env_num])
if len(self.action_queue[env_num]) == 0:
self.action_queue[env_num], self.group_id[env_num], self.dest_per_marine[env_num], \
self.xy_per_marine[env_num] = \
common.solve_tsp(player_relative,
self.selected[env_num][0],
self.group_list[env_num],
self.group_id[env_num],
self.dest_per_marine[env_num],
self.xy_per_marine[env_num])
base_actions[env_num] = 0
x0[env_num] = 0
y0[env_num] = 0
x1[env_num] = 0
y1[env_num] = 0
if len(self.action_queue[env_num]) > 0:
action = self.action_queue[env_num].pop(0)
base_actions[env_num] = action.get("base_action", 0)
x0[env_num] = action.get("x0", 0)
y0[env_num] = action.get("y0", 0)
xy0[env_num] = y0[env_num] * 32 + x0[env_num]
x1[env_num] = action.get("x1", 0)
y1[env_num] = action.get("y1", 0)
xy1[env_num] = y1[env_num] * 32 + x1[env_num]
base_actions = self.valid_base_action(base_actions)
new_base_actions = self.trans_base_actions(base_actions)
base_action_spec = self.env.action_spec(new_base_actions)
# print("base_actions:", base_actions)
actions = self.construct_action(
base_actions,
base_action_spec,
x0,
y0,
x1,
y1
)
mb_obs.append(np.copy(self.obs))
mb_base_actions.append(base_actions)
mb_xy0.append(xy0)
mb_xy1.append(xy1)
mb_values.append(values)
mb_dones.append(self.dones)
# print("final acitons : ", actions)
obs, rewards, dones, \
available_actions, army_counts, \
control_groups, selected, xy_per_marine = self.env.step(actions=actions)
self.army_counts = army_counts
self.control_groups = control_groups
self.selected = selected
for env_num, data in enumerate(xy_per_marine):
self.xy_per_marine[env_num] = data
self.update_available(available_actions)
self.states = states
self.dones = dones
mean_100ep_reward_a2c = 0
for n, done in enumerate(dones):
self.total_reward[n] += float(rewards[n])
if done:
self.obs[n] = self.obs[n] * 0
self.episodes += 1
num_episodes = self.episodes
self.episode_rewards.append(self.total_reward[n])
model = self.model
mean_100ep_reward = round(np.mean(self.episode_rewards[-101:]), 1)
self.episode_rewards_a2c.append(self.total_reward[n])
mean_100ep_reward_a2c = round(np.mean(self.episode_rewards_a2c[-101:]), 1)
nsml.report(
n=n,
reward_a2c=self.total_reward[n],
mean_reward_a2c=mean_100ep_reward_a2c,
reward=self.total_reward[n],
mean_100ep_reward=mean_100ep_reward,
episodes=self.episodes,
step=self.episodes,
scope=locals()
)
pathToSummary = logger.get_dir() + "summaries/env-%i/" % (n + 1)
# Create a summary to monitor rewards
tf.summary.scalar("reward", self.total_reward[n])
tf.summary.scalar("100ep_average", mean_100ep_reward)
# Create summaries to visualize weights
for var in tf.trainable_variables():
tf.summary.histogram(var.name, var)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
# save_summaries(model, pathToSummary)
# print(pathToSummary)
# print("## Env %i DONE: A: %5.2F, V: %5.2F, R: %5.1F" %
# (n + 1, mean_100ep_reward, self.total_reward[n] - mean_100ep_reward, self.total_reward[n]))
# print("env %i reward : %d " % (n+1, self.total_reward[n]))
# if len(self.episode_rewards_a2c) > 4:
# print("episode rewards:", self.episode_rewards_a2c[len(self.episode_rewards_a2c)-5:])
# else:
# print("episode rewards:", self.episode_rewards_a2c)
# print("avg 100ep reward ", mean_100ep_reward)
if self.callback is not None:
self.callback(locals(), globals(),
logger,
n + 1,
mean_100ep_reward,
self.total_reward[n] - mean_100ep_reward,
self.total_reward[n])
self.total_reward[n] = 0
self.group_list[n] = []
self.update_obs(obs)
mb_td_targets.append(rewards)
mb_dones.append(self.dones)
# batch of steps to batch of rollouts
mb_obs = np.asarray(
mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(
self.batch_ob_shape)
mb_td_targets = np.asarray(mb_td_targets, dtype=np.float32).swapaxes(1, 0)
mb_base_actions = np.asarray(
mb_base_actions, dtype=np.int32).swapaxes(1, 0)
mb_xy0 = np.asarray(mb_xy0, dtype=np.int32).swapaxes(1, 0)
mb_xy1 = np.asarray(mb_xy1, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = self.model.value(self.obs, self.states,
self.dones).tolist()
# discount/bootstrap off value fn
for n, (rewards, dones, value) in enumerate(
zip(mb_td_targets, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards + [value], dones + [0],
self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_td_targets[n] = rewards
mb_td_targets = mb_td_targets.flatten()
mb_base_actions = mb_base_actions.flatten()
mb_xy0 = mb_xy0.flatten()
mb_xy1 = mb_xy1.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_td_targets, mb_masks, \
mb_base_actions, mb_xy0, mb_xy1, mb_values
def learn(policy,
env,
seed,
total_timesteps=int(40e6),
gamma=0.99,
log_interval=1,
nprocs=24,
nscripts=12,
nsteps=20,
nstack=4,
ent_coef=0.01,
vf_coef=0.5,
vf_fisher_coef=1.0,
lr=0.25,
max_grad_norm=0.01,
kfac_clip=0.001,
save_interval=10,
lrschedule='linear',
callback=None,
RLlocals=None,
FLAGS=None):
tf.reset_default_graph()
set_global_seeds(seed)
nenvs = nprocs
ob_space = (32, 32, 3) # env.observation_space
ac_space = (32, 32)
make_model = lambda: Model(policy, ob_space, ac_space, nenvs,
total_timesteps,
nprocs=nprocs,
nscripts=nscripts,
nsteps=nsteps,
nstack=nstack,
ent_coef=ent_coef,
vf_coef=vf_coef,
vf_fisher_coef=vf_fisher_coef,
lr=lr,
max_grad_norm=max_grad_norm,
kfac_clip=kfac_clip,
lrschedule=lrschedule)
if save_interval and logger.get_dir():
import cloudpickle
# list_of_files = os.listdir(osp.join(logger.get_dir(), 'models/'))
# if len(list_of_files) > 0:
# filename = "models/%s" % (max(list_of_files, key=extract_number))
#
# print(">> Copying %s to %s" % (logger.get_dir() + filename,
# logger.get_dir() + "minigames_model.pkl"))
#
# with open(osp.join(logger.get_dir(), filename), 'wb') as fh:
# fh.write(cloudpickle.dumps(make_model))
# # os.remove(osp.join(logger.get_dir(), filename))
# else:
with open(osp.join(logger.get_dir(), "minigames_model.pkl"), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
print(">> make_model complete from @ %s!" % (osp.join(logger.get_dir(), 'minigames_model.pkl')))
# list_of_files = os.listdir(osp.join(logger.get_dir(), 'checkpoints/'))
# if len(list_of_files) > 0:
# filename = "experiments/minigames_%i/checkpoints/%s" % \
# (FLAGS.num_agents, max(list_of_files, key=extract_number))
# model.load(filename)
load2(model, "experiments/minigames_%i/checkpoints/" % FLAGS.num_agents)
runner = Runner(
env,
model,
nsteps=nsteps,
nscripts=nscripts,
nstack=nstack,
gamma=gamma,
callback=callback)
nbatch = nenvs * nsteps
tstart = time.time()
summary_writer = tf.summary.FileWriter(logger.get_dir()+"summaries/",
graph=tf.get_default_graph())
for update in range(1, total_timesteps // nbatch + 1):
# summary = model.sess.run([runner.])
# summary_writer.add_summary(summary, update)
obs, states, td_targets, masks, actions, xy0, xy1, values = runner.run()
policy_loss, value_loss, policy_entropy, \
policy_loss_xy0, policy_entropy_xy0, \
policy_loss_xy1, policy_entropy_xy1, \
= model.train(obs, states, td_targets,
masks, actions,
xy0, xy1, values)
model.old_obs = obs
nseconds = time.time() - tstart
fps = int((update * nbatch) / nseconds)
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, td_targets)
nsml.report(
nupdates=update,
total_timesteps=update * nbatch,
fps=fps,
policy_entropy=float(policy_entropy),
policy_loss=float(policy_loss),
policy_loss_xy0=float(policy_loss_xy0),
policy_entropy_xy0=float(policy_entropy_xy0),
policy_loss_xy1=float(policy_loss_xy1),
policy_entropy_xy1=float(policy_entropy_xy1),
value_loss=float(value_loss),
explained_variance=float(ev),
batch_size=nbatch,
step=update,
scope=locals()
)
# pathToSummary = logger.get_dir() + "summaries/env-%i/" % env_nr
# # save_summaries(RLlocals['model'], pathToSummary)
#
# tf.summary.scalar("reward", reward)
# tf.summary.scalar("average", average)
# merged_summary_op = tf.summary.merge_all()
#
# summary_writer = tf.summary.FileWriter(pathToSummary, RLlocals['model'].sess.graph)
logger.record_tabular("fps", fps)
logger.record_tabular("total_timesteps", update * nbatch)
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("rewards", np.mean(runner.total_reward))
# logger.record_tabular("targets", np.mean(td_targets))
# logger.record_tabular("values", np.mean(values))
# logger.record_tabular("nupdates", update)
# logger.record_tabular("policy_entropy", float(policy_entropy))
# logger.record_tabular("policy_loss", float(policy_loss))
# logger.record_tabular("policy_loss_xy0", float(policy_loss_xy0))
# logger.record_tabular("policy_entropy_xy0",
# float(policy_entropy_xy0))
# logger.record_tabular("policy_loss_xy1", float(policy_loss_xy1))
# logger.record_tabular("policy_entropy_xy1",
# float(policy_entropy_xy1))
# logger.record_tabular("policy_loss_y0", float(policy_loss_y0))
# logger.record_tabular("policy_entropy_y0", float(policy_entropy_y0))
# logger.record_tabular("value_loss", float(value_loss))
logger.dump_tabular()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
# if update != save_interval:
#
# oldCheckpointPath = osp.join(logger.get_dir(),
# 'checkpoints/checkpoint%.5i' % (update - save_interval))
# else:
# oldCheckpointPath = osp.join(logger.get_dir(),
# 'checkpoints/checkpoint%.5i' % (update - save_interval + 1))
#
# if os.path.exists(oldCheckpointPath):
# os.remove(oldCheckpointPath)
#
# savepath = osp.join(logger.get_dir(), 'checkpoints/checkpoint%.5i' % update)
# print('Saving to', savepath)
# model.save(savepath)
# model.save()
save_model(model, runner.episodes)
env.close()
def save_summaries(model, summary, path):
print("Saving summaries... at %s" % path)
writer = tf.summary.FileWriter(path, model.sess.graph)
def save_model(model, update=0):
checkpoint_dir = logger.get_dir() + "checkpoints/"
print(">> Saving checkpoint @ %s -%i.data" % (checkpoint_dir, update))
model.saver.save(model.sess, checkpoint_dir, update)
def load2(model, checkpoint_dir):
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...".format(latest_checkpoint))
model.saver.restore(model.sess, latest_checkpoint)
print("Checkpoint loaded")
else:
print("No checkpoints available!\n")
|
from typing import Tuple
from random import shuffle
import numpy as np
from mlpipe.utils import MLPipeLogger, Config
from mlpipe.data_reader.mongodb import MongoDBConnect
def load_ids(
col_details: Tuple[str, str, str],
data_split: Tuple = (60, 40),
sort_by: dict = None,
limit: int = None,
shuffle_data: bool = False,
shuffle_steps: int = 1):
"""
Load MongoDB Document Ids from a collection and split them in training and validation data set
:param col_details: MongoDB collection details with a tuple of 3 string entries
[client name (from config), database name, collection name]
:param data_split: Tuple of percentage of training and test data e.g. (60, 40) for 60% training and 40% test data
:param sort_by: MongoDB sort expression. e.g. { created_at: -1 }
:param limit: maximum number of ids that should be fetched
:param shuffle_data: determine if dataset should be shuffled before splitting it to train and validation data
:param shuffle_steps: step size for the shuffling (e.g. for time series you want a shuffle_steps of
BATCH_SIZE + (TIME_STEPS - 1))
:return: training and validation data
"""
MLPipeLogger.logger.info("Loading Document IDs from MongoDB")
mongo_con = MongoDBConnect()
mongo_con.add_connections_from_config(Config.get_config_parser())
collection = mongo_con.get_collection(*col_details)
if sort_by is None:
sort_by = {"_id": 1}
# note: find()'s second positional argument is a projection, so apply the sort explicitly
db_cursor = collection.find({}, ["_id"]).sort(list(sort_by.items()))
if limit:
db_cursor.limit(limit)
tmp_docs = []
for doc in db_cursor:
tmp_docs.append(doc["_id"])
if shuffle_data:
if shuffle_steps == 1:
shuffle(tmp_docs)
else:
# for the reshape, len(tmp_docs) must be a multiple of shuffle_steps; cut the ids that do not fit
overflow = len(tmp_docs) % shuffle_steps
tmp_docs = tmp_docs[:len(tmp_docs) - overflow]
x = np.reshape(tmp_docs, (-1, shuffle_steps))
np.random.shuffle(x)
tmp_docs = x.flatten().tolist()
train_range = int((data_split[0] / 100) * len(tmp_docs))
train_data = tmp_docs[:train_range]
val_data = tmp_docs[train_range:]
MLPipeLogger.logger.info("Documents loaded (train|validation): {0} | {1}\n\n".format(
len(train_data), len(val_data)))
return train_data, val_data
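# Minimal usage sketch (hedged: connection/collection names and the MLPipe config are
# project-specific assumptions):
#   train_ids, val_ids = load_ids(
#       col_details=("local_client", "my_database", "my_collection"),
#       data_split=(80, 20),
#       limit=10000,
#       shuffle_data=True,
#       shuffle_steps=1,
#   )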
|
"""
Given a 2D matrix matrix, find the sum of the elements inside the rectangle
defined by its upper left corner (row1, col1) and lower right corner
(row2, col2).
Range Sum Query 2D
[
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, [2, 0, 1,] 5],
[4, [1, 0, 1,] 7],
[1, [0, 3, 0,] 5]
]
The above rectangle (marked by the inner brackets) is defined by
(row1, col1) = (2, 1) and (row2, col2) = (4, 3), which contains sum = 8.
Example:
Given matrix = [
[3, 0, 1, 4, 2],
[5, 6, 3, 2, 1],
[1, 2, 0, 1, 5],
[4, 1, 0, 1, 7],
[1, 0, 3, 0, 5]
]
sumRegion(2, 1, 4, 3) -> 8
sumRegion(1, 1, 2, 2) -> 11
sumRegion(1, 2, 2, 4) -> 12
Note:
1. You may assume that the matrix does not change.
2. There are many calls to sumRegion function.
3. You may assume that row1 ≤ row2 and col1 ≤ col2.
"""
# Difficulty: Medium
# 12 / 12 test cases passed.
# Runtime: 1500 ms, faster than 19.88% of Python3 online submissions for Range Sum Query 2D - Immutable.
# Memory Usage: 16.4 MB, less than 6.03% of Python3 online submissions for Range Sum Query 2D - Immutable.
from typing import List  # for the List[List[int]] type hints below
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.matrix = matrix
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
result = 0
for row in range(row1, row2+1):
result += sum(self.matrix[row][col1:col2+1])
return result
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
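# Since the prompt notes that sumRegion is called many times, a common alternative is to
# precompute a 2D prefix-sum table so each query is O(1); a self-contained sketch of that
# variant (same interface, added here for illustration) follows.
class NumMatrixPrefix:
    def __init__(self, matrix: List[List[int]]):
        rows = len(matrix)
        cols = len(matrix[0]) if rows else 0
        # pre[r][c] holds the sum of the sub-matrix matrix[0:r][0:c]
        self.pre = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r in range(rows):
            for c in range(cols):
                self.pre[r + 1][c + 1] = (matrix[r][c] + self.pre[r][c + 1]
                                          + self.pre[r + 1][c] - self.pre[r][c])

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        p = self.pre
        return p[row2 + 1][col2 + 1] - p[row1][col2 + 1] - p[row2 + 1][col1] + p[row1][col1]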
|
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
xenserver_group = cfg.OptGroup('xenserver', title='Xenserver Options')
xenapi_agent_opts = [
cfg.IntOpt('agent_timeout',
default=30,
help="""
Number of seconds to wait for agent's reply to a request.
Nova configures/performs certain administrative actions on a
server with the help of an agent that's installed on the server.
The communication between Nova and the agent is achieved via
sharing messages, called records, over xenstore, a shared
storage across all the domains on a Xenserver host.
Operations performed by the agent on behalf of nova are:
'version', 'key_init', 'password', 'resetnetwork', 'inject_file',
and 'agentupdate'.
To perform one of the above operations, the xapi 'agent' plugin
writes the command and its associated parameters to a certain
location known to the domain and awaits response. On being
notified of the message, the agent performs appropriate actions
on the server and writes the result back to xenstore.
This result is then read by the xapi 'agent' plugin to
determine the success/failure of the operation.
This config option determines how long the xapi 'agent' plugin
shall wait to read the response off of xenstore for a given
request/command. If the agent on the instance fails to write
the result in this time period, the operation is considered to
have timed out.
Services which consume this:
* ``nova-compute``
Possible values:
* Any positive integer
Related options:
* ``agent_version_timeout``
* ``agent_resetnetwork_timeout``
"""),
cfg.IntOpt('agent_version_timeout',
default=300,
help="""
Number of seconds to wait for agent's reply to version request.
This indicates the amount of time xapi 'agent' plugin waits
for the agent to respond to the 'version' request specifically.
The generic timeout for agent communication ``agent_timeout``
is ignored in this case.
During the build process the 'version' request is used to
determine if the agent is available/operational to perform
other requests such as 'resetnetwork', 'password', 'key_init'
and 'inject_file'. If the 'version' call fails, the other
configuration is skipped. So, this configuration option can
also be interpreted as time in which agent is expected to be
fully operational.
Services which consume this:
* ``nova-compute``
Possible values:
* Any positive integer
Related options:
* None
"""),
cfg.IntOpt('agent_resetnetwork_timeout',
default=60,
help="""
Number of seconds to wait for agent's reply to resetnetwork
request.
This indicates the amount of time xapi 'agent' plugin waits
for the agent to respond to the 'resetnetwork' request
specifically. The generic timeout for agent communication
``agent_timeout`` is ignored in this case.
Services which consume this:
* ``nova-compute``
Possible values:
* Any positive integer
Related options:
* None
"""),
cfg.StrOpt('agent_path',
default='usr/sbin/xe-update-networking',
help="""
Path to locate guest agent on the server.
Specifies the path in which the XenAPI guest agent should be
located. If the agent is present, network configuration is not
injected into the image.
Used if compute_driver=xenapi.XenAPIDriver and
flat_injected=True.
Services which consume this:
* ``nova-compute``
Possible values:
* A valid path
Related options:
* ``flat_injected``
* ``compute_driver``
"""),
cfg.BoolOpt('disable_agent',
default=False,
help="""
Disables the use of XenAPI agent.
This configuration option suggests whether the use of agent
should be enabled or not regardless of what image properties
are present. Image properties have an effect only when this
is set to ``True``. Read description of config option
``use_agent_default`` for more information.
Services which consume this:
* ``nova-compute``
Possible values:
* True
* False
Related options:
* ``use_agent_default``
"""),
cfg.BoolOpt('use_agent_default',
default=False,
help="""
Whether or not to use the agent by default when its usage is
enabled but not indicated by the image.
The use of XenAPI agent can be disabled altogether using the
configuration option ``disable_agent``. However, if it is not
disabled, the use of an agent can still be controlled by the
image in use through one of its properties,
``xenapi_use_agent``. If this property is either not present
or specified incorrectly on the image, the use of agent is
determined by this configuration option.
Note that if this configuration is set to ``True`` when the
agent is not present, the boot times will increase
significantly.
Services which consume this:
* ``nova-compute``
Possible values:
* True
* False
Related options:
* ``disable_agent``
"""),
]
xenapi_session_opts = [
cfg.IntOpt('login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
cfg.IntOpt('connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
'Used only if compute_driver=xenapi.XenAPIDriver'),
]
ALL_XENSERVER_OPTS = itertools.chain(
xenapi_agent_opts,
xenapi_session_opts)
def register_opts(conf):
conf.register_group(xenserver_group)
conf.register_opts(ALL_XENSERVER_OPTS, group=xenserver_group)
def list_opts():
return {xenserver_group.name: ALL_XENSERVER_OPTS}
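# Minimal usage sketch (hedged: standard oslo.config flow, not part of the upstream module):
if __name__ == '__main__':
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf([])  # parse an empty command line so the registered defaults become readable
    print(conf.xenserver.agent_timeout)  # expected default: 30
    print(conf.xenserver.login_timeout)  # expected default: 10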
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from pathlib import Path
import spacy
import srsly
import typer
from spacy.kb import KnowledgeBase
from spacy_ann.candidate_generator import CandidateGenerator
from spacy_ann.types import kb_type_vs_index
from wasabi import Printer
from tqdm import tqdm
from itertools import tee
INPUT_DIM = 300 # dimension of pretrained input vectors
DESC_WIDTH = 300 # dimension of output entity vectors
def create_index(
model: str,
kb_dir: Path,
output_dir: Path,
new_model_name: str = "ann_linker",
cg_threshold: float = 0.8,
n_iter: int = 5,
verbose: bool = True,
):
"""Create an AnnLinker based on the Character N-Gram
TF-IDF vectors for aliases in a KnowledgeBase
model (str): spaCy language model directory or name to load
kb_dir (Path): path to the directory with kb entities.jsonl and aliases.jsonl files
output_dir (Path): path to output_dir for spaCy model with ann_linker pipe
kb File Formats
e.g. entities.jsonl
{"id": "a1", "description": "Machine learning (ML) is the scientific study of algorithms and statistical models..."}
{"id": "a2", "description": "ML (\"Meta Language\") is a general-purpose functional programming language. It has roots in Lisp, and has been characterized as \"Lisp with types\"."}
e.g. aliases.jsonl
{"alias": "ML", "entities": ["a1", "a2"], "probabilities": [0.5, 0.5]}
"""
msg = Printer(hide_animation=not verbose)
msg.divider("Load Model")
with msg.loading(f"Loading model {model}"):
nlp = spacy.load(model)
msg.good("Done.")
if output_dir is not None:
output_dir = Path(output_dir / new_model_name)
if not output_dir.exists():
output_dir.mkdir(parents=True)
entities, entities_copy = tee(srsly.read_jsonl(kb_dir / "entities.jsonl"))
total_entities = sum(1 for _ in entities_copy)
aliases, aliases_copy = tee(srsly.read_jsonl(kb_dir / "aliases.jsonl"))
total_aliases = sum(1 for _ in aliases_copy)
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=INPUT_DIM)
empty_doc = nlp.make_doc('').vector
for entity in tqdm(entities, desc='Adding entities to KB', total=total_entities):
id = entity['id']
if not kb.contains_entity(id):
embedding = nlp.make_doc(entity['description']).vector if 'description' in entity else empty_doc
label = entity['label'] if 'label' in entity else 0
if label: label = kb_type_vs_index[label]
kb.add_entity(entity=id,
freq=label, #TODO: Add a proper "label" field (repurposed freq field as the type label)
entity_vector=embedding)
for alias in tqdm(aliases, desc="Setting kb entities and aliases", total=total_aliases):
entities = [e for e in alias["entities"] if kb.contains_entity(e)]
num_entities = len(entities)
if num_entities > 0:
prior_probabilities = alias['probabilities'] if len(alias['probabilities']) == num_entities else [1.0 / num_entities] * num_entities
kb.add_alias(alias=alias["alias"], entities=entities, probabilities=prior_probabilities)
msg.divider("Create ANN Index")
alias_strings = kb.get_alias_strings()
cg = CandidateGenerator().fit(alias_strings, verbose=True)
ann_linker = nlp.create_pipe("ann_linker")
ann_linker.set_kb(kb)
ann_linker.set_cg(cg)
nlp.add_pipe(ann_linker, last=True)
nlp.meta["name"] = new_model_name
nlp.to_disk(output_dir)
nlp.from_disk(output_dir)
if __name__ == "__main__":
typer.run(create_index)
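# Example invocation sketch (hedged: the script/file name and model are assumptions; typer turns
# create_index's positional parameters into CLI arguments and its keyword parameters into options,
# e.g. new_model_name becomes --new-model-name):
#   python create_index.py en_core_web_md ./kb_dir ./output_dir --new-model-name ann_linker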
|
from django.contrib import admin
# Register your models here.
from .models import Ftwhl
admin.site.register(Ftwhl)
|
import wandb
import logging
from typing import Optional
from pytorch_lightning.loggers import WandbLogger
class SilentWandbLogger(WandbLogger):
"""Wandb logger wrapper that updates the log visibility from the client after initialization.
"""
def __init__(self, *args, **kwargs):
# pop the custom kwarg so it is not forwarded to WandbLogger.__init__
level = kwargs.pop("log_level", logging.WARNING)
super().__init__(*args, **kwargs)
logging.getLogger(wandb.__name__).setLevel(level)
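# Usage sketch (hedged: assumes a standard PyTorch Lightning setup; the project name is a placeholder):
#   logger = SilentWandbLogger(project="my-project", log_level=logging.ERROR)
#   trainer = pytorch_lightning.Trainer(logger=logger)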
|
from django.test import TestCase, Client
from .models import Profile
from django.contrib.auth.models import User
import unittest
from .forms import SignUpForm
from .signals import show_login_message, show_logout_message
from django.contrib.auth.signals import user_logged_out, user_logged_in
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
# Create your tests here.
class ModelsTestCase(TestCase):
def setUp(self):
# Create User instance
u = User.objects.create_user('testing', password='hello')
def test_count_user_instance(self):
u = User.objects.filter(username="testing")
self.assertEqual(len(u), 1)
def test_count_profile_instance(self):
u = User.objects.filter(username="testing")
p = Profile.objects.filter(user=u[0])
self.assertEqual(len(p), 1)
def test_index(self):
c = Client()
response = c.get("", secure=True)
self.assertEqual(response.status_code, 200)
def test_register(self):
c = Client()
response = c.get("/register", secure=True)
self.assertEqual(response.status_code, 200)
def test_login(self):
c = Client()
u = User.objects.get_by_natural_key('testing')
request = c.get("/login", secure=True, follow = True)
self.assertEqual(request.status_code, 200)
request2 = c.login(username='testing', password="hello")
self.assertTrue(request2)
def test_logout(self):
c = Client()
u = User.objects.get_by_natural_key('testing')
c.login(username='testing', password="hello")
check = c.get("/logout", secure=True, follow=True)
self.assertRedirects(check, "https://testserver/", 302, 200)
def test_account(self):
c = Client()
u = User.objects.get_by_natural_key('testing')
c.login(username='testing', password="hello")
response = c.get("/account", secure=True, follow=True)
self.assertEqual(response.status_code, 200)
def test_contact(self):
c = Client()
response = c.get("/contact", secure=True)
self.assertEqual(response.status_code, 200)
c.login(username='testing', password="hello")
response2 = c.get("/contact", secure=True)
self.assertEqual(response2.status_code, 200)
|
import os
import time
from krgram.tl.base import TLSerializable
from krgram.utils.bytes import Bytes
class MsgId(TLSerializable):
def __init__(self, delta = 0.0):
self.delta = float(delta)
self.curr = self()
def __call__(self):
now = time.time() + self.delta
now_sec = int(now)
frac = int((now - float(now_sec))*1000.0)
msg_id = 0x0L | (now_sec << 32) | (frac << 3)
return msg_id
def serialize(self):
return Bytes.from_int( self.curr, 8, False, False )
def deserialize_from(self, stream):
self.curr = Bytes(stream.read(8)).to_int(False, False)
class MsgSeqNo(TLSerializable):
def __init__(self ):
self._content_related_sended = 0
self._curr = 0
def __call__(self, content_related = True):
seq_no = self._content_related_sended*2
if content_related:
seq_no += 1
self._content_related_sended += 1
return seq_no
def serialize(self):
return Bytes.from_int(self._curr, 4, False, False)
def deserialize_from(self, stream):
self._curr = Bytes(stream.read(4)).to_int(False, False)
self._content_related_sended = self._curr // 2
class SessionId(TLSerializable):
def __init__(self, sid):
if sid is None:
raise ValueError("Session ID cannot be None")
else:
if isinstance(sid, Bytes):
self._id = sid.to_int()
else:
self._id = int(sid)
@staticmethod
def create():
sid = Bytes(os.urandom(8))
return SessionId(sid)
def __call__(self):
return self.serialize()
def serialize(self):
return Bytes.from_int(self._id, 8, False, False)
def deserialize_from(self, stream):
self._id = Bytes(stream.read(8)).to_int(False, False)
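# Brief usage sketch (hedged; this module is Python 2 code as written):
#   mid = MsgId()                 # ids pack the unix time into the high 32 bits
#   print hex(mid())
#   seq = MsgSeqNo()
#   print seq(True), seq(False), seq(True)   # -> 1 2 3 (content-related messages get odd numbers)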
|
from glob import iglob
from pathlib import Path
from typing import Iterable, Union
def normalize_path(path: Union[str, Iterable[Union[Path, str]]]) -> Iterable[Path]:
# Convert a single string to an iterable
if isinstance(path, str):
path = [path]
def handle_path(path_obj: Path):
if path_obj.is_file():
return (path_obj,)
elif path_obj.is_dir():
return (x for x in path_obj.iterdir() if x.is_file())
# non-existent or special paths contribute nothing (avoids set.update(None))
return ()
accepted_files = set()
for p in path:
try:
# Try to handle p as if it was a string
for g in map(Path, iglob(p, recursive=True)):
accepted_files.update(handle_path(g))
except TypeError:
# Not a string, try to handle it like a path
accepted_files.update(handle_path(p))
return accepted_files
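# Usage sketch (the glob pattern and file names below are hypothetical):
#   for f in sorted(normalize_path(["src/**/*.py", Path("README.md")])):
#       print(f)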
|
from .loss import nce_loss, negative_sampling_loss
|
"""Subject definitions for project"""
from __future__ import absolute_import
import seaborn as sns
from six.moves import zip
BEHAVE_SUBJS = ["B979", "B1107", "B1082", "B1218", "B1222", "B1101", "B1088", "B1105"]
BEHAVE_COLORS = [
"nice blue",
"windows blue",
"off blue",
"stormy blue",
"fern",
"faded green",
"dusty purple",
"dark lilac",
"red",
]
BEHAVE_COLOR_MAP = {
subj: color for subj, color in zip(BEHAVE_SUBJS, sns.xkcd_palette(BEHAVE_COLORS))
}
TRAINING = {}
for subj in ["B979", "B1107", "B1082", "B1218"]:
TRAINING[subj] = "ABCD|EFGH"
for subj in ["B1088", "B1105"]:
TRAINING[subj] = "ABGH|EFCD"
for subj in ["B1101", "B1222"]:
TRAINING[subj] = "ABEF|CDGH"
EPHYS_SUBJS = [
"B1101",
"B1218",
"B1134",
"B1088",
"B1107",
"B1096",
"B1229",
"B1082",
"B1183",
]
|
from typing import Dict, Any
import numpy as np
import os
class RandomSearch:
@staticmethod
def random_choice(*args):
choices = []
for arg in args:
choices.append(arg)
return lambda: np.random.choice(choices)
@staticmethod
def random_integer(low, high):
return lambda: int(np.random.randint(low, high))
@staticmethod
def random_subset(*args):
choices = []
for arg in args:
choices.append(arg)
func = lambda: np.random.choice(choices, np.random.randint(1, len(choices)+1), replace=False)
return func
@staticmethod
def random_uniform(low, high):
return lambda: np.random.uniform(low, high)
class HyperparameterSearch:
def __init__(self, **kwargs):
self.search_space = {}
self.lambda_ = lambda: 0
for k, v in kwargs.items():
self.search_space[k] = v
def parse(self, val: Any):
if isinstance(val, type(self.lambda_)) and val.__name__ == self.lambda_.__name__:
val = val()
if isinstance(val, (int, np.int)):
return int(val)
elif isinstance(val, (float, np.float)):
return float(val)
elif isinstance(val, (np.ndarray, list)):
return ",".join(val)
else:
return val
elif isinstance(val, (int, np.int)):
return int(val)
elif isinstance(val, (float, np.float)):
return float(val)
elif isinstance(val, (np.ndarray, list)):
return ",".join(val)
elif val is None:
return None
else:
return val
def sample(self) -> Dict:
res = {}
for k, v in self.search_space.items():
res[k] = self.parse(v)
return res
def update_environment(self, sample) -> None:
for k, v in sample.items():
os.environ[k] = str(v)
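# --- Hypothetical usage sketch (not part of the original module) ---
# The environment-variable names and ranges below are illustrative only.
#
#   search = HyperparameterSearch(
#       LEARNING_RATE=RandomSearch.random_uniform(1e-5, 1e-3),
#       NUM_LAYERS=RandomSearch.random_integer(1, 4),
#       ACTIVATION=RandomSearch.random_choice("relu", "tanh"),
#   )
#   sample = search.sample()          # draw one concrete configuration
#   search.update_environment(sample) # export it as environment variables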
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^showprofile/$', views.showprofile, name='showprofile'),
url(r'^home/$', views.home, name='home'),
url(r'^homegraph/$', views.homegraph, name='homegraph'),
url(r'^addprofile/$', views.addprofile, name='addprofile'),
url(r'^predict/$', views.predict, name='predict'),
url(r'^userprofile/$', views.userprofile, name='userprofile'),
url(r'^jsonSubject.json$', views.jsonSubject, name='jsonSubject'), #get data
url(r'^jsonEnrollment.json$', views.jsonEnrollment, name='jsonEnrollment'),
url(r'^jsonStudent.json$', views.jsonStudent, name='jsonStudent'),
url(r'^jsonProvience.json$', views.jsonProvience, name='jsonProvience'),
url(r'^coordinate_predict$', views.coordinate_predict, name='coordinate_predict'),
url(r'^coordinate_home$', views.coordinate_home, name='coordinate_home'),
url(r'^test$', views.test, name='test'),
url(r'^testcoor$', views.testcoor, name='testcoor'),
]
|
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
class ModelsHandler:
def __init__(self):
# empty init
pass
@staticmethod
def get_models(classifiers: list) -> list:
models = []
for model in classifiers:
if model == 'RandomForest':
models.append(RandomForestClassifier())
elif model == 'GausNaiveBayes':
models.append(GaussianNB())
elif model == 'LogReg':
models.append(LogisticRegression())
elif model == 'dummy':
models.append(DummyClassifier())
else:
raise ("invalid classifier: %s", model)
return models
@staticmethod
def get_model(classifier: str) -> object:
if classifier == 'RandomForest':
return RandomForestClassifier()
elif classifier == 'GausNaiveBayes':
return GaussianNB()
elif classifier == 'LogReg':
return LogisticRegression()
elif classifier == 'dummy':
return DummyClassifier()
else:
raise ("invalid classifier: %s", classifier)
|
import asyncio
import json
import logging
import sqlalchemy
import psutil
import shutil
from flask import request, url_for, jsonify, make_response, Response, send_file
from flask_restful import Api, Resource, fields, marshal, marshal_with
from werkzeug.exceptions import Conflict, UnprocessableEntity
from meltano.core.job import JobFinder, State
from meltano.core.behavior.canonical import Canonical
from meltano.core.plugin import PluginRef, Profile
from meltano.core.plugin.error import PluginExecutionError, PluginLacksCapabilityError
from meltano.core.plugin.settings_service import (
PluginSettingsService,
PluginSettingValueSource,
)
from meltano.core.plugin_discovery_service import (
PluginDiscoveryService,
PluginNotFoundError,
)
from meltano.core.plugin_invoker import invoker_factory
from meltano.core.plugin_install_service import PluginInstallService
from meltano.core.project import Project
from meltano.core.project_add_service import ProjectAddService
from meltano.core.config_service import ConfigService
from meltano.core.schedule_service import (
ScheduleService,
ScheduleAlreadyExistsError,
ScheduleDoesNotExistError,
)
from meltano.core.utils import flatten, iso8601_datetime, slugify
from meltano.core.logging import (
JobLoggingService,
MissingJobLogException,
SizeThresholdJobLogException,
)
from meltano.api.api_blueprint import APIBlueprint
from meltano.api.models import db
from meltano.api.models.subscription import Subscription
from meltano.api.json import freeze_keys
from meltano.api.executor import run_elt
from flask_security import roles_required
from .errors import InvalidFileNameError
from .upload_helper import InvalidFileTypeError, InvalidFileSizeError, UploadHelper
from .utils import enforce_secure_filename
def freeze_profile_config_keys(profile):
profile["config"] = freeze_keys(profile.get("config", {}))
profile["config_sources"] = freeze_keys(profile.get("config_sources", {}))
def validate_plugin_config(
plugin: PluginRef, name, value, project: Project, settings: PluginSettingsService
):
setting_def = settings.find_setting(plugin, name)
    # we want to prevent editing of protected settings from the UI
if setting_def.protected:
logging.warning("Cannot set a 'protected' configuration externally.")
return False
if setting_def.kind == "file" and value and value != "":
uploads_directory = project.extract_dir(plugin.full_name)
resolved_file_path = project.root_dir(value).resolve()
if not str(resolved_file_path).startswith(str(uploads_directory) + "/"):
logging.warning(
"Cannot set a file configuration to a path outside the project directory"
)
return False
old_value, source = settings.get_value(db.session, plugin, name)
if source in (PluginSettingValueSource.ENV, PluginSettingValueSource.MELTANO_YML):
logging.warning(
"Cannot override a configuration set in the environment or meltano.yml."
)
return False
return True
orchestrationsBP = APIBlueprint("orchestrations", __name__)
orchestrationsAPI = Api(
orchestrationsBP,
errors={
"UnprocessableEntity": {
"error": True,
"code": "The subscription could not be created.",
"status": UnprocessableEntity.code,
},
"Conflict": {
"error": True,
"code": "A subscription already exists for this address.",
"status": Conflict.code,
},
},
)
@orchestrationsBP.errorhandler(ScheduleAlreadyExistsError)
def _handle(ex):
return (
jsonify(
{
"error": True,
"code": f"A pipeline with the name '{ex.schedule.name}' already exists. Try renaming the pipeline.",
}
),
409,
)
@orchestrationsBP.errorhandler(ScheduleDoesNotExistError)
def _handle(ex):
return (
jsonify(
{
"error": True,
"code": f"A pipeline with the name '{ex.name}' does not exist..",
}
),
404,
)
@orchestrationsBP.errorhandler(InvalidFileNameError)
def _handle(ex):
return (jsonify({"error": True, "code": f"The file lacks a valid name."}), 400)
@orchestrationsBP.errorhandler(InvalidFileTypeError)
def _handle(ex):
return (
jsonify(
{
"error": True,
"code": f"The file '{ex.file.filename}' must be one of the following types: {ex.extensions}",
}
),
400,
)
@orchestrationsBP.errorhandler(InvalidFileSizeError)
def _handle(ex):
return (
jsonify(
{
"error": True,
"code": f"The file '{ex.file.filename}' is empty or exceeds the {ex.max_file_size} size limit.",
}
),
400,
)
@orchestrationsBP.errorhandler(MissingJobLogException)
def _handle(ex):
return (jsonify({"error": False, "code": str(ex)}), 204)
@orchestrationsBP.route("/jobs/state", methods=["POST"])
def job_state() -> Response:
"""
Endpoint for getting the status of N jobs
"""
project = Project.find()
poll_payload = request.get_json()
job_ids = poll_payload["job_ids"]
jobs = []
for job_id in job_ids:
finder = JobFinder(job_id)
state_job = finder.latest(db.session)
# Validate existence first as a job may not be queued yet as a result of
# another prerequisite async process (dbt installation for example)
if state_job:
state_job_success = finder.latest_success(db.session)
jobs.append(
{
"job_id": job_id,
"is_complete": state_job.is_complete(),
"has_error": state_job.has_error(),
"started_at": state_job.started_at,
"ended_at": state_job.ended_at,
"has_ever_succeeded": state_job_success.is_success()
if state_job_success
else None,
}
)
return jsonify({"jobs": jobs})
@orchestrationsBP.route("/jobs/<job_id>/log", methods=["GET"])
def job_log(job_id) -> Response:
"""
Endpoint for getting the most recent log generated by a job with job_id
"""
project = Project.find()
try:
log_service = JobLoggingService(project)
log = log_service.get_latest_log(job_id)
has_log_exceeded_max_size = False
except SizeThresholdJobLogException as err:
log = None
has_log_exceeded_max_size = True
finder = JobFinder(job_id)
state_job = finder.latest(db.session)
state_job_success = finder.latest_success(db.session)
return jsonify(
{
"job_id": job_id,
"log": log,
"has_log_exceeded_max_size": has_log_exceeded_max_size,
"has_error": state_job.has_error() if state_job else False,
"started_at": state_job.started_at if state_job else None,
"ended_at": state_job.ended_at if state_job else None,
"trigger": state_job.trigger if state_job else None,
"has_ever_succeeded": state_job_success.is_success()
if state_job_success
else None,
}
)
@orchestrationsBP.route("/jobs/<job_id>/download", methods=["GET"])
def download_job_log(job_id) -> Response:
"""
Endpoint for downloading a job log with job_id
"""
project = Project.find()
log_service = JobLoggingService(project)
return send_file(log_service.get_downloadable_log(job_id), mimetype="text/plain")
@orchestrationsBP.route("/run", methods=["POST"])
def run():
project = Project.find()
schedule_payload = request.get_json()
job_id = run_elt(project, schedule_payload)
return jsonify({"job_id": job_id}), 202
@orchestrationsBP.route(
"/<plugin_ref:plugin_ref>/configuration/upload-file", methods=["POST"]
)
@roles_required("admin")
def upload_plugin_configuration_file(plugin_ref) -> Response:
"""
Endpoint for uploading a file for a specific plugin's configuration profile
"""
file = request.files["file"]
setting_name = enforce_secure_filename(request.form["setting_name"])
tmp = request.form.get("tmp", False)
project = Project.find()
directory = project.extract_dir(
plugin_ref.full_name, ("tmp" if tmp else ""), setting_name
)
upload_helper = UploadHelper()
file_path = upload_helper.upload_file(directory, file)
return jsonify({"path": file_path, "setting_name": setting_name}), 200
@orchestrationsBP.route(
"/<plugin_ref:plugin_ref>/configuration/delete-uploaded-file", methods=["POST"]
)
@roles_required("admin")
def delete_plugin_configuration_file(plugin_ref) -> Response:
"""
Endpoint for deleting a file for a specific plugin's configuration profile
"""
payload = request.get_json()
setting_name = enforce_secure_filename(payload["setting_name"])
tmp = payload.get("tmp", False)
project = Project.find()
directory = project.extract_dir(
plugin_ref.full_name, ("tmp" if tmp else ""), setting_name
)
shutil.rmtree(directory)
return jsonify({"setting_name": setting_name}), 200
@orchestrationsBP.route("/<plugin_ref:plugin_ref>/configuration", methods=["GET"])
def get_plugin_configuration(plugin_ref) -> Response:
"""
Endpoint for getting a plugin's configuration profiles
"""
project = Project.find()
settings = PluginSettingsService(project, show_hidden=False)
plugin = ConfigService(project).get_plugin(plugin_ref)
discovery_service = PluginDiscoveryService(project)
try:
plugin_def = discovery_service.find_plugin(plugin.type, plugin.name)
settings_group_validation = plugin_def.settings_group_validation
except PluginNotFoundError:
settings_group_validation = []
profiles = settings.profiles_with_config(db.session, plugin, redacted=True)
for profile in profiles:
freeze_profile_config_keys(profile)
return jsonify(
{
"profiles": profiles,
"settings": Canonical.as_canonical(settings.definitions(plugin)),
"settings_group_validation": settings_group_validation,
}
)
@orchestrationsBP.route(
"/<plugin_ref:plugin_ref>/configuration/profiles", methods=["POST"]
)
@roles_required("admin")
def add_plugin_configuration_profile(plugin_ref) -> Response:
"""
Endpoint for adding a configuration profile to a plugin
"""
payload = request.get_json()
project = Project.find()
config = ConfigService(project)
plugin = config.get_plugin(plugin_ref)
settings = PluginSettingsService(project)
# create the new profile for this plugin
name = payload["name"]
profile = plugin.add_profile(slugify(name), label=name)
config.update_plugin(plugin)
profile_config = settings.profile_with_config(
db.session, plugin, profile, redacted=True
)
freeze_profile_config_keys(profile_config)
return jsonify(profile_config)
@orchestrationsBP.route("/<plugin_ref:plugin_ref>/configuration", methods=["PUT"])
@roles_required("admin")
def save_plugin_configuration(plugin_ref) -> Response:
"""
Endpoint for persisting a plugin configuration
"""
project = Project.find()
payload = request.get_json()
plugin = ConfigService(project).get_plugin(plugin_ref)
# TODO iterate pipelines and save each, also set this connector's profile (reuse `pipelineInFocusIndex`?)
settings = PluginSettingsService(project, show_hidden=False)
for profile in payload:
# select the correct profile
name = profile["name"]
plugin.use_profile(plugin.get_profile(name))
for name, value in profile["config"].items():
if not validate_plugin_config(plugin, name, value, project, settings):
continue
if value == "":
settings.unset(db.session, plugin, name)
else:
settings.set(db.session, plugin, name, value)
profiles = settings.profiles_with_config(db.session, plugin, redacted=True)
for profile in profiles:
freeze_profile_config_keys(profile)
return jsonify(profiles)
@orchestrationsBP.route("/<plugin_ref:plugin_ref>/configuration/test", methods=["POST"])
@roles_required("admin")
def test_plugin_configuration(plugin_ref) -> Response:
"""
Endpoint for testing a plugin configuration's valid connection
"""
project = Project.find()
payload = request.get_json()
config_service = ConfigService(project)
plugin = config_service.get_plugin(plugin_ref)
# load the correct profile
plugin.use_profile(plugin.get_profile(payload.get("profile")))
settings = PluginSettingsService(project, show_hidden=False)
config = payload.get("config", {})
valid_config = {
name: value
for name, value in config.items()
if validate_plugin_config(plugin, name, value, project, settings)
}
async def test_stream(tap_stream) -> bool:
while not tap_stream.at_eof():
message = await tap_stream.readline()
json_dict = json.loads(message)
if json_dict["type"] == "RECORD":
return True
return False
    async def test_extractor(config={}):
        # track the subprocess so the finally block can always clean it up
        process = None
        try:
settings_service = settings.with_config_override(
PluginSettingsService.unredact(config)
)
invoker = invoker_factory(
project,
plugin,
prepare_with_session=db.session,
plugin_settings_service=settings_service,
)
process = await invoker.invoke_async(stdout=asyncio.subprocess.PIPE)
return await test_stream(process.stdout)
except Exception as err:
# if anything happens, this is not successful
return False
finally:
try:
if process:
psutil.Process(process.pid).terminate()
except Exception as err:
logging.debug(err)
loop = asyncio.get_event_loop()
success = loop.run_until_complete(test_extractor(valid_config))
return jsonify({"is_success": success}), 200
@orchestrationsBP.route("/pipeline-schedules", methods=["GET"])
def get_pipeline_schedules():
"""
Endpoint for getting the pipeline schedules
"""
project = Project.find()
schedule_service = ScheduleService(project)
schedules = list(map(dict, schedule_service.schedules()))
for schedule in schedules:
finder = JobFinder(schedule["name"])
state_job = finder.latest(db.session)
schedule["has_error"] = state_job.has_error() if state_job else False
schedule["is_running"] = state_job.is_running() if state_job else False
schedule["job_id"] = schedule["name"]
schedule["started_at"] = state_job.started_at if state_job else None
schedule["ended_at"] = state_job.ended_at if state_job else None
schedule["trigger"] = state_job.trigger if state_job else None
state_job_success = finder.latest_success(db.session)
schedule["has_ever_succeeded"] = (
state_job_success.is_success() if state_job_success else None
)
schedule["start_date"] = (
schedule["start_date"].date().isoformat()
if schedule["start_date"]
else None
)
return jsonify(schedules)
@orchestrationsBP.route("/pipeline-schedules", methods=["POST"])
@roles_required("admin")
def save_pipeline_schedule() -> Response:
"""
Endpoint for persisting a pipeline schedule
"""
payload = request.get_json()
# Airflow requires alphanumeric characters, dashes, dots and underscores exclusively
name = payload["name"]
slug = slugify(name)
extractor = payload["extractor"]
loader = payload["loader"]
transform = payload["transform"]
interval = payload["interval"]
project = Project.find()
schedule_service = ScheduleService(project)
schedule = schedule_service.add(
db.session, slug, extractor, loader, transform, interval
)
schedule = dict(schedule)
schedule["start_date"] = (
schedule["start_date"].date().isoformat() if schedule["start_date"] else None
)
return jsonify(schedule), 201
@orchestrationsBP.route("/pipeline-schedules", methods=["PUT"])
@roles_required("admin")
def update_pipeline_schedule() -> Response:
"""
Endpoint for updating a pipeline schedule
"""
payload = request.get_json()
project = Project.find()
schedule_service = ScheduleService(project)
interval = payload["interval"]
plugin_namespace = payload["plugin_namespace"]
schedule = schedule_service.find_namespace_schedule(plugin_namespace)
schedule.interval = interval
schedule_service.update_schedule(schedule)
schedule = dict(schedule)
schedule["start_date"] = (
schedule["start_date"].date().isoformat() if schedule["start_date"] else None
)
return jsonify(schedule), 201
@orchestrationsBP.route("/pipeline-schedules", methods=["DELETE"])
@roles_required("admin")
def delete_pipeline_schedule() -> Response:
"""
Endpoint for deleting a pipeline schedule
"""
payload = request.get_json()
project = Project.find()
schedule_service = ScheduleService(project)
name = payload["name"]
schedule_service.remove(name)
return jsonify(name), 201
class SubscriptionsResource(Resource):
SubscriptionDefinition = {
"id": fields.String,
"recipient": fields.String,
"event_type": fields.String,
"source_type": fields.String,
"source_id": fields.String,
"created_at": fields.DateTime,
}
@marshal_with(SubscriptionDefinition)
def get(self):
return Subscription.query.all()
@marshal_with(SubscriptionDefinition)
def post(self):
payload = request.get_json()
try:
subscription = Subscription(**payload)
db.session.add(subscription)
db.session.commit()
except AssertionError as err:
raise UnprocessableEntity() from err
except sqlalchemy.exc.IntegrityError:
raise Conflict()
return subscription, 201
class SubscriptionResource(Resource):
def delete(self, id):
Subscription.query.filter_by(id=id).delete()
db.session.commit()
return "", 204
orchestrationsAPI.add_resource(SubscriptionsResource, "/subscriptions")
orchestrationsAPI.add_resource(SubscriptionResource, "/subscriptions/<id>")
|
def test_import_biopython():
import Bio
|
"""Tag the sandbox for release, make source and doc tarballs.
Requires Python 2.6
Example of invocation (use to test the script):
python makerelease.py --force --retag --platform=msvc6,msvc71,msvc80,mingw -ublep 0.5.0 0.6.0-dev
Example of invocation when doing a release:
python makerelease.py 0.5.0 0.6.0-dev
"""
import os.path
import subprocess
import sys
import doxybuild
import subprocess
import xml.etree.ElementTree as ElementTree
import shutil
import urllib2
import tempfile
import os
import time
from devtools import antglob, fixeol, tarball
SVN_ROOT = 'https://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/'
SVN_TAG_ROOT = SVN_ROOT + 'tags/jsoncpp'
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
def set_version( version ):
with open('version','wb') as f:
f.write( version.strip() )
def rmdir_if_exist( dir_path ):
if os.path.isdir( dir_path ):
shutil.rmtree( dir_path )
class SVNError(Exception):
pass
def svn_command( command, *args ):
cmd = ['svn', '--non-interactive', command] + list(args)
print 'Running:', ' '.join( cmd )
process = subprocess.Popen( cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
error = SVNError( 'SVN command failed:\n' + stdout )
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
stdout = svn_command( 'status', '--xml' )
etree = ElementTree.fromstring( stdout )
msg = []
for entry in etree.getiterator( 'entry' ):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
if msg:
msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
return '\n'.join( msg )
def svn_join_url( base_url, suffix ):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
def svn_check_if_tag_exist( tag_url ):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
list_stdout = svn_command( 'list', tag_url )
except SVNError, e:
if e.returncode != 1 or not str(e).find('tag_url'):
raise e
# otherwise ignore error, meaning tag does not exist
return False
return True
def svn_commit( message ):
"""Commit the sandbox, providing the specified comment.
"""
svn_command( 'ci', '-m', message )
def svn_tag_sandbox( tag_url, message ):
"""Makes a tag based on the sandbox revisions.
"""
svn_command( 'copy', '-m', message, '.', tag_url )
def svn_remove_tag( tag_url, message ):
"""Removes an existing tag.
"""
svn_command( 'delete', '-m', message, tag_url )
def svn_export( tag_url, export_dir ):
"""Exports the tag_url revision to export_dir.
    Target directory, including its parents, is created if it does not exist.
    If the directory export_dir exists, it is deleted before the export proceeds.
"""
rmdir_if_exist( export_dir )
svn_command( 'export', tag_url, export_dir )
def fix_sources_eol( dist_dir ):
"""Set file EOL for tarball distribution.
"""
print 'Preparing exported source file EOL for distribution...'
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
win_sources = antglob.glob( dist_dir,
includes = '**/*.sln **/*.vcproj',
prune_dirs = prune_dirs )
unix_sources = antglob.glob( dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
prune_dirs = prune_dirs )
for path in win_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
for path in unix_sources:
fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
def download( url, target_path ):
"""Download file represented by url to target_path.
"""
f = urllib2.urlopen( url )
try:
data = f.read()
finally:
f.close()
fout = open( target_path, 'wb' )
try:
fout.write( data )
finally:
fout.close()
def check_compile( distcheck_top_dir, platform ):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
print 'Running:', ' '.join( cmd )
log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
flog = open( log_path, 'wb' )
try:
process = subprocess.Popen( cmd,
stdout=flog,
stderr=subprocess.STDOUT,
cwd=distcheck_top_dir )
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
def write_tempfile( content, **kwargs ):
fd, path = tempfile.mkstemp( **kwargs )
f = os.fdopen( fd, 'wt' )
try:
f.write( content )
finally:
f.close()
return path
class SFTPError(Exception):
pass
def run_sftp_batch( userhost, sftp, batch, retry=0 ):
path = write_tempfile( batch, suffix='.sftp', text=True )
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in xrange(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
print heading, ' '.join( cmd )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode != 0:
error = SFTPError( 'SFTP batch failed:\n' + stdout )
else:
break
if error:
raise error
return stdout
def sourceforge_web_synchro( sourceforge_project, doc_dir,
user=None, sftp='sftp' ):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
stdout = run_sftp_batch( userhost, sftp, """
cd htdocs
dir
exit
""" )
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
line = line.strip()
if not collect and line.endswith('> dir'):
collect = True
elif collect and line.endswith('> exit'):
break
elif collect == 1:
collect = 2
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
existing_paths.add( path[0] )
upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print 'Removing the following file from web:'
print '\n'.join( paths_to_remove )
stdout = run_sftp_batch( userhost, sftp, """cd htdocs
rm %s
exit""" % ' '.join(paths_to_remove) )
print 'Uploading %d files:' % len(upload_paths)
batch_size = 10
upload_paths = list(upload_paths)
start_time = time.time()
for index in xrange(0,len(upload_paths),batch_size):
paths = upload_paths[index:index+batch_size]
file_per_sec = (time.time() - start_time) / (index+1)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print '%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec)
run_sftp_batch( userhost, sftp, """cd htdocs
lcd %s
mput %s
exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
run_sftp_batch( userhost, sftp, """
mput %s
exit
""" % (' '.join(paths),) )
def main():
usage = """%prog release_version next_dev_version
Update 'version' file to release_version and commit.
Generates the document tarball.
Tags the sandbox revision with release_version.
Update 'version' file to next_dev_version and commit.
Performs an svn export of tag release version, and build a source tarball.
Must be started in the project top directory.
Warning: --force should only be used when developing/testing the release script.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('--dot', dest="dot_path", action='store', default=doxybuild.find_program('dot'),
help="""Path to GraphViz dot tool. Must be full qualified path. [Default: %default]""")
parser.add_option('--doxygen', dest="doxygen_path", action='store', default=doxybuild.find_program('doxygen'),
help="""Path to Doxygen tool. [Default: %default]""")
parser.add_option('--force', dest="ignore_pending_commit", action='store_true', default=False,
help="""Ignore pending commit. [Default: %default]""")
parser.add_option('--retag', dest="retag_release", action='store_true', default=False,
help="""Overwrite release existing tag if it exist. [Default: %default]""")
parser.add_option('-p', '--platforms', dest="platforms", action='store', default='',
help="""Comma separated list of platform passed to scons for build check.""")
parser.add_option('--no-test', dest="no_test", action='store_true', default=False,
help="""Skips build check.""")
parser.add_option('--no-web', dest="no_web", action='store_true', default=False,
help="""Do not update web site.""")
parser.add_option('-u', '--upload-user', dest="user", action='store',
help="""Sourceforge user for SFTP documentation upload.""")
parser.add_option('--sftp', dest='sftp', action='store', default=doxybuild.find_program('psftp', 'sftp'),
help="""Path of the SFTP compatible binary used to upload the documentation.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 2:
        parser.error( 'release_version and next_dev_version are required on the command-line.' )
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
parser.error( 'You must specify either --platform or --no-test option.' )
if options.ignore_pending_commit:
msg = ''
else:
msg = check_no_pending_commit()
if not msg:
print 'Setting version to', release_version
set_version( release_version )
svn_commit( 'Release ' + release_version )
tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
if svn_check_if_tag_exist( tag_url ):
if options.retag_release:
svn_remove_tag( tag_url, 'Overwriting previous tag' )
else:
                print 'Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url
sys.exit( 1 )
svn_tag_sandbox( tag_url, 'Release ' + release_version )
        print 'Generating doxygen documentation...'
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
doc_distcheck_dir = 'dist/doccheck'
tarball.decompress( doc_tarball_path, doc_distcheck_dir )
doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
export_dir = 'dist/export'
svn_export( tag_url, export_dir )
fix_sources_eol( export_dir )
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print 'Generating source tarball to', source_tarball_path
tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print 'Decompressing source tarball to', distcheck_dir
rmdir_if_exist( distcheck_dir )
tarball.decompress( source_tarball_path, distcheck_dir )
scons_local_path = 'dist/scons-local.tar.gz'
print 'Downloading scons-local to', scons_local_path
download( SCONS_LOCAL_URL, scons_local_path )
print 'Decompressing scons-local to', distcheck_top_dir
tarball.decompress( scons_local_path, distcheck_top_dir )
# Run compilation
print 'Compiling decompressed tarball'
all_build_status = True
for platform in options.platforms.split(','):
print 'Testing platform:', platform
build_status, log_path = check_compile( distcheck_top_dir, platform )
print 'see build log:', log_path
print build_status and '=> ok' or '=> FAILED'
all_build_status = all_build_status and build_status
if not build_status:
print 'Testing failed on at least one platform, aborting...'
svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
sys.exit(1)
if options.user:
if not options.no_web:
print 'Uploading documentation using user', options.user
sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
print 'Completed documentation upload'
print 'Uploading source and documentation tarballs for release using user', options.user
sourceforge_release_tarball( SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
user=options.user, sftp=options.sftp )
print 'Source and doc release tarballs uploaded'
else:
            print 'No upload user specified. Web site and download tarballs were not uploaded.'
print 'Tarball can be found at:', doc_tarball_path
# Set next version number and commit
set_version( next_version )
svn_commit( 'Released ' + release_version )
else:
sys.stderr.write( msg + '\n' )
if __name__ == '__main__':
main()
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="nbspellcheck",
version="0.0.3",
author="Colin Bernet",
author_email="colin.bernet@gmail.com",
description="Spell checker for jupyter notebooks",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/cbernet/nbspellcheck",
python_requires='>3.5',
packages=['nbspellcheck'],
scripts=['nbspellcheck/nbspellcheck.py'],
install_requires = [
'pyspellchecker',
'termcolor',
'nltk'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
def grid_matrix():
param_grid_reg = {
'C': [0.01,0.1,1,10],
}
model_reg = LogisticRegression(solver='liblinear', penalty="l2")
param_grid_tree = {
'max_depth':range(10, 30),
}
model_tree = DecisionTreeClassifier(criterion="gini", random_state=42)
param_grid_forest = {
'n_estimators': [50, 100, 150],
'max_depth':range(3, 8),
'max_features': ['auto', 'sqrt'],
}
model_forest = RandomForestClassifier(oob_score=True, bootstrap=True)
param_grid_xgb = {
"n_estimators": [150, 200, 300],
"max_depth": [5, 10, 25],
"learning_rate": [0.1, 0.05],
"subsample": [0.5, 0.8],
"colsample_bytree": [0.5, 0.8],
}
model_xgb = xgb.XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')
param_grid = [param_grid_reg, param_grid_tree, param_grid_forest, param_grid_xgb]
model = [model_reg, model_tree, model_forest, model_xgb]
names = ["logistic_regression", "decision_tree", "random_forest", "gradient_boosting"]
matrix = list(zip(param_grid,model, names))
return matrix
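# --- Hypothetical usage sketch (not part of the original module) ---
# X and y stand in for an arbitrary feature matrix and label vector.
#
#   from sklearn.model_selection import GridSearchCV
#
#   for param_grid, model, name in grid_matrix():
#       search = GridSearchCV(model, param_grid, cv=5, scoring='accuracy', n_jobs=-1)
#       search.fit(X, y)
#       print(name, search.best_params_, search.best_score_)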
|
import hyperparameters as h
from rl_botics.common.data_collection import *
from rl_botics.common.policies import *
from rl_botics.common.utils import *
import tensorflow as tf
class REINFORCE:
def __init__(self, args, sess, env):
"""
Initialize REINFORCE agent class
"""
self.sess = sess
self.env = env
self.obs_dim = self.env.observation_space.shape[0]
self.act_dim = self.env.action_space.n
self.render = args.render
# Hyperparameters
self.lr = args.lr
self.gamma = args.gamma
self.maxiter = 1000
self.batch_size = args.batch_size
self.n_policy_epochs = 20
# Parameters for the policy network
self.pi_sizes = h.pi_sizes + [self.act_dim]
self.pi_activations = h.pi_activations + ['relu']
self.pi_layer_types = h.pi_layer_types + ['dense']
self.pi_batch_size = h.pi_batch_size
self.pi_optimizer = tf.train.AdamOptimizer(learning_rate=h.pi_lr)
# Parameters for the value network
self.v_sizes = h.v_sizes
self.v_activations = h.v_activations
self.v_layer_types = h.v_layer_types
self.v_batch_sizes = h.v_batch_sizes
self.v_optimizer = tf.train.AdamOptimizer(learning_rate=h.v_lr)
# Initialize an empty reward list
self.rew_list = []
self.ep_rew_list = []
# Initialize graph
self._build_graph()
self._init_sess()
def _build_graph(self):
"""Build Tensorflow graph"""
self._init_placeholders()
self._build_policy()
self._build_value_function()
self._loss()
self.init = tf.global_variables_initializer()
def _init_placeholders(self):
"""
Define Tensorflow placeholders
"""
self.obs = tf.placeholder(dtype=tf.float32, shape=[None, self.obs_dim], name='obs')
self.act = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='act')
self.adv = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='adv')
# Target for value function
self.v_targ = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='target_values')
def _build_policy(self):
"""
Build Policy
"""
self.policy = MlpSoftmaxPolicy(self.sess,
self.obs,
self.pi_sizes,
self.pi_activations,
self.pi_layer_types,
self.pi_batch_size,
)
print("\nPolicy model: ")
print(self.policy.print_model_summary())
def _build_value_function(self):
"""
Value function graph
"""
self.value = MLP(self.sess,
self.obs,
self.v_sizes,
self.v_activations,
self.v_layer_types,
self.v_batch_sizes,
'value'
)
self.v_loss = tf.losses.mean_squared_error(self.value.output, self.v_targ)
self.v_train_step = self.v_optimizer.minimize(self.v_loss)
print("\nValue model: ")
print(self.value.print_model_summary())
def _loss(self):
"""
Loss graph
"""
        # Policy loss: negative expected (log-probability * advantage)
        self.loss = -tf.reduce_mean(tf.multiply(self.policy.log_prob, self.adv))
# Policy update step
self.pi_train_step = self.pi_optimizer.minimize(self.loss)
def _init_sess(self):
"""
Initialize tensorflow graph
"""
self.sess.run(self.init)
def process_paths(self, paths):
"""
Process data
:param paths: Obtain unprocessed data from training
:return feed_dict: Dict required for neural network training
"""
paths = np.asarray(paths)
# Process paths
obs = np.concatenate(paths[:, 0]).reshape(-1, self.obs_dim)
new_obs = np.concatenate(paths[:, 3]).reshape(-1, self.obs_dim)
act = paths[:, 1].reshape(-1,1)
        # Compute expected return, values and advantages
expected_return = get_expected_return(paths, self.gamma, normalize=True)
values = self.value.predict(obs)
adv = expected_return-values
# Generate feed_dict with data
feed_dict = {self.obs: obs,
self.act: act,
self.adv: adv
}
return feed_dict
def update_policy(self, feed_dict):
"""
:param feed_dict:
"""
for _ in range(self.n_policy_epochs):
self.sess.run(self.pi_train_step, feed_dict=feed_dict)
def update_value(self, prev_feed_dict):
"""
Update value function
:param prev_feed_dict: Processed data from previous iteration (to avoid overfitting)
"""
# TODO: train in epochs and batches
feed_dict = {self.obs: prev_feed_dict[self.obs],
self.v_targ: prev_feed_dict[self.adv]
}
        self.sess.run(self.v_train_step, feed_dict=feed_dict)
def train(self):
"""
Train using VPG algorithm
"""
paths = get_trajectories(self.env, self.policy, self.render)
dct = self.process_paths(paths)
self.update_policy(dct)
prev_dct = dct
for itr in range(self.maxiter):
paths = get_trajectories(self.env, self.policy, self.render)
dct = self.process_paths(paths)
# Update Policy
self.update_policy(dct)
# Update value function
self.update_value(prev_dct)
# Update trajectories
prev_dct = dct
# TODO: Log data
self.sess.close()
def print_results(self):
"""
Plot reward received over training period
"""
|
import frappe
from frappe import _
@frappe.whitelist()
def payment_on_submit(self, method):
fwd_uti(self, method)
total_uti(self,method)
@frappe.whitelist()
def payment_on_cancel(self, method):
fwd_uti_cancel(self, method)
total_uti(self,method)
#On Submit Payment
def fwd_uti(self, method):
if self.forward_contract:
target_doc = frappe.get_doc("Forward Booking", self.forward_contract)
existing_row_id = frappe.db.get_value("Forward Booking Utilization", filters={"parent": self.forward_contract, "payment_entry": self.name}, fieldname="name")
if not existing_row_id:
target_doc.append("payment_entries", {
"date": self.posting_date,
"party_type": self.party_type,
"party": self.party,
"paid_amount" : self.paid_amount,
"received_amount" : self.received_amount,
"payment_entry" : self.name
})
target_doc.save()
frappe.db.commit()
#Calculate Total
def total_uti(self,method):
if self.forward_contract:
target_doc = frappe.get_doc("Forward Booking", self.forward_contract)
total = 0
if target_doc.hedge == "Export":
for row in target_doc.payment_entries:
total += row.paid_amount
target_doc.total_utilization = total
else:
for row in target_doc.payment_entries:
total += row.received_amount
target_doc.total_utilization = total
target_doc.amount_outstanding = target_doc.amount - target_doc.total_utilization - target_doc.total_cancelled
target_doc.save()
frappe.db.commit()
#CANCEL Payment
def fwd_uti_cancel(self, method):
if self.forward_contract:
existing_row_id = frappe.db.get_value("Forward Booking Utilization", filters={"parent": self.forward_contract, "payment_entry": self.name}, fieldname="name")
frappe.delete_doc("Forward Booking Utilization", existing_row_id)
frappe.db.commit()
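# --- Hypothetical wiring sketch (not part of the original module) ---
# Frappe invokes these handlers through doc_events in the app's hooks.py;
# the dotted module path below is illustrative only.
#
#   doc_events = {
#       "Payment Entry": {
#           "on_submit": "your_app.payment_hooks.payment_on_submit",
#           "on_cancel": "your_app.payment_hooks.payment_on_cancel",
#       }
#   }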
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# ip.reportvpnstatus
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from django.utils.translation import ugettext_lazy as _
# NOC modules
from noc.lib.app.simplereport import SimpleReport, SectionRow
from noc.ip.models.vrf import VRF
from noc.inv.models.forwardinginstance import ForwardingInstance
from noc.inv.models.subinterface import SubInterface
class ReportVPNStatusApplication(SimpleReport):
title = _("VPN Status")
def get_data(self, **kwargs):
data = []
for vrf in VRF.objects.all().order_by("name"):
if vrf.rd == "0:0":
continue # Skip global
d = []
for fi in ForwardingInstance.objects.filter(type="VRF", name=vrf.name):
si = [
i.name
for i in SubInterface.objects.filter(forwarding_instance=fi.id).only("name")
]
si = sorted(si)
if si:
d += [[fi.managed_object.name, ", ".join(si)]]
if d:
data += [
SectionRow(name="VRF %s, RD: %s [%s]" % (vrf.name, vrf.rd, vrf.state.name))
]
data += d
#
return self.from_dataset(
title=self.title, columns=[_("Managed Object"), _("Interfaces")], data=data
)
|
import datetime
import django_filters
from rest_framework import filters
from rest_framework import generics
from rest_framework.response import Response
from .models import Posts
from .serializer import PostsSerializer
DATE_FORMAT = "%Y-%m-%d"
START_DATE = "2000-01-01"
class PostsView(generics.ListAPIView):
"""
Stats list view with sorting
"""
filter_backends = [django_filters.rest_framework.DjangoFilterBackend]
serializer_class = PostsSerializer
queryset = Posts.objects.all()
def get(self, request, order=None):
order = request.GET.get('order', '')
offset = int(request.GET.get('offset', 0))
limit = request.GET.get('limit')
if limit:
limit = int(limit)
if limit and limit < 0:
limit = None
if limit and offset:
limit += offset
posts = Posts.objects.all()
if order:
posts = posts.order_by(order)[offset:limit]
serializer = PostsSerializer(posts, many=True)
return Response(serializer.data)
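# --- Hypothetical request sketch (not part of the original module) ---
# Assuming the view is routed at /posts/, sorting and paging are driven by
# query parameters; the field name below is illustrative only.
#
#   GET /posts/?order=-created_at&offset=20&limit=10
#
# `order` is passed straight to QuerySet.order_by(), so any Posts field name
# (optionally prefixed with "-" for descending order) is accepted.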
|
from nanpy.arduinoboard import arduinomethod, returns, FirmwareClass
from nanpy.classinfo import check4firmware
from nanpy.memo import memoized
@check4firmware
class EepromLib(FirmwareClass):
firmware_id = 'EEPROM'
@arduinomethod
def write(self, address, value):
pass
@returns(int)
@arduinomethod
def read(self, address):
pass
@property
@memoized
@returns(int)
@arduinomethod
def size(self):
pass
|
import os
from zipfile import ZipFile
from celery import shared_task
from PIL import Image
from django.conf import settings
from clustalanalysis import run_clustal
@shared_task
def create_tree(inputfile, outputfile):
try:
print("This is clustal function")
result_file = run_clustal.run_clustal(inputfile, outputfile)
return outputfile
except IOError as e:
return e
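# --- Hypothetical usage sketch (not part of the original module) ---
# Celery tasks are queued with .delay(); the file names below are illustrative only.
#
#   result = create_tree.delay("uploads/sequences.fasta", "results/tree.nwk")
#   tree_path = result.get(timeout=600)   # blocks until the worker finishes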
|
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from django.utils import timezone
from django.conf import settings
def expires_in(token):
time_elapsed = timezone.now() - token.created
    # total_seconds() avoids the day-wrapping behaviour of timedelta.seconds
    left_time = settings.TOKEN_EXPIRE_TIME - time_elapsed.total_seconds()
return left_time
def is_token_expired(token):
left_time = expires_in(token)
    return left_time < 0
def token_expire_handler(token):
is_expired = is_token_expired(token)
if is_expired:
token.delete()
    else:
        # reset the expiry window on activity
        token.created = timezone.now()
        token.save()
return is_expired, token
class ExpiringTokenAuthentication(TokenAuthentication):
def authenticate_credentials(self, key):
try:
token = Token.objects.get(key=key)
except Token.DoesNotExist:
raise AuthenticationFailed("Invalid Token")
if not token.user.is_active:
raise AuthenticationFailed("User is not active")
is_expired, token = token_expire_handler(token)
# print(token.created)
if is_expired:
raise AuthenticationFailed("The Token is expired")
return token.user, token
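# --- Hypothetical settings sketch (not part of the original module) ---
# The authentication class relies on TOKEN_EXPIRE_TIME (in seconds) being
# defined in Django settings; the values and module path below are
# illustrative only.
#
#   # settings.py
#   TOKEN_EXPIRE_TIME = 60 * 60 * 24  # 24 hours
#
#   REST_FRAMEWORK = {
#       'DEFAULT_AUTHENTICATION_CLASSES': (
#           'yourapp.authentication.ExpiringTokenAuthentication',
#       ),
#   }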
|
from posixpath import dirname
from flask import Flask, request, render_template,redirect, url_for,abort,send_from_directory
from werkzeug.utils import secure_filename
import os.path
import tempfile
import io
import os
import base64
from datetime import datetime
from pathlib import Path
import torchvision
from torchvision import transforms
import torch
from torch import no_grad
import cv2
import numpy as np
from PIL import Image
# Here are the 91 classes.
OBJECTS = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Here are the classes for display
OBJECTS_html=['all', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# key: type of object, value: list of files that pertain to each object
FILE_OBJ={}
def get_predictions(pred,threshold=0.8,objects=None ):
"""
    Assign a string name to each predicted class and drop predictions whose likelihood is under a threshold.
    pred: raw model output; each element bundles the class index, the probability of belonging to that class and the coordinates of the bounding box for the object
    threshold: minimum score a prediction must reach to be kept
    objects: optional list of class names to keep; all classes are kept when omitted
    Returns a list of (class name, probability, [(x0, y0), (x1, y1)]) tuples.
    """
predicted_classes= [(OBJECTS[i],p,[(box[0], box[1]), (box[2], box[3])]) for i,p,box in zip(list(pred[0]['labels'].numpy()),pred[0]['scores'].detach().numpy(),list(pred[0]['boxes'].detach().numpy()))]
predicted_classes=[ stuff for stuff in predicted_classes if stuff[1]>threshold ]
if objects and predicted_classes :
predicted_classes=[ (name, p, box) for name, p, box in predicted_classes if name in objects ]
return predicted_classes
def draw_box(predicted_classes,image,rect_th= 30,text_size= 3,text_th=3):
"""
    Draw a labelled bounding box around each detected object.
    predicted_classes: list of (class name, probability, box corners) tuples as returned by get_predictions
    image: image tensor (C, H, W) to draw on; converted internally to a BGR uint8 array
"""
img=(np.clip(cv2.cvtColor(np.clip(image.numpy().transpose((1, 2, 0)),0,1), cv2.COLOR_RGB2BGR),0,1)*255).astype(np.uint8).copy()
for predicted_class in predicted_classes:
label=str(predicted_class[0]) + " likelihood"
probability=predicted_class[1]
box=predicted_class[2]
cv2.rectangle(img, (int(box[0][0]), int(box[0][1])), (int(box[1][0]), int(box[1][1])),(0, 255, 0), 4) # Draw Rectangle with the coordinates
cv2.putText(img,label, (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0),thickness=3)
cv2.putText(img,label+": "+str(round(probability,2)), (int(box[0][0]), int(box[0][1])), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0),thickness=3)
return img
#Faster R-CNN is a model that predicts both bounding boxes and class scores for potential objects in the image pre-trained on COCO.
model_ = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# set to eval
model_.eval()
# save memory
for name, param in model_.named_parameters():
param.requires_grad = False
#the function calls Faster R-CNN model_ but save RAM:
def model(x):
with torch.no_grad():
yhat = model_(x)
return yhat
# transform image to tensor
transform = transforms.Compose([transforms.ToTensor()])
app=Flask(__name__)
# EXTENSIONS allowed
dostuff=None
app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.gif','.jpeg']
# paths
app.config['UPLOAD_PATH'] = 'uploads'
app.config['OBJECTS_PATH'] = 'objects'
# confident_range
app.config['CONFIDENT_RANG'] = None
#path of images
app.config['FILE_PATH']=None
app.config['FILE_NAME']=[]
# directory of path
dir_name = Path(app.instance_path)
@app.route('/')
def home():
    # files that have been uploaded
    files = os.listdir(app.config['UPLOAD_PATH'])
    # keep only files with an allowed image extension
    files = [file for file in files if os.path.splitext(file)[1] in app.config['UPLOAD_EXTENSIONS']]
    # files for which objects have already been detected
    object_files = os.listdir(app.config['OBJECTS_PATH'])
    object_files = [file for file in object_files if os.path.splitext(file)[1] in app.config['UPLOAD_EXTENSIONS']]
return render_template('index.html', files=app.config['FILE_NAME'] ,objects_list=OBJECTS_html,object_files=object_files)
@app.route('/', methods=['POST'])
def upload_file():
#file object
uploaded_file = request.files['file']
#file name
filename= secure_filename(uploaded_file.filename)
#file extention
file_ext = os.path.splitext(filename)[1]
#check if empty file
if filename != '':
# file path /uploads/filename
#check if .jpg, .png, .gif if not send an error
if file_ext not in app.config['UPLOAD_EXTENSIONS']:
abort(400)
        # redirect back to home; the url_for argument is the view function "home"
#upload file path
#uploaded_file.save(filename)
file_path=Path(app.config['UPLOAD_PATH']).joinpath(filename)
        # save the file name to be used in other parts of the app
app.config['FILE_NAME']=[filename]
# file path to be used in app
app.config['FILE_PATH']=file_path
uploaded_file.save(file_path)
return redirect(url_for('home'))
@app.route('/find_object', methods=['POST'])
def find():
redirect(url_for('home'))
# object to find
object=request.form.get("objects")
confident_range = request.form.get("confident_range")
app.config['CONFIDENT_RANG'] = int(confident_range) / int(100)
print("++++++++", confident_range)
    # copy the requested object so the detection block below runs only once
object_=object
if object_:
half = 0.5
print(app.config['FILE_PATH'])
image = Image.open(app.config['FILE_PATH'])
arr = []
        # PIL's resize returns a new image, so capture the result
        image = image.resize([int(half * s) for s in image.size])
img = transform(image)
pred = model(torch.unsqueeze(img,0))
if object=='all':
pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'])
else:
pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'],objects=object)
object_=None
#draw box on image
image_numpy=draw_box(pred_thresh,img,rect_th= 1,text_size= 1,text_th=1)
#save image with box with new name
filename, file_extension = os.path.splitext(app.config['FILE_NAME'][0])
print(filename, file_extension)
app.config['FILE_NAME'] = []
        #name of file with labels
new_file_name=filename+"_object"+file_extension
new_file_path=Path(app.config['OBJECTS_PATH']).joinpath(new_file_name)
#save file we use opencv as the boxes look better
cv2.imwrite(str(new_file_path), image_numpy)
        # get the different objects and save each as an image
for obj in pred_thresh:
#Top and bottom corner of box
x_0,y_0=obj[2][0]
x_1,y_1=obj[2][1]
            #save the image with a name that includes the object and time
now = datetime.now()
dt_string = now.strftime("_%d_%m_%Y_%H_%M_%S_%f").strip()
obj_file_name=obj[0]+dt_string+file_extension
object_file_ext=Path(app.config['OBJECTS_PATH']).joinpath(obj_file_name)
if not(obj[0] in set(FILE_OBJ.keys())):
FILE_OBJ[obj[0]]=[obj_file_name ]
else:
FILE_OBJ[obj[0]].append(obj_file_name)
new_image=image.copy().crop((x_0,y_0,x_1,y_1))
new_image.save(object_file_ext)
if (request.form.get("Find_New")):
os.remove(app.config['FILE_PATH'])
return redirect(url_for('home'))
return render_template("find_object.html" ,objects=object,file=new_file_name, title=object, range1=confident_range)
@app.route('/your_object')
def your_gallery():
print('assss',FILE_OBJ)
return render_template("your_object.html" ,obj_files=FILE_OBJ)
#serve uploaded files from the following route
@app.route('/uploads/<filename>')
def upload(filename):
#get file this is called in index.html
return send_from_directory(app.config['UPLOAD_PATH'], filename)
#serve object files from the following route
@app.route('/objects/<filename>')
def upload_objects(filename):
#get file this is called in index.html
return send_from_directory(app.config['OBJECTS_PATH'], filename)
@app.route('/your_object/<galleryName>')
def view_obejct(galleryName):
return render_template("view_obejct.html" ,obj_files=FILE_OBJ[galleryName], title=galleryName)
@app.route('/your_galary')
def view_gallery():
files = os.listdir(app.config['UPLOAD_PATH'])
print("test")
return render_template("your_galary.html" ,obj_files=files)
if __name__=="__main__":
app.run(host="0.0.0.0", port=8080)
|
#! /usr/bin/env python
import urllib2
from xml.dom import minidom, Node
class RSSItem:
def __init__(self,title="",description="", link="",pubDate = ""):
self.title = title
self.description = description
self.link = link
self.pubDate = pubDate
class RSSReader:
name = ""
def __init__(self,RSSUrl):
"""Initialize the class"""
self.RSSUrl = RSSUrl;
try:
self.xmldoc = self.GetXMLDocument(RSSUrl)
except (IOError, OSError):
self.xmldoc = None
if not self.xmldoc == None:
for itemNode in self.xmldoc.documentElement.childNodes:
if (itemNode.nodeName == "channel"):
self.name = self.GetChildText(itemNode,"title")
def GetXMLDocument(self,RSSUrl):
"""This function reads in a RSS URL and then returns the XML documentn on success"""
urlInfo = urllib2.urlopen(RSSUrl)
xmldoc = None
if (urlInfo):
xmldoc = minidom.parse(urlInfo)
else:
print "Error Getting URL"
return xmldoc
def GetItemText(self,xmlNode):
"""Get the text from an xml item"""
text = ""
for textNode in xmlNode.childNodes:
if (textNode.nodeType == Node.TEXT_NODE):
text += textNode.nodeValue
return text
def GetChildText(self, xmlNode, childName):
"""Get a child node from the xml node"""
if (not xmlNode):
print "Error GetChildNode: No xml_node"
return ""
for itemNode in xmlNode.childNodes:
if (itemNode.nodeName==childName):
return self.GetItemText(itemNode)
"""Return Nothing"""
return ""
def CreateRSSItem(self,itemNode):
"""Create an RSS item and return it"""
title = '# '
title += self.GetChildText(itemNode,"title")
description = self.GetChildText(itemNode,"description")
link = self.GetChildText(itemNode, "link")
pubDate = self.GetChildText(itemNode, "pubDate")
return RSSItem(title,description,link,pubDate)
def GetItems(self):
"""Generator to get items"""
print("ddd")
if not self.xmldoc == None:
for itemNode in self.xmldoc.documentElement.childNodes:
for itemNode in itemNode.childNodes:
if (itemNode.nodeName == "item"):
"""Allright we have an item"""
rssItem = self.CreateRSSItem(itemNode)
yield rssItem
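# --- Usage sketch (assumption-labelled) ---
# A minimal example of how RSSReader is meant to be driven; the feed URL below
# is only a placeholder, and the snippet follows this file's Python 2 style.
if __name__ == "__main__":
    reader = RSSReader("http://example.com/feed.xml")  # placeholder URL
    print reader.name
    for item in reader.GetItems():
        print item.title
        print item.link
        print item.pubDate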
|
import unittest
from fuzzystring import FuzzyString
class FuzzyStringTests(unittest.TestCase):
"""Tests for FuzzyString."""
def test_constructor(self):
FuzzyString("hello")
def test_equality_and_inequality_with_same_string(self):
hello = FuzzyString("hello")
self.assertEqual(hello, "hello")
self.assertFalse(hello != "hello")
def test_equality_with_completely_different_string(self):
hello = FuzzyString("hello")
self.assertNotEqual(hello, "Hello there")
self.assertFalse(hello == "Hello there")
self.assertNotEqual(hello, "hello there")
        self.assertFalse(hello == "hello there")
def test_equality_and_inequality_with_different_case_string(self):
hello = FuzzyString("hellO")
self.assertEqual(hello, "Hello")
self.assertFalse(hello != "Hello")
self.assertEqual(hello, "HELLO")
self.assertFalse(hello != "HELLO")
def test_string_representation(self):
hello = FuzzyString("heLlO")
self.assertEqual(str(hello), "heLlO")
self.assertEqual(repr(hello), repr("heLlO"))
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_other_string_comparisons(self):
apple = FuzzyString("Apple")
self.assertGreater(apple, "animal")
self.assertLess("animal", apple)
self.assertFalse(apple < "animal")
self.assertFalse("animal" > apple)
self.assertGreaterEqual(apple, "animal")
self.assertGreaterEqual(apple, "apple")
self.assertLessEqual("animal", apple)
self.assertLessEqual("animal", "animal")
self.assertFalse(apple <= "animal")
self.assertFalse("animal" >= apple)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_string_operators(self):
hello = FuzzyString("heLlO")
self.assertEqual(hello + "!", "helLo!")
self.assertNotEqual(hello + "!", "hello")
self.assertTrue("he" in hello)
self.assertIn("He", hello)
self.assertNotIn("He!", hello)
# To test the Bonus part of this exercise, comment out the following line
# @unittest.expectedFailure
def test_normalizes_strings(self):
string = FuzzyString("\u00df and ss")
self.assertEqual(string, "ss and \u00df")
string = FuzzyString("ß, ss, \uf9fb, and \u7099")
self.assertEqual(string, "ss, ß, \u7099, and \uf9fb")
accent = '\u0301'
accented_e = FuzzyString('\u00e9')
self.assertEqual('\u0065\u0301', accented_e)
self.assertIn(accent, accented_e)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
#
#
# 0=================================0
# | Project Name |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Implements: Support Vector Machine
#
# ----------------------------------------------------------------------------------------------------------------------
#
# YUWEI CAO - 2020/11/13 13:05 PM
#
#
# ----------------------------------------
# Import packages and constant
# ----------------------------------------
import os
import h5py
import numpy as np
from glob import glob
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# %matplotlib inline
import matplotlib.pyplot as plt
from scipy import stats
# use seaborn plotting defaults
import seaborn as sns; sns.set()
def ResizeDataset(path, percentage, n_classes, shuffle):
if path == 'cache/modelnet40/features/':
original_name = ['train0.h5', 'train1.h5', 'train2.h5',
'train3.h5', 'train4.h5']
else:
original_name = ['train0.h5', 'train1.h5', 'train2.h5',
'train3.h5', 'train4.h5', 'train5.h5', 'train6.h5', 'train7.h5']
for h5_name in original_name:
ori_name = os.path.join(path, h5_name)
out_file_name= ori_name + "_" + str(percentage)+ "_resized.h5"
if os.path.exists(out_file_name):
os.remove(out_file_name)
fw = h5py.File(out_file_name, 'w', libver='latest')
dset = fw.create_dataset("data", (1,1024,),maxshape=(None,1024), dtype='<f4')
dset_l = fw.create_dataset("label",(1,),maxshape=(None,),dtype='uint8')
fw.swmr_mode = True
        f = h5py.File(ori_name, 'r')
data = f['data'][:]
cls_label = f['label'][:]
#data shuffle
if shuffle:
idx = np.arange(len(cls_label))
np.random.shuffle(idx)
data,cls_label = data[idx, ...], cls_label[idx]
class_dist= np.zeros(n_classes)
for c in range(len(data)):
class_dist[cls_label[c]]+=1
        log_string('Original data size: {}'.format(np.sum(class_dist)))
        log_string('Class distribution of this dataset: {}'.format(class_dist))
class_dist_new= (percentage*class_dist/100).astype(int)
for i in range(n_classes):
if class_dist_new[i]<1:
class_dist_new[i]=1
class_dist_count=np.zeros(n_classes)
data_count=0
for c in range(len(data)):
label_c=cls_label[c]
if(class_dist_count[label_c] < class_dist_new[label_c]):
class_dist_count[label_c]+=1
new_shape = (data_count+1,1024,)
dset.resize(new_shape)
dset_l.resize((data_count+1,))
dset[data_count,:] = data[c]
dset_l[data_count] = cls_label[c]
dset.flush()
dset_l.flush()
data_count+=1
        log_string('Finished resizing data to size of: {}'.format(np.sum(class_dist_new)))
        log_string('Class distribution of resized dataset: {}'.format(class_dist_new))
        fw.close()
# Read in the list of categories in MODELNET40
def get_category_names():
shape_names_file = os.path.join('modelnet40_ply_hdf5_2048', 'shape_names.txt')
shape_names = [line.rstrip() for line in open(shape_names_file)]
return shape_names
class SVM(object):
def __init__(self, feature_dir, percent=100):
        self.feature_dir = feature_dir
        self.percent = percent
self.test_path = glob(os.path.join(self.feature_dir, 'test*.h5'))
if(percent<100):
ResizeDataset(path = self.feature_dir, percentage=percent, n_classes=40, shuffle=True)
self.train_path = glob(os.path.join(self.feature_dir, 'train*%s_resized.h5'%percent))
else:
self.train_path = glob(os.path.join(self.feature_dir, 'train*.h5'))
log_string(str(self.train_path))
log_string("Loading feature dataset...")
train_data = []
train_label = []
for path in self.train_path:
log_string("Loading path: " + str(path))
f = h5py.File(path, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
train_data.append(data)
train_label.append(label)
self.train_data = np.concatenate(train_data, axis=0)
self.train_label = np.concatenate(train_label, axis=0)
log_string("Training set size:" + str(np.size(self.train_data, 0)))
test_data = []
test_label = []
for path in self.test_path:
log_string("Loading path: " + str(path))
f = h5py.File(path, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
test_data.append(data)
test_label.append(label)
self.test_data = np.concatenate(test_data, axis=0)
self.test_label = np.concatenate(test_label, axis=0)
log_string("Testing set size:" + str(np.size(self.test_data, 0)))
def classify(self):
clf = LinearSVC(random_state=0)
clf.fit(self.train_data, self.train_label)
result = clf.predict(self.test_data)
accuracy = np.sum(result==self.test_label).astype(float) / np.size(self.test_label)
log_string(str(classification_report(self.test_label, result,
target_names=get_category_names())))
mat = confusion_matrix(self.test_label, result)
plt.figure(figsize=(10, 16))
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='YlOrRd',
xticklabels=get_category_names(),
yticklabels=get_category_names())
plt.xlabel('true label')
plt.ylabel('predicted label')
plt.savefig("output/heatmap_%s.png"%percent, dpi=300)
log_string("Transfer linear SVM accuracy: {:.2f}%".format(accuracy*100))
LOG_FOUT = open(os.path.join('output','svm_log.txt'), 'w')
def log_string(out_str):
LOG_FOUT.write(out_str + '\n')
LOG_FOUT.flush()
print(out_str)
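# --- Usage sketch (assumption-labelled) ---
# Minimal driver showing how the pieces above fit together; the feature
# directory below is a placeholder, and an 'output' folder must already exist
# because LOG_FOUT and the heatmap are written there.
if __name__ == '__main__':
    svm = SVM(feature_dir='cache/modelnet40/features/', percent=100)
    svm.classify()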
|
import os
import re
import sys
import time
from threading import Thread
from hive_gns.database.access import write
from hive_gns.database.haf_sync import HafSync
from hive_gns.engine.hook_processor import HookProcessor
from hive_gns.engine.pruner import Pruner
from hive_gns.server import system_status
from hive_gns.server.serve import run_server
from hive_gns.tools import INSTALL_DIR
class GnsModules:
def __init__(self) -> None:
self.modules = {}
def _is_valid_module(self, module):
return bool(re.match(r'^[a-z]+[_]*$', module))
def _init_modules_db(self):
for m in self.modules:
sql = f"""
INSERT INTO gns.module_state (module)
SELECT ('{m}')
WHERE NOT EXISTS (SELECT * FROM gns.module_state WHERE module = '{m}');
"""
write(sql)
def _load(self):
dir = f'{INSTALL_DIR}/modules'
module_list = [f.name for f in os.scandir(dir) if self._is_valid_module(f.name)]
for m in module_list:
if m not in self.modules:
self.modules[m] = HookProcessor(m)
self._init_modules_db()
def _refresh_modules(self):
# TODO: periodically run _load()
while True:
self._load()
time.sleep(120)
def start(self):
self._load()
for m in self.modules:
self.modules[m].start()
def run():
try:
HafSync.init()
HafSync.toggle_sync()
Thread(target=HafSync.main_loop).start()
time.sleep(15)
Thread(target=Pruner.run_pruner).start()
# start GNS modules
while not system_status.is_init():
time.sleep(1)
modules = GnsModules()
modules.start()
# run server
run_server()
except KeyboardInterrupt:
# shutdown
sys.exit()
if __name__ == '__main__':
run()
|
import random
import json
import sys
from paho.mqtt import client as mqtt_client
"""
Test app listening to all SmartSleep topics
"""
broker = 'broker.emqx.io'
port = 1883
topic = "SmartSleep/#" # '#' is a special character, meaning all topics starting with SmartSleep will be covered
# generate client ID with pub prefix randomly
client_id = f'python-mqtt-{random.randint(0, 100)}'
username = 'emqx'
password = 'public'
def connect_mqtt() -> mqtt_client:
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to MQTT Broker!")
else:
print("Failed to connect, return code %d\n", rc)
client = mqtt_client.Client(client_id)
client.username_pw_set(username, password)
client.on_connect = on_connect
client.connect(broker, port)
return client
def subscribe(client: mqtt_client):
def on_message(client, userdata, msg):
try: print(f"Received `{json.loads(msg.payload)}` from `{msg.topic}` topic")
except json.JSONDecodeError: print(f"Received `{msg.payload}` from `{msg.topic}` topic")
client.subscribe(topic)
client.on_message = on_message
def run():
client = connect_mqtt()
subscribe(client)
client.loop_forever()
if __name__ == '__main__':
try:
run()
except KeyboardInterrupt:
print('interrupted')
sys.exit(0)
|
from django.conf import settings
from django.contrib import messages
from django.http import JsonResponse, HttpResponseRedirect
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import ListView, CreateView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import FormMixin, DeleteView, UpdateView
from rules.contrib.views import PermissionRequiredMixin
from controlpanel.api import cluster
from controlpanel.api.models import IAMManagedPolicy, User
from controlpanel.api.permissions import is_superuser
from controlpanel.frontend.forms import CreateIAMManagedPolicyForm, AddUserToIAMManagedPolicyForm
from controlpanel.oidc import OIDCLoginRequiredMixin
class IAMManagedPolicyList(OIDCLoginRequiredMixin, PermissionRequiredMixin, ListView):
context_object_name = 'policies'
model = IAMManagedPolicy
permission_required = 'api.list_policy'
template_name = "policy-list.html"
def get_queryset(self):
return IAMManagedPolicy.objects.filter(created_by=self.request.user)
class AdminIAMManagedPolicyList(IAMManagedPolicyList):
permission_required = 'api.is_superuser'
def get_queryset(self):
return IAMManagedPolicy.objects.all()
class IAMManagedPolicyCreate(OIDCLoginRequiredMixin, PermissionRequiredMixin, CreateView):
form_class = CreateIAMManagedPolicyForm
model = IAMManagedPolicy
permission_required = 'api.create_policy'
template_name = "policy-create.html"
def get_form_kwargs(self):
return FormMixin.get_form_kwargs(self)
def get_success_url(self):
return reverse_lazy("list-policies")
def form_valid(self, form):
self.object = IAMManagedPolicy(
name=form.cleaned_data['name'],
created_by=self.request.user
)
self.object.save()
messages.success(
self.request,
f"Successfully created {self.object.name} policy",
)
return FormMixin.form_valid(self, form)
class IAMManagedPolicyDetail(OIDCLoginRequiredMixin, PermissionRequiredMixin, UpdateView):
form_class = AddUserToIAMManagedPolicyForm
model = IAMManagedPolicy
permission_required = 'api.create_policy'
template_name = "policy-update.html"
context_object_name = "policy"
def get_form_kwargs(self):
return FormMixin.get_form_kwargs(self)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user_options = User.objects.exclude(
pk__in=self.object.users.values_list('auth0_id', flat=True)
)
context.update({
"users_options": user_options
})
return context
def get_success_url(self):
return reverse_lazy(
"manage-policy",
kwargs={"pk": self.object.id}
)
def form_valid(self, form):
user_id = form.cleaned_data['user_id']
user = User.objects.get(pk=user_id)
self.object.users.add(user)
self.object.save()
messages.success(
self.request,
f"Successfully added user {user.name} ",
)
return FormMixin.form_valid(self, form)
class IAMManagedPolicyDelete(OIDCLoginRequiredMixin, PermissionRequiredMixin, DeleteView):
model = IAMManagedPolicy
permission_required = 'api.destroy_policy'
def get_success_url(self):
messages.success(self.request, "Successfully delete data source")
return reverse_lazy("list-policies")
def get_queryset(self):
queryset = IAMManagedPolicy.objects.all()
if is_superuser(self.request.user):
return queryset
return queryset.filter(created_by=self.request.user)
class IAMManagedPolicyFormRoleList(OIDCLoginRequiredMixin, View):
def get(self, *args, **kwargs):
roles = cluster.list_role_names()
data = [
r for r in roles
if r.startswith(f"airflow")
or r.startswith(f"{settings.ENV}_app")
]
return JsonResponse(data, safe=False)
class IAMManagedPolicyRemoveUser(OIDCLoginRequiredMixin, PermissionRequiredMixin, SingleObjectMixin, View):
model = IAMManagedPolicy
permission_required = 'api.update_policy'
def get_success_url(self):
messages.success(self.request, "Successfully removed user")
return reverse_lazy("manage-policy", kwargs={"pk": self.object.id})
def get(self, request, *args, **kwargs):
self.object = self.get_object()
user = User.objects.get(pk=kwargs['user_id'])
self.object.users.remove(user)
return HttpResponseRedirect(self.get_success_url())
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.animation import FuncAnimation, PillowWriter
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerPatch, HandlerCircleCollection
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import os
from IPython.display import Image, display
#from model_evaluation_3D import plot_covariance_ellipsoide
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from filterpy.common import Saver
DT= 0.01
SIGMA=0.5
class Trajectoy3DGenerattion:
def __init__(self,sigma=0.5, T=10.0, fs=100.0):
global DT,SIGMA
self.fs = fs # Sampling Frequency
self.dt = 1.0/fs
# Set Global Variables
DT = self.dt
SIGMA = sigma
        self.T = T # measurement time
self.m = int(self.T/self.dt) # number of measurements
self.sigma = sigma
self.px= 0.0 # x Position Start
self.py= 0.0 # y Position Start
self.pz= 1.0 # z Position Start
self.vx = 10.0 # m/s Velocity at the beginning
self.vy = 0.0 # m/s Velocity
self.vz = 0.0 # m/s Velocity
c = 0.1 # Drag Resistance Coefficient
self.Xr=[]
self.Yr=[]
self.Zr=[]
self.Vx=[]
self.Vy=[]
self.Vz=[]
self.ax =[]
self.az =[]
for i in range(int(self.m)):
# Just to simulate a trajectory
accx = -c*self.vx**2
self.vx += accx*self.dt
self.px += self.vx*self.dt
accz = -9.806 + c*self.vz**2
self.vz += accz*self.dt
self.pz += self.vz*self.dt
self.Xr.append(self.px)
self.Yr.append(self.py)
self.Zr.append(self.pz)
self.Vx.append(self.vx)
self.Vy.append(self.vy)
self.Vz.append(self.vz)
self.az.append(accz)
self.ax.append(accx)
aux = self.Xr
self.Xr = self.Zr
self.Zr = aux
aux = self.Vx
self.Vx = self.Vz
self.Vz = aux
aux = self.ax
self.ax = self.az
self.az = aux
def get_velocities(self):
return self.Vx, self.Vy, self.Vz
def get_trajectory_position(self):
return np.array(self.Xr), np.array(self.Yr), np.array(self.Zr)
def get_acceleration(self):
return self.ax, self.az
def get_measurements(self):
#adding Noise
np.random.seed(25)
self.Xm = self.Xr + self.sigma * (np.random.randn(self.m))
self.Ym = self.Yr + self.sigma * (np.random.randn(self.m))
self.Zm = self.Zr + self.sigma * (np.random.randn(self.m))
return self.Xm, self.Ym, self.Zm
def plot_planets(x, y, z, ax):
ax.scatter(x[0], y[0], z[0], c='b', s=850, facecolor='b')
ax.scatter(x[-1], y[-1], z[-1], c='gray', s=350, facecolor='b')
e_txt = ax.text(x[0]-3, y[0], z[0]-10.5,"Earth", weight='bold', c="b", fontsize=10)
m_txt = ax.text(x[-1]-4, y[-1], z[-1]+4,"Moon", weight='bold', c="gray", fontsize=10)
return e_txt, m_txt
def plot_measurements_3D(traj, ax, title=""):
x,y,z = traj.get_measurements()
plot_planets(x, y, z, ax)
ax.scatter(x, y, z, c='g', alpha=0.3, facecolor=None, label="Measurements")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title(title, fontsize=15)
#ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
# Axis equal
max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max() / 3.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.legend(loc='best',prop={'size':15})
def plot_trajectory_3D(traj, ax, title=""):
x,y,z = traj.get_trajectory_position()
plot_planets(x, y, z, ax)
ax.plot(x, y, z, c='r', lw=2, ls="--", label="Trajectory")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title(title, fontsize=15)
#ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
# Axis equal
max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max() / 3.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.legend(loc='best',prop={'size':15})
def plot_prediction(preds,traj, ax):
global SIGMA
xt, yt, zt = preds[:,0], preds[:,1], preds[:,2]
Xr, Yr, Zr = traj.get_trajectory_position()
Xm, Ym, Zm = traj.get_measurements()
print("Xm: ", Xm.shape)
print("Ym: ", Ym.shape)
print("Zm: ", Zm.shape)
plot_planets(Xr, Yr, Zr, ax)
ax.plot(xt,yt,zt, lw=2, label='Kalman Filter Estimate')
ax.plot(Xr, Yr, Zr, lw=2, label='Real Trajectory Without Noise')
ax.scatter(Xm, Ym, Zm, edgecolor='g', alpha=0.1, lw=2, label="Measurements")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.legend(loc='best',prop={'size':15})
ax.set_title("Kalman Filter Estimate - Sigma={}".format(SIGMA), fontsize=15)
# Axis equal
max_range = np.array([Xm.max()-Xm.min(), Ym.max()-Ym.min(), Zm.max()-Zm.min()]).max() / 3.0
mean_x = Xm.mean()
mean_y = Ym.mean()
mean_z = Zm.mean()
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
def plot_x_z_2D(ax, traj, preds):
global SIGMA
xt, yt, zt = preds[:,0], preds[:,1], preds[:,2]
Xr, Yr, Zr = traj.get_trajectory_position()
Xm, Ym, Zm = traj.get_measurements()
ax.plot(xt,zt, label='Kalman Filter Estimate')
ax.scatter(Xm,Zm, label='Measurement', c='gray', s=15, alpha=0.5)
ax.plot(Xr, Zr, label='Real')
ax.set_title("Kalman Filter Estimate 2D - Sigma={}".format(SIGMA), fontsize=15)
ax.legend(loc='best',prop={'size':15})
ax.set_xlabel('X ($m$)')
ax.set_ylabel('Y ($m$)')
#-------------------------- KALMAN FUNCTIONS -------------------------------------------------
def init_kalman(traj):
global SIGMA, DT
#Transition_Matrix matrix
PHI = np.array([[1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0, 1/2.0*DT**2],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, DT],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
# Matrix Observation_Matrix
#We are looking for the position of the spaceship x,y,z
H = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
x, y, z = traj.get_measurements()
vx, vy, vz = traj.get_velocities()
ax, az = traj.get_acceleration()
#initial state
init_states = np.array([x[0], y[0], z[0], vx[0], vy[0], vz[0], ax[0], 0., az[0]])
P = np.eye(9)*(0.5**2)
rp = 1 # Noise of Position Measurement
R = np.eye(3)* rp
G = np.array([ [(DT**2)/2],
[(DT**2)/2],
[(DT**2)/2],
[ DT ],
[ DT ],
[ DT ],
[ 1. ],
[ 1. ],
[ 1. ]])
    acc_noise = 0.1 # acceleration process noise
Q= np.dot(G, G.T)* acc_noise**2
#Q = Q_discrete_white_noise(3, dt=DT, var=50, block_size=3)
return init_states, PHI, H, Q, P, R
def Ship_tracker(traj):
global DT
init_states, PHI, H, Q, P, R = init_kalman(traj)
tracker= KalmanFilter(dim_x = 9, dim_z=3)
tracker.x = init_states
tracker.F = PHI
tracker.H = H # Measurement function
tracker.P = P # covariance matrix
tracker.R = R # state uncertainty
tracker.Q = Q # process uncertainty
return tracker
def run(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for z in zs:
tracker.predict()
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
def run_half_measures(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for i,z in enumerate(zs):
tracker.predict()
if i <= len(zs)//2:
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
def run_even_index_update(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for i, z in enumerate(zs):
tracker.predict()
if( i % 2 == 0):
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
def run_update_every_5(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
for i, z in enumerate(zs):
tracker.predict()
if( i % 5 == 0):
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
def run_update_hole_in_middle(tracker, traj):
x, y, z = traj.get_measurements()
zs = np.asarray([ x, y, z]).T
preds, cov = [],[]
chunk = len(zs) // 3
for i, z in enumerate(zs):
tracker.predict()
if i <= chunk or i >= 2*chunk:
tracker.update(z=z)
preds.append(tracker.x)
cov.append(tracker.P)
return np.array(preds), np.array(cov)
class SpaceAnimation3D:
"""
:predictions: matrix with the predictions of the states
:measurements: dataframe with the measurements with noise
:target_x: target x of the position
:target_y: target y of the position
"""
def __init__(self, predictions, traj):
self.fig = plt.figure(figsize=(16,13))
self.ax = Axes3D(self.fig)
self.x_target, self.y_target, self.z_target = traj.get_measurements()
Xr, Yr, Zr = traj.get_trajectory_position()
self.x_pred = predictions[:,0]
self.y_pred = predictions[:,1]
self.z_pred = predictions[:,2]
plot_planets(Xr,Yr,Zr, self.ax)
self.spaceship_pred, = self.ax.plot([], [], [], lw=5, c="r", label="Predictions")
self.measurements, = self.ax.plot([], [], [], lw=2, alpha=0.6, c="g", label="Measurements")
max_range = np.array([self.x_pred.max()-self.x_pred.min(), self.y_pred.max()-self.y_pred.min(), self.z_pred.max()-self.z_pred.min()]).max() / 3.0
mean_x = self.x_pred.mean()
mean_y = self.y_pred.mean()
mean_z = self.z_pred.mean()
self.ax.set_xlim3d(mean_x - max_range, mean_x + max_range)
self.ax.set_ylim3d(mean_y - max_range, mean_y + max_range)
self.ax.set_zlim3d(mean_z - max_range, mean_z + max_range)
self.ax.legend(loc='best',prop={'size':15})
def init(self):
        self.spaceship_pred.set_data_3d([], [], [])
        self.measurements.set_data_3d([], [], [])
return self.spaceship_pred, self.measurements,
def animate(self,i):
self.spaceship_pred.set_data_3d(self.x_pred[:i], self.y_pred[:i],self.z_pred[:i])
self.measurements.set_data_3d(self.x_target[:i], self.y_target[:i], self.z_target[:i])
return self.spaceship_pred,self.measurements,
def save_and_visualize_animation(self, path):
anim= FuncAnimation(fig=self.fig, func=self.animate, init_func=self.init, frames=len(self.x_pred),interval=50, blit=True)
writer = PillowWriter(fps=25)
anim.save( path, writer=writer, dpi=90)
plt.close()
with open(path,'rb') as f:
display(Image(data=f.read(), format='gif'))
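# --- Usage sketch (assumption-labelled) ---
# Minimal end-to-end run of the helpers defined above: simulate a trajectory,
# build the constant-acceleration Kalman filter, filter the noisy measurements
# and render the static 3D comparison. The GIF path is a placeholder.
if __name__ == "__main__":
    traj = Trajectoy3DGenerattion(sigma=0.5, T=10.0, fs=100.0)
    tracker = Ship_tracker(traj)
    preds, covs = run(tracker, traj)
    fig = plt.figure(figsize=(16, 13))
    ax = fig.add_subplot(111, projection='3d')
    plot_prediction(preds, traj, ax)
    plt.show()
    # animation = SpaceAnimation3D(preds, traj)
    # animation.save_and_visualize_animation("spaceship_tracking.gif")  # placeholder path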
|
import logging
import gensim
from docs import config
from optparse import OptionParser
from lib.corpora import corpus_word_count
logging.basicConfig(format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO)
# Build this program's option parser
def build_opt_parser():
usage = "usage: %prog [options] <dictionary> <filename> [, <filename>, ...]"
parser = OptionParser(usage=usage)
parser.add_option("-d", "--dictionary", dest="dictionary",
default=None,
help="Optional dictionary parameter for parsing MM corpus files"
)
return parser
# Parse commandline arguments using OptionParser given
def parse_arguments(parser):
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
exit()
return options, args
# Main function
def main():
parser = build_opt_parser()
(options, args) = parse_arguments(parser)
dictionary = gensim.corpora.Dictionary.load(options.dictionary) if options.dictionary else options.dictionary
word_count = corpus_word_count(args, dictionary)
logging.info("Word Count: {}".format(word_count))
if __name__ == "__main__":
main()
|
'''
Module:
Set plotting range for axis
'''
def set_range(f, n, d, xbeg, xend):
# sampling point begin and end
sample_beg = float(f)
sample_end = sample_beg + (n - 1) * d
# limit of axis
if xbeg is None:
xbeg = sample_beg
else:
xbeg = float(xbeg)
if xend is None:
xend = sample_end
else:
xend = float(xend)
if xbeg < sample_beg:
nbeg = 0
xbeg = sample_beg
else:
nbeg = int(abs(round((xbeg - sample_beg) / d)))
if abs(xend) > abs(sample_end):
nend = n
xend = sample_end
else:
nend = int(n - abs(round((sample_end - xend) / d)))
# check range
if nend <= nbeg:
print(' Error: Axis range specification error. ')
exit()
# return range
return sample_beg, sample_end, xbeg, xend, nbeg, nend
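# --- Usage sketch (assumption-labelled) ---
# Example of clamping a requested range to a sampled axis: 100 samples starting
# at 0.0 with spacing 0.004 cover 0.0-0.396, so a request for 0.1-1.0 is
# clipped to the sampled end and mapped back to sample indices.
# sample_beg, sample_end, xbeg, xend, nbeg, nend = set_range(0.0, 100, 0.004, 0.1, 1.0)
# -> sample_beg=0.0, sample_end=0.396, xbeg=0.1, xend=0.396, nbeg=25, nend=100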
|
import time
import os
import pyautogui
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
try:
driver.implicitly_wait(15)
print driver.current_url
print "test 1"
driver.execute_script("getMetric();")
init_x, init_y, init_width, init_height = driver.execute_script("return winMetrics;")
print "x:%d, y:%d, width:%d, height:%d" % (init_x, init_y, init_width, init_height)
button = driver.find_element_by_id('newWindow')
button.click() # click the button
pyautogui.moveTo(init_x + 25, init_y + 25)
pyautogui.click();
result = driver.find_element_by_id('result1')
print result.get_attribute('innerHTML')
assert("success" in result.get_attribute('innerHTML'))
print "test 2"
pyautogui.moveTo(init_x + 12, init_y + 12)
pyautogui.click();
result = driver.find_element_by_id('result2')
print result.get_attribute('innerHTML')
assert("success" in result.get_attribute('innerHTML'))
finally:
driver.quit()
|
# This problem was recently asked by Apple:
# Given an integer k and a binary search tree, find the floor (less than or equal to) of k, and the ceiling (larger than or equal to) of k. If either does not exist, then print them as None.
class Node:
def __init__(self, value):
self.left = None
self.right = None
self.value = value
def findCeilingFloor(root_node, k, floor=None, ceil=None):
# Fill this in.
    if root_node is None:
        return (None, None)
while root_node != None:
if root_node.value == k:
return (k, k)
elif k < root_node.value:
ceil = root_node
root_node = root_node.left
else:
floor = root_node
root_node = root_node.right
    return (floor.value if floor else None, ceil.value if ceil else None)
root = Node(8)
root.left = Node(4)
root.right = Node(12)
root.left.left = Node(2)
root.left.right = Node(6)
root.right.left = Node(10)
root.right.right = Node(14)
print (findCeilingFloor(root, 5))
# (4, 6)
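# Additional check (illustrative): 1 is smaller than every key, so with the
# None guard in the return above the floor comes back as None while the
# ceiling is the tree minimum.
print (findCeilingFloor(root, 1))
# (None, 2)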
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('permissions', '0003_remove_role_name'),
]
operations = [
migrations.AlterField(
model_name='role',
name='label',
field=models.CharField(
help_text='A short text describing the role.',
max_length=128, unique=True, verbose_name='Label'
),
),
]
|
class Solution:
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
primes = 1
n = area
index = int(n ** 0.5)
while index >= 1:
if area % index == 0:
factor = area // index
if factor >= index:
return [factor, index]
index -= 1
return [1, 1]
if __name__ == '__main__':
solution = Solution()
print(solution.constructRectangle(1))
print(solution.constructRectangle(2))
print(solution.constructRectangle(3))
print(solution.constructRectangle(4))
print(solution.constructRectangle(5))
print(solution.constructRectangle(6))
print(solution.constructRectangle(8))
print(solution.constructRectangle(12))
else:
pass
|
__author__ = 'Bob'
from django import template
from django.contrib.auth.models import Group
register = template.Library()
@register.filter(name='hasGroup')
def hasGroup(user, group_name):
    try:
        group = Group.objects.get(name=group_name)
        return group in user.groups.all()
    except Exception:
        return False
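# --- Usage sketch (assumption-labelled) ---
# Hypothetical template usage after loading this tag library in a template;
# the library name and group name below are placeholders:
#   {% load auth_extras %}
#   {% if request.user|hasGroup:"editors" %} ... {% endif %}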
|
from typing import Iterable, List, Optional, Tuple
from plenum.common.messages.node_messages import ViewChangeDone
from plenum.server.router import Route
from stp_core.common.log import getlogger
from plenum.server.primary_decider import PrimaryDecider
from plenum.server.replica import Replica
logger = getlogger()
class PrimarySelector(PrimaryDecider):
"""
Simple implementation of primary decider.
Decides on a primary in round-robin fashion.
Assumes that all nodes are up
"""
def __init__(self, node):
super().__init__(node)
@property
def routes(self) -> Iterable[Route]:
return []
# overridden method of PrimaryDecider
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]:
return self.node.view_changer.get_msgs_for_lagged_nodes()
# overridden method of PrimaryDecider
def decidePrimaries(self):
return self.node.view_changer.decidePrimaries()
# Question: Master is always 0, until we change that rule why incur cost
# of a method call, also name is confusing
def _is_master_instance(self, instance_id):
# TODO: get master instance from outside
# Instance 0 is always master
return instance_id == 0
def _get_primary_id(self, view_no, instance_id, total_nodes):
return (view_no + instance_id) % total_nodes
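    # Illustrative round-robin example: with 4 nodes, view_no=2 and
    # instance_id=0 (master) picks rank (2 + 0) % 4 = 2, while the backup
    # instance_id=1 in the same view picks rank 3.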
def next_primary_node_name(self, instance_id, nodeReg=None):
if nodeReg is None:
nodeReg = self.node.nodeReg
rank = self._get_primary_id(self.viewNo, instance_id, len(nodeReg))
name = self.node.get_name_by_rank(rank, nodeReg=nodeReg)
logger.trace("{} selected {} as next primary node for instId {}, "
"viewNo {} with rank {}, nodeReg {}".format(
self, name, instance_id, self.viewNo, rank, nodeReg))
assert name, "{} failed to get next primary node name".format(self)
return name
def next_primary_replica_name(self, instance_id, nodeReg=None):
"""
Returns name of the next node which is supposed to be a new Primary
in round-robin fashion
"""
return Replica.generateName(
nodeName=self.next_primary_node_name(instance_id, nodeReg=nodeReg),
instId=instance_id)
# overridden method of PrimaryDecider
def start_election_for_instance(self, instance_id):
raise NotImplementedError("Election can be started for "
"all instances only")
|
import os
import warnings
from . import python_aio, python_aio_asyncio
from .abstract import AbstractContext, AbstractOperation
from .version import __author__, __version__
try:
from . import linux_aio
from . import linux_aio_asyncio
except ImportError:
linux_aio = None # type: ignore
linux_aio_asyncio = None # type: ignore
try:
from . import thread_aio
from . import thread_aio_asyncio
except ImportError:
thread_aio = None # type: ignore
thread_aio_asyncio = None # type: ignore
variants = tuple(filter(None, [linux_aio, thread_aio, python_aio]))
variants_asyncio = tuple(filter(None, [
linux_aio_asyncio,
thread_aio_asyncio,
python_aio_asyncio
]))
preferred = variants[0]
preferred_asyncio = variants_asyncio[0]
def __select_implementation():
global preferred
global preferred_asyncio
implementations = {
"linux": (linux_aio, linux_aio_asyncio),
"thread": (thread_aio, thread_aio_asyncio),
"python": (python_aio, python_aio_asyncio),
}
implementations = {k: v for k, v in implementations.items() if all(v)}
default_implementation = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "default_implementation"
)
requested = os.getenv("CAIO_IMPL")
if not requested and os.path.isfile(default_implementation):
with open(default_implementation, "r") as fp:
for line in fp:
line = line.strip()
if line.startswith("#") or not line:
continue
if line in implementations:
requested = line
break
elif requested and requested not in implementations:
warnings.warn(
"CAIO_IMPL contains unsupported value %r. Use one of %r" % (
requested, tuple(implementations),
),
RuntimeWarning
)
return
preferred, preferred_asyncio = implementations.get(
requested,
(preferred, preferred_asyncio),
)
__select_implementation()
Context = preferred.Context # type: ignore
Operation = preferred.Operation # type: ignore
AsyncioContext = preferred_asyncio.AsyncioContext # type: ignore
__all__ = (
"Context",
"Operation",
"AsyncioContext",
"AbstractContext",
"AbstractOperation",
"python_aio",
"python_aio_asyncio",
"linux_aio",
"linux_aio_asyncio",
"thread_aio",
"thread_aio_asyncio",
"__version__",
"__author__",
"variants",
"variants_asyncio",
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
class AnyCSVException(Exception):
pass
class NoDelimiterException(AnyCSVException):
pass
class FileSizeException(Exception):
pass
|
import dash
# contains widgets that can be dropped into app
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import pandas as pd
import pickle
# https://dash.plot.ly/dash-core-components
########### Initiate the app
# 'app' is required by heroku
app = dash.Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css'])
# server name is specified in proc file
server = app.server
app.title='knn'
########### Set up the layout
# generates HTML code
app.layout = html.Div(children=[
html.H1('Medicine Cabinet Recommender App'),
# multi line single-Div
html.Div([
# sections have similar code but unique slider id
# header
html.H6('Effect & Preferences'),
dcc.Checklist(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': 'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value=['MTL', 'SF']
)
,
    #added linebreak so no overlap on screen
    html.Br(),
    # header; this slider feeds the 'slider-1' input used by the callback below
    # (the 4-8 sepal-length range and default value are assumptions)
    html.H6('Sepal Length'),
    dcc.Slider(
        id='slider-1',
        min=4,
        max=8,
        step=0.1,
        marks={i:str(i) for i in range(4,9)},
        # default value
        value=5
    ),
    #added linebreak so no overlap on screen
    html.Br(),
# header
html.H6('Petal Length'),
dcc.Slider(
id='slider-2',
min=1,
max=8,
step=0.1,
marks={i:str(i) for i in range(1,9)},
# default value
value=5
),
#added linebreak so no overlap on screen
html.Br(),
# where choice is made
html.H6('# of Neighbors'),
dcc.Dropdown(
id = 'k-drop',
value=5,
options=[{'label': i, 'value':i} for i in [5,10,15,20,25]]
),
# where output data will go
html.H6(id='output-message', children='output will go here')
]),
html.Br(),
html.A('See The Underlying Code On Github', href='https://github.com/lineality/intro_knn_plotly'),
])
############ Interactive Callbacks
# call back function, functions with decorators(specify input and output)
@app.callback(Output('output-message', 'children'),
[Input('k-drop', 'value'),
Input('slider-1', 'value'),
Input('slider-2', 'value')
])
#
def display_results(k, value0, value1):
# this opens the pickle
# the opposite of pickling the file
file = open(f'resources/model_k{k}.pkl', 'rb')
model=pickle.load(file)
    file.close()
    new_obs=[[value0,value1]]
    pred=model.predict(new_obs)
    specieslist=['setosa', 'versicolor', 'virginica']
    final_pred=specieslist[pred[0]]
    return f'For a flower with sepal length {value0} and petal length {value1}, the predicted species is "{final_pred}"'
############ Execute the app
if __name__ == '__main__':
app.run_server()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# abc is used to implement abstract classes
from abc import ABCMeta, abstractmethod
# Filter
class Filter:
__metaclass__ = ABCMeta
def __init__(self): pass
@abstractmethod
def filter(self, file_name): pass
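# --- Usage sketch (assumption-labelled) ---
# A hypothetical concrete filter showing how subclasses are expected to
# implement filter(); the ".log" suffix is only an example.
class SuffixFilter(Filter):
    def __init__(self, suffix='.log'):
        Filter.__init__(self)
        self.suffix = suffix

    def filter(self, file_name):
        # keep only files ending with the configured suffix
        return file_name.endswith(self.suffix)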
|
import glob
import os
import time
from .utils.utils import pickle_read, pickle_dump
from .disk.operations import purge_id_in_cache
def list_keys_to_purge(directory, days_thresh, access_thresh,
include_uncounted=False):
counter_path = os.path.join(directory, 'counter.pkl')
counter = pickle_read(counter_path)
purge_keys1 = [key for key, x in counter.items()
if isinstance(x[1], str) and x[0] <= access_thresh]
purge_keys2 = [key for key, x in counter.items()
if not isinstance(x[1], str) and x[0] <= access_thresh
and (time.time()-x[1]/1000000)/(60*60*24) > days_thresh]
if include_uncounted:
li = []
for thing in ('output_', 'metadata_', 'data_'):
li = li + [x.split('.pkl')[0].split(thing)[1]
for x in glob.glob(directory+'/*')
if x.endswith('.pkl') and thing in x]
li = list(set(li))
purge_keys3 = [x for x in li if x not in counter.keys()]
else:
purge_keys3 = []
return purge_keys1 + purge_keys2 + purge_keys3
def purge_cached_data(directory, days_thresh, access_thresh,
include_uncounted=False):
purge_keys = list_keys_to_purge(
directory, days_thresh, access_thresh, include_uncounted)
print('purging %s items' % len(purge_keys))
for id_ in purge_keys:
purge_id_in_cache(directory, id_)
counter_path = os.path.join(directory, 'counter.pkl')
counter = pickle_read(counter_path)
counter = {key: val for key, val in counter.items()
if key not in purge_keys}
pickle_dump(counter, counter_path)
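# --- Usage sketch (assumption-labelled) ---
# Hypothetical call against a cache directory that already contains the
# counter.pkl written by the caching layer; the path is a placeholder:
#   purge_cached_data('/path/to/cache', days_thresh=30, access_thresh=1,
#                     include_uncounted=True)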
|
import requests
import json
import os
import sys
import pymysql
# Database and table name
mysql_db_name = "checklist"
mysql_db_table = "items"
use_ssl = "yes"
# Format a string to be included in a SQL query as value
def escape_quotes(this_value):
return str(this_value).replace("'", "\\'")
# Get database credentials from environment variables
mysql_server_fqdn = os.environ.get("MYSQL_FQDN")
if mysql_server_fqdn == None:
print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server")
sys.exit(1)
mysql_server_name = mysql_server_fqdn.split('.')[0]
mysql_server_username = os.environ.get("MYSQL_USER")
if mysql_server_username == None:
print("ERROR: Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username")
sys.exit(1)
if not mysql_server_username.__contains__('@'):
mysql_server_username += '@' + mysql_server_name
mysql_server_password = os.environ.get("MYSQL_PASSWORD")
if mysql_server_password == None:
print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password")
sys.exit(1)
# Create connection to MySQL server and get version
print ("INFO: Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username))
if use_ssl == 'yes':
db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}})
else:
db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password)
sql_query = "SELECT VERSION();"
cursor = db.cursor()
cursor.execute(sql_query)
rows = cursor.fetchall()
data = ""
if len(rows) > 0:
for row in rows:
if len(data) > 0:
data += ', '
data += str(''.join(row))
print ("INFO: Connected to MySQL server {0} with version {1}".format(mysql_server_fqdn, data))
# Delete db if existed
sql_query = "DROP DATABASE IF EXISTS {0};".format(mysql_db_name)
# print ("Sending query: {0}".format(sql_query))
cursor.execute(sql_query)
db.commit()
# Create database
sql_query = "CREATE DATABASE IF NOT EXISTS {0};".format(mysql_db_name)
# print ("Sending query: {0}".format(sql_query))
cursor.execute(sql_query)
db.commit()
sql_query = "USE {0}".format(mysql_db_name)
# print ("Sending query: {0}".format(sql_query))
cursor.execute(sql_query)
db.commit()
# Create table
sql_query = """CREATE TABLE {0} (
guid varchar(40),
text varchar(1024),
description varchar(1024),
link varchar(255),
training varchar(255),
comments varchar(1024),
severity varchar(10),
status varchar(15),
category varchar(255),
subcategory varchar(255),
graph_query_success varchar(1024),
graph_query_failure varchar(1024),
graph_query_result varchar(4096)
);""".format(mysql_db_table)
# print ("DEBUG: Sending query: {0}".format(sql_query))
cursor.execute(sql_query)
db.commit()
# Download checklist
technology = os.environ.get("CHECKLIST_TECHNOLOGY")
if technology:
checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json"
else:
checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json"
response = requests.get(checklist_url)
# If download was successful
if response.status_code == 200:
print ("INFO: File {0} downloaded successfully".format(checklist_url))
try:
# Deserialize JSON to object variable
checklist_object = json.loads(response.text)
except Exception as e:
print("Error deserializing JSON content: {0}".format(str(e)))
sys.exit(1)
# Get default status from the JSON, default to "Not verified"
try:
status_list = checklist_object.get("status")
default_status = status_list[0].get("name")
except:
default_status = "Not verified"
pass
# For each checklist item, add a row to mysql DB
row_counter = 0
for item in checklist_object.get("items"):
guid = item.get("guid")
category = item.get("category")
subcategory = item.get("subcategory")
text = escape_quotes(item.get("text"))
description = escape_quotes(item.get("description"))
severity = item.get("severity")
link = item.get("link")
training = item.get("training")
status = default_status
graph_query_success = escape_quotes(item.get("graph_success"))
graph_query_failure = escape_quotes(item.get("graph_failure"))
# print("DEBUG: Adding to table {0}: '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}'".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid))
sql_query = """INSERT INTO {0} (category,subcategory,text,description,severity,link,training,graph_query_success,graph_query_failure,guid,status)
VALUES ('{1}','{2}','{3}','{4}','{5}', '{6}','{7}','{8}','{9}','{10}', '{11}');""".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid, status)
# print ("DEBUG: Sending query: {0}".format(sql_query))
cursor.execute(sql_query)
db.commit()
row_counter += 1
else:
print ("Error downloading {0}".format(checklist_url))
# Bye
print("INFO: {0} rows added to database.".format(str(row_counter)))
db.close()
|
# Configuration
# [Yoshikawa Taichi]
# version 2.4 (Jan. 29, 2020)
class Configuration():
'''
Configuration
'''
def __init__(self):
# ----- Neural network components -----
## Neuron numbers in input-layer
self.input_size = 4
## Neuron numbers in hidden-layer
self.hidden_size = 5
## Neuron numbers in output-layer
self.output_size = 3
# ----- Neural network options -----
## sigmoid/tanh : Xavier / ReLU : He
self.init_method = 'He'
## SGD/Momentum/AdaGrad/Adam
self.learning_method = 'Adam'
## AdaGrad : 0.1 , SGD/Adam : 0.01 , Momentum : 0.001
self.learning_rate = 0.01
## numerical grad : num / back-propagation : bp
self.gradient_method = 'bp'
## 'sum-squared-error', 'cross-entropy-error'
self.loss_function = 'cross-entropy-error'
self.epoch = 1000
self.batch_size = 10
# ----- Dataset Configuration -----
self.dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
self.dataset_index = {
'dec' : [0,1,2,3],
'obj' : 4
}
self.dataset_one_hot_vector = {
'Iris-setosa' : [1,0,0],
'Iris-versicolor' : [0,1,0],
'Iris-virginica' : [0,0,1]
}
self.dataset_ratio = {
'train' : 100,
'test' : 50
}
# ----- I/O Configuration -----
self.path_out = '.'
|
from django.conf.urls import *
from .api_views import APIShippingArchiveQualitySummaryReportViewSet, APIShippingTripStatusReportViewSet
urlpatterns = [
url(r'^org/quality/summary/(?P<org_slug>[^/]+)/$',
APIShippingArchiveQualitySummaryReportViewSet.as_view(),
name='api-org-quality-summary'),
url(r'^project/status/(?P<project_slug>[pP]--[a-fA-F0-9]{4}-[a-fA-F0-9]{4})/$',
APIShippingTripStatusReportViewSet.as_view(),
name='api-project-trip-status'),
]
|
# Generated by Django 3.1.7 on 2021-03-23 20:20
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProductSize',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=255, verbose_name='Product Title')),
('active', models.BooleanField(default=True)),
('created_date', models.DateField(auto_now_add=True)),
('updated_date', models.DateField(auto_now=True)),
],
options={
'verbose_name': 'Product Size',
'verbose_name_plural': 'Products Sizes',
},
),
migrations.AddField(
model_name='product',
name='available_sizes',
field=models.ManyToManyField(to='products.ProductSize', verbose_name='Available Sizes'),
),
]
|
# -*- coding: utf-8 -*-
#__date__ = 7/26/18
#__time__ = 10:51 AM
#__author__ = isminilourentzou
import logging
from itertools import count
import os
import numpy as np
from config import opt, additional_args
import lib
import time
start_time = time.time()
additional_args(opt)
N_SAMPLES = opt.nsamples
logging.basicConfig(filename=os.path.join(opt.save_dir, opt.al_method+'only.log') if opt.logfolder else None, level=logging.INFO)
logging.getLogger("data").setLevel('WARNING')
logging.getLogger("model").setLevel('WARNING')
logging.getLogger("train").setLevel('WARNING')
logger = logging.getLogger("onlyAL")
def main():
logger.info(opt)
# load the test data: target languages
dataset = lib.data.Conll_dataset(opt, tag_type='ner', train=False)
#TODO: Sample initial train and validation set?
#dataset.sample_validation(100)
wordrepr = lib.train.build_wordrepr(opt, dataset.vocabs)
logger.info('Begin training active learning policy..')
allaccuracylist, allf1list, allpreclist, allreclist, allcountlist = [], [], [], [], []
for tau in range(0,opt.episode):
accuracylist, f1list, preclist, reclist, countlist = [], [], [], [], []
train_iter, validation_iter, test_iter = dataset.batch_iter(opt.batch_size)
model, optim = lib.train.create_model(opt, wordrepr)
trainer = lib.train.Trainer(model, train_iter, validation_iter, optim, opt)
_, _, _, _, acc, f1, prec, rec = trainer.train(opt.start_epoch, opt.end_epoch)[-1]
accuracylist.append(acc)
f1list.append(f1)
preclist.append(prec)
reclist.append(rec)
countlist.append(dataset.count_new)
logger.info('First accuracy: %.3f, f1: %.3f, prec: %.3f, rec: %.3f' % (acc, f1, prec, rec))
new_examples = []
#In every episode, run the trajectory
for t in count():
if(dataset.count_new >= opt.budget or len(dataset.train_unlabeled) < opt.nsamples): break
#unlabeled_examples = dataset.sample_unlabeled(opt.k_num) #Random sample k points from D_pool
#dataset.unlabeled_examples = unlabeled_examples
dataset.unlabeled_examples = dataset.train_unlabeled
logger.info('Episode:{}/{} Budget:{}/{} Unlabeled:{}'.format(str(tau+1),opt.episode, dataset.count_new, opt.budget, len(dataset.train_unlabeled)))
query_strategy = lib.utils.choose_al_method(opt.al_method, dataset, model, opt)
ask_xnew_active = query_strategy.make_query(n_samples=N_SAMPLES)
if(len(ask_xnew_active)>0):
for x_new in ask_xnew_active:
dataset.label(x_new)
#assert x_new.id[0] not in new_examples
new_examples.append(x_new.id[0])
#print('new_examples', len(new_examples), new_examples)
trainer = lib.train.Trainer(model, train_iter, validation_iter, optim, opt)
_, _, _, _, acc, f1, prec, rec = trainer.train(opt.start_epoch, opt.end_epoch)[-1]
#if((dataset.count_new+1) % opt.k_num*2 == 0):
accuracylist.append(acc)
f1list.append(f1)
preclist.append(prec)
reclist.append(rec)
countlist.append(dataset.count_new)
logger.info('accuracy: %.3f, f1: %.3f, prec: %.3f, rec: %.3f' % (acc, f1, prec, rec))
dataset.reset()
allaccuracylist.append(accuracylist)
allf1list.append(f1list)
allpreclist.append(preclist)
allreclist.append(reclist)
allcountlist.append(countlist)
logger.info('Test finished.')
logger.info("--- %s seconds ---" % (time.time() - start_time))
averageacc=list(np.mean(np.array(allaccuracylist), axis=0))
averagef1=list(np.mean(np.array(allf1list), axis=0))
averageprec=list(np.mean(np.array(allpreclist), axis=0))
averagerec=list(np.mean(np.array(allreclist), axis=0))
averagecount=list(np.mean(np.array(allcountlist), axis=0))
#Save results to csv and plot!
csv_results = os.path.join(opt.save_dir, '_'.join([opt.lang, opt.al_method, 'only_result.csv']))
logging.info('Saving results to {}'.format(csv_results))
lib.utils.save_results(csv_results, averageacc, averagef1, averageprec, averagerec, averagecount, 'only'+opt.al_method.upper())
if __name__ == '__main__':
testing = opt.test.split(';')
for test in testing:
opt.test = test
opt.episode =1
main()
|
def _char_value(c):
assert 'A' <= c <= 'Z'
return ord(c) - ord('A') + 1
def _value_of_name(name):
return sum([_char_value(c) for c in name])
def names_scores(names):
names_sorted = sorted(names)
res = 0
for i in xrange(len(names_sorted)):
res += (i + 1) * _value_of_name(names_sorted[i])
return res
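# Worked example (illustrative): for ["BOB", "ANNA"] the sorted order is
# ["ANNA", "BOB"]; ANNA scores 1+14+14+1 = 30 and BOB scores 2+15+2 = 19,
# so names_scores returns 1*30 + 2*19 = 68.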
|
# -*- coding: utf-8 -*-
from . import logistic_interface as LI
import os
import sys
from datetime import datetime
def execute(init_x:float = 0.1,
record_num:int = 500,
iterate_num:int = 5000,
a_interval:float = 0.005,
a_min:float = 2.5,
file_name:str= datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".png"):
fig_dir = os.path.join(os.getcwd(), "graph")
if os.path.exists(fig_dir) is False:
os.mkdir(fig_dir)
df = LI.run(init_x, record_num, iterate_num, a_interval, a_min)
graph = df.plot.scatter(x="a", y = "limit", s = 0.0001)
fig = graph.get_figure()
fig.savefig(os.path.join(fig_dir, file_name))
def main(args):
    # Dispatch to execute() with CLI arguments converted to the expected types.
    if len(args) == 1:
        return execute()
    if len(args) == 2:
        return execute(float(args[1]))
    if len(args) == 3:
        return execute(float(args[1]), int(args[2]))
    if len(args) == 4:
        return execute(float(args[1]), int(args[2]), int(args[3]))
    if len(args) == 5:
        return execute(float(args[1]), int(args[2]), int(args[3]), float(args[4]))
    if len(args) == 6:
        return execute(float(args[1]), int(args[2]), int(args[3]), float(args[4]), float(args[5]))
    if len(args) == 7:
        return execute(float(args[1]), int(args[2]), int(args[3]), float(args[4]), float(args[5]), args[6])
main(sys.argv)
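# --- Usage sketch (assumption-labelled) ---
# Example command line; the module path is a placeholder for however this
# package is invoked, and the positional arguments map to
# execute(init_x, record_num, iterate_num, a_interval, a_min, file_name):
#   python -m package.bifurcation 0.1 500 5000 0.005 2.5 bifurcation.png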
|
from past.builtins import basestring
from snovault import upgrade_step
from .shared import ENCODE2_AWARDS, REFERENCES_UUID
from pyramid.traversal import find_root
import re
def fix_reference(value):
if not isinstance(value, basestring):
raise ValueError(value)
return value.replace('PUBMED:', 'PMID:').replace(' ', '')
@upgrade_step('document', '', '2')
def document_0_2(value, system):
# http://redmine.encodedcc.org/issues/1259
if 'references' in value:
value['references'] = [fix_reference(v) for v in value['references']]
@upgrade_step('document', '2', '3')
def document_2_3(value, system):
# http://redmine.encodedcc.org/issues/1295
# http://redmine.encodedcc.org/issues/1307
if 'status' in value:
if value['status'] == 'DELETED':
value['status'] = 'deleted'
elif value['status'] == 'CURRENT':
if value['award'] in ENCODE2_AWARDS:
value['status'] = 'released'
elif value['award'] not in ENCODE2_AWARDS:
value['status'] = 'in progress'
@upgrade_step('document', '3', '4')
def document_3_4(value, system):
# http://redmine.encodedcc.org/issues/2591
context = system['context']
root = find_root(context)
publications = root['publications']
if 'references' in value:
new_references = []
for ref in value['references']:
if re.match('doi', ref):
new_references.append(REFERENCES_UUID[ref])
else:
item = publications[ref]
new_references.append(str(item.uuid))
value['references'] = new_references
@upgrade_step('document', '4', '5')
def document_4_5(value, system):
# http://redmine.encodedcc.org/issues/3063
if 'urls' in value:
value['urls'] = list(set(value['urls']))
if 'aliases' in value:
value['aliases'] = list(set(value['aliases']))
if 'references' in value:
value['references'] = list(set(value['references']))
@upgrade_step('document', '5', '6')
def document_5_6(value, system):
# http://redmine.encodedcc.org/issues/1384
if 'description' in value:
if value['description']:
value['description'] = value['description'].strip()
else:
del value['description']
if 'notes' in value:
if value['notes']:
value['notes'] = value['notes'].strip()
else:
del value['notes']
|
# -*- coding: utf-8 -*-
from scrapy.cmdline import execute
import sys
import os
print(os.path.dirname(os.path.abspath(__file__)))
# os.path.abspath(__file__) gets the absolute path of this main file; os.path.dirname gets its parent directory
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", "jobbole"])
|
import logging
import telegram
class TelegramLogsHandler(logging.Handler):
def __init__(self, debug_bot_token, chat_id):
super().__init__()
self.debug_bot = telegram.Bot(debug_bot_token)
self.chat_id = chat_id
def emit(self, record):
log_entry = self.format(record)
self.debug_bot.send_message(self.chat_id, text=log_entry)
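# --- Usage sketch (assumption-labelled) ---
# Wiring the handler into a logger; the bot token and chat id below are
# placeholders, not real credentials.
if __name__ == '__main__':
    logger = logging.getLogger('example')
    logger.setLevel(logging.INFO)
    handler = TelegramLogsHandler('123456:PLACEHOLDER-TOKEN', '-1001234567890')
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logger.addHandler(handler)
    logger.info('TelegramLogsHandler connected')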
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import argparse
import numpy as np
import cv2
from eval import cal_hist, pre_process
def parse_args():
parser = argparse.ArgumentParser(description="deeplabv3 accuracy calculation")
parser.add_argument('--data_root', type=str, default='', help='root path of val data')
parser.add_argument('--data_lst', type=str, default='', help='list of val data')
parser.add_argument('--batch_size', type=int, default=1, help='batch size')
parser.add_argument('--crop_size', type=int, default=513, help='crop size')
parser.add_argument('--scales', type=float, action='append', help='scales of evaluation')
parser.add_argument('--flip', action='store_true', help='perform left-right flip')
parser.add_argument('--ignore_label', type=int, default=255, help='ignore label')
parser.add_argument('--num_classes', type=int, default=21, help='number of classes')
parser.add_argument('--result_path', type=str, default='./result_Files', help='result Files path')
args, _ = parser.parse_known_args()
return args
def eval_batch(args, result_file, img_lst, crop_size=513, flip=True):
result_lst = []
batch_size = len(img_lst)
batch_img = np.zeros((args.batch_size, 3, crop_size, crop_size), dtype=np.float32)
resize_hw = []
for l in range(batch_size):
img_ = img_lst[l]
img_, resize_h, resize_w = pre_process(args, img_, crop_size)
batch_img[l] = img_
resize_hw.append([resize_h, resize_w])
batch_img = np.ascontiguousarray(batch_img)
net_out = np.fromfile(result_file, np.float32).reshape(args.batch_size, args.num_classes, crop_size, crop_size)
for bs in range(batch_size):
probs_ = net_out[bs][:, :resize_hw[bs][0], :resize_hw[bs][1]].transpose((1, 2, 0))
ori_h, ori_w = img_lst[bs].shape[0], img_lst[bs].shape[1]
probs_ = cv2.resize(probs_, (ori_w, ori_h))
result_lst.append(probs_)
return result_lst
def eval_batch_scales(args, eval_net, img_lst, scales,
base_crop_size=513, flip=True):
sizes_ = [int((base_crop_size - 1) * sc) + 1 for sc in scales]
probs_lst = eval_batch(args, eval_net, img_lst, crop_size=sizes_[0], flip=flip)
print(sizes_)
for crop_size_ in sizes_[1:]:
probs_lst_tmp = eval_batch(args, eval_net, img_lst, crop_size=crop_size_, flip=flip)
for pl, _ in enumerate(probs_lst):
probs_lst[pl] += probs_lst_tmp[pl]
result_msk = []
for i in probs_lst:
result_msk.append(i.argmax(axis=2))
return result_msk
def acc_cal():
args = parse_args()
args.image_mean = [103.53, 116.28, 123.675]
args.image_std = [57.375, 57.120, 58.395]
# data list
with open(args.data_lst) as f:
img_lst = f.readlines()
# evaluate
hist = np.zeros((args.num_classes, args.num_classes))
batch_img_lst = []
batch_msk_lst = []
bi = 0
image_num = 0
for i, line in enumerate(img_lst):
img_path, msk_path = line.strip().split(' ')
result_file = os.path.join(args.result_path, os.path.basename(img_path).split('.jpg')[0] + '_0.bin')
img_path = os.path.join(args.data_root, img_path)
msk_path = os.path.join(args.data_root, msk_path)
img_ = cv2.imread(img_path)
msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)
batch_img_lst.append(img_)
batch_msk_lst.append(msk_)
bi += 1
if bi == args.batch_size:
batch_res = eval_batch_scales(args, result_file, batch_img_lst, scales=args.scales,
base_crop_size=args.crop_size, flip=args.flip)
for mi in range(args.batch_size):
hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)
bi = 0
batch_img_lst = []
batch_msk_lst = []
print('processed {} images'.format(i+1))
image_num = i
if bi > 0:
batch_res = eval_batch_scales(args, result_file, batch_img_lst, scales=args.scales,
base_crop_size=args.crop_size, flip=args.flip)
for mi in range(bi):
hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)
print('processed {} images'.format(image_num + 1))
print(hist)
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print('per-class IoU', iu)
print('mean IoU', np.nanmean(iu))
if __name__ == '__main__':
acc_cal()
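    # A minimal sketch (not from the original script): the IoU formula used above, applied to a
    # toy 2x2 class-confusion matrix. Diagonal entries are per-class intersections; row sum plus
    # column sum minus the diagonal gives the union.
    toy_hist = np.array([[8., 2.],
                         [1., 9.]])
    toy_iu = np.diag(toy_hist) / (toy_hist.sum(1) + toy_hist.sum(0) - np.diag(toy_hist))
    print('toy per-class IoU', toy_iu)  # [8/11, 9/12]
    print('toy mean IoU', np.nanmean(toy_iu))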
|
# Distributed under the MIT license.
# Copyright (c) 2013 Dave McCoy (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'''
Log
6/12/2013: Initial commit
'''
from PyQt4.QtGui import *
from PyQt4.QtCore import *
#Note this cannot have a QColor for these two because the values are passed
#through MIME types
DEMO_BOX_COLOR = "blue"
DEMO_BOX_RECT = QRectF(0, 0, 100, 50)
DEMO_BOX_POS = QPointF(0, 0)
DEMO_BOX_ID = "Demo Box"
LINK_DEMO_COLOR = QColor("black")
BEZIER_CONNECTION = False
|
from .adc import Adc
from .and_ import And
from .asl import Asl
from .eor import Eor
from .lsr import Lsr
from .ora import Ora
from .rol import Rol
from .ror import Ror
from .sbc import Sbc
|
from dnnv.properties import *
N = Network("N")
Forall(
x,
Implies(
(0 <= x <= 1),
And(
abs(N(x)[0, 3] - N(x)[0, 9]) < abs(N(x)[0, 3] - N(x)[0, 2]),
abs(N(x)[0, 3] - N(x)[0, 9]) < abs(N(x)[0, 9] - N(x)[0, 2]),
),
),
)
|
import sys
instructions = 'LOAD STORE IN OUT ADD SUB MUL DIV MOD AND OR XOR JUMP JZ JLZ JGZ'.split()
address = {}
current_address = 0
def assemble_label(words):
address[words[0][:-1]] = current_address
return assemble(words[1:])
def assemble_number(words):
if len(words) > 1 and not words[1].startswith(';'):
raise ValueError('Extra stuff on line')
global current_address
current_address += 1
return hex(int(words[0]))[2:].zfill(8)
def get_operand(word):
if word.isdigit():
return int(word)
if word.isalpha():
return address[word]
raise ValueError('Argument must be a number or label')
def ensure_only_one_operand(words):
if len(words) == 1:
raise ValueError('No operand to instruction')
if len(words) > 2 and not words[2].startswith(';'):
raise ValueError('Extra stuff on line')
def assemble_instruction(words):
ensure_only_one_operand(words)
operand = get_operand(words[1])
if operand >= 2 ** 28:
raise ValueError('Operand too large')
opcode = hex(instructions.index(words[0]))[2:]
operand_in_hex = hex(operand)[2:].zfill(7)
global current_address
current_address += 1
return f'{opcode}{operand_in_hex}'
def assemble(words):
if not words:
return ''
elif words[0].startswith(';'):
return ''
elif words[0].endswith(':'):
return assemble_label(words)
elif words[0].isdigit():
return assemble_number(words)
elif words[0] in instructions:
return assemble_instruction(words)
raise ValueError('Illegal syntax')
try:
for line in sys.stdin:
machine_code = assemble(line.rstrip().upper().split())
if machine_code:
print(machine_code)
except ValueError as e:
print(e)
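# Worked example (not part of the original): assembling this three-line program
#     START: IN 0
#            OUT 0
#            JUMP START
# produces one 32-bit hex word per instruction:
#     20000000    (IN   -> opcode 0x2, operand 0)
#     30000000    (OUT  -> opcode 0x3, operand 0)
#     c0000000    (JUMP -> opcode 0xc, operand = address of START = 0)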
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import os
import json
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, BoxMode
# from .coco import load_coco_json, load_sem_seg
"""
This file contains functions to register a COCO-format dataset to the DatasetCatalog.
"""
__all__ = ["register_kitti_instances", "get_kitti_instances_meta"]
# cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
# 'Tram', 'Misc', 'DontCare']
cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
'Tram', 'Misc']
# KITTI_CATEGORIES = [
# {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": cats[0]},
# {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": cats[1]},
# {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": cats[2]},
# {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": cats[3]},
# {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": cats[4]},
# {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": cats[5]},
# {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": cats[6]},
# {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": cats[7]},
# {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": cats[8]},
# ]
KITTI_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": cats[0]},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": cats[1]},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": cats[2]},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": cats[3]},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": cats[4]},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": cats[5]},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": cats[6]},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": cats[7]},
]
def load_kitti_json(json_path, img_path, name):
with open(json_path) as f:
for i, line in enumerate(f):
images = json.loads(line) # run through each line
#img = json.dumps(images, indent = 2) #for a better representation
dataset_dicts = []
for i, image in enumerate(images['images']):
record = {}
# print("registering image")
filename = os.path.join(img_path, image["file_name"])
_id = image["id"]
        # skip images whose file cannot be read (cv2.imread returns None)
        try:
            height, width = cv2.imread(filename).shape[:2]
        except AttributeError:
            print("File doesn't exist: {}".format(filename))
continue
record["file_name"] = filename
record["height"] = height
record["width"] = width
record["image_id"] = _id #
objs = [] #many instances in 1 record
for anno in images['annotations']:
anno_id = anno["image_id"]
category_id = anno["category_id"]
            # match annotations to this image by comparing image ids
            # category ids in the JSON start at 1; detectron2 expects 0-based ids, hence the -1 below
if anno_id == _id:
# area = anno["area"]
instance_id = anno["id"]
# print("Iter {2} Instance {1} In image {0}".format(filename, instance_id,i))
px = anno["bbox"][0]
py = anno["bbox"][1]
p_width = anno["bbox"][2]
p_height = anno["bbox"][3]
obj = {"bbox": [px,py,p_width,p_height],
"bbox_mode": BoxMode.XYWH_ABS, #it's not XYXY but XYWH
# "area": area,
"segmentation":[],
"category_id": category_id - 1, #set things only classes person
"iscrowd": 0}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
# if i == 200: #200 iterations
# break
return dataset_dicts
def register_kitti_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(name, lambda: load_kitti_json(json_file, image_root, name))
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
def get_kitti_instances_meta(dataset_name):
thing_ids = [k["id"] for k in KITTI_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in KITTI_CATEGORIES if k["isthing"] == 1]
assert len(thing_ids) == 8 or len(thing_ids) == 9, len(thing_ids)
    # Mapping from the non-contiguous KITTI category ids to contiguous ids starting at 0
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in KITTI_CATEGORIES if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
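# A hypothetical usage sketch (not part of the original file); the dataset name and both paths
# are placeholders for a KITTI export in COCO-style json format.
if __name__ == "__main__":
    meta = get_kitti_instances_meta("kitti_train")
    register_kitti_instances(
        "kitti_train",
        meta,
        json_file="datasets/kitti/annotations/train.json",
        image_root="datasets/kitti/training/image_2",
    )
    print(MetadataCatalog.get("kitti_train").thing_classes)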
|
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from registration.forms import RegistrationForm
def register(request, next=settings.LOGIN_REDIRECT_URL, form_cls=RegistrationForm,
profile_cb=None, login_cb=None, template='registration/register.html',
extra_context=None, initial=None):
"""
Register user by e-mail address and log them in.
"""
if request.method == "POST":
form = form_cls(request.POST)
if form.is_valid():
user, pw = form.save(request, profile_cb)
user = auth.authenticate(username=user.username, password=pw)
auth.login(request, user)
response = HttpResponseRedirect(next)
if login_cb is not None:
response = login_cb(response, user)
return response
else:
form = form_cls(initial=initial)
context = {'form': form}
if extra_context is not None:
context.update(extra_context)
return render_to_response(template, context, RequestContext(request))
|
#!/usr/bin/env python3
import logging
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(name)-15s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)
import asyncio
import collections
import json
import os
import aiohttp
from aiohttp import web
DEFAULT_PORT = 5000
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
ASSETS_DIR = os.path.join(ROOT_DIR, "assets")
INDEX_PATH = os.path.join(ROOT_DIR, "index.html")
def make_message(author, message):
return json.dumps({"author": author, "message": message})
def ignore_result(task):
if not task.cancelled():
task.exception() # mark as observed
class Application:
def __init__(self):
self._app = web.Application()
self._app.add_routes([
web.get("/", self.handle_index),
web.get("/session", self.handle_session),
web.static("/assets", ASSETS_DIR)
])
self._app.cleanup_ctx.append(lambda app: self.status_loop_setup(app))
self._session_count = 1
self._room = ChatRoom()
def run(self):
web.run_app(self._app, port = DEFAULT_PORT)
async def status_loop_setup(self, app):
task = asyncio.create_task(self.status_loop())
yield
task.cancel()
await task
async def status_loop(self):
try:
while True:
logger.info("Sessions: %s, Tasks: %s" % (self._room.session_count(), len(asyncio.all_tasks())))
await asyncio.sleep(15 * 60)
except asyncio.CancelledError:
pass
async def handle_index(self, request):
with open(INDEX_PATH, "r", encoding = "utf-8") as f:
html = f.read()
return web.Response(text = html, content_type = "text/html", charset = "utf-8")
# Receives a websocket connection
async def handle_session(self, request):
ws = web.WebSocketResponse(heartbeat = 60.0)
await ws.prepare(request)
name = f"User#{self._session_count}"
self._session_count += 1
session = ChatSession(name, ws)
try:
await session.run(self._room)
finally:
await ws.close()
return ws
# Keeps a set of connected sessions. All messages are sent to every member of the room.
class ChatRoom:
def __init__(self):
self._sessions = set()
def session_count(self):
return len(self._sessions)
def register(self, session):
assert session not in self._sessions
self._sessions.add(session)
self.send(make_message("SYSTEM", f"{session.name()} has joined the room."))
def unregister(self, session):
assert session in self._sessions
self._sessions.remove(session)
self.send(make_message("SYSTEM", f"{session.name()} has left the room."))
def send(self, message):
for session in self._sessions:
session.send(message)
# Reads incoming json messages from the web socket and writes them to other clients.
# Also writes outgoing json messages from the message queue to the web socket.
class ChatSession:
MAX_MESSAGES = 100
def __init__(self, name, socket):
self._name = name
self._socket = socket
self._read_task = None
self._write_task = None
self._queue_event = asyncio.Event()
self._message_queue = collections.deque()
def name(self):
return self._name
async def run(self, room):
self._write_task = asyncio.create_task(self.write())
self._write_task.add_done_callback(ignore_result)
self._read_task = asyncio.current_task()
# Read incoming messages until an error occurs or we get cancelled.
try:
room.register(self)
async for frame in self._socket:
if frame.type == aiohttp.WSMsgType.TEXT:
data = json.loads(frame.data)
if data["type"] == "message":
message = make_message(self._name, data["message"])
room.send(message)
else:
logger.error("Invalid data type: %s", data["type"])
elif frame.type == aiohttp.WSMsgType.ERROR:
                    logger.error("WebSocket connection closed with error %s", self._socket.exception())
break
except asyncio.CancelledError:
# logger.info(f"Reader cancelled: {self.name()}")
raise
except Exception:
logger.exception("Error within a session.")
finally:
room.unregister(self)
if not self._write_task.done() and not self._write_task.cancelled():
self._write_task.cancel()
# logger.info(f"Reader stopped: {self.name()}")
    # Sends a message to the client. Clients that are too slow to drain
    # their queue get disconnected.
def send(self, message):
if len(self._message_queue) >= ChatSession.MAX_MESSAGES:
logger.info("Disconnecting client because it was too slow.")
self.stop()
return
# Insert the message and wake the task
was_empty = len(self._message_queue) == 0
self._message_queue.append(message)
if was_empty and not self._queue_event.is_set():
self._queue_event.set()
# Writes from the queue to the client's web socket.
async def write(self):
try:
while True:
await self._queue_event.wait()
assert len(self._message_queue) > 0
message = self._message_queue.popleft()
if not self._message_queue:
self._queue_event.clear()
await self._socket.send_str(message)
except asyncio.CancelledError:
# logger.info(f"Writer cancelled: {self.name()}")
raise
except Exception:
logger.exception("Error writing to web socket.")
self.stop()
finally:
# logger.info(f"Writer stopped: {self.name()}")
pass
# Cancel both tasks
def stop(self):
if not self._read_task.done() and not self._read_task.cancelled():
self._read_task.cancel()
if __name__ == "__main__":
app = Application()
app.run()
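# A minimal client sketch (not part of the original server): connect to the /session websocket
# with aiohttp and send one chat message. Host and port assume the defaults defined above; run it
# in a separate process while the server is up, e.g. via asyncio.run(example_client()).
async def example_client():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(f"http://localhost:{DEFAULT_PORT}/session") as ws:
            await ws.send_str(json.dumps({"type": "message", "message": "hello"}))
            frame = await ws.receive()  # first broadcast from the room (join notice or our own message)
            print(frame.data)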
|
import socket
import ssl
import time
import threading
import json
from src.server.server_utilities import prepare_message
from src.server.server_strings import *
from src.server import server_data
SERVER_IP = "127.0.0.1"
SERVER_PORT = 9999
PLAYER_NAME = input("Enter a display name: ")
while len(PLAYER_NAME) == 0:
PLAYER_NAME = input("Enter a display name: ")
SERVER_NAME = 'N/A'
HEADER_SIZE = 8
CLIENT_TICK_RATE = 0.1
print("Client Initializing...")
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
print(f"Connecting to server: [{SERVER_IP}:{SERVER_PORT}]")
insecure_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_socket = context.wrap_socket(insecure_socket, server_hostname=SERVER_IP)
ssl_socket.connect((SERVER_IP, SERVER_PORT))
print(f"Server Certificate:\n{ssl.DER_cert_to_PEM_cert(ssl_socket.getpeercert(True))}")
print(f"Connection Established: [{SERVER_IP}:{SERVER_PORT}]")
print("------------------------------------------------------------------")
print("This client currently implements a command-system for testing purposes")
print("This will be overridden when the C# client is made.")
print("------------------------------------------------------------------")
print("You can type commands into the console to send them to the server.")
print("\nAvailable commands: ")
print("!say <message>: Broadcasts a chat message to all clients on the server.")
print("!draw: Draws a card from the deck to the player hand.")
print("!play <card_index>: Plays a card from the player hand by the given index.")
print("!cards: Lists the cards currently on hand.")
print("!clients: Lists all the clients on the server.")
print("!start: Starts a game session, currently it only creates a deck and distributes 7 cards to each client.")
print("!stop: Stops an active game session and clears the hands of all clients.")
print("------------------------------------------------------------------")
def disconnect_from_server(reason=None):
global kill_threads
kill_threads = True
if ssl_socket:
ssl_socket.close()
print(f"Client disconnected from server: [{SERVER_IP}:{SERVER_PORT}]")
if reason:
print(f'Client disconnected due to the following: {reason}')
def inbound_server_data():
# Loop incoming messages
while not kill_threads:
try:
# Get header from 10 bytes (2 are formatting)
raw_header = ssl_socket.recv(HEADER_SIZE + 2)
except socket.error as e:
# print(e)
disconnect_from_server()
continue
if len(raw_header) <= 0:
continue
# Get message length from given header info
msg_len = int(raw_header[1:HEADER_SIZE + 1].decode("utf-8"))
# Get the message based on the number of bytes stated in the header
raw_msg = ssl_socket.recv(msg_len)
header = raw_header.decode('utf-8')
message = json.loads(raw_msg.decode('utf-8'))
if message[SERV_DATA_CONTENT] == "!quit":
disconnect_from_server()
elif message[SERV_DATA_CONTENT].split(' ', 1)[0] == "!setname":
global PLAYER_NAME
PLAYER_NAME = message[SERV_DATA_CONTENT].split(' ', 1)[1]
print(f'[DEBUG] Player name set: {PLAYER_NAME}')
elif message[SERV_DATA_CONTENT].split(' ', 1)[0] == "!setserver":
global SERVER_NAME
SERVER_NAME = message[SERV_DATA_CLIENT]
print(f'[DEBUG] Server name set: {SERVER_NAME}')
else:
print(f"{header}[{message[SERV_DATA_CLIENT] if message[SERV_DATA_CLIENT] is not None else SERVER_NAME}{' -> Me' if message[SERV_DATA_TYPE] != SERV_BROADCAST else ''}]:{message[SERV_DATA_CONTENT]}")
def outbound_data_to_server():
# Send connect message
connect_data = server_data.Data(content_type=SERV_MESSAGE, content_data=f"!connect {PLAYER_NAME}", client=PLAYER_NAME)
ssl_socket.send(bytes(prepare_message(connect_data), 'utf-8'))
while not kill_threads:
try:
# Send data to the server.
data_to_send = input()
if len(data_to_send) != 0:
data_to_send = server_data.Data(content_type=SERV_MESSAGE, content_data=data_to_send, client=PLAYER_NAME)
ssl_socket.send(bytes(prepare_message(data_to_send), 'utf-8'))
except socket.error as e:
# print(e)
disconnect_from_server()
return
time.sleep(CLIENT_TICK_RATE)
# Kill flags
kill_threads = False
# Start inbound data retrieval.
inbound_thread = threading.Thread(target=inbound_server_data)
inbound_thread.start()
# Start outbound data sending.
outbound_thread = threading.Thread(target=outbound_data_to_server)
outbound_thread.start()
|
from django.conf.urls import url
from django.urls import path, include
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView, TokenVerifyView
)
from .views import create_user_view, SignupMailSentView, SignupDomainError, create_org_addition_request, RequestOrgAdditionSuccess, \
activate_user, RequestOrgAdditionAPIView, OrgMailSentView, activate_org, RegisterAnonymousContributor
urlpatterns = [
path('', include('django.contrib.auth.urls')),
url(r'^signup$', create_user_view, name='signup'),
url(r'^api/signup/anon$', RegisterAnonymousContributor.as_view(), name='api-signup-anon'),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'), # TODO remove
url(r'^regmailsent$', SignupMailSentView.as_view(), name='regmailsent'),
url(r'^orgregmailsent$', OrgMailSentView.as_view(), name='org-reg-mail-sent'),
url(r'^request_org_addition$', create_org_addition_request, name='req-org-addition'),
url(r'^request_org_addition/create$', RequestOrgAdditionAPIView.as_view(), name='api-req-org-addition-create'),
url(r'^request_org_addition/success$', RequestOrgAdditionSuccess.as_view(), name='req-org-addition-success'),
url(r'^error/domainmismatch/(?P<pk>[0-9]+)$', SignupDomainError.as_view(), name='signup-domain-error'),
url(r'^activate_user/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]*-[0-9A-Za-z]*)/$', activate_user, name='activate_user'),
url(r'^activate_org/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]*-[0-9A-Za-z]*)/$', activate_org, name='activate_org'),
]
|
from dataflow import *
from var import *
|
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from collections import Counter
from collections import defaultdict
from sklearn.tree import DecisionTreeClassifier,plot_tree
import time
import matplotlib.pyplot as plt
class CartTree_Category_Lib:
"""
    Helper functions for a CART classification tree
    (for categorical training data).
Author: xrh
Date: 2021-04-02
ref:
"""
    def calc_gini(self, trainLabelArr, sample_weight=None):
        """
        Compute the Gini index (Gini impurity) of dataset D.
        :param trainLabelArr: label array of the current dataset
        :param sample_weight: sample weights (used by AdaBoost)
        :return: Gini index
        """
        N = len(trainLabelArr)  # total number of rows in the dataset
        if sample_weight is None:
            sample_weight = np.ones(N, dtype=int)
        D = np.sum(sample_weight)  # total weight of all samples in the dataset
        LabelSet = {ele for ele in trainLabelArr}  # all distinct labels in trainLabelArr
        sum_p = 0
        for label in LabelSet:
            C_k = np.sum(sample_weight[trainLabelArr == label])  # total weight of the samples with label k
            p = C_k / D
            sum_p += p ** 2
        gini = 1 - sum_p
        return gini
    def calc_gini_A(self, trainDataArr_DevFeature, trainLabelArr, split_value=None, sample_weight=None):
        """
        Compute the conditional Gini index.
        Only the column of the feature of interest is used, which speeds up training.
        :param trainDataArr_DevFeature: array containing only the column of the chosen feature
        :param trainLabelArr: label array
        :param split_value: split value of the feature
        :param sample_weight: sample weights (used by AdaBoost)
        :return: conditional Gini index
        """
        N = len(trainLabelArr)  # total number of rows in the dataset
        if sample_weight is None:
            sample_weight = np.ones(N, dtype=int)
        D = np.sum(sample_weight)  # total weight of all samples in the dataset
        gini_A = 0
        if split_value is None:  # no split value given
            A_set = {A_i for A_i in trainDataArr_DevFeature}  # all values taken by the feature
            for i in A_set:
                D_i = np.sum(sample_weight[trainDataArr_DevFeature == i])  # total weight of the samples whose feature value is i
                p_i = D_i / D
                gini_A += p_i * self.calc_gini(trainLabelArr[trainDataArr_DevFeature == i],
                                               sample_weight[trainDataArr_DevFeature == i])
        else:  # a split value is given: split the dataset into two subsets by that value
            # D_1: subset whose feature value equals the split value
            # D_2: subset whose feature value does not equal the split value
            D_1 = np.sum(sample_weight[trainDataArr_DevFeature == split_value])  # total weight of the samples whose feature value equals split_value
            p_1 = D_1 / D
            p_2 = 1 - p_1
            gini_A = p_1 * self.calc_gini(trainLabelArr[trainDataArr_DevFeature == split_value],
                                          sample_weight[trainDataArr_DevFeature == split_value]) \
                     + p_2 * self.calc_gini(trainLabelArr[trainDataArr_DevFeature != split_value],
                                            sample_weight[trainDataArr_DevFeature != split_value])
        return gini_A
    def select_min_gini(self, trainDataArr, trainLabelArr, feature_value_set, sample_weight=None):
        """
        Select the feature with the smallest conditional Gini index.
        :param trainDataArr: shape=(60000,784)
        :param trainLabelArr: shape=(60000,1)
        :param feature_value_set: candidate feature set, as (feature, split value) pairs
        :param sample_weight: sample weights (used by AdaBoost)
        :return:
        """
        N = len(trainLabelArr)  # total number of rows in the dataset
        if sample_weight is None:
            sample_weight = np.ones(N, dtype=int)
        mini_gini_A = float('inf')  # smallest conditional Gini index found so far
        mini_gini_A_feature = 0  # feature achieving the smallest conditional Gini index
        mini_gini_A_feature_split = None  # split value of that feature
        for i, v in feature_value_set:  # iterate over (feature, split value) pairs
            current = self.calc_gini_A(trainDataArr[:, i], trainLabelArr, v, sample_weight)  # conditional Gini index when splitting on feature i at value v
            # print('gini(D,A:{},v:{})={}'.format(i,v,current))
            if current < mini_gini_A:
                mini_gini_A = current
                mini_gini_A_feature = i
                mini_gini_A_feature_split = v
        best_feature_value = (mini_gini_A_feature, mini_gini_A_feature_split)
        return best_feature_value, mini_gini_A
class CartTree_Lib:
"""
    Helper functions for a CART classification tree
    (for numeric training data).
Author: xrh
Date: 2021-03-26
ref:
"""
def calc_gini(self, trainLabelArr, sample_weight=None):
"""
        Compute the Gini index (Gini impurity) of dataset D.
        :param trainLabelArr: label array of the current dataset
        :param sample_weight: sample weights (used by AdaBoost)
        :return: Gini index
"""
N = len(trainLabelArr) # 数据集的总行数
if sample_weight is None:
sample_weight = np.ones(N, dtype=int)
D = np.sum(sample_weight) # 数据集中所有样本的 权重和
LabelSet = {ele for ele in trainLabelArr} # trainLabelArr 中所有标签的类别
sum_p = 0
for label in LabelSet:
C_k = np.sum(sample_weight[trainLabelArr == label]) # 类型为k 的样本的权重和
p = C_k / D
sum_p += p ** 2
gini = 1 - sum_p
return gini
def calc_gini_A(self, trainDataArr_DevFeature, trainLabelArr, split_value=None, sample_weight=None):
"""
        Compute the conditional Gini index.
        Only the column of the feature of interest is used, which speeds up training.
        :param trainDataArr_DevFeature: array containing only the column of the chosen feature
        :param trainLabelArr: label array
        :param split_value: split value of the feature
        :param sample_weight: sample weights (used by AdaBoost)
        :return: conditional Gini index
"""
N = len(trainLabelArr) # 数据集的总行数
if sample_weight is None:
sample_weight = np.ones(N, dtype=int)
D = np.sum(sample_weight) # 数据集中所有样本的 权重和
gini_A = 0
if split_value == None: # 未指定划分 的特征值
A_set = {A_i for A_i in trainDataArr_DevFeature} # trainDataArr_DevFeature 中的 所有取值
for i in A_set:
D_i = np.sum(sample_weight[trainDataArr_DevFeature <= i]) # 特征值为 i 的 样本的权重和
p_i = D_i / D
gini_A += p_i * self.calc_gini(trainLabelArr[trainDataArr_DevFeature <= i],
sample_weight[trainDataArr_DevFeature <= i])
else: # 指定 划分的特征值, 把集合 根据特征值划分为2个子集
# D_1 满足 <= 特征值的 子集合
# D_2 满足 > 特征值的 子集合
D_1 = np.sum(sample_weight[trainDataArr_DevFeature <= split_value]) # 特征值为 split_value 的 样本的权重和
p_1 = D_1 / D
p_2 = 1 - p_1
gini_A = p_1 * self.calc_gini(trainLabelArr[trainDataArr_DevFeature <= split_value],
sample_weight[trainDataArr_DevFeature <= split_value]) \
+ p_2 * self.calc_gini(trainLabelArr[trainDataArr_DevFeature > split_value],
sample_weight[trainDataArr_DevFeature > split_value])
return gini_A
def select_min_gini(self, trainDataArr, trainLabelArr, feature_value_set, sample_weight=None):
"""
        Select the feature with the smallest conditional Gini index.
        :param trainDataArr: shape=(60000,784)
        :param trainLabelArr: shape=(60000,1)
        :param feature_value_set: candidate feature set, as (feature, split value) pairs
        :param sample_weight: sample weights (used by AdaBoost)
:return:
"""
N = len(trainLabelArr) # 数据集的总行数
if sample_weight is None:
sample_weight = np.ones(N, dtype=int)
mini_gini_A = float('inf') # 最小 条件基尼指数
mini_gini_A_feature = 0 # 取得 最小条件基尼指数的 特征
mini_gini_A_feature_split = None # 取得 最小条件基尼指数的特征 的切分点
for i, v in feature_value_set: # 遍历 (特征, 特征的切分值)
current = self.calc_gini_A(trainDataArr[:, i], trainLabelArr, v, sample_weight) # 选择第i个特征作为划分特征 的条件基尼指数
# print('gini(D,A:{},v:{})={}'.format(i,v,current))
if current < mini_gini_A:
mini_gini_A = current
mini_gini_A_feature = i
mini_gini_A_feature_split = v
best_feature_value = (mini_gini_A_feature, mini_gini_A_feature_split)
return best_feature_value, mini_gini_A
# Tree node
class Node:
    def __init__(self, label=None, sample_N=None, gini=None, feature=None, feature_value=None, prev_feature=None,
                 prev_feature_value=None, value=None, childs=None):
        self.label = label  # only leaf nodes carry a label
        self.sample_N = sample_N  # number of samples at this node
        self.gini = gini
        self.feature = feature  # for internal nodes: the feature used to split into child nodes
        self.feature_value = feature_value
        self.prev_feature = prev_feature
        self.prev_feature_value = prev_feature_value
        self.value = value
        self.childs = childs
# TODO:
#  1. The sample_weight computation was wrong, which hurt the model's prediction accuracy (fixed).
#  2. For numeric features, split the value range into intervals when generating child nodes.
class CartTree_Category(CartTree_Category_Lib):
"""
    CART classification tree (for categorical training data).
    1. Supports sample weights.
    2. Assumes every feature in the samples is categorical.
    3. Pruning is not implemented.
    test1: multi-class classification
        dataset: Mnist
        params: max_depth=50
        training samples: 60000
        test samples: 10000
        accuracy: 0.86
        training time: 319s
Author: xrh
Date: 2021-03-26
"""
    def __init__(self, root=None, threshold=0.1, max_depth=1):
        self.root = root
        self.threshold = threshold  # Gini impurity threshold below which a node becomes a leaf
        self.max_depth = max_depth  # maximum depth of the tree
    def __pure_dataset(self, trainLabelArr):
        """
        Return True if dataset D is pure (contains a single class).
        """
        dict_labels = Counter(trainLabelArr.flatten())
        return len(dict_labels) == 1
    def __class_value(self, trainLabelArr, sample_weight):
        """
        Return the total sample weight of each class in dataset D.
        :param trainLabelArr:
        :param sample_weight:
        :return:
        """
        dict_labels = defaultdict(int)
        for i in range(len(trainLabelArr)):
            k = trainLabelArr[i]
            dict_labels[k] += sample_weight[i]  # accumulate the weight of each class
        return dict_labels
    def major_class(self, trainLabelArr, sample_weight):
        """
        Return the majority class of dataset D.
        sample_weight is essentially the distribution of sample counts, so the weights must be considered when deciding which class is the most frequent.
        """
# dict_labels = Counter(trainLabelArr.flatten())
# max_num_label=dict_labels.most_common(1)[0][0]
dict_labels=defaultdict(int)
for i in range(len(trainLabelArr)):
k=trainLabelArr[i]
dict_labels[k]+=sample_weight[i] # 某个类别的样本的权重和
max_num_label= max( dict_labels.items() , key=lambda ele: ele[1] )[0] # 找 权重和 最大的 类别
return max_num_label
def __build_tree(self, trainDataArr, trainLabelArr, feature_value_set, tree_depth,sample_weight,
prev_feature=None, prev_feature_value=None,father_label=None):
"""
        Recursively build the tree.
:param trainDataArr:
:param trainLabelArr:
:param feature_value_set:
:param tree_depth:
:param sample_weight:
:param prev_feature:
:param prev_feature_value:
:return:
"""
T = Node()
T.prev_feature = prev_feature # 标记 父节点的 划分特征
T.prev_feature_value = prev_feature_value # 标记 通过父节点的 哪一个 分支走到 当前节点
if len(trainLabelArr) == 0: # 数据集已经为空, 形成叶子节点
T.label = father_label # 说明不能再往下划分了, 使用 上一个节点( 父亲节点 ) 给它的标签值
else:
gini= self.calc_gini(trainLabelArr,sample_weight)
T.gini=gini
T.sample_N= np.shape(trainLabelArr)[0]
dict_labels= self.__class_value(trainLabelArr, sample_weight).items() # 数据集 D中 所有类型的 样本的 权重和 { 类型: 权重 }
T.value=dict_labels # 所有类型的 样本的 权重和
max_num_label = max(dict_labels, key=lambda ele: ele[1])[0] # 权重和 最大的类别
if self.__pure_dataset(trainLabelArr) == True: # 数据集 已经纯净, 无需往下划分, 形成叶子节点
T.label = trainLabelArr[0]
elif len(feature_value_set) == 0 or tree_depth >= self.max_depth or gini < self.threshold : # 所有 (特征, 特征值) 的组合 已经用完,
# 或者 树的深度达到最大深度 ,
# 选取 数据集 中最多的样本标签值作为 叶子节点的标签
T.label = max_num_label
else:
best_feature_value,mini_gini_A = self.select_min_gini(trainDataArr, trainLabelArr, feature_value_set, sample_weight)
# if mini_gini_A < self.threshold:
# T.label = self.major_class(trainLabelArr)
Ag, Ag_split=best_feature_value
T.feature = Ag # 选择的 最佳特征
T.feature_value= Ag_split # 最佳特征 的切分点
T.childs = dict()
trainDataArr_DevFeature= trainDataArr[:,Ag]
# CART 树为二叉树
# 左节点为 满足 切分特征值的 分支
T.childs[0] = self.__build_tree(trainDataArr[trainDataArr_DevFeature == Ag_split],
trainLabelArr[trainDataArr_DevFeature == Ag_split], feature_value_set - set([best_feature_value]),
tree_depth+1,
sample_weight[trainDataArr_DevFeature == Ag_split],
prev_feature=Ag,
prev_feature_value= str(Ag_split)+'-Yes',
father_label=max_num_label )
# 右节点为 不满足 切分特征值的 分支
T.childs[1] = self.__build_tree(trainDataArr[trainDataArr_DevFeature != Ag_split],
trainLabelArr[trainDataArr_DevFeature != Ag_split], feature_value_set - set([best_feature_value]),
tree_depth + 1,
sample_weight[trainDataArr_DevFeature != Ag_split],
prev_feature=Ag,
prev_feature_value= str(Ag_split)+'-No',
father_label=max_num_label )
print('T.feature:{},T.feature_value:{}, T.gini:{} , T.sample_N:{} '.format(T.feature,T.feature_value,T.gini,T.sample_N))
print('T.prev_feature:{},T.prev_feature_value:{} '.format(T.prev_feature, T.prev_feature_value))
print('T.childs:{}'.format(T.childs))
print('Tree depth:{}'.format(tree_depth))
print('T.label:{}'.format(T.label))
print('-----------')
return T
@staticmethod
def get_feature_value_set(trainDataArr):
"""
        Because a CART tree is binary,
        a feature with 3 distinct values needs 3 split points.
        e.g. the feature 'age' with values ['youth', 'middle-aged', 'elderly'] gives the split points:
            1. is it 'youth'?
            2. is it 'middle-aged'?
            3. is it 'elderly'?
        A feature with only 2 distinct values needs a single split point (either value covers both branches).
        Returns all (feature, split value) combinations.
:param trainDataArr:
:return:
"""
feature_value_set = set() # 可供选择的特征集合 , 包括 (特征, 切分值)
for i in range(np.shape(trainDataArr)[1]): # 遍历所有的特征
trainDataArr_DevFeature = trainDataArr[:, i] # 特征 i 单独抽出来
A_set = {A_i for A_i in trainDataArr_DevFeature} # trainDataArr_DevFeature 中的 所有取值
if len(A_set) <= 2: # 特征 i 的特征值的个数 小于2个
feature_value_set.add((i, list(A_set)[0])) #
else: # 特征 i 的特征值的个数 >=3 个
for A_i in A_set:
feature_value_set.add((i, A_i)) #
return feature_value_set
    def fit(self, trainDataArr, trainLabelArr, feature_value_set=None, sample_weight=None):
        N = len(trainLabelArr)  # total number of rows in the dataset
        if sample_weight is None:
            sample_weight = np.ones(N, dtype=int)
        if feature_value_set is None:
            feature_value_set = self.get_feature_value_set(trainDataArr)  # candidate (feature, split value) pairs
        # print('feature_value_set completed')
        self.root = self.__build_tree(trainDataArr, trainLabelArr, feature_value_set, tree_depth=0, sample_weight=sample_weight)
    def __predict(self, row):
        """
        Predict the label of a single sample.
        :param row:
        :return:
        """
        p = self.root
        while p.label is None:  # stop when a leaf node is reached
            judge_feature = p.feature  # feature used to split at the current node
            if row[judge_feature] == p.feature_value:  # the sample's value matches the split value: go to the left child
                p = p.childs[0]
            else:  # otherwise go to the right child
                p = p.childs[1]
        return p.label
    def predict(self, testDataArr):
        """
        Predict the test dataset and return the predictions.
        :param testDataArr:
        :return:
        """
        res_list = []
        for row in testDataArr:
            res_list.append(self.__predict(row))
        return res_list
    def score(self, testDataArr, testLabelArr):
        """
        Predict the test dataset and return the accuracy.
        :param testDataArr:
        :param testLabelArr:
        :return:
        """
        res_list = self.predict(testDataArr)
        err_arr = np.ones(len(res_list), dtype=int)
        res_arr = np.array(res_list)
        err_arr[res_arr == testLabelArr] = 0
        err_rate = np.mean(err_arr)
        accuracy = 1 - err_rate
        return accuracy
class CartTree(CartTree_Lib):
"""
    CART classification tree (for numeric training data).
    1. Supports sample weights.
    2. Assumes every feature in the samples is numeric.
    3. Pruning is not implemented.
    test1: multi-class classification
        dataset: Mnist
        training samples: 60000
        test samples: 10000
        params: max_depth=50
        accuracy: 0.86
        training time: 565s
Author: xrh
Date: 2021-03-26
"""
def __init__(self, root=None, threshold=0.1, max_depth=1):
self.root = root
        self.threshold = threshold  # Gini impurity threshold below which a node becomes a leaf
        self.max_depth = max_depth  # maximum depth of the tree
def __pure_dataset(self, trainLabelArr):
"""
        Return True if dataset D is pure (contains a single class).
"""
dict_labels = Counter(trainLabelArr.flatten())
return len(dict_labels) == 1
def __class_value(self, trainLabelArr, sample_weight):
"""
        Return the total sample weight of each class in dataset D.
        sample_weight is essentially the distribution of sample counts, so the weights must be considered when deciding which class is the most frequent.
:param trainLabelArr:
:param sample_weight:
:return:
"""
dict_labels = defaultdict(int)
for i in range(len(trainLabelArr)):
k = trainLabelArr[i]
dict_labels[k] += sample_weight[i] # 某个类别的样本的权重和
return dict_labels
def __major_class(self, trainLabelArr, sample_weight):
"""
        Return the majority class of dataset D.
        sample_weight is essentially the distribution of sample counts, so the weights must be considered when deciding which class is the most frequent.
"""
# dict_labels = Counter(trainLabelArr.flatten())
# max_num_label=dict_labels.most_common(1)[0][0]
dict_labels = defaultdict(int)
for i in range( len(trainLabelArr) ):
k = trainLabelArr[i]
dict_labels[k] += sample_weight[i] # 某个类别的样本的权重和
max_num_label = max(dict_labels.items(), key=lambda ele: ele[1])[0] # 找 权重和 最大的 类别
return max_num_label
def __build_tree( self, trainDataArr, trainLabelArr, feature_value_set, tree_depth, sample_weight, prev_feature=None,
prev_feature_value=None,father_label=None ):
"""
        Recursively build the tree.
        Recursion stops when:
        (1) All samples at the current node belong to the same class: no further split is needed.
        (2) The candidate feature set is empty, or all samples take the same values on every feature, so no
            split is possible: mark the node as a leaf labelled with the majority class of its samples.
            (The feature set can run out: e.g. with six features, after all six have been used the remaining
            samples may still contain more than one class, or all feature values are identical while the
            labels differ.)
        (3) The sample set at the current node is empty: label the node with the majority class of its parent.
            This happens because, as the data keeps being split feature by feature, a branch value may have no
            samples left by the time it is reached.
:param trainDataArr:
:param trainLabelArr:
:param feature_value_set:
:param tree_depth:
:param sample_weight:
:param prev_feature:
:param prev_feature_value:
:return:
"""
T = Node()
T.prev_feature = prev_feature # 标记 父节点的 划分特征
T.prev_feature_value = prev_feature_value # 标记 通过父节点的 哪一个 分支走到 当前节点
if len(trainLabelArr) == 0: # 数据集已经为空, 形成叶子节点
T.label = father_label # 说明不能再往下划分了, 使用 上一个节点( 父亲节点 ) 给它的标签值
else: #
gini = self.calc_gini(trainLabelArr, sample_weight)
T.gini = gini
T.sample_N = np.shape(trainLabelArr)[0]
dict_labels= self.__class_value(trainLabelArr, sample_weight).items() # 数据集 D中 所有类型的 样本的 权重和 { 类型: 权重 }
T.value=dict_labels # 所有类型的 样本的 权重和
max_num_label = max(dict_labels, key=lambda ele: ele[1])[0] # 权重和 最大的类别
if self.__pure_dataset(trainLabelArr) == True: # 数据集 已经纯净, 无需往下划分, 形成叶子节点
T.label = trainLabelArr[0]
elif len(feature_value_set) == 0 or tree_depth >= self.max_depth or gini < self.threshold: # 所有 切分(特征, 特征值) 的组合 已经用完,
# 或者 树的深度达到最大深度 ,
# 选取 数据集 中最多的样本标签值作为 叶子节点的标签
T.label = max_num_label
else:
best_feature_value, mini_gini_A = self.select_min_gini(trainDataArr, trainLabelArr, feature_value_set,
sample_weight)
Ag, Ag_split = best_feature_value
T.feature = Ag # 选择的 最佳特征
T.feature_value = Ag_split # 最佳特征 的切分点
T.childs = dict()
trainDataArr_DevFeature = trainDataArr[:, Ag]
# CART 树为二叉树
# 左节点为 <= 特征值的 分支
T.childs[0] = self.__build_tree(trainDataArr[trainDataArr_DevFeature <= Ag_split],
trainLabelArr[trainDataArr_DevFeature <= Ag_split],
feature_value_set - { (best_feature_value) },
tree_depth + 1,
sample_weight[trainDataArr_DevFeature <= Ag_split],
prev_feature=Ag,
prev_feature_value=' <= ' + str(Ag_split),father_label=max_num_label )
# 右节点为 > 切分特征值的 分支
T.childs[1] = self.__build_tree(trainDataArr[trainDataArr_DevFeature > Ag_split],
trainLabelArr[trainDataArr_DevFeature > Ag_split],
feature_value_set - { (best_feature_value) },
tree_depth + 1,
sample_weight[trainDataArr_DevFeature > Ag_split],
prev_feature=Ag,
prev_feature_value=' > ' + str(Ag_split) ,father_label=max_num_label)
print('T.feature:{},T.feature_value:{}, T.gini:{} , T.sample_N:{} , T.value:{} '.format(T.feature, T.feature_value, T.gini,
T.sample_N,T.value))
print('T.prev_feature:{},T.prev_feature_value:{} '.format(T.prev_feature, T.prev_feature_value))
print('T.childs:{}'.format(T.childs))
print('Tree depth:{}'.format(tree_depth))
print('T.label:{}'.format(T.label))
print('-----------')
return T
@staticmethod
def get_feature_value_set(trainDataArr):
"""
        A CART tree is binary.
        For discrete features:
            If the values are ordered (numeric), e.g. a movie rating taking values [1, 2, 3, 4, 5], thresholds such as 0.5, 1.5, 2.5, 3.5, 4.5, 5.5 can be used (6 split points here).
            *If the values are unordered (categorical), e.g. an occupation taking values [0, 1, 2], the split points are "== 0", "== 1", "== 2". Note that sklearn has no dedicated handling for this case and simply applies the ordered-value strategy; i.e. CART can handle categorical data, but sklearn does not implement it.
        For continuous features:
            There are many values, e.g. [0.13, 0.123, 0.18, 0.23, ...], and a threshold is placed between every two adjacent values.
        In summary, excluding the * case, the feature values themselves are simply used as split thresholds here.
        e.g. the feature 'movie rating' with values [1, 2, 3] gives the split points:
            1. rating <= 1 | rating > 1
            2. rating <= 2 | rating > 2
            3. rating <= 3 | rating > 3
        Returns all (feature, split value) combinations.
:param trainDataArr:
:return:
"""
feature_value_set = set() # 可供选择的特征集合 , 包括 (特征, 切分值)
for i in range(np.shape(trainDataArr)[1]): # 遍历所有的特征
trainDataArr_DevFeature = trainDataArr[:, i] # 特征 i 单独抽出来
A_set = {A_i for A_i in trainDataArr_DevFeature} # trainDataArr_DevFeature 中的 所有取值
for A_i in A_set:
feature_value_set.add((i, A_i)) #
return feature_value_set
def fit(self, trainDataArr, trainLabelArr, feature_value_set=None, sample_weight=None):
        N = len(trainLabelArr)  # total number of rows in the dataset
if sample_weight is None:
sample_weight = np.ones(N, dtype=int)
if feature_value_set is None:
            feature_value_set = self.get_feature_value_set(trainDataArr)  # candidate (feature, split value) pairs
# print('feature_value_set completed')
self.root = self.__build_tree(trainDataArr, trainLabelArr, feature_value_set, tree_depth=0,
sample_weight=sample_weight)
def __predict(self, row):
"""
        Predict the label of a single sample.
:param row:
:return:
"""
p = self.root
while p.label == None: # 到达 叶子节点 退出循环
judge_feature = p.feature # 当前节点划分的 特征
if row[judge_feature] <= p.feature_value: # 样本 特征的特质值 <= 切分点, 走左节点
p = p.childs[0]
else: # 走右节点
p = p.childs[1]
return p.label
def predict(self, testDataArr):
"""
        Predict the test dataset and return the predictions.
        :param testDataArr:
:return:
"""
res_list = []
for row in testDataArr:
res_list.append(self.__predict(row))
return res_list
def score(self, testDataArr, testLabelArr):
"""
        Predict the test dataset and return the accuracy.
        :param testDataArr:
:return:
"""
res_list = self.predict(testDataArr)
err_arr = np.ones(len(res_list), dtype=int)
res_arr = np.array(res_list)
err_arr[res_arr == testLabelArr] = 0
err_rate = np.mean(err_arr)
accuracy = 1 - err_rate
return accuracy
class PreProcess:
    """
    A collection of dataset preprocessing utilities.
    Author: xrh
    Date: 2021-04-01
    """
    @staticmethod
    def ordinal_encoding(dataset):
        """
        Ordinal encoding:
        convert the categorical features in the dataset into numeric features.
        :param dataset:
        :return:
            dataset - the converted dataset
            dic_features_category_value - per-feature mapping from each categorical value to its numeric code
        """
        N, feature_Num = np.shape(dataset)
        dic_features_category_value = []  # per-feature mapping from category to numeric code
        for i in range(feature_Num):
            feature_i_category_set = {ele for ele in dataset[:, i]}  # categories of the i-th feature
            dic_feature_i_category_value = {}
            for category, idx in zip(feature_i_category_set, range(len(feature_i_category_set))):
                dic_feature_i_category_value[category] = idx
            dic_features_category_value.append(dic_feature_i_category_value)  # store the mapping of the i-th feature
            dataset[:, i] = list(map(lambda x: dic_feature_i_category_value[x], dataset[:, i]))
        return dataset, dic_features_category_value
class Test:
def __create_tarin_data(self):
"""
        Dataset from Table 5.1 of 'Statistical Learning Methods' (Li Hang).
:return:
"""
datasets = [['青年', '否', '否', '一般', '否'],
['青年', '否', '否', '好', '否'],
['青年', '是', '否', '好', '是'],
['青年', '是', '是', '一般', '是'],
['青年', '否', '否', '一般', '否'],
['中年', '否', '否', '一般', '否'],
['中年', '否', '否', '好', '否'],
['中年', '是', '是', '好', '是'],
['中年', '否', '是', '非常好', '是'],
['中年', '否', '是', '非常好', '是'],
['老年', '否', '是', '非常好', '是'],
['老年', '否', '是', '好', '是'],
['老年', '是', '否', '好', '是'],
['老年', '是', '否', '非常好', '是'],
['老年', '否', '否', '一般', '否'],
]
labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']
        # return the dataset and the name of each column
return datasets, labels
def __create_test_data(self):
datasets = [['青年', '否', '是', '一般', '是'],
['老年', '否', '否', '好', '否']
]
labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']
        # return the dataset and the name of each column
return datasets, labels
def test_CartTree_Category_Lib(self):
"""
        Test for CartTree_Category_Lib.
:return:
"""
datasets, label_name = self.__create_tarin_data()
datasetsArr = np.array(datasets)
trainDataArr= datasetsArr[:, 0:-1]
trainLabelArr= datasetsArr[:, -1]
Lib = CartTree_Category_Lib()
feature_value_set=set() # 可供选择的特征集合 , 包括 (特征, 切分值)
for i in range(np.shape(trainDataArr)[1]): # 遍历所有的特征
trainDataArr_DevFeature= trainDataArr[:,i] # 特征 i 单独抽出来
A_set = {A_i for A_i in trainDataArr_DevFeature} # trainDataArr_DevFeature 中的 所有取值
if len(A_set)<=2: # 特征 i 的特征值的个数 小于2个
feature_value_set.add( (i ,list(A_set)[0]) ) #
else: # 特征 i 的特征值的个数 >=3 个
for A_i in A_set:
feature_value_set.add((i, A_i)) #
best_feature_value, mini_gini_A= Lib.select_min_gini(trainDataArr,trainLabelArr,feature_value_set)
print('best feature: {}, split value:{}, gini:{} '.format(best_feature_value[0],best_feature_value[1], mini_gini_A))
def test_CartTree_Lib(self):
"""
        Test for CartTree_Lib.
:return:
"""
datasets, label_name = self.__create_tarin_data()
# 将 类别型训练数据 转换为数值型训练数据
datasetsArr, features_idx_category_dic = PreProcess.ordinal_encoding(np.array(datasets))
print('features_idx_category_dic: ',features_idx_category_dic)
trainDataArr = datasetsArr[:, 0:-1]
trainLabelArr = datasetsArr[:, -1]
Lib = CartTree_Lib()
feature_value_set = set() # 可供选择的特征集合 , 包括 (特征, 切分值)
for i in range(np.shape(trainDataArr)[1]): # 遍历所有的特征
trainDataArr_DevFeature = trainDataArr[:, i] # 特征 i 单独抽出来
A_set = {A_i for A_i in trainDataArr_DevFeature} # trainDataArr_DevFeature 中的 所有取值
for A_i in A_set:
feature_value_set.add((i, A_i)) #
# print(feature_value_set)
best_feature_value, mini_gini_A = Lib.select_min_gini(trainDataArr, trainLabelArr, feature_value_set)
print('best feature: {}, split value:{}, gini:{} '.format(best_feature_value[0], best_feature_value[1],
mini_gini_A))
def test_small_category_dataset(self):
"""
        Test the CART decision tree (categorical version) on the dataset from Table 5.1 of 'Statistical Learning Methods'.
        All features in this dataset are categorical.
:return:
"""
# 获取训练集
datasets, labels = self.__create_tarin_data()
datasetsArr= np.array(datasets)
trainDataArr= datasetsArr[:, 0:-1]
trainLabelArr= datasetsArr[:, -1]
# 开始时间
start = time.time()
# 创建决策树
print('start create tree')
CT = CartTree_Category(threshold=0.1 , max_depth=10)
CT.fit(trainDataArr, trainLabelArr)
print(' tree complete ')
# 结束时间
end = time.time()
print('time span:', end - start)
# 测试数据集
datasets_test, _ = self.__create_test_data()
datasetsArr_test = np.array(datasets_test)
testDataArr= datasetsArr_test[:, 0:-1]
testLabelArr= datasetsArr_test[:, -1]
print('res:', CT.score(testDataArr, testLabelArr))
def test_small_value_dataset(self):
"""
        Test the CART decision tree (numeric version) on the dataset from Table 5.1 of 'Statistical Learning Methods'.
        The features are categorical and are first converted to numeric features.
:return:
"""
# 获取训练集
datasets, labels = self.__create_tarin_data()
# 将 类别型训练数据 转换为数值型训练数据
datasetsArr,dic_features_category_value = PreProcess.ordinal_encoding(np.array(datasets))
print('dic_features_category_value:',dic_features_category_value)
trainDataArr = datasetsArr[:, 0:-1]
trainLabelArr = datasetsArr[:, -1]
# 开始时间
start = time.time()
# 创建决策树
print('start create tree')
CT = CartTree(threshold=0.1, max_depth=10)
CT.fit(trainDataArr, trainLabelArr)
print(' tree complete ')
# 结束时间
end = time.time()
print('time span:', end - start)
# 测试数据集
datasetsArr_test= datasetsArr # 用 训练数据集 作为 测试数据集
testDataArr = datasetsArr_test[:, 0:-1]
testLabelArr = datasetsArr_test[:, -1]
print('test dataset accuracy :', CT.score(testDataArr, testLabelArr))
# sklearn 的决策树
DT = DecisionTreeClassifier( max_depth=10, criterion="gini", splitter='best' )
DT.fit(trainDataArr, trainLabelArr)
for name, val in zip(labels, DT.feature_importances_): # 打印 所有特征的重要程度
print("{} -> {}".format(name, val))
for name, val in zip(DT.tree_.feature, DT.tree_.threshold): # 依次打印 切分的特征 和 切分点
print("{} -> {}".format(name, val) )
plt.figure(figsize=(18, 10))
plot_tree(DT)
plt.show()
    def loadData(self, fileName, n=1000):
        '''
        Load a Mnist csv file.
        :param fileName: path of the file to load
        :param n: number of rows to return
        :return: data array and label array
        '''
        # data and labels
        dataArr = []
        labelArr = []
        # read the file
        fr = open(fileName)
        cnt = 0  # row counter
        # iterate over every line of the file
        for line in fr.readlines():
            if cnt == n:
                break
            # split the current line on ',' into a list of fields
            # strip: remove leading/trailing whitespace (spaces or newlines)
            # split: cut the string into fields on the given separator and return a list
            curLine = line.strip().split(',')
            # everything except the label (curLine[0]) goes into the data set, converted from
            # string to int and binarized: values greater than 128 become 1, the rest become 0
            dataArr.append([int(int(num) > 128) for num in curLine[1:]])
            # store the label, converted to int
            labelArr.append(int(curLine[0]))
            cnt += 1
        fr.close()
        # return data and labels
        return dataArr, labelArr
def test_Mnist_dataset(self ,n_train,n_test):
"""
        Test the CART decision tree on the Mnist dataset.
        :param n_train: number of training samples to use
        :param n_test: number of test samples to use
:return:
"""
# 获取训练集
trainDataList, trainLabelList = self.loadData('../Mnist/mnist_train.csv',n=n_train)
print('train data, row num:{} , column num:{} '.format(len(trainDataList),len(trainDataList[0])))
trainDataArr = np.array(trainDataList)
trainLabelArr = np.array(trainLabelList)
# 开始时间
print('start training model....')
start = time.time()
# CT = CartTree_Category(threshold=0.1, max_depth=50)
CT = CartTree(threshold=0.1 , max_depth=15)
CT.fit(trainDataArr, trainLabelArr)
# 结束时间
end = time.time()
print('training cost time :', end - start)
# 获取测试集
testDataList, testLabelList = self.loadData('../Mnist/mnist_test.csv',n=n_test)
print('test data, row num:{} , column num:{} '.format(len(testDataList), len(testDataList[0])))
testDataArr = np.array(testDataList)
testLabelArr = np.array(testLabelList)
print('test accuracy :', CT.score(testDataArr,testLabelArr))
# sklearn
DT=DecisionTreeClassifier(max_depth=15,criterion="gini", splitter='best')
DT.fit(trainDataArr, trainLabelArr)
for name, val in zip(DT.tree_.feature, DT.tree_.threshold):
print("{} -> {}".format(name, val))
plt.figure(figsize=(20, 12))
plot_tree(DT)
plt.show()
    def test_iris_dataset(self):
        # the iris dataset has three classes; y takes the values 0, 1, 2
        X, y = datasets.load_iris(return_X_y=True)  # 150 rows
        # split the data: 80% for training, 20% for testing
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1024)
        CT = CartTree(max_depth=3)
        CT.fit(X_train, y_train)  # fit on the training split only
        print('by xrh , test accuracy :', CT.score(X_test, y_test))
        DT = DecisionTreeClassifier(max_depth=3)
        DT.fit(X_train, y_train)
        print('by sklearn ,test accuracy :', DT.score(X_test, y_test))
for name, val in zip(DT.tree_.feature, DT.tree_.threshold):
print("{} -> {}".format(name, val))
plt.figure(figsize=(20, 12))
plot_tree(DT)
plt.show()
if __name__ == '__main__':
test=Test()
# test.test_CartTree_Lib()
# test.test_small_category_dataset()
# test.test_small_value_dataset()
test.test_Mnist_dataset(60000,10000)
# test.test_iris_dataset()
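    # A minimal sketch (not part of the original tests): the Gini helper on a toy label array.
    # For labels [0, 0, 1, 1, 1]: gini = 1 - (0.4**2 + 0.6**2) = 0.48.
    toy_labels = np.array([0, 0, 1, 1, 1])
    print('toy gini:', CartTree_Lib().calc_gini(toy_labels))
    # Ordinal encoding of a toy categorical dataset (the values are placeholders).
    toy_data = np.array([['a', 'x'], ['b', 'x'], ['a', 'y']])
    print('encoded:', PreProcess.ordinal_encoding(toy_data))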
|
from gamelib import event
from gamelib.util import resources, sound
msges = {}
B_RIGHT = 15 #to 3d
B_LEFT = 29 #to 3a
B_TOP = 6 #to 3e
RED_FOREIGN_KEY = 200
YELLOW_FOREIGN_KEY = 25
GENERATOR = 26
GENERATOR_IN = 7
GENERATOR_OUT = [8, 9]
BOMB = 47
FROM_E_LOC = 43
TANK_L = 250
TANK_M = 23
TANK_R = 24
TURRETS = [18, 19, 21, 22]
in_area = False
def enter_area():
global in_area
in_area = True
def exit_area():
global in_area
if not event.get_flag('bomb_msg'):
event.set_flag('bomb_msg', True)
event.show_message("Objective: Find Bomb, Blow Generator")
if not in_area and event.get_flag('3c_placed_bomb') and not event.get_flag('3_gen_destroyed'):
event.set_flag('3_gen_destroyed', True, True)
event.get_object(GENERATOR).image = resources.Generator_wrecked
event.point_at(0)
event.show_message('Door Opened')
try:
event.get_object(BOMB).detonate()
except:
pass
for obj_id in TURRETS:
try:
event.get_object(obj_id).active = False
except:
pass #don't care
in_area = False
def drop_bomb():
if in_area:
event.set_flag('3c_placed_bomb', True)
else:
event.show_message('Bomb Not Close Enough')
def go_top():
if not event.player_has_unit(YELLOW_FOREIGN_KEY):
if not event.get_flag('key_msg'):
event.set_flag('key_msg', True)
event.show_message("Key required for elevator")
return False
sound.play(resources.elevator)
event.move_player(-50, 0)
event.go_to_level('level_3e', True, True)
def go_right():
event.move_player(0, -50)
event.go_to_level('level_3d', True, True, True)
def go_left():
if not event.player_has_unit(RED_FOREIGN_KEY): return True
event.move_player(50, 0)
event.go_to_level('level_3a', True, True, True)
def init():
global msges
msges = event.file_to_dict('Messages_3.txt')
event.register_collision_func(B_TOP, go_top)
event.register_collision_func(B_RIGHT, go_right)
event.register_collision_func(B_LEFT, go_left)
event.register_collision_func(GENERATOR_IN, enter_area)
event.register_collision_func(GENERATOR_OUT[0], exit_area)
event.register_collision_func(GENERATOR_OUT[1], exit_area)
event.register_release_func(BOMB, drop_bomb)
if event.get_flag('from_level') == '3a':
event.set_player_rotation(0)
event.set_flag('from_level', '3c', True)
event.set_flag('3_gen_destroyed', False, True)
event.get_object(TANK_L).active = False
event.get_object(TANK_M).active = False
event.get_object(TANK_R).active = False
for obj_id in TURRETS:
event.get_object(obj_id).active = False
event.show_ai_message(msges['gw0rp_enter3c'], head='gw0rp')
def on_load():
global msges
msges = event.file_to_dict('Messages_3.txt')
if event.get_flag('from_level') == '3a':
event.set_player_rotation(0)
elif event.get_flag('from_level') == '3e':
#event.move_player_to_object(FROM_E_LOC)
#event.set_player_angle(1.57)
for obj_id in [TANK_L, TANK_M, TANK_R]:
try:
event.get_object(obj_id).active = True
except:
pass #don't care
for obj_id in TURRETS:
try:
event.get_object(obj_id).active = True
except:
pass #don't care
event.stop_player()
if event.player_has_unit(BOMB) and not event.get_flag('bomb_msg_1'):
event.show_message("Plant the bomb near the generator.")
event.point_at(GENERATOR)
event.set_flag('bomb_msg_1', True)
event.set_flag('from_level', '3c', True)
|
import numpy as np
import cv2.cv2 as cv2
from digital_image_processing.tools.logger_base import log as log_message
def hough_transform(img_to_hough: np.array, img_original: np.array) -> np.array:
"""Runs the Hough Transform algorithm
Reference:
Coste, Arthur. (2012). Image Processing : Hough Transform. 10.13140/RG.2.2.30430.48969.
:param img_to_hough: The input image. Must be a gray scale image
:type img_to_hough: ndarray
:param img_original: The input original image.
:type img_original: ndarray
:return: The estimated local for each pixel
:rtype: ndarray
"""
log_message.info('Finds the edges using Hough Transform.')
edges = cv2.Canny(img_to_hough, 50, 150, apertureSize=3)
    minLineLength = 100
    maxLineGap = 10
    # minLineLength and maxLineGap must be passed as keyword arguments; passed positionally they
    # would be interpreted as the optional `lines` output argument of HoughLinesP
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    # HoughLinesP returns an array of shape (N, 1, 4); draw every detected segment
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(img_original, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return img_original
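# A minimal usage sketch (not part of the original module); the image paths below are placeholders.
if __name__ == '__main__':
    original = cv2.imread('input.jpg')
    gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
    result = hough_transform(gray, original)
    cv2.imwrite('hough_lines.jpg', result)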
|
import numpy as np
#np.random.seed(777)
import chainer
from chainer import cuda
from chainer import serializers
import chainer.functions as F
import argparse
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import PIL
from PIL import ImageDraw
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--original', action='store_true',
default=True, help='train on original MNIST')
group.add_argument('--translated', action='store_true',
default=False, help='train on translated MNIST')
group.add_argument('--cluttered', action='store_true',
default=False, help='train on translated & cluttered MNIST')
parser.add_argument('--lstm', type=bool, default=False,
help='use LSTM units in core layer')
parser.add_argument('-m', '--model', type=str,
default='models/ram_original_epoch800.chainermodel',
help='load model weights from given file')
parser.add_argument('-g', '--gpuid', type=int, default=-1,
help='GPU device ID (default is CPU)')
args = parser.parse_args()
train, test = chainer.datasets.get_mnist()
train_data, train_targets = np.array(train).transpose()
test_data, test_targets = np.array(test).transpose()
train_data = np.array(list(train_data)).reshape(train_data.shape[0],1,28,28)
train_data.flags.writeable = False
test_data = np.array(list(test_data)).reshape(test_data.shape[0],1,28,28)
train_targets = np.array(train_targets).astype(np.int32)
test_targets = np.array(test_targets).astype(np.int32)
# hyper-params for each task
if args.original:
filename = 'ram_original'
# RAM params for original MNIST
g_size = 8
n_steps = 6
n_scales = 1
def process(batch):
return batch
if args.translated:
filename = 'ram_translated'
g_size = 12
n_steps = 6
n_scales = 3
# create translated MNIST
def translate(batch):
n, c, w_i = batch.shape[:3]
w_o = 60
data = np.zeros(shape=(n,c,w_o,w_o), dtype=np.float32)
for k in range(n):
i, j = np.random.randint(0, w_o-w_i, size=2)
data[k, :, i:i+w_i, j:j+w_i] += batch[k]
return data
process = translate
if args.cluttered:
filename = 'ram_cluttered'
g_size = 12
n_steps = 6
n_scales = 3
# create cluttered MNIST
def clutter(batch):
n, c, w_i = batch.shape[:3]
w_o = 60
data = np.zeros(shape=(n,c,w_o,w_o), dtype=np.float32)
for k in range(n):
i, j = np.random.randint(0, w_o-w_i, size=2)
data[k, :, i:i+w_i, j:j+w_i] += batch[k]
for _ in range(4):
clt = train_data[np.random.randint(0, train_data.shape[0]-1)]
c1, c2 = np.random.randint(0, w_i-8, size=2)
i1, i2 = np.random.randint(0, w_o-8, size=2)
data[k, :, i1:i1+8, i2:i2+8] += clt[:, c1:c1+8, c2:c2+8]
data = np.clip(data, 0., 1.)
return data
process = clutter
# init RAM model
from ram import RAM
model = RAM(
g_size=g_size, n_steps=n_steps, n_scales=n_scales, use_lstm=args.lstm)
print('load model from {}'.format(args.model))
serializers.load_hdf5(args.model, model)
gpuid = args.gpuid
if gpuid >= 0:
cuda.get_device(gpuid).use()
model.to_gpu()
# inference
test_data = process(test_data)
test_data.flags.writeable = False
index = np.random.randint(0, 9999)
image = PIL.Image.fromarray(test_data[index][0]*255).convert('RGB')
x = test_data[index][np.newaxis,:,:,:]
init_l = np.random.uniform(low=-1, high=1, size=2)
y, ys, ls = model.infer(x, init_l)
locs = ((ls+1) / 2) * (np.array(test_data.shape[2:4])+1)
# plot results
from crop import crop
plt.subplots_adjust(wspace=0.35, hspace=0.05)
for t in range(0, n_steps):
# digit with glimpse
plt.subplot(3+n_scales, n_steps, t+1)
# green if correct otherwise red
if np.argmax(ys[t]) == test_targets[index]:
color = (0, 255, 0)
else:
color = (255, 0, 0)
canvas = image.copy()
draw = ImageDraw.Draw(canvas)
xy = np.array([locs[t,1],locs[t,0],locs[t,1],locs[t,0]])
wh = np.array([-g_size//2, -g_size//2, g_size//2, g_size//2])
xys = [xy + np.power(2,s)*wh for s in range(n_scales)]
for xy in xys:
draw.rectangle(xy=list(xy), outline=color)
del draw
plt.imshow(canvas)
plt.axis('off')
# glimpse at each scale
gs = crop(x, center=ls[t:t+1], size=g_size)
plt.subplot(3+n_scales, n_steps, n_steps + t+1)
plt.imshow(gs.data[0,0], cmap='gray')
plt.axis('off')
for k in range(1, n_scales):
s = np.power(2,k)
patch = crop(x, center=ls[t:t+1], size=g_size*s)
patch = F.average_pooling_2d(patch, ksize=s)
gs = F.concat((gs, patch), axis=1)
plt.subplot(3+n_scales, n_steps, n_steps*(k+1) + t+1)
plt.imshow(gs.data[0,k], cmap='gray')
plt.axis('off')
# output probability
plt.subplot2grid((3+n_scales,n_steps), (1+n_scales,t), rowspan=2)
plt.barh(np.arange(10), ys[t], align='center')
plt.xlim(0, 1)
plt.ylim(-0.5, 9.5)
if t == 0:
plt.yticks(np.arange(10))
else:
plt.yticks(np.arange(10), ['' for _ in range(10)])
plt.xticks([])
plt.show()
|
import csv
import logging
from pantomime.types import CSV
from collections import OrderedDict
from followthemoney.types import registry
from followthemoney.util import sanitize_text
from ingestors.support.temp import TempFileSupport
from ingestors.support.encoding import EncodingSupport
log = logging.getLogger(__name__)
class TableSupport(EncodingSupport, TempFileSupport):
"""Handle creating rows from an ingestor."""
def emit_row_dicts(self, table, rows, headers=None):
csv_path = self.make_work_file(table.id)
row_count = 0
with open(csv_path, "w", encoding=self.DEFAULT_ENCODING) as fp:
csv_writer = csv.writer(fp, dialect="unix")
for row in rows:
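                # derive headers from the first row when none are given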
if headers is None:
headers = list(row.keys())
values = [sanitize_text(row.get(h)) for h in headers]
length = sum((len(v) for v in values if v is not None))
if length == 0:
continue
csv_writer.writerow(values)
self.manager.emit_text_fragment(table, values, row_count)
row_count += 1
if row_count > 0 and row_count % 1000 == 0:
log.info("Table emit [%s]: %s...", table, row_count)
if row_count > 0:
csv_hash = self.manager.store(csv_path, mime_type=CSV)
table.set("csvHash", csv_hash)
table.set("rowCount", row_count + 1)
table.set("columns", registry.json.pack(headers))
def wrap_row_tuples(self, rows):
for row in rows:
headers = ["Column %s" % i for i in range(1, len(row) + 1)]
yield OrderedDict(zip(headers, row))
def emit_row_tuples(self, table, rows):
return self.emit_row_dicts(table, self.wrap_row_tuples(rows))
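# --- Illustrative usage sketch (hypothetical stub, not part of the ingestor framework) ---
# Fakes the manager and work-file plumbing with mocks so emit_row_tuples can be
# exercised standalone; assumes the support mixins need no constructor setup.
if __name__ == "__main__":
    import os
    import tempfile
    from unittest.mock import MagicMock

    class _StubIngestor(TableSupport):
        DEFAULT_ENCODING = "utf-8"

        def __init__(self):
            self.manager = MagicMock()

        def make_work_file(self, name):
            fd, path = tempfile.mkstemp(suffix=".csv")
            os.close(fd)
            return path

    stub = _StubIngestor()
    table = MagicMock()
    table.id = "table-1"
    stub.emit_row_tuples(table, [("a", "b"), ("c", "d")])
    # rows are written to a CSV fragment and the table entity receives
    # csvHash, rowCount and the generated headers ["Column 1", "Column 2"]
    print(table.set.call_args_list)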
|
"""
This module attempts to create a profile detecting the preconditions needed for primitives
Created by Daniel Garijo
"""
from os import listdir
from os.path import isfile, join
import json
import pandas as pd
from d3m import index
import primitive_interfaces
import sys
import numpy as np
#Data: dataset to test (from the test datasets that illustrate different requirements)
#Primitive: primitive being tested. We assume it follows d3m interfaces
def passTest(data, primitive):
target = data.iloc[:,-1]
train = data.drop(data.columns[[len(data.columns)-1]], axis=1) #drop target column (the last one)
#if (isinstance(primitive,primitive_interfaces.transformer.TransformerPrimitiveBase)):
#in theory the ifs simplify the tests, but many primitives extend an interface that is not the correct one.
try:
primitive.produce(inputs=train)
return True
except Exception as e2:
print("Exception just produce: ",e2)
#elif (isinstance(primitive,primitive_interfaces.unsupervised_learning.UnsupervisedLearnerPrimitiveBase)):
try:
primitive.produce(train[1])
return True
#Unsupervised
except Exception as e:
print("Exception produce column",e)
try:
primitive.set_training_data(inputs=train)
primitive.fit()
primitive.produce(inputs=train)
return True
except Exception as e1:
print("Exception fitproduce ",e1)
#supervised
try:
primitive.set_training_data(inputs=train,outputs=target)
primitive.fit()
primitive.produce(inputs=train)
return True
except Exception as e:
print("Exception set fit produce",e)
return False
#path: string with the path to the dataset folders. It is assumed to contain two subfolders: clean_data and requirement_data
#primitiveName: string with the primitive module name, e.g. 'sklearn.svm'
#primitiveClass: class object of the primitive
def getPrimitiveRequirements(path, primitiveName, primitiveClass):
CLEAN = path + "clean_data"
REQ = path + "requirement_data"
#Clean data files: all primitives should pass these tests
data_clean_int = pd.read_csv(CLEAN +'/int_clean_data.csv')
data_clean_float = pd.read_csv(CLEAN +'/float_clean_data.csv')
data_clean_int.name = "CLEAN DATA INT"
data_clean_float.name = "CLEAN DATA FLOAT"
prim = {}
try:
hyperparams_class = primitiveClass.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
#print(hyperparams_class.defaults())
primExec = primitiveClass(hyperparams=hyperparams_class.defaults())
except:
print("Error while loading primitive" + primitiveName)
prim["Error"] = ["COULD-NOT-LOAD-PRIMITIVE"]
return prim
#if not hasattr(primExec, 'produce'):
# print("Primitive does not have produce method. No requirements considered")
# return -1
passed = (passTest(data_clean_int, primExec)) and (passTest(data_clean_float, primExec))
if(not passed):
print("The primitive "+primitiveName+" cannot execute the clean datasets. No further requirements addressed")
prim["Error"] = ["COULD-NOT-LOAD-TABULAR-DATA"]
return prim
onlyfiles = [f for f in listdir(REQ) if isfile(join(REQ, f))]
requirements = []
for d in onlyfiles:
data = pd.read_csv(REQ+"/"+d)
data.name = d
passed = passTest(data, primExec)
if ("missing" in data.name) and (not passed) and ("NO_MISSING_VALUES" not in requirements):
#print("Primitive cannot handle missing values")
requirements.append("NO_MISSING_VALUES")
if ("categorical" in data.name) and (not passed) and ("NO_CATEGORICAL_VALUES" not in requirements):
#print("Primitive cannot handle string/categorical values")
requirements.append("NO_CATEGORICAL_VALUES")
if ("unique" in data.name) and (not passed) and ("NOT_UNIQUE" not in requirements):
#print("Primitive cannot handle having a column of unique values")
requirements.append("NOT_UNIQUE")
if ("negative" in data.name) and (not passed) and ("POSITIVE_VALUES" not in requirements):
#print("Primitive cannot handle negative values")
requirements.append("POSITIVE_VALUES")
#if(array):
# #prim.isArray = True
# prim["IsArray"] = True
prim["Requirements"] = requirements
return prim
#Main script
DATADIR = "data_profiler/" #Dir with the profiling datasets
d = {}
for primitive_name, primitive in index.search().items():
#print ("Detecting requirements for : " +primitive_name)
#if(primitive_name == "d3m.primitives.common_primitives.PCA"):
#if(primitive_name == "d3m.primitives.test.SumPrimitive"):
#print (" " + json.dumps(getPrimitiveRequirements(DATADIR,primitive_name, primitive)))
d[primitive_name] = getPrimitiveRequirements(DATADIR,primitive_name, primitive)
print (json.dumps(d))
|
from google.colab import drive
drive.mount('/content/drive')
import nltk
nltk.download('stopwords')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.layers import Dropout
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
STOPWORDS = set(stopwords.words('english'))
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras import backend as K
train_df = pd.read_csv("/train.csv",index_col=False)
train_df.reset_index(drop=True, inplace=True)
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 50000
# Max number of words in each text.
MAX_SEQUENCE_LENGTH = 250
# This is fixed.
EMBEDDING_DIM = 100
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
tokenizer.fit_on_texts(train_df['text'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
X = tokenizer.texts_to_sequences(train_df['text'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', X.shape)
Y = pd.get_dummies(train_df['labels']).values
print('Shape of label tensor:', Y.shape)
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42)
print(X_train.shape,Y_train.shape)
print(X_test.shape,Y_test.shape)
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
epochs = 5
batch_size = 64
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,validation_split=0.1,callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
accr = model.evaluate(X_test,Y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
plt.title('Loss')
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show();
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='test')
plt.legend()
plt.show();
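# --- Illustrative inference sketch (the sample sentence below is hypothetical) ---
# Shows how unseen text is tokenized, padded and scored with the model trained above.
new_texts = ["this is an example sentence to classify"]
new_seq = tokenizer.texts_to_sequences(new_texts)
new_pad = pad_sequences(new_seq, maxlen=MAX_SEQUENCE_LENGTH)
pred = model.predict(new_pad)
print('class probabilities:', pred[0])
print('predicted label index:', np.argmax(pred, axis=1)[0])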
|
from mock import patch, Mock
from simplescraper import SimpleScraper
import os
import codecs
import sys
if sys.version_info[0] < 3:
callableClass = 'urllib2.urlopen'
else:
callableClass = 'urllib.request.urlopen'
def get_request_mock(success=True):
read_data = []
code_status = []
request_mock = Mock()
#set multiple response codes 200 for each call
if success is True:
read_data = [codecs.open("test/test.html", 'r').read()]
code_status = [200,200,200,200,200]
else:
read_data = ['error']
code_status = [404,404,404,404,404]
request_mock.getcode.side_effect = code_status
request_mock.read.side_effect = read_data
return request_mock
@patch(callableClass)
def test_full_https_path(mock_urlopen):
mock_urlopen.return_value = get_request_mock()
test = SimpleScraper()
result = test.get_scraped_data('https://www.google.com')
assert len(result) > 0
@patch(callableClass)
def test_full_http_path(mock_urlopen):
mock_urlopen.return_value = get_request_mock()
test = SimpleScraper()
result = test.get_scraped_data('http://www.google.com')
assert len(result) > 0
@patch(callableClass)
def test_full_path(mock_urlopen):
mock_urlopen.return_value = get_request_mock()
test = SimpleScraper()
result = test.get_scraped_data('www.google.com')
assert len(result) > 0
@patch(callableClass)
def test_path(mock_urlopen):
mock_urlopen.return_value = get_request_mock()
test = SimpleScraper()
result = test.get_scraped_data('google.com')
assert len(result) > 0
@patch(callableClass)
def test_malformed_path(mock_urlopen):
mock_urlopen.return_value = get_request_mock(success=False)
test = SimpleScraper()
result = test.get_scraped_data('.google.com')
assert 'error' in result
def test_no_content_provided():
test = SimpleScraper()
result = test.get_scraped_data('')
assert 'error' in result
|
"""
Copyright 2020 Jackpine Technologies Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# coding: utf-8
"""
cons3rt - Copyright Jackpine Technologies Corp.
NOTE: This file is auto-generated. Do not edit the file manually.
"""
import pprint
import re # noqa: F401
import six
from cons3rt.configuration import Configuration
__author__ = 'Jackpine Technologies Corporation'
__copyright__ = 'Copyright 2020, Jackpine Technologies Corporation'
__license__ = 'Apache 2.0'
__version__ = '1.0.0'
__maintainer__ = 'API Support'
__email__ = 'support@cons3rt.com'
class FullDeploymentRun(object):
"""NOTE: This class is auto-generated. Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'categories': 'list[MinimalCategory]',
'creator': 'MinimalUser',
'earliest_start_time': 'int',
'end_time': 'int',
'lease_time': 'int',
'estimated_ready_time': 'int',
'estimated_start_time': 'int',
'id': 'int',
'log_entries': 'list[MinimalLogEntry]',
'message': 'str',
'project': 'MinimalProject',
'ready_time': 'int',
'result': 'str',
'start_time': 'int',
'time_of_request': 'int',
'canceled': 'bool',
'deployment': 'MinimalDeployment',
'deployment_run_hosts': 'list[MinimalDeploymentRunHost]',
'properties': 'list[ModelProperty]',
'deployment_run_status': 'str',
'description': 'str',
'fap_status': 'str',
'host_set_name': 'str',
'locked': 'bool',
'name': 'str',
'power_schedule': 'PowerSchedule',
'recurring_schedule': 'MinimalRecurringSchedule',
'scheduler_status_message': 'str',
'target_state': 'str',
'test_error': 'bool',
'test_runs': 'list[MinimalTestRunTask]',
'retained_on_error': 'bool',
'virtualization_realm': 'MinimalVirtualizationRealm',
'deployment_run_result_type': 'str'
}
attribute_map = {
'categories': 'categories',
'creator': 'creator',
'earliest_start_time': 'earliestStartTime',
'end_time': 'endTime',
'lease_time': 'leaseTime',
'estimated_ready_time': 'estimatedReadyTime',
'estimated_start_time': 'estimatedStartTime',
'id': 'id',
'log_entries': 'logEntries',
'message': 'message',
'project': 'project',
'ready_time': 'readyTime',
'result': 'result',
'start_time': 'startTime',
'time_of_request': 'timeOfRequest',
'canceled': 'canceled',
'deployment': 'deployment',
'deployment_run_hosts': 'deploymentRunHosts',
'properties': 'properties',
'deployment_run_status': 'deploymentRunStatus',
'description': 'description',
'fap_status': 'fapStatus',
'host_set_name': 'hostSetName',
'locked': 'locked',
'name': 'name',
'power_schedule': 'powerSchedule',
'recurring_schedule': 'recurringSchedule',
'scheduler_status_message': 'schedulerStatusMessage',
'target_state': 'targetState',
'test_error': 'testError',
'test_runs': 'testRuns',
'retained_on_error': 'retainedOnError',
'virtualization_realm': 'virtualizationRealm',
'deployment_run_result_type': 'deploymentRunResultType'
}
def __init__(self, categories=None, creator=None, earliest_start_time=None, end_time=None, lease_time=None, estimated_ready_time=None, estimated_start_time=None, id=None, log_entries=None, message=None, project=None, ready_time=None, result=None, start_time=None, time_of_request=None, canceled=None, deployment=None, deployment_run_hosts=None, properties=None, deployment_run_status=None, description=None, fap_status=None, host_set_name=None, locked=None, name=None, power_schedule=None, recurring_schedule=None, scheduler_status_message=None, target_state=None, test_error=None, test_runs=None, retained_on_error=None, virtualization_realm=None, deployment_run_result_type=None, local_vars_configuration=None): # noqa: E501
"""FullDeploymentRun - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._categories = None
self._creator = None
self._earliest_start_time = None
self._end_time = None
self._lease_time = None
self._estimated_ready_time = None
self._estimated_start_time = None
self._id = None
self._log_entries = None
self._message = None
self._project = None
self._ready_time = None
self._result = None
self._start_time = None
self._time_of_request = None
self._canceled = None
self._deployment = None
self._deployment_run_hosts = None
self._properties = None
self._deployment_run_status = None
self._description = None
self._fap_status = None
self._host_set_name = None
self._locked = None
self._name = None
self._power_schedule = None
self._recurring_schedule = None
self._scheduler_status_message = None
self._target_state = None
self._test_error = None
self._test_runs = None
self._retained_on_error = None
self._virtualization_realm = None
self._deployment_run_result_type = None
self.discriminator = None
if categories is not None:
self.categories = categories
if creator is not None:
self.creator = creator
if earliest_start_time is not None:
self.earliest_start_time = earliest_start_time
if end_time is not None:
self.end_time = end_time
if lease_time is not None:
self.lease_time = lease_time
if estimated_ready_time is not None:
self.estimated_ready_time = estimated_ready_time
if estimated_start_time is not None:
self.estimated_start_time = estimated_start_time
if id is not None:
self.id = id
if log_entries is not None:
self.log_entries = log_entries
if message is not None:
self.message = message
if project is not None:
self.project = project
if ready_time is not None:
self.ready_time = ready_time
if result is not None:
self.result = result
if start_time is not None:
self.start_time = start_time
if time_of_request is not None:
self.time_of_request = time_of_request
if canceled is not None:
self.canceled = canceled
if deployment is not None:
self.deployment = deployment
if deployment_run_hosts is not None:
self.deployment_run_hosts = deployment_run_hosts
if properties is not None:
self.properties = properties
if deployment_run_status is not None:
self.deployment_run_status = deployment_run_status
if description is not None:
self.description = description
if fap_status is not None:
self.fap_status = fap_status
if host_set_name is not None:
self.host_set_name = host_set_name
if locked is not None:
self.locked = locked
if name is not None:
self.name = name
if power_schedule is not None:
self.power_schedule = power_schedule
if recurring_schedule is not None:
self.recurring_schedule = recurring_schedule
if scheduler_status_message is not None:
self.scheduler_status_message = scheduler_status_message
if target_state is not None:
self.target_state = target_state
if test_error is not None:
self.test_error = test_error
if test_runs is not None:
self.test_runs = test_runs
if retained_on_error is not None:
self.retained_on_error = retained_on_error
if virtualization_realm is not None:
self.virtualization_realm = virtualization_realm
if deployment_run_result_type is not None:
self.deployment_run_result_type = deployment_run_result_type
@property
def categories(self):
"""Gets the categories of this FullDeploymentRun. # noqa: E501
:return: The categories of this FullDeploymentRun. # noqa: E501
:rtype: list[MinimalCategory]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this FullDeploymentRun.
:param categories: The categories of this FullDeploymentRun. # noqa: E501
:type: list[MinimalCategory]
"""
self._categories = categories
@property
def creator(self):
"""Gets the creator of this FullDeploymentRun. # noqa: E501
:return: The creator of this FullDeploymentRun. # noqa: E501
:rtype: MinimalUser
"""
return self._creator
@creator.setter
def creator(self, creator):
"""Sets the creator of this FullDeploymentRun.
:param creator: The creator of this FullDeploymentRun. # noqa: E501
:type: MinimalUser
"""
self._creator = creator
@property
def earliest_start_time(self):
"""Gets the earliest_start_time of this FullDeploymentRun. # noqa: E501
:return: The earliest_start_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._earliest_start_time
@earliest_start_time.setter
def earliest_start_time(self, earliest_start_time):
"""Sets the earliest_start_time of this FullDeploymentRun.
:param earliest_start_time: The earliest_start_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._earliest_start_time = earliest_start_time
@property
def end_time(self):
"""Gets the end_time of this FullDeploymentRun. # noqa: E501
:return: The end_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this FullDeploymentRun.
:param end_time: The end_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._end_time = end_time
@property
def lease_time(self):
"""Gets the lease_time of this FullDeploymentRun. # noqa: E501
:return: The lease_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._lease_time
@lease_time.setter
def lease_time(self, lease_time):
"""Sets the lease_time of this FullDeploymentRun.
:param lease_time: The lease_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._lease_time = lease_time
@property
def estimated_ready_time(self):
"""Gets the estimated_ready_time of this FullDeploymentRun. # noqa: E501
:return: The estimated_ready_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._estimated_ready_time
@estimated_ready_time.setter
def estimated_ready_time(self, estimated_ready_time):
"""Sets the estimated_ready_time of this FullDeploymentRun.
:param estimated_ready_time: The estimated_ready_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._estimated_ready_time = estimated_ready_time
@property
def estimated_start_time(self):
"""Gets the estimated_start_time of this FullDeploymentRun. # noqa: E501
:return: The estimated_start_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._estimated_start_time
@estimated_start_time.setter
def estimated_start_time(self, estimated_start_time):
"""Sets the estimated_start_time of this FullDeploymentRun.
:param estimated_start_time: The estimated_start_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._estimated_start_time = estimated_start_time
@property
def id(self):
"""Gets the id of this FullDeploymentRun. # noqa: E501
:return: The id of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FullDeploymentRun.
:param id: The id of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._id = id
@property
def log_entries(self):
"""Gets the log_entries of this FullDeploymentRun. # noqa: E501
:return: The log_entries of this FullDeploymentRun. # noqa: E501
:rtype: list[MinimalLogEntry]
"""
return self._log_entries
@log_entries.setter
def log_entries(self, log_entries):
"""Sets the log_entries of this FullDeploymentRun.
:param log_entries: The log_entries of this FullDeploymentRun. # noqa: E501
:type: list[MinimalLogEntry]
"""
self._log_entries = log_entries
@property
def message(self):
"""Gets the message of this FullDeploymentRun. # noqa: E501
:return: The message of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this FullDeploymentRun.
:param message: The message of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._message = message
@property
def project(self):
"""Gets the project of this FullDeploymentRun. # noqa: E501
:return: The project of this FullDeploymentRun. # noqa: E501
:rtype: MinimalProject
"""
return self._project
@project.setter
def project(self, project):
"""Sets the project of this FullDeploymentRun.
:param project: The project of this FullDeploymentRun. # noqa: E501
:type: MinimalProject
"""
self._project = project
@property
def ready_time(self):
"""Gets the ready_time of this FullDeploymentRun. # noqa: E501
:return: The ready_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._ready_time
@ready_time.setter
def ready_time(self, ready_time):
"""Sets the ready_time of this FullDeploymentRun.
:param ready_time: The ready_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._ready_time = ready_time
@property
def result(self):
"""Gets the result of this FullDeploymentRun. # noqa: E501
:return: The result of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this FullDeploymentRun.
:param result: The result of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._result = result
@property
def start_time(self):
"""Gets the start_time of this FullDeploymentRun. # noqa: E501
:return: The start_time of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this FullDeploymentRun.
:param start_time: The start_time of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._start_time = start_time
@property
def time_of_request(self):
"""Gets the time_of_request of this FullDeploymentRun. # noqa: E501
:return: The time_of_request of this FullDeploymentRun. # noqa: E501
:rtype: int
"""
return self._time_of_request
@time_of_request.setter
def time_of_request(self, time_of_request):
"""Sets the time_of_request of this FullDeploymentRun.
:param time_of_request: The time_of_request of this FullDeploymentRun. # noqa: E501
:type: int
"""
self._time_of_request = time_of_request
@property
def canceled(self):
"""Gets the canceled of this FullDeploymentRun. # noqa: E501
:return: The canceled of this FullDeploymentRun. # noqa: E501
:rtype: bool
"""
return self._canceled
@canceled.setter
def canceled(self, canceled):
"""Sets the canceled of this FullDeploymentRun.
:param canceled: The canceled of this FullDeploymentRun. # noqa: E501
:type: bool
"""
self._canceled = canceled
@property
def deployment(self):
"""Gets the deployment of this FullDeploymentRun. # noqa: E501
:return: The deployment of this FullDeploymentRun. # noqa: E501
:rtype: MinimalDeployment
"""
return self._deployment
@deployment.setter
def deployment(self, deployment):
"""Sets the deployment of this FullDeploymentRun.
:param deployment: The deployment of this FullDeploymentRun. # noqa: E501
:type: MinimalDeployment
"""
self._deployment = deployment
@property
def deployment_run_hosts(self):
"""Gets the deployment_run_hosts of this FullDeploymentRun. # noqa: E501
:return: The deployment_run_hosts of this FullDeploymentRun. # noqa: E501
:rtype: list[MinimalDeploymentRunHost]
"""
return self._deployment_run_hosts
@deployment_run_hosts.setter
def deployment_run_hosts(self, deployment_run_hosts):
"""Sets the deployment_run_hosts of this FullDeploymentRun.
:param deployment_run_hosts: The deployment_run_hosts of this FullDeploymentRun. # noqa: E501
:type: list[MinimalDeploymentRunHost]
"""
self._deployment_run_hosts = deployment_run_hosts
@property
def properties(self):
"""Gets the properties of this FullDeploymentRun. # noqa: E501
:return: The properties of this FullDeploymentRun. # noqa: E501
:rtype: list[ModelProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this FullDeploymentRun.
:param properties: The properties of this FullDeploymentRun. # noqa: E501
:type: list[ModelProperty]
"""
self._properties = properties
@property
def deployment_run_status(self):
"""Gets the deployment_run_status of this FullDeploymentRun. # noqa: E501
:return: The deployment_run_status of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._deployment_run_status
@deployment_run_status.setter
def deployment_run_status(self, deployment_run_status):
"""Sets the deployment_run_status of this FullDeploymentRun.
:param deployment_run_status: The deployment_run_status of this FullDeploymentRun. # noqa: E501
:type: str
"""
allowed_values = ["UNKNOWN", "SCHEDULED", "SUBMITTED", "PROVISIONING_HOSTS", "HOSTS_PROVISIONED", "RESERVED", "RELEASE_REQUESTED", "RELEASING", "TESTING", "TESTED", "REDEPLOYING_HOSTS", "COMPLETED", "CANCELED"] # noqa: E501
if self.local_vars_configuration.client_side_validation and deployment_run_status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `deployment_run_status` ({0}), must be one of {1}" # noqa: E501
.format(deployment_run_status, allowed_values)
)
self._deployment_run_status = deployment_run_status
@property
def description(self):
"""Gets the description of this FullDeploymentRun. # noqa: E501
:return: The description of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this FullDeploymentRun.
:param description: The description of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._description = description
@property
def fap_status(self):
"""Gets the fap_status of this FullDeploymentRun. # noqa: E501
:return: The fap_status of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._fap_status
@fap_status.setter
def fap_status(self, fap_status):
"""Sets the fap_status of this FullDeploymentRun.
:param fap_status: The fap_status of this FullDeploymentRun. # noqa: E501
:type: str
"""
allowed_values = ["REQUESTED", "BUILDING_HOSTSET", "BUILDING_HOSTSET_ERROR", "HOSTSET_BUILT_POWERED_OFF", "POWERING_ON", "POWERING_ON_ERROR", "POWERED_ON", "AWAITING_AGENT_CHECK_IN", "AGENT_CHECK_IN_ERROR", "AGENT_CHECK_IN_SUCCESS", "BUILDING_SOURCE", "SOURCE_BUILT", "BUILDING_SOURCE_ERROR", "BUILDING_SYSTEMS", "BUILDING_SYSTEMS_ERROR", "SYSTEMS_BUILT", "BUILDING_SCENARIO", "BUILDING_SCENARIO_ERROR", "SCENARIO_BUILT", "REBOOTING", "REBOOTING_ERROR", "RESERVED", "REDEPLOYING_HOSTS", "REDEPLOYING_HOSTS_ERROR", "RELEASE_REQUESTED", "RELEASING", "RELEASING_SCENARIO_ERROR", "COMPLETE", "UNKNOWN", "CANCELED", "INVALID_STATE_ERROR", "FAP_SERVICE_COMMUNICATIONS_ERROR", "INVALID_REQUEST_ERROR", "REDEPLOYING_HOSTS", "REDEPLOYING_HOSTS_ERROR"] # noqa: E501
if self.local_vars_configuration.client_side_validation and fap_status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `fap_status` ({0}), must be one of {1}" # noqa: E501
.format(fap_status, allowed_values)
)
self._fap_status = fap_status
@property
def host_set_name(self):
"""Gets the host_set_name of this FullDeploymentRun. # noqa: E501
:return: The host_set_name of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._host_set_name
@host_set_name.setter
def host_set_name(self, host_set_name):
"""Sets the host_set_name of this FullDeploymentRun.
:param host_set_name: The host_set_name of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._host_set_name = host_set_name
@property
def locked(self):
"""Gets the locked of this FullDeploymentRun. # noqa: E501
:return: The locked of this FullDeploymentRun. # noqa: E501
:rtype: bool
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this FullDeploymentRun.
:param locked: The locked of this FullDeploymentRun. # noqa: E501
:type: bool
"""
self._locked = locked
@property
def name(self):
"""Gets the name of this FullDeploymentRun. # noqa: E501
:return: The name of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FullDeploymentRun.
:param name: The name of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._name = name
@property
def power_schedule(self):
"""Gets the power_schedule of this FullDeploymentRun. # noqa: E501
:return: The power_schedule of this FullDeploymentRun. # noqa: E501
:rtype: PowerSchedule
"""
return self._power_schedule
@power_schedule.setter
def power_schedule(self, power_schedule):
"""Sets the power_schedule of this FullDeploymentRun.
:param power_schedule: The power_schedule of this FullDeploymentRun. # noqa: E501
:type: PowerSchedule
"""
self._power_schedule = power_schedule
@property
def recurring_schedule(self):
"""Gets the recurring_schedule of this FullDeploymentRun. # noqa: E501
:return: The recurring_schedule of this FullDeploymentRun. # noqa: E501
:rtype: MinimalRecurringSchedule
"""
return self._recurring_schedule
@recurring_schedule.setter
def recurring_schedule(self, recurring_schedule):
"""Sets the recurring_schedule of this FullDeploymentRun.
:param recurring_schedule: The recurring_schedule of this FullDeploymentRun. # noqa: E501
:type: MinimalRecurringSchedule
"""
self._recurring_schedule = recurring_schedule
@property
def scheduler_status_message(self):
"""Gets the scheduler_status_message of this FullDeploymentRun. # noqa: E501
:return: The scheduler_status_message of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._scheduler_status_message
@scheduler_status_message.setter
def scheduler_status_message(self, scheduler_status_message):
"""Sets the scheduler_status_message of this FullDeploymentRun.
:param scheduler_status_message: The scheduler_status_message of this FullDeploymentRun. # noqa: E501
:type: str
"""
self._scheduler_status_message = scheduler_status_message
@property
def target_state(self):
"""Gets the target_state of this FullDeploymentRun. # noqa: E501
:return: The target_state of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._target_state
@target_state.setter
def target_state(self, target_state):
"""Sets the target_state of this FullDeploymentRun.
:param target_state: The target_state of this FullDeploymentRun. # noqa: E501
:type: str
"""
allowed_values = ["UNKNOWN_STATE", "SYSTEMS_BUILT", "SCENARIO_BUILT", "TESTS_EXECUTED_RESOURCES_RELEASED", "TESTS_EXECUTED_RESOURCES_RESERVED"] # noqa: E501
if self.local_vars_configuration.client_side_validation and target_state not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `target_state` ({0}), must be one of {1}" # noqa: E501
.format(target_state, allowed_values)
)
self._target_state = target_state
@property
def test_error(self):
"""Gets the test_error of this FullDeploymentRun. # noqa: E501
:return: The test_error of this FullDeploymentRun. # noqa: E501
:rtype: bool
"""
return self._test_error
@test_error.setter
def test_error(self, test_error):
"""Sets the test_error of this FullDeploymentRun.
:param test_error: The test_error of this FullDeploymentRun. # noqa: E501
:type: bool
"""
self._test_error = test_error
@property
def test_runs(self):
"""Gets the test_runs of this FullDeploymentRun. # noqa: E501
:return: The test_runs of this FullDeploymentRun. # noqa: E501
:rtype: list[MinimalTestRunTask]
"""
return self._test_runs
@test_runs.setter
def test_runs(self, test_runs):
"""Sets the test_runs of this FullDeploymentRun.
:param test_runs: The test_runs of this FullDeploymentRun. # noqa: E501
:type: list[MinimalTestRunTask]
"""
self._test_runs = test_runs
@property
def retained_on_error(self):
"""Gets the retained_on_error of this FullDeploymentRun. # noqa: E501
:return: The retained_on_error of this FullDeploymentRun. # noqa: E501
:rtype: bool
"""
return self._retained_on_error
@retained_on_error.setter
def retained_on_error(self, retained_on_error):
"""Sets the retained_on_error of this FullDeploymentRun.
:param retained_on_error: The retained_on_error of this FullDeploymentRun. # noqa: E501
:type: bool
"""
self._retained_on_error = retained_on_error
@property
def virtualization_realm(self):
"""Gets the virtualization_realm of this FullDeploymentRun. # noqa: E501
:return: The virtualization_realm of this FullDeploymentRun. # noqa: E501
:rtype: MinimalVirtualizationRealm
"""
return self._virtualization_realm
@virtualization_realm.setter
def virtualization_realm(self, virtualization_realm):
"""Sets the virtualization_realm of this FullDeploymentRun.
:param virtualization_realm: The virtualization_realm of this FullDeploymentRun. # noqa: E501
:type: MinimalVirtualizationRealm
"""
self._virtualization_realm = virtualization_realm
@property
def deployment_run_result_type(self):
"""Gets the deployment_run_result_type of this FullDeploymentRun. # noqa: E501
:return: The deployment_run_result_type of this FullDeploymentRun. # noqa: E501
:rtype: str
"""
return self._deployment_run_result_type
@deployment_run_result_type.setter
def deployment_run_result_type(self, deployment_run_result_type):
"""Sets the deployment_run_result_type of this FullDeploymentRun.
:param deployment_run_result_type: The deployment_run_result_type of this FullDeploymentRun. # noqa: E501
:type: str
"""
allowed_values = ["ERROR", "SUCCESS", "CANCELED", "UNKNOWN"] # noqa: E501
if self.local_vars_configuration.client_side_validation and deployment_run_result_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `deployment_run_result_type` ({0}), must be one of {1}" # noqa: E501
.format(deployment_run_result_type, allowed_values)
)
self._deployment_run_result_type = deployment_run_result_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FullDeploymentRun):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FullDeploymentRun):
return True
return self.to_dict() != other.to_dict()
|
#!/usr/bin/env python
#
# Parser
#
import sys, os
import orio.tool.ply.lex, orio.tool.ply.yacc
import orio.main.util.globals as g
import orio.module.splingo.ast as ast
#----------------------------------------------------------------------------------------------------------------------
# LEXER
class SpLingoLexer:
def __init__(self):
pass
keywords = [
'scalar', 'vector', 'matrix',
'in', 'out', #'inout',
'dia',
'register', 'auto', 'extern', 'static'
]
reserved = {}
for k in keywords:
reserved[k] = k.upper()
tokens = list(reserved.values()) + [
# identifiers and literals
'ID', 'ILIT', 'FLIT', 'SLIT',
# operators (||,&&,<=,>=,==,!=)
#'LOR', 'LAND', 'LNOT',
#LE', GE', 'EE', 'NE',
# shorthand assignment (*=, /=, %=, +=, -=)
#'MULTEQ', 'DIVEQ', 'MODEQ', 'PLUSEQ', 'MINUSEQ',
# increment/decrement (++,--)
'PP', 'MM',
'SLCOMMENT', 'MLCOMMENT'
]
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# operators
#t_LOR = r'\|\|'
#t_LAND = r'&&'
#t_LE = r'<='
#t_GE = r'>='
#t_EE = r'=='
#t_NE = r'!='
# shorthand assignment operators
#t_MULTEQ = r'\*='
#t_DIVEQ = r'/='
#t_MODEQ = r'%='
#t_PLUSEQ = r'\+='
#t_MINUSEQ = r'-='
# increment/decrement
t_PP = r'\+\+'
t_MM = r'--'
literals = "+-*/%()[]{},;:'.=<>!"
# integer literal
t_ILIT = r'\d+'
# floating literal
t_FLIT = r'((\d+)(\.\d*)([eE](\+|-)?(\d+))? | (\d+)[eE](\+|-)?(\d+))'
# string literal
t_SLIT = r'\"([^\\\n]|(\\.))*?\"'
def t_ID(self, t):
r'[A-Za-z_]([A-Za-z0-9_]*[A-Za-z0-9_]+)*'
t.type = self.reserved.get(t.value, 'ID')
return t
def t_SLCOMMENT(self, t):
r'//.*'
t.value = t.value[2:]
return t
def t_MLCOMMENT(self, t):
r'/\*[^/]*\*/'
t.value = t.value[2:-2]
return t
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
g.err('%s: illegal character (%s) at line %s' % (self.__class__, t.value[0], t.lexer.lineno))
def build(self, **kwargs):
self.lexer = orio.tool.ply.lex.lex(module=self, **kwargs)
def test(self, data):
self.lexer.input(data)
while 1:
tok = self.lexer.token()
if not tok: break
print tok
def input(self, data):
return self.lexer.input(data)
def token(self):
return self.lexer.token()
#----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
# GRAMMAR
tokens = SpLingoLexer.tokens
start = 'prog'
parser = None
elixir = None
def p_prog_a(p):
'''prog : sid IN params OUT params '{' stmts '}' '''
p[0] = ast.FunDec(p[1], ast.IdentExp('void'), [], p[3]+p[5], p[7])
#print codegen.CodeGen().generate(p[0], '', ' ')
def p_prog_b(p):
'''prog : sid IN params '{' stmts '}'
| sid OUT params '{' stmts '}' '''
p[0] = ast.FunDec(p[1], ast.IdentExp('void'), [], p[3], p[5])
def p_params(p):
'''params : param
| params ',' param'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_param(p):
'''param : sid ':' type'''
#tname = reduce(lambda acc,item: acc+'.'+item, p[3][1:], p[3][0])
p[0] = ast.ParamDec(p[3], p[1])
def p_type(p):
'''type : MATRIX subty
| VECTOR
| SCALAR'''
if len(p) == 3:
if p[2] is None:
p[0] = ast.IdentExp(p[1])
else:
p[0] = ast.QualIdentExp(p[1], p[2])
else:
p[0] = ast.IdentExp(p[1])
def p_subty(p):
'''subty : "." DIA
| empty'''
if p[1] is None:
p[0] = None
else:
p[0] = ast.IdentExp(p[2])
#------------------------------------------------------------------------------
def p_stmts(p):
'''stmts : stmt
| stmts stmt'''
if len(p) == 2:
p[0] = ast.CompStmt([p[1]])
else:
p[0] = ast.CompStmt(p[1].stmts + [p[2]])
def p_stmt_eq(p):
'''stmt : exp '''
    #TODO: ensure correspondence of stored coordinates to file positions
coord = p.lineno(1)
p[0] = ast.ExpStmt(p[1], coord)
def p_stmt_dec(p):
'''stmt : sid exp ';'
| quals sid exp ';' '''
if len(p) == 4:
p[0] = ast.VarDec(p[1], [p[2]], True, [], p.lineno(3))
else:
p[0] = ast.VarDec(p[2], [p[3]], True, p[1], p.lineno(4))
def p_stmt_comment(p):
'''stmt : comment'''
p[0] = p[1]
def p_comment(p):
'''comment : SLCOMMENT
| MLCOMMENT'''
p[0] = ast.Comment(p[1], p.lineno(1))
def p_quals(p):
'''quals : qual
| quals qual'''
if len(p) == 2:
p[0] = [p[1]]
else:
        p[0] = p[1] + [p[2]]
def p_qual(p):
'''qual : REGISTER
| AUTO
| EXTERN
| STATIC '''
p[0] = p[1]
#------------------------------------------------------------------------------
precedence = (
('left', ','),
('right', '='),
('left', '<', '>'),
('left', '+', '-'),
('left', '*', '/', '%'),
('left', 'PP', 'MM')
)
#------------------------------------------------------------------------------
def p_exp_primary(p):
'''exp : primary'''
p[0] = p[1]
def p_exp_paren(p):
'''exp : '(' exp ')' '''
p[0] = ast.ParenExp(p[2], p.lineno(1))
def p_exp_seq(p):
'''exp : exp ',' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.COMMA, p[1], p[3], p.lineno(2))
def p_exp_eq(p):
'''exp : exp '=' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.EQ, p[1], p[3], p.lineno(2))
def p_exp_plus(p):
'''exp : exp '+' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.PLUS, p[1], p[3], p.lineno(2))
def p_exp_minus(p):
'''exp : exp '-' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.MINUS, p[1], p[3], p.lineno(2))
def p_exp_mult(p):
'''exp : exp '*' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.MULT, p[1], p[3], p.lineno(2))
def p_exp_div(p):
'''exp : exp '/' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.DIV, p[1], p[3], p.lineno(2))
def p_exp_mod(p):
'''exp : exp '%' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.MOD, p[1], p[3], p.lineno(2))
def p_exp_lt(p):
'''exp : exp '<' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.LT, p[1], p[3], p.lineno(2))
def p_exp_gt(p):
'''exp : exp '>' exp'''
p[0] = ast.BinOpExp(ast.BinOpExp.GT, p[1], p[3], p.lineno(2))
def p_exp_uminus(p):
'''exp : '-' exp'''
p[0] = ast.UnaryExp(ast.UnaryExp.MINUS, p[2], p.lineno(1))
def p_exp_transpose(p):
'''exp : exp "'" '''
p[0] = ast.UnaryExp(ast.UnaryExp.TRANSPOSE, p[1], p.lineno(2))
def p_exp_postpp(p):
'''exp : exp PP '''
p[0] = ast.UnaryExp(ast.UnaryExp.POST_INC, p[1], p.lineno(2))
def p_exp_postmm(p):
'''exp : exp MM '''
p[0] = ast.UnaryExp(ast.UnaryExp.POST_DEC, p[1], p.lineno(2))
def p_exp_array(p):
'''exp : exp '[' exp ']' '''
p[0] = ast.ArrayRefExp(p[1], p[3], p.lineno(2))
#------------------------------------------------------------------------------
def p_primary_id(p):
'primary : sid'
p[0] = p[1]
def p_primary_ilit(p):
'primary : ILIT'
p[0] = ast.LitExp(int(p[1]), ast.LitExp.INT, p.lineno(1))
def p_primary_flit(p):
'primary : FLIT'
p[0] = ast.LitExp(float(p[1]), ast.LitExp.FLOAT, p.lineno(1))
def p_primary_slit(p):
'primary : SLIT'
p[0] = ast.LitExp(p[1], ast.LitExp.STRING, p.lineno(1))
def p_sid(p):
'sid : ID'
p[0] = ast.IdentExp(p[1], p.lineno(1))
def p_empty(p):
'empty : '
p[0] = None
def p_error(p):
g.err("orio.module.splingo.parser: error in input line #%s, at token-type '%s', token-value '%s'" % (p.lineno, p.type, p.value))
#----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------
def parse(text):
'''Lex, parse and create the HL AST for the DSL text'''
    global elixir, parser
    if elixir is None:
        elixir = SpLingoLexer()
        elixir.build(debug=0, optimize=1)
    if parser is None:
        # build the parser only once and reuse it across calls
        parser = orio.tool.ply.yacc.yacc(debug=0, optimize=1, write_tables=0,
                                         errorlog=orio.tool.ply.yacc.NullLogger()
                                        )
return parser.parse(text, lexer=elixir, debug=0)
#----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
for i in range(1, len(sys.argv)):
#print "About to lex %s" % sys.argv[i]
f = open(sys.argv[i], "r")
s = f.read()
f.close()
#print "Contents of %s:\n%s" % (sys.argv[i], s)
# Test the lexer; just print out all tokens founds
#l.test(s)
parse(s)
print >>sys.stderr, '[parser] Successfully parsed %s' % sys.argv[i]
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waf_web_app_firewall_facts
short_description: Fetches details about one or multiple WebAppFirewall resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple WebAppFirewall resources in Oracle Cloud Infrastructure
- Gets a list of all WebAppFirewalls in a compartment.
- If I(web_app_firewall_id) is specified, the details of a single WebAppFirewall will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
web_app_firewall_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WebAppFirewall.
- Required to get a specific web_app_firewall.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment in which to list resources.
- Required to list multiple web_app_firewalls.
type: str
web_app_firewall_policy_id:
description:
- A filter to return only the WebAppFirewall with the given L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of
related WebAppFirewallPolicy.
type: str
lifecycle_state:
description:
- A filter to return only resources that match the given lifecycleState.
type: list
elements: str
display_name:
description:
- A filter to return only resources that match the entire display name given.
type: str
aliases: ["name"]
sort_order:
description:
- The sort order to use, either 'ASC' or 'DESC'.
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. Only one sort order may be provided.
Default order for timeCreated is descending.
Default order for displayName is ascending.
If no value is specified timeCreated is default.
type: str
choices:
- "timeCreated"
- "displayName"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific web_app_firewall
oci_waf_web_app_firewall_facts:
# required
web_app_firewall_id: "ocid1.webappfirewall.oc1..xxxxxxEXAMPLExxxxxx"
- name: List web_app_firewalls
oci_waf_web_app_firewall_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
web_app_firewall_policy_id: "ocid1.webappfirewallpolicy.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state: [ "$p.getValue()" ]
display_name: display_name_example
sort_order: ASC
sort_by: timeCreated
"""
RETURN = """
web_app_firewalls:
description:
- List of WebAppFirewall resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WebAppFirewall.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- WebAppFirewall display name, can be renamed.
returned: on success
type: str
sample: display_name_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
backend_type:
description:
- Type of the WebAppFirewall, as example LOAD_BALANCER.
returned: on success
type: str
sample: LOAD_BALANCER
web_app_firewall_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of WebAppFirewallPolicy, which is attached to the resource.
returned: on success
type: str
sample: "ocid1.webappfirewallpolicy.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The time the WebAppFirewall was created. An RFC3339 formatted datetime string.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the WebAppFirewall was updated. An RFC3339 formatted datetime string.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the WebAppFirewall.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in FAILED state.
returned: on success
type: str
sample: lifecycle_details_example
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
load_balancer_id:
description:
- LoadBalancer L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) to which the WebAppFirewallPolicy is attached to.
returned: on success
type: str
sample: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"backend_type": "LOAD_BALANCER",
"web_app_firewall_policy_id": "ocid1.webappfirewallpolicy.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {},
"load_balancer_id": "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waf import WafClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class WebAppFirewallFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"web_app_firewall_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_web_app_firewall,
web_app_firewall_id=self.module.params.get("web_app_firewall_id"),
)
def list_resources(self):
optional_list_method_params = [
"web_app_firewall_policy_id",
"lifecycle_state",
"display_name",
"sort_order",
"sort_by",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_web_app_firewalls,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
WebAppFirewallFactsHelperCustom = get_custom_class("WebAppFirewallFactsHelperCustom")
class ResourceFactsHelper(
WebAppFirewallFactsHelperCustom, WebAppFirewallFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
web_app_firewall_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
web_app_firewall_policy_id=dict(type="str"),
lifecycle_state=dict(type="list", elements="str"),
display_name=dict(aliases=["name"], type="str"),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
sort_by=dict(type="str", choices=["timeCreated", "displayName"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="web_app_firewall",
service_client_class=WafClient,
namespace="waf",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(web_app_firewalls=result)
if __name__ == "__main__":
main()
|
import numpy as np
def weights(X, remove_self_conn=False):
N = len(X[0])
P = len(X)
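    # Hebbian outer-product rule: W[i][j] = (1/N) * sum over patterns m of X[m][i]*X[m][j]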
W_ij = lambda i, j: (1/N)*sum(X[m][i] * X[m][j] for m in range(P))
if remove_self_conn:
W = [[W_ij(i, j) if i != j else 0 for i in range(N)] for j in range(N)]
else:
W = [[W_ij(i, j) for i in range(N)] for j in range(N)]
return np.array(W)
def sign(x):
    # bipolar sign: non-negative inputs map to +1, negative to -1
    return 1 if x >= 0 else -1
def recall(W, in_x):
N = len(in_x)
x_i = lambda i: sign(sum(W[i][j]*in_x[j] for j in range(N)))
out_x = [x_i(i) for i in range(N)]
return np.array(out_x)
def recall_sequentially(W, x, max_iterations=5000, snapshot=500):
in_x = list(x)
N = len(in_x)
x_i = lambda i: sign(sum(W[i][j]*in_x[j] for j in range(N)))
out_array = []
for _ in range(max_iterations):
choice = np.random.randint(0, len(in_x))
in_x[choice] = x_i(choice)
if not _ % snapshot:
out_array.append(list(in_x))
return out_array
def recall_until_stable(W, x, max_iterations=1000):
for _ in range(max_iterations):
new_x = recall(W, x)
if (new_x == x).all():
return new_x
x = new_x
return -1
def test_stable(W, x):
result = recall(W, x)
if (result == x).all():
print("Pass")
else:
print("Fail", x, result)
def test_expected(W, x, exp):
result = recall_until_stable(W, x)
if (result == exp).all():
print("Pass")
else:
print("Fail", result, exp)
def energy(W, x):
N = len(W)
E_i = lambda i: sum(W[i][j]*x[j]*x[i] for j in range(N))
return -1*sum(E_i(i) for i in range(N))
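# --- Minimal usage sketch (assumes bipolar +/-1 patterns; example data is illustrative) ---
# Store two orthogonal 8-unit patterns, check that each is a fixed point, and
# verify that a one-bit-corrupted copy is pulled back to the stored pattern.
if __name__ == "__main__":
    patterns = np.array([
        [1, -1,  1, -1, 1, -1,  1, -1],
        [1,  1, -1, -1, 1,  1, -1, -1],
    ])
    W = weights(patterns, remove_self_conn=True)
    for p in patterns:
        test_stable(W, p)                    # expect "Pass" for both stored patterns
    noisy = patterns[0].copy()
    noisy[0] *= -1                           # corrupt a single unit
    test_expected(W, noisy, patterns[0])     # expect "Pass": recall converges back
    print("E(stored) =", energy(W, patterns[0]), " E(noisy) =", energy(W, noisy))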
|
def test(a, b, c, *args, **kwargs):
with 1.22:
print 1.2
print {'s', 'e', 't'}
print ['l', 'i', 's', 't']
print ('t', 'u', 'p', 'l', 'e')
print {'d': '1', 'i': '2', 'c': '3', 't': '4'}
print [x for x in []]
print [x for x in [] if x is not None]
print 123456789
print 0x75BCD15
print 0b111010110111100110100010101
print 0726746425
print 0o726746425
print "123456789"
pass
pass
yield 1
a(a, b, *args, c=d, x=y, **kwargs)
subscript[idx]
slicesimpl[slow:supper]
sliceext[elow:eupper:estep]
class Foobar1:
pass
class Foobar2(SomeBase):
pass
class Foobar3(Base1, Base2):
pass
class Foobar4(Base1, Base2):
def __init__(self, *args, **kwargs):
self.arg = args
|
from pywingui.windows import *
from pywingui.wtl import *
from ctypes import c_char
try:
LoadLibrary("SciLexer.DLL")
#except Exception, e:
except:# for compatibility with Python 3 version
MessageBox(0, "The Scintilla DLL could not be loaded.", "Error loading Scintilla", MB_OK | MB_ICONERROR)
#~ raise e
from .scintilla_constants import *
class SCNotification(Structure):
_fields_ = [("nmhdr", NMHDR),
("position", c_int),
("ch", c_int),
("modifiers", c_int),
("modificationType", c_int),
("text", c_wchar_p),
("length", c_int),
("linesAdded", c_int),
("message", c_int),
("wParam", WPARAM),
("lParam", LPARAM),
("line", c_int),
("foldLevelNow", c_int),
("foldLevelPrev", c_int),
("margin", c_int),
("listType", c_int),
("x", c_int),
("y", c_int)]
copyright = \
"""
Scintilla
Copyright 1998-2003 by Neil Hodgson <neilh@scintilla.org>
All Rights Reserved
"""
class Scintilla(Window):
_window_class_ = "Scintilla"
_window_style_ = WS_VISIBLE | WS_CHILD
def __init__(self, *args, **kwargs):
Window.__init__(self, *args, **kwargs)
self.InterceptParent()
def GetNotification(self, event):
return SCNotification.from_address(int(event.lParam))
def SendScintillaMessage(self, msg, wParam, lParam):
        # TODO: use the fast path, e.g. retrieve the direct message function from
        # Scintilla as described in the Scintilla docs
return windll.user32.SendMessageA(self.handle, msg, wParam, lParam)
#~ return self.SendMessage(msg, wParam, lParam)
def SetText(self, txt):
self.SendScintillaMessage(SCI_SETTEXT, 0, txt)
def GetLexer(self):
return self.SendScintillaMessage(SCI_GETLEXER, 0, 0)
def SetLexerLanguage(self, lang):
self.SendScintillaMessage(SCI_SETLEXERLANGUAGE, 0, lang)
def SetStyleBits(self, key, value):
self.SendScintillaMessage(SCI_SETSTYLEBITS, key, value)
def SetMarginWidth(self, width = 0):
self.SendScintillaMessage(SCI_SETMARGINWIDTHN, 0, width)
def SetProperty(self, key, value):
self.SendScintillaMessage(SCI_SETPROPERTY, key, value)
def SetKeyWords(self, keyWordSet, keyWordList):
self.SendScintillaMessage(SCI_SETKEYWORDS, keyWordSet, " ".join(keyWordList))
def StyleSetFore(self, styleNumber, color):
self.SendScintillaMessage(SCI_STYLESETFORE, styleNumber, color)
def StyleSetBack(self, styleNumber, color):
self.SendScintillaMessage(SCI_STYLESETBACK, styleNumber, color)
def StyleSetSize(self, styleNumber, size):
self.SendScintillaMessage(SCI_STYLESETSIZE, styleNumber, size)
def StyleSetFont(self, styleNumber, face):
self.SendScintillaMessage(SCI_STYLESETFONT, styleNumber, face)
def StyleClearAll(self):
self.SendScintillaMessage(SCI_STYLECLEARALL, 0, 0)
def GetLength(self):
return self.SendScintillaMessage(SCI_GETLENGTH, 0, 0)
def GetText(self):
buff_length = self.GetLength() + 1
buff = create_string_buffer(buff_length)
self.SendScintillaMessage(SCI_GETTEXT, buff_length, byref(buff))
return str(buff.value)
def GetSelText(self):
start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
if start == end: return ""
buff = (c_char * (end - start + 1))()
self.SendScintillaMessage(SCI_GETSELTEXT, 0, byref(buff))
return str(buff.value)
def HasSelection(self):
start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
return (end - start) > 0
def AddText(self, text):
self.SendScintillaMessage(SCI_ADDTEXT, len(text), text)
def SetTabWidth(self, width):
self.SendScintillaMessage(SCI_SETTABWIDTH, width, 0)
def SetUseTabs(self, useTabs):
self.SendScintillaMessage(SCI_SETUSETABS, int(useTabs), 0)
def SetEolMode(self, eolMode):
self.SendScintillaMessage(SCI_SETEOLMODE, eolMode, 0)
def Undo(self):
self.SendScintillaMessage(SCI_UNDO, 0, 0)
def Redo(self):
self.SendScintillaMessage(SCI_REDO, 0, 0)
def CanUndo(self):
return self.SendScintillaMessage(SCI_CANUNDO, 0, 0)
def CanRedo(self):
return self.SendScintillaMessage(SCI_CANREDO, 0, 0)
def Cut(self):
self.SendScintillaMessage(SCI_CUT, 0, 0)
def Copy(self):
self.SendScintillaMessage(SCI_COPY, 0, 0)
def Clear(self):
self.SendScintillaMessage(SCI_CLEAR, 0, 0)
def Paste(self):
self.SendScintillaMessage(SCI_PASTE, 0, 0)
def CanPaste(self):
return self.SendScintillaMessage(SCI_CANPASTE, 0, 0)
def SelectAll(self):
self.SendScintillaMessage(SCI_SELECTALL, 0, 0)
|
from math import sqrt
class Vector2D:
def __init__(self,x,y):
self.__x = x
self.__y = y
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
@x.setter
def x(self,new_x):
self.__x = new_x
@y.setter
def y(self,new_y):
self.__y = new_y
    def norme2(self):
        """Compute the Euclidean (2-)norm of the vector."""
        return sqrt(self.__x**2+self.__y**2)
    def addition(self,t):
        """Add vector t to this vector in place."""
        self.__x += t.x
        self.__y += t.y
    def soustraction(self,t):
        """Subtract vector t from this vector in place."""
        self.__x -= t.x
        self.__y -= t.y
    def produit_scalaire(self,t):
        """Return the dot product with vector t."""
        return self.__x * t.x + self.__y * t.y
    def colinearite(self,t):
        """Check collinearity with vector t (the 2D cross product is zero)."""
        return self.__x * t.y - self.__y * t.x == 0
    def unitaire(self):
        """Check whether the vector is a unit vector (norm equal to 1)."""
        return self.norme2()==1
v1 = Vector2D(2,5)
print(v1.norme2())
v2 = Vector2D(5,2)
v1.addition(v2)
print(v1.x, v1.y, sep ='\n')
v1.soustraction(v2)
print(v1.x, v1.y, sep ='\n')
print(v1.colinearite(v2))
print(v1.unitaire())
|
from unittest import TestCase
from dispatcher.configuration_helper import ConfigurationHelper
from .common.mock_resources import *
from dispatcher.packagemanager import memory_repo
from dispatcher.dispatcher_exception import DispatcherException
from inbm_lib.xmlhandler import XmlHandler
from mock import patch
import os
from typing import Any
TEST_SCHEMA_LOCATION = os.path.join(os.path.dirname(__file__),
'../../fpm-template/usr/share/dispatcher-agent/'
'manifest_schema.xsd')
GOOD_XML = '<?xml version="1.0" encoding="UTF-8"?>' \
'<manifest><type>config</type><config><cmd>load</cmd><configtype><load>' \
'<fetch>http://u.intel.com:8000/tc.xml</fetch></load>' \
'</configtype></config></manifest>'
TAR_XML = '<?xml version="1.0" encoding="UTF-8"?>' \
'<manifest><type>config</type><config><cmd>load</cmd><configtype><load>' \
'<fetch>http://u.intel.com:8000/tc.tar</fetch></load>' \
'</configtype></config></manifest>'
SIGN_TAR_XML = '<?xml version="1.0" encoding="UTF-8"?>' \
'<manifest><type>config</type><config><cmd>load</cmd><configtype><load>' \
'<fetch>http://u.intel.com:8000/tc.tar</fetch><signature>asgasd</signature></load>' \
'</configtype></config></manifest>'
GOOD_PARSED_XML = {'fetch': 'http://ubuntu.intel.com:8000/tc.xml'}
GOOD_TAR_PARSED_XML = {'fetch': 'http://ubuntu.intel.com:8000/tc.tar'}
GOOD_SIGN_TAR_PARSED_XML = {'fetch': 'http://ubuntu.intel.com:8000/tc.tar', 'signature': 'asgasd'}
class TestConfigurationHelper(TestCase):
def setUp(self) -> None:
self.mock_callbacks_obj = MockDispatcherCallbacks.build_mock_dispatcher_callbacks()
self.good = XmlHandler(GOOD_XML, is_file=False, schema_location=TEST_SCHEMA_LOCATION)
self.tar = XmlHandler(TAR_XML, is_file=False, schema_location=TEST_SCHEMA_LOCATION)
self.sign_tar = XmlHandler(SIGN_TAR_XML, is_file=False,
schema_location=TEST_SCHEMA_LOCATION)
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get', return_value=dummy_success)
@patch('inbm_lib.xmlhandler.XmlHandler.get_children', return_value=GOOD_PARSED_XML)
@patch('dispatcher.configuration_helper.validate_file_type')
def test_file_download_success(self, mock_validate_file, mock_xml, mock_fetch, mock_source):
try:
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
except DispatcherException:
self.fail("Dispatcher download raised DispatcherException unexpectedly!")
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get', return_value=dummy_failure)
@patch('inbm_lib.xmlhandler.XmlHandler.get_children', return_value=GOOD_PARSED_XML)
def test_file_download_fetch_fails(self, mock_xml, mock_fetch, mock_source):
with self.assertRaisesRegex(DispatcherException, "Configuration File Fetch Failed: {\"status\": 400, "
"\"message\": \"FAILED TO INSTALL\"}"):
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get', return_value=Result(404, "Not Found"))
def test_file_download_xml_fails(self, mock_get, mock_source):
with self.assertRaisesRegex(DispatcherException,
"Configuration File Fetch Failed: {\"status\": 404, "
"\"message\": \"Not Found\"}"):
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
@patch('dispatcher.configuration_helper.verify_source', side_effect=DispatcherException('Source verification failed'))
def test_source_verification_fails(self, mock_source):
with self.assertRaisesRegex(DispatcherException, 'Source verification failed'):
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get', return_value=dummy_success)
@patch('inbm_lib.xmlhandler.XmlHandler.get_children', return_value=GOOD_PARSED_XML)
@patch('dispatcher.configuration_helper.validate_file_type')
def test_conf_file_name_correct(self, mock_validate_file, mock_xml, mock_fetch, mock_source):
try:
conf = ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
except DispatcherException:
self.fail("Raised exception when not expected.")
self.assertEqual(conf, 'tc.xml')
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
@patch('dispatcher.configuration_helper.ConfigurationHelper._extract_files_from_tar')
@patch('dispatcher.configuration_helper.validate_file_type')
def test_tar_conf_filename_correct(self, mock_validate, mock_files, mock_xml, mock_fetch, mock_source):
mock_xml.return_value = GOOD_TAR_PARSED_XML
mock_fetch.return_value = dummy_success
mock_files.return_value = 'tc.xml'
try:
conf = ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.tar, memory_repo.MemoryRepo(""))
except DispatcherException:
self.fail("Raised exception when not expected.")
self.assertEqual(conf, 'tc.xml')
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
@patch('dispatcher.configuration_helper.ConfigurationHelper._extract_files_from_tar')
@patch('dispatcher.configuration_helper.validate_file_type')
@patch('dispatcher.configuration_helper.os.path.exists', return_value=True)
def test_tar_conf_with_pem_no_sign_fail(self, mock_valid_file, mock_validate, mock_files, mock_xml, mock_fetch, mock_source):
mock_xml.return_value = GOOD_TAR_PARSED_XML
mock_fetch.return_value = dummy_success
mock_files.return_value = 'tc.xml'
with self.assertRaisesRegex(DispatcherException,
'Configuration Load Aborted: Signature is required to proceed with the update.'):
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.tar, memory_repo.MemoryRepo(""))
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
@patch('dispatcher.configuration_helper.ConfigurationHelper._extract_files_from_tar')
    @patch('dispatcher.configuration_helper.verify_signature', return_value=True)
@patch('dispatcher.configuration_helper.validate_file_type')
def test_tar_file_download_success(self, mock_validate, mock_sign, mock_files, mock_xml, mock_fetch, mock_source):
mock_xml.return_value = GOOD_SIGN_TAR_PARSED_XML
mock_fetch.return_value = dummy_success
mock_files.return_value = 'tc.xml'
try:
conf = ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.sign_tar, memory_repo.MemoryRepo(""))
self.assertEqual(conf, 'tc.xml')
except DispatcherException:
self.fail("Raised exception when not expected.")
@patch("dispatcher.packagemanager.memory_repo.MemoryRepo.delete")
@patch('dispatcher.configuration_helper.verify_source')
@patch('dispatcher.configuration_helper.get')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
@patch('dispatcher.configuration_helper.ConfigurationHelper.parse_url', return_value='')
@patch('dispatcher.configuration_helper.validate_file_type')
@patch('dispatcher.configuration_helper.os.path.exists', return_value=True)
def test_signature_check_fails(self, mock_is_file, mock_validate, mock_parse, mock_children, mock_get, mock_source, mock_delete):
mock_get.return_value = Result(status=200, message="OK")
with self.assertRaisesRegex(DispatcherException, 'Configuration Load Aborted. Signature check failed'):
ConfigurationHelper(self.mock_callbacks_obj).download_config(
self.good, memory_repo.MemoryRepo(""))
mock_delete.assert_called_once()
@patch('inbm_common_lib.shell_runner.PseudoShellRunner.run')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
def test_extract_files_from_tar(self, mock_xml: Any, mock_runner: Any) -> None:
mock_xml.return_value = GOOD_PARSED_XML
mock_runner.return_value = ('tc.conf', '', 0)
conf_file = ConfigurationHelper(
self.mock_callbacks_obj)._extract_files_from_tar(
'/var/cache/manageability/repository/tc.tar')
self.assertEqual(conf_file, 'tc.conf')
@patch('inbm_common_lib.shell_runner.PseudoShellRunner.run')
@patch('inbm_lib.xmlhandler.XmlHandler.get_children')
def test_extract_files_from_tar_file_fail(self, mock_xml, mock_runner):
mock_xml.return_value = GOOD_PARSED_XML
mock_runner.return_value = ('tc.txt', '', 0)
with self.assertRaisesRegex(DispatcherException, 'Configuration File Load Error: Invalid File sent. error:'):
ConfigurationHelper(self.mock_callbacks_obj)._extract_files_from_tar(
'/var/cache/manageability/repository/tc.tar')
|
#!/usr/bin/env python3
"""A simple pythonic unit conversion API.
Title:
Unit Converter
Description:
Develop a program that converts various units between one another.
The user enters the type of unit being entered,
the type of unit they want to convert to
and then the value.
The program will then make the conversion.
"""
class Unit:
def __init__(self, value):
if isinstance(value, self.__class__.__bases__[0]): # if unit is the same
self.value = self.convert_from_base(value.convert_to_base().value).value
elif isinstance(value, Unit): # if unit is not the same
raise TypeError(f"Unit {value.__class__.__name__} can not be converted to"
f" {self.__class__.__name__}")
else: # if value is not a unit
self.value = value
def convert_to_base(self):
pass
def convert_from_base(self, value):
pass
def __repr__(self) -> str:
return f"{str(self.value)} {unit_abbreviations[type(self)]}"
class Length(Unit):
_unit_in_base = 1
def convert_to_base(self):
"""Convert the current value to the base unit."""
return Metre(self.value * self._unit_in_base)
def convert_from_base(self, value: int):
"""Convert the input value to this unit."""
return self.__class__(value / self._unit_in_base)
def __add__(self, other):
return Metre(self.convert_to_base().value + other.convert_to_base().value)
def __sub__(self, other):
return Metre(self.convert_to_base().value - other.convert_to_base().value)
class Mass(Unit):
_unit_in_base = 1
def convert_to_base(self):
"""Convert the current value to the base unit."""
return Kilogram(self.value * self._unit_in_base)
def convert_from_base(self, value: int):
"""Convert the input value to this unit."""
return self.__class__(value / self._unit_in_base)
def __add__(self, other):
return Kilogram(self.convert_to_base().value + other.convert_to_base().value)
def __sub__(self, other):
return Kilogram(self.convert_to_base().value - other.convert_to_base().value)
class Time(Unit):
_unit_in_base = 1
def convert_to_base(self):
"""Convert the current value to the base unit."""
return Second(self.value * self._unit_in_base)
def convert_from_base(self, value: int):
"""Convert the input value to this unit."""
return self.__class__(value / self._unit_in_base)
def __add__(self, other):
return Second(self.convert_to_base().value + other.convert_to_base().value)
def __sub__(self, other):
return Second(self.convert_to_base().value - other.convert_to_base().value)
class Temperature(Unit):
pass
class Metre(Length):
_unit_in_base = 1
class Inch(Length):
_unit_in_base = 0.0254
class Foot(Length):
_unit_in_base = 0.3048
class Yard(Length):
_unit_in_base = 0.9144
class Mile(Length):
_unit_in_base = 1609.344
class Kilogram(Mass):
_unit_in_base = 1
class Pound(Mass):
_unit_in_base = 0.45359237
class Ounce(Mass):
_unit_in_base = 0.028349523125
class Second(Time):
_unit_in_base = 1
class Minute(Time):
_unit_in_base = 60
class Hour(Time):
_unit_in_base = 3600
class Day(Time):
_unit_in_base = 86400
class Kelvin(Temperature):
def convert_to_base(self):
"""Convert the current value to the base unit."""
return Kelvin(self.value)
def convert_from_base(self, value):
"""Convert the input value to this unit."""
return Kelvin(value)
class Celsius(Temperature):
    def convert_to_base(self):
        """Convert the current value to the base unit."""
        return Kelvin(self.value + 273.15)
def convert_from_base(self, value):
"""Convert the input value to this unit."""
return Celsius(value - 273.15)
class Fahrenheit(Temperature):
    def convert_to_base(self):
        """Convert the current value to the base unit."""
        return Kelvin((self.value + 459.67) * (5/9))
def convert_from_base(self, value):
"""Convert the input value to this unit."""
return Fahrenheit(value / (5/9) - 459.67)
unit_abbreviations = {
Metre: "m",
Inch: "in",
Foot: "ft",
Yard: "yd",
Mile: "mi",
Kilogram: "kg",
Pound: "lb",
Ounce: "oz",
Second: "s",
Minute: "min",
Hour: "h",
Day: "d",
Kelvin: "K",
Celsius: "°C",
Fahrenheit: "°F"
}
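# A minimal programmatic sketch (illustrative only; the interactive flow below is the
# original entry point). Conversion works by passing one Unit instance into another
# Unit's constructor; mixing kinds (e.g. Metre and Kilogram) raises TypeError.
def _demo_conversions():
    print(Foot(Metre(1)))                   # roughly "3.2808... ft"
    print(Celsius(Fahrenheit(212)))         # roughly "100.0 °C"
    print(Kilogram(Pound(10)) + Ounce(16))  # mass addition always returns a Kilogram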
def _start_interactively():
unit_of_abbreviation = {
"m": Metre,
"in": Inch,
"ft": Foot,
"yd": Yard,
"mi": Mile,
"kg": Kilogram,
"lb": Pound,
"oz": Ounce,
"s": Second,
"min": Minute,
"h": Hour,
"d": Day,
"K": Kelvin,
"°C": Celsius,
"°F": Fahrenheit
}
while True:
number_to_convert = input("Please type in the number you want to convert: ")
convert_from = input("Please type in the abbreviation for the unit of the number: ")
convert_to = input("Please type in the abbreviation for the unit "
"you want the number to be converted to: ")
try:
            if convert_from not in unit_of_abbreviation or convert_to not in unit_of_abbreviation:
                print("The conversion failed because the units are wrong. "
                      "Please try again!")
else:
print(f"{number_to_convert} {convert_from} is",
unit_of_abbreviation[convert_to](
unit_of_abbreviation[convert_from](float(number_to_convert))
))
except TypeError:
print("The conversion failed because the units are not of the same kind. "
"Please try again!")
except ValueError:
print("The conversion failed because the number you entered is invalid. "
"Please try again!")
finally:
print("")
if __name__ == "__main__":
_start_interactively()
|
"""Unittests for the deepblink.cli module."""
# pylint: disable=missing-function-docstring
from unittest import mock
import argparse
import logging
import os
import numpy as np
import pandas as pd
from deepblink.cli._config import HandleConfig
from deepblink.cli._create import HandleCreate
from deepblink.cli._main import arg_parser
LOGGER = logging.Logger("None")
# TODO cover more in-depth functionality like commands etc.
def test_parse_args():
parser = arg_parser()
assert isinstance(parser, argparse.ArgumentParser)
with mock.patch("sys.argv", [""]):
assert isinstance(parser.parse_args(), argparse.Namespace)
def test_config():
temp_file = "test"
full_file = os.path.abspath(temp_file + ".yaml")
try:
handler = HandleConfig(arg_name=temp_file, arg_raw=False, logger=LOGGER)
handler()
assert os.path.exists(full_file)
finally:
os.remove(full_file)
# TODO image_label_lists, crop_image, get_file_lists, splits, save_npz
def test_create():
handler = HandleCreate(
arg_input="None",
arg_labels="None",
arg_name="None",
arg_size=0,
arg_testsplit=0,
arg_validsplit=0,
arg_minspots=0,
logger=LOGGER,
)
labels = [
os.path.join(os.path.dirname(__file__), "./data/input_manual.csv"),
os.path.join(os.path.dirname(__file__), "./data/input_trackmate_post7.csv"),
os.path.join(os.path.dirname(__file__), "./data/input_trackmate_pre7.csv"),
]
max_size = 100
image = np.zeros((max_size, max_size))
for label in labels:
df = pd.read_csv(label, index_col=0)
df = handler.convert_labels(image, df)
assert df.columns.to_list() == ["r", "c"]
assert df.dtypes.to_list() == [np.float64, np.float64]
assert df["r"].min() >= 0
assert df["c"].min() >= 0
assert df["r"].max() <= max_size
assert df["c"].max() <= max_size
|
"""
Gregory Way 2018
Interpret Compression
5.analyze-weights/interpret-compression.py
Track and visualize gene set activity across compressed features
Usage:
python interpret-compression.py
Output:
Several geneset enrichment scores and many figures describing enrichment across features
"""
import os
import subprocess
dataset_collection_tuples = [
("TCGA", "GpH"),
("TCGA", "GpXCELL"),
("TCGA", "GpC4CM"),
("TCGA", "GpC2CPREACTOME"),
("TCGA", "GpC3TFT"),
("TARGET", "GpH"),
("TARGET", "GpXCELL"),
("TARGET", "GpC4CM"),
("GTEX", "GpXCELL"),
]
gmt_name_dict = {
"gpc1": "c1.all.v6.1.entrez.gmt",
"gpc2cpg": "c2.cgp.v6.1.entrez.gmt",
"gpc2cpreactome": "c2.cp.reactome.v6.1.entrez.gmt",
"gpc3mir": "c3.mir.v6.1.entrez.gmt",
"gpc3tft": "c3.tft.v6.1.entrez.gmt",
"gpc4cgn": "c4.cgn.v6.1.entrez.gmt",
"gpc4cm": "c4.cm.v6.1.entrez.gmt",
"gpc5bp": "c5.bp.v6.1.entrez.gmt",
"gpc5cc": "c5.cc.v6.1.entrez.gmt",
"gpc5mf": "c5.mf.v6.1.entrez.gmt",
"gpc6": "c6.all.v6.1.entrez.gmt",
"gpc7": "c7.all.v6.1.entrez.gmt",
"gph": "h.all.v6.1.entrez.gmt",
"gpxcell": "xcell_all_entrez.gmt",
}
for dataset, metaedge in dataset_collection_tuples:
metaedge_lower = metaedge.lower()
gene_set_dir = os.path.join("results", dataset.lower(), metaedge_lower, 'signal')
gmt_name = gmt_name_dict[metaedge_lower]
geneset_command = [
"python",
"geneset_tracking.py",
"--dataset",
dataset,
"--metaedge",
metaedge,
]
visualize_command = [
"Rscript",
"--vanilla",
"visualize_genesets.R",
"--dataset",
dataset,
"--gmt_name",
gmt_name,
"--metaedge",
metaedge,
"--gene_set_dir",
gene_set_dir,
"--save_top_results",
]
print(geneset_command)
subprocess.call(geneset_command)
print(visualize_command)
subprocess.call(visualize_command)
|
from typing import Optional
from pydantic import BaseModel
class UserBase(BaseModel):
first_name: Optional[str] = None
class UserBaseInDB(UserBase):
    id: Optional[int] = None
username: Optional[str] = None
email: Optional[str] = None
is_active: Optional[bool] = True
is_superuser: Optional[bool] = False
class Config:
orm_mode = True
class UserCreate(UserBaseInDB):
""" Свойства для получения через API при создании из админки
"""
username: str
email: str
password: str
first_name: str
class UserCreateInRegistration(BaseModel):
""" Свойства для получения через API при регистрации
"""
username: str
email: str
password: str
first_name: str
class Config:
orm_mode = True
class UserUpdate(UserBaseInDB):
""" Properties to receive via API on update
"""
password: Optional[str] = None
class User(UserBaseInDB):
""" Additional properties to return via API
"""
pass
class UserInDB(UserBaseInDB):
""" Additional properties stored in DB
"""
password: str
class UserPublic(UserBase):
""" For public profile user
"""
id: int
class Config:
orm_mode = True
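# Illustrative example (not part of the original schemas): validating a registration payload.
#   UserCreateInRegistration(username="alice", email="alice@example.com",
#                            password="secret", first_name="Alice")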
|
#!/usr/bin/env python
#
# Copyright 2016 Marcus Furlong <furlongm@gmail.com>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see <http://www.gnu.org/licenses/>
#
# zypp system plugin for patchman
#
import os
import logging
from zypp_plugin import Plugin
class MyPlugin(Plugin):
def PLUGINBEGIN(self, headers, body):
logging.info("PLUGINBEGIN")
logging.debug("headers: {0!s}".format(headers))
self.ack()
def PACKAGESETCHANGED(self, headers, body):
logging.info("PACKAGESETCHANGED")
logging.debug("headers: {0!s}".format(headers))
print('patchman: sending data')
servicecmd = '/usr/sbin/patchman-client'
args = '-n'
        command = '{0!s} {1!s} > /dev/null'.format(servicecmd, args)
os.system(command)
self.ack()
def PLUGINEND(self, headers, body):
logging.info("PLUGINEND")
logging.debug("headers: {0!s}".format(headers))
self.ack()
plugin = MyPlugin()
plugin.main()
|
"""
Data preproc functions:
adjust_to_see: adjust image to better visualize (rotate and transpose)
augmentation: apply variations to a list of images
normalization: apply normalization and variations on images (if required)
preprocess: main function for preprocess.
Make the image:
illumination_compensation: apply illumination regularitation
remove_cursive_style: remove cursive style from image (if necessary)
sauvola: apply sauvola binarization
text_standardize: preprocess and standardize sentence
"""
import re
import os
import cv2
import html
import string
import numpy as np
def adjust_to_see(img):
"""Rotate and transpose to image visualize (cv2 method or jupyter notebook)"""
(h, w) = img.shape[:2]
(cX, cY) = (w // 2, h // 2)
M = cv2.getRotationMatrix2D((cX, cY), -90, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
img = cv2.warpAffine(img, M, (nW + 1, nH + 1))
img = cv2.warpAffine(img.transpose(), M, (nW, nH))
return img
def augmentation(imgs,
rotation_range=0,
scale_range=0,
height_shift_range=0,
width_shift_range=0,
dilate_range=1,
erode_range=1):
"""Apply variations to a list of images (rotate, width and height shift, scale, erode, dilate)"""
imgs = imgs.astype(np.float32)
_, h, w = imgs.shape
dilate_kernel = np.ones((int(np.random.uniform(1, dilate_range)),), np.uint8)
erode_kernel = np.ones((int(np.random.uniform(1, erode_range)),), np.uint8)
height_shift = np.random.uniform(-height_shift_range, height_shift_range)
rotation = np.random.uniform(-rotation_range, rotation_range)
scale = np.random.uniform(1 - scale_range, 1)
width_shift = np.random.uniform(-width_shift_range, width_shift_range)
trans_map = np.float32([[1, 0, width_shift * w], [0, 1, height_shift * h]])
rot_map = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]
rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]
affine_mat = rot_map_aff.dot(trans_map_aff)[:2, :]
for i in range(len(imgs)):
imgs[i] = cv2.warpAffine(imgs[i], affine_mat, (w, h), flags=cv2.INTER_NEAREST, borderValue=255)
imgs[i] = cv2.erode(imgs[i], erode_kernel, iterations=1)
imgs[i] = cv2.dilate(imgs[i], dilate_kernel, iterations=1)
return imgs
def normalization(imgs):
"""Normalize list of images"""
imgs = np.asarray(imgs).astype(np.float32)
imgs = np.expand_dims(imgs / 255, axis=-1)
return imgs
"""
Preprocess methodology based on:
H. Scheidl, S. Fiel and R. Sablatnig,
Word Beam Search: A Connectionist Temporal Classification Decoding Algorithm, in
16th International Conference on Frontiers in Handwriting Recognition, pp. 256-258, 2018.
"""
def preprocess(img, input_size):
"""Make the process with the `input_size` to the scale resize"""
def imread(path):
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
u, i = np.unique(np.array(img).flatten(), return_inverse=True)
background = int(u[np.argmax(np.bincount(i))])
return img, background
if isinstance(img, str):
img, bg = imread(img)
if isinstance(img, tuple):
image, boundbox = img
img, bg = imread(image)
for i in range(len(boundbox)):
if isinstance(boundbox[i], float):
total = len(img) if i < 2 else len(img[0])
boundbox[i] = int(total * boundbox[i])
else:
boundbox[i] = int(boundbox[i])
img = np.asarray(img[boundbox[0]:boundbox[1], boundbox[2]:boundbox[3]], dtype=np.uint8)
wt, ht, _ = input_size
h, w = np.asarray(img).shape
f = max((w / wt), (h / ht))
new_size = (max(min(wt, int(w / f)), 1), max(min(ht, int(h / f)), 1))
img = cv2.resize(img, new_size)
target = np.ones([ht, wt], dtype=np.uint8) * bg
target[0:new_size[1], 0:new_size[0]] = img
img = cv2.transpose(target)
return img
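# A minimal usage sketch (hypothetical file name and input size, added for illustration):
#   img = preprocess("data/sample_line.png", input_size=(1024, 128, 1))
#   batch = augmentation(np.array([img]), rotation_range=1.5, dilate_range=3, erode_range=5)
#   batch = normalization(batch)   # final shape (1, 1024, 128, 1), values scaled to [0, 1]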
"""
DeepSpell based text cleaning process.
Tal Weiss.
Deep Spelling.
Medium: https://machinelearnings.co/deep-spelling-9ffef96a24f6#.2c9pu8nlm
Github: https://github.com/MajorTal/DeepSpell
"""
RE_DASH_FILTER = re.compile(r'[\-\˗\֊\‐\‑\‒\–\—\⁻\₋\−\﹣\-]', re.UNICODE)
RE_APOSTROPHE_FILTER = re.compile(r"&#39;|[ʼ՚'‘’‛❛❜ߴߵ`‵´ˊˋ{}{}{}{}{}{}{}{}{}]".format(
    chr(768), chr(769), chr(832), chr(833), chr(2387),
    chr(5151), chr(5152), chr(65344), chr(8242)), re.UNICODE)
RE_RESERVED_CHAR_FILTER = re.compile(r'[¶¤«»]', re.UNICODE)
RE_LEFT_PARENTH_FILTER = re.compile(r'[\(\[\{\⁽\₍\❨\❪\﹙\(]', re.UNICODE)
RE_RIGHT_PARENTH_FILTER = re.compile(r'[\)\]\}\⁾\₎\❩\❫\﹚\)]', re.UNICODE)
RE_BASIC_CLEANER = re.compile(r'[^\w\s{}]'.format(re.escape(string.punctuation)), re.UNICODE)
LEFT_PUNCTUATION_FILTER = """!%&),.:;<=>?@\\]^_`|}~"""
RIGHT_PUNCTUATION_FILTER = """"(/<=>@[\\^_`{|~"""
NORMALIZE_WHITESPACE_REGEX = re.compile(r'[^\S\n]+', re.UNICODE)
def text_standardize(text):
"""Organize/add spaces around punctuation marks"""
if text is None:
return ""
text = html.unescape(text).replace("\\n", "").replace("\\t", "")
text = RE_RESERVED_CHAR_FILTER.sub("", text)
text = RE_DASH_FILTER.sub("-", text)
text = RE_APOSTROPHE_FILTER.sub("'", text)
text = RE_LEFT_PARENTH_FILTER.sub("(", text)
text = RE_RIGHT_PARENTH_FILTER.sub(")", text)
text = RE_BASIC_CLEANER.sub("", text)
text = text.lstrip(LEFT_PUNCTUATION_FILTER)
text = text.rstrip(RIGHT_PUNCTUATION_FILTER)
text = text.translate(str.maketrans({c: f" {c} " for c in string.punctuation}))
text = NORMALIZE_WHITESPACE_REGEX.sub(" ", text.strip())
return text
def generate_kaldi_assets(output_path, dtgen, predicts):
from kaldiio import WriteHelper
# get data and ground truth lists
ctc_TK, space_TK, ground_truth = "<ctc>", "<space>", []
for pt in dtgen.partitions + ['test']:
for x in dtgen.dataset[pt]['gt']:
ground_truth.append([space_TK if y == " " else y for y in list(f" {x} ")])
# define dataset size and default tokens
train_size = dtgen.size['train'] + dtgen.size['valid'] + dtgen.size['test']
# get chars list and save with the ctc and space tokens
chars = list(dtgen.tokenizer.chars) + [ctc_TK]
chars[chars.index(" ")] = space_TK
kaldi_path = os.path.join(output_path, "kaldi")
os.makedirs(kaldi_path, exist_ok=True)
with open(os.path.join(kaldi_path, "chars.lst"), "w") as lg:
lg.write("\n".join(chars))
ark_file_name = os.path.join(kaldi_path, "conf_mats.ark")
scp_file_name = os.path.join(kaldi_path, "conf_mats.scp")
# save ark and scp file (laia output/kaldi input format)
with WriteHelper(f"ark,scp:{ark_file_name},{scp_file_name}") as writer:
for i, item in enumerate(predicts):
writer(str(i + train_size), item)
# save ground_truth.lst file with sparse sentences
with open(os.path.join(kaldi_path, "ground_truth.lst"), "w") as lg:
for i, item in enumerate(ground_truth):
lg.write(f"{i} {' '.join(item)}\n")
# save indexes of the train/valid and test partitions
with open(os.path.join(kaldi_path, "ID_train.lst"), "w") as lg:
range_index = [str(i) for i in range(0, train_size)]
lg.write("\n".join(range_index))
with open(os.path.join(kaldi_path, "ID_test.lst"), "w") as lg:
range_index = [str(i) for i in range(train_size, train_size + dtgen.size['test'])]
lg.write("\n".join(range_index))
|
from typing import Dict, List, Optional
import os
import random
import torch
import numpy as np
import pandas as pd
from PIL import Image
from .dataset import SegmentationDataset
from theseus.segmentation.augmentations.mosaic import Mosaic
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger('main')
class CSVDatasetWithMosaic(SegmentationDataset):
r"""CSVDataset multi-labels segmentation dataset
Reads in .csv file with structure below:
filename | label
---------- | -----------
<img1>.jpg | <mask1>.jpg
image_dir: `str`
path to directory contains images
mask_dir: `str`
path to directory contains masks
transform: Optional[List]
transformatin functions
"""
def __init__(
self,
image_dir: str,
mask_dir: str,
csv_path: str,
txt_classnames: str,
mosaic_size: int,
mosaic_prob: float = 0.3,
transform: Optional[List] = None,
**kwargs):
super(CSVDatasetWithMosaic, self).__init__(**kwargs)
self.image_dir = image_dir
self.mask_dir = mask_dir
self.csv_path = csv_path
self.transform = transform
self.txt_classnames = txt_classnames
self.mosaic = Mosaic(mosaic_size, mosaic_size)
self.mosaic_size = mosaic_size
self.mosaic_prob = mosaic_prob
self._load_data()
def load_mosaic(self, index:int):
indexes = [index] + [random.randint(0, len(self.fns) - 1) for _ in range(3)]
images_list = []
masks_list = []
for index in indexes:
img_path, label_path = self.fns[index]
img = Image.open(img_path).convert('RGB')
img = np.array(img)
mask = self._load_mask(label_path)
images_list.append(img)
masks_list.append(mask)
result_image, result_mask = self.mosaic(
images_list,
masks_list)
return result_image, result_mask
def __getitem__(self, idx: int) -> Dict:
"""
Get one item
"""
if random.uniform(0,1) <= self.mosaic_prob:
img, mask = self.load_mosaic(idx)
width, height = self.mosaic_size, self.mosaic_size
basename = None
else:
img_path, label_path = self.fns[idx]
img = Image.open(img_path).convert('RGB')
width, height = img.width, img.height
img = np.array(img)
mask = self._load_mask(label_path)
basename = os.path.basename(img_path)
if self.transform is not None:
item = self.transform(image = img, mask = mask)
img, mask = item['image'], item['mask']
target = {}
target['mask'] = mask
return {
"input": img,
'target': target,
'img_name': basename,
'ori_size': [width, height]
}
def _load_data(self):
"""
Read data from csv and load into memory
"""
with open(self.txt_classnames, 'r') as f:
self.classnames = f.read().splitlines()
# Mapping between classnames and indices
for idx, classname in enumerate(self.classnames):
self.classes_idx[classname] = idx
self.num_classes = len(self.classnames)
df = pd.read_csv(self.csv_path)
for idx, row in df.iterrows():
img_name, mask_name = row
image_path = os.path.join(self.image_dir,img_name)
mask_path = os.path.join(self.mask_dir, mask_name)
self.fns.append([image_path, mask_path])
def _calculate_classes_dist(self):
LOGGER.text("Calculating class distribution...", LoggerObserver.DEBUG)
self.classes_dist = []
for _, mask_path in self.fns:
mask = self._load_mask(mask_path)
unique_ids = np.unique(mask).tolist()
            # Heuristic: higher class indices occur less often in the data, so use the
            # largest class id present in the mask as this sample's representative label.
            label = max(unique_ids)
self.classes_dist.append(label)
return self.classes_dist
def _load_mask(self, label_path):
mask = Image.open(label_path).convert('RGB')
mask = np.array(mask)[:,:,::-1] # (H,W,3)
mask = np.argmax(mask, axis=-1) # (H,W) with each pixel value represent one class
return mask
def _encode_masks(self, masks):
"""
Input masks from _load_mask(), but in shape [B, H, W]
Output should be one-hot encoding of segmentation masks [B, NC, H, W]
"""
one_hot = torch.nn.functional.one_hot(masks.long(), num_classes=self.num_classes) # (B,H,W,NC)
one_hot = one_hot.permute(0, 3, 1, 2) # (B,NC,H,W)
return one_hot.float()
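# A minimal construction sketch (hypothetical paths and values, added for illustration):
#   dataset = CSVDatasetWithMosaic(
#       image_dir="data/images", mask_dir="data/masks",
#       csv_path="data/train.csv", txt_classnames="data/classes.txt",
#       mosaic_size=512, mosaic_prob=0.3, transform=None)
#   sample = dataset[0]  # dict with "input", "target", "img_name" and "ori_size"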
|