from server import app
if __name__ == "__main__":
print('WSGI server running at localhost:4000')
app.run(host='localhost', port=4000)
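# Illustrative smoke test once the server is running (assumes server.app defines a
# route at "/"):
#   curl http://localhost:4000/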
|
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
sys.path.append("src/envrd")
import audio
class AudioObject(QObject, audio.SpeechRecognizer):
detected_phrase = pyqtSignal(str)
transcribed_phrase = pyqtSignal(str)
error = pyqtSignal()
def __init__(self, keyphrases : dict, *args, parent=None, **kwargs):
super().__init__(parent, keyphrases=keyphrases)
print("audio init")
def emitPhrase(self, phrase):
self.detected_phrase.emit(phrase)
# @desc
# emits a string containing the most recently transcribed phrase
def sendCurrentPhrase(self):
        while self.current_phrase is None:  # busy-wait until a phrase is available
continue
self.transcribed_phrase.emit(self.current_phrase)
def speechHandler(self):
self.listenForPhrases()
|
import numpy as np
def create_label_map(num_classes=19):
name_label_mapping = {
'unlabeled': 0, 'outlier': 1, 'car': 10, 'bicycle': 11,
'bus': 13, 'motorcycle': 15, 'on-rails': 16, 'truck': 18,
'other-vehicle': 20, 'person': 30, 'bicyclist': 31,
'motorcyclist': 32, 'road': 40, 'parking': 44,
'sidewalk': 48, 'other-ground': 49, 'building': 50,
'fence': 51, 'other-structure': 52, 'lane-marking': 60,
'vegetation': 70, 'trunk': 71, 'terrain': 72, 'pole': 80,
'traffic-sign': 81, 'other-object': 99, 'moving-car': 252,
'moving-bicyclist': 253, 'moving-person': 254, 'moving-motorcyclist': 255,
'moving-on-rails': 256, 'moving-bus': 257, 'moving-truck': 258,
'moving-other-vehicle': 259
}
for k in name_label_mapping:
name_label_mapping[k] = name_label_mapping[k.replace('moving-', '')]
train_label_name_mapping = {
0: 'car', 1: 'bicycle', 2: 'motorcycle', 3: 'truck', 4:
'other-vehicle', 5: 'person', 6: 'bicyclist', 7: 'motorcyclist',
8: 'road', 9: 'parking', 10: 'sidewalk', 11: 'other-ground',
12: 'building', 13: 'fence', 14: 'vegetation', 15: 'trunk',
16: 'terrain', 17: 'pole', 18: 'traffic-sign'
}
label_map = np.zeros(260)+num_classes
for i in range(num_classes):
cls_name = train_label_name_mapping[i]
label_map[name_label_mapping[cls_name]] = min(num_classes,i)
return label_map.astype(np.int64)
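# Illustrative usage (not part of the original function): indexing the returned map
# with raw SemanticKITTI label IDs yields the 19 train-class IDs, with num_classes (19)
# acting as the "ignore" value for classes outside the training set:
#   label_map = create_label_map()
#   label_map[[0, 10, 40]]   # -> array([19, 0, 8])  i.e. unlabeled, car, road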
|
# Generated by Django 2.0.3 on 2018-03-17 18:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20180314_2252'),
]
operations = [
migrations.AlterField(
model_name='user',
name='image',
field=models.ImageField(default='', height_field='height_field', upload_to='avatars/', width_field='width_field'),
),
]
|
#!/usr/bin/env python
# !!!!!!!! density.out is wrong !!!!
# data extraction is correct, but the assignment of each
# data point to x,y,z location is wrong
# avgdens[x][y][z] is correct but x,y,z run backwards
import h5py
import numpy as np
def grabDensity(h5file):
# get density
    f = h5py.File(h5file, 'r')
for name,quantity in f.items():
if name.startswith('density'):
density = quantity.get("value")[:]
# end if name.startswith
# end for name,quantity
f.close()
return density
# end def grabDensity
import xml.etree.ElementTree as ET
def grabLimits(inputfile):
tree = ET.parse(inputfile)
root = tree.getroot()
for section in root:
if section.tag=='hamiltonian':
for ham in section:
if ham.attrib['name']=='density':
xmin=ham.attrib['x_min']
xmax=ham.attrib['x_max']
ymin=ham.attrib['y_min']
ymax=ham.attrib['y_max']
zmin=ham.attrib['z_min']
zmax=ham.attrib['z_max']
delta=ham.attrib['delta']
# end if ham==density
# end for ham in section
# end if section.tag
# end for section in root
return [xmin,xmax,ymin,ymax,zmin,zmax,delta]
# end def grabLimits
import argparse
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Plot proton density')
parser.add_argument('XML', type=str, default=None, help="input XML")
parser.add_argument("DMC", type=str, help="h5 file with DMC density")
parser.add_argument('-e','--equil', type=int, help="number of equilibration steps")
args = parser.parse_args()
# get density grid parameters
limits = grabLimits(args.XML)
xmin,xmax,ymin,ymax,zmin,zmax = map(float,limits[:-1])
d1,d2,d3 = map(float,limits[-1].split())
dx = (xmax-xmin)/int(1/d1)
dy = (ymax-ymin)/int(1/d2)
dz = (zmax-zmin)/int(1/d3)
# get density on grid
density = grabDensity(args.DMC)[args.equil:]
avgdens = density.mean(axis=0)
#avgdens = avgdens.transpose()
print xmin,xmax,ymin,ymax,zmin,zmax
print (xmax-xmin)/dx,(ymax-ymin)/dy,(zmax-zmin)/dz
    print 'dumping to file'
f = open('density.dat','w')
for i in range(len(avgdens)):
x = i*dx+xmin+dx/2.0
for j in range(len(avgdens[0])):
y = j*dy%(ymax-ymin)+ymin+dy/2.0
for k in range(len(avgdens[0][0])):
z = k*dz%(zmax-zmin)+zmin+dz/2.0
f.write( "%2.3f %2.3f %2.3f %1.5f\n" % (x,y,z,avgdens[i][j][k]) )
# end for k
# end for j
# end for i
f.close()
# end __main__
|
import os
from netdice.input_parser import InputParser
from netdice.util import project_root_dir
def get_test_input_file(topo_name: str):
return os.path.join(project_root_dir, "tests", "inputs", topo_name)
def get_paper_problem_file():
return get_test_input_file("paper_example.json")
def get_paper_problem():
return InputParser(get_paper_problem_file()).get_problems()[0]
|
from django.contrib import admin
from .models import *
admin.site.register(Project)
admin.site.register(ProjectList)
admin.site.register(ListItem)
admin.site.register(ItemDetail)
|
import pickle
import numpy as np
import torch
class RLModel:
def __init__(self, observation_def, action_space, train=False, training_comm=(None, None)):
# observation_def -- list (name, tuple (shape, dtype))
self.observation_def = observation_def
self.action_space = action_space
self.train = train
self.input_queue, self.output_queue = None, None
if self.train:
training_comm[0].put(pickle.loads(pickle.dumps(self))) # HACK
self.input_queue, self.output_queue = training_comm
else:
import self_play
import games.nethack
checkpoint = torch.load('/checkpoints/nethack/2021-10-08--16-13-24/model.checkpoint')
config = games.nethack.MuZeroConfig(rl_model=self)
self.inference_iterator = self_play.SelfPlayNoRay(checkpoint, lambda *a: None, config, 0) \
.play_game_generator(0, 0, False, config.opponent, 0)
assert next(self.inference_iterator) is None
self.is_first_iteration = True
# def encode_observation(self, observation):
# assert sorted(observation.keys()) == sorted(self.observation_def.keys())
# ret = []
# for key, (shape, dtype) in self.observation_def:
# val = observation[key]
# assert val.shape == shape, (val.shape, shape)
# ret.append(np.array(list(val.reshape(-1).astype(dtype).tobytes()), dtype=np.uint8))
# ret = np.concatenate(ret)
# return ret
def encode_observation(self, observation):
vals = []
hw_shape = None
for key, (shape, dtype) in self.observation_def:
vals.append(observation[key])
if hw_shape is not None and len(shape) > 1:
if len(shape) == 2:
assert hw_shape == shape, (hw_shape, shape)
elif len(shape) == 3:
assert hw_shape == shape[1:], (hw_shape, shape)
else:
assert 0, hw_shape
if len(shape) > 1:
if len(shape) == 2:
hw_shape = shape
elif len(shape) == 3:
hw_shape = shape[1:]
else:
assert 0
vals = [(
val.reshape(val.shape[0], *hw_shape) if len(val.shape) == 3 else
val.reshape(1, *val.shape) if len(val.shape) == 2 else
val.reshape(val.shape[0], 1, 1).repeat(hw_shape[0], 1).repeat(hw_shape[1], 2)
).astype(np.float32) for val in vals]
return np.concatenate(vals, 0)
def zero_observation(self):
ret = {}
for key, (shape, dtype) in self.observation_def:
ret[key] = np.zeros(shape=shape, dtype=dtype)
return ret
def observation_shape(self):
return self.encode_observation(self.zero_observation()).shape
# def decode_observation(self, data):
# ret = {}
# for key, (shape, dtype) in self.observation_def:
# arr = np.zeros(shape=shape, dtype=dtype)
# s = len(arr.tobytes())
# ret[key] = np.frombuffer(bytes(data[:s]), dtype=np.dtype).reshape(shape)
# data = data[s:]
# assert len(data) == 0
# return ret
def choose_action(self, agent, observation, legal_actions):
        assert len(legal_actions) > 0
        assert all(map(lambda action: action in self.action_space, legal_actions))
legal_actions = [self.action_space.index(action) for action in legal_actions]
if self.train:
self.input_queue.put((observation, legal_actions, agent.score))
action_id = self.output_queue.get()
if action_id is None:
raise KeyboardInterrupt()
else:
action_id = self.inference_iterator.send((self.encode_observation(observation), 0, False, 0, legal_actions))
assert action_id in legal_actions
return self.action_space[action_id]
|
import sqlite3
#import pwd
myemp=99999
while myemp != 0:
myemp = int(input("Enter Employee Id : "))
if myemp == 0:
break
myfname = input("Enter First Name : ")
mylname = input("Enter Last Name : ")
mydept = input("Enter Department Code : ")
mysal = float(input("Enter Gross Salary : "))
sqlstr="insert into newtable (emp_id, fname, lname, deptno, salary) \
values ("+str(myemp)+", '"+myfname+"', '"+mylname+"', "+str(mydept)+", "+str(mysal)+");"
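    # A parameterized form (illustrative only, not used below) avoids building the SQL
    # by string concatenation:
    #   cur.execute("insert into newtable (emp_id, fname, lname, deptno, salary) "
    #               "values (?, ?, ?, ?, ?)", (myemp, myfname, mylname, mydept, mysal))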
print(sqlstr)
# dbname='db5'
# user='postgres'
# host='localhost'
dbname=connstring="PyX1901.db"
print()
conn = sqlite3.connect(connstring)
print(f'Connecting to the database ... {dbname} connected')
cur=conn.cursor()
try:
cur.execute(sqlstr)
print('Executing Query on database ... done')
    except (Exception, sqlite3.DatabaseError) as error:
print("Error Executing Insert Query or Record Already Exists .... ")
print(error)
try:
rows=cur.fetchall()
print('Collecting results ... Output is ...')
print(list(rows))
except:
print(" 1 Row(s) affected .....")
cur.close()
conn.commit()
print("End of Session ....")
|
# coding:utf-8
import os
import logging
import json
import urllib2
url = "http://www.tbs.co.jp/kanran/"
try:
req = urllib2.urlopen(url)
message = ""
#req.encoding = req.apparent_encoding
r = req.read()
for line in r.splitlines():
if line.find("クリスマスの約束") >= 0:
message += line
if line.find("小田和正") >= 0:
message += line
if len(message) > 0:
webhook = os.environ["SLACK_URL"]
payload = {
"channel": '@yyoshiki41',
"username": 'クリスマスの約束',
"icon_emoji": ':christmas_tree:',
"text": "```\n"+message+"\n```",
}
request = urllib2.Request(webhook, json.dumps(payload), {'Content-Type': 'application/json'})
urllib2.urlopen(request)
except urllib2.URLError:
logging.exception('Caught exception fetching url')
|
import numpy as np
import torch
from mjrl.utils.tensor_utils import tensorize
from torch.autograd import Variable
class MLP(torch.nn.Module):
def __init__(self, env_spec=None,
hidden_sizes=(64,64),
min_log_std=-3.0,
init_log_std=0.0,
seed=123,
device='cpu',
observation_dim=None,
action_dim=None,
max_log_std=1.0,
*args, **kwargs,
):
"""
:param env_spec: specifications of the env (see utils/gym_env.py)
:param hidden_sizes: network hidden layer sizes (currently 2 layers only)
:param min_log_std: log_std is clamped at this value and can't go below
:param init_log_std: initial log standard deviation
:param seed: random seed
"""
super(MLP, self).__init__()
# check input specification
if env_spec is None:
assert observation_dim is not None
assert action_dim is not None
self.observation_dim = env_spec.observation_dim if env_spec is not None else observation_dim # number of states
self.action_dim = env_spec.action_dim if env_spec is not None else action_dim # number of actions
self.device = device
self.seed = seed
self.min_log_std_val = min_log_std if type(min_log_std)==np.ndarray else min_log_std * np.ones(self.action_dim)
self.max_log_std_val = max_log_std if type(max_log_std)==np.ndarray else max_log_std * np.ones(self.action_dim)
self.min_log_std = tensorize(self.min_log_std_val)
self.max_log_std = tensorize(self.max_log_std_val)
# Set seed
# ------------------------
assert type(seed) == int
torch.manual_seed(seed)
np.random.seed(seed)
# Policy network
# ------------------------
self.layer_sizes = (self.observation_dim, ) + hidden_sizes + (self.action_dim, )
self.nonlinearity = torch.tanh
self.fc_layers = torch.nn.ModuleList([torch.nn.Linear(self.layer_sizes[i], self.layer_sizes[i+1])
for i in range(len(self.layer_sizes)-1)])
for param in list(self.parameters())[-2:]: # only last layer
param.data = 1e-2 * param.data
self.log_std = torch.nn.Parameter(torch.ones(self.action_dim) * init_log_std, requires_grad=True)
self.log_std.data = torch.max(self.log_std.data, self.min_log_std)
self.log_std.data = torch.min(self.log_std.data, self.max_log_std)
self.trainable_params = list(self.parameters())
# transform variables
self.in_shift, self.in_scale = torch.zeros(self.observation_dim), torch.ones(self.observation_dim)
self.out_shift, self.out_scale = torch.zeros(self.action_dim), torch.ones(self.action_dim)
# Easy access variables
# -------------------------
self.log_std_val = self.log_std.to('cpu').data.numpy().ravel()
# clamp log_std to [min_log_std, max_log_std]
self.log_std_val = np.clip(self.log_std_val, self.min_log_std_val, self.max_log_std_val)
self.param_shapes = [p.data.numpy().shape for p in self.trainable_params]
self.param_sizes = [p.data.numpy().size for p in self.trainable_params]
self.d = np.sum(self.param_sizes) # total number of params
# Placeholders
# ------------------------
self.obs_var = torch.zeros(self.observation_dim)
# Move parameters to device
# ------------------------
self.to(device)
# Network forward
# ============================================
def forward(self, observations):
if type(observations) == np.ndarray: observations = torch.from_numpy(observations).float()
assert type(observations) == torch.Tensor
observations = observations.to(self.device)
out = (observations - self.in_shift) / (self.in_scale + 1e-6)
for i in range(len(self.fc_layers)-1):
out = self.fc_layers[i](out)
out = self.nonlinearity(out)
out = self.fc_layers[-1](out) * self.out_scale + self.out_shift
return out
# Utility functions
# ============================================
def to(self, device):
super().to(device)
self.min_log_std, self.max_log_std = self.min_log_std.to(device), self.max_log_std.to(device)
self.in_shift, self.in_scale = self.in_shift.to(device), self.in_scale.to(device)
self.out_shift, self.out_scale = self.out_shift.to(device), self.out_scale.to(device)
self.trainable_params = list(self.parameters())
self.device = device
def get_param_values(self, *args, **kwargs):
params = torch.cat([p.contiguous().view(-1).data for p in self.parameters()])
return params.clone()
def set_param_values(self, new_params, *args, **kwargs):
current_idx = 0
for idx, param in enumerate(self.parameters()):
vals = new_params[current_idx:current_idx + self.param_sizes[idx]]
vals = vals.reshape(self.param_shapes[idx])
            # clamp the log_std parameter (registered last, after the fc layers) to its bounds
            vals = torch.max(vals, self.min_log_std) if param is self.log_std else vals
            vals = torch.min(vals, self.max_log_std) if param is self.log_std else vals
param.data = vals.to(self.device).clone()
current_idx += self.param_sizes[idx]
# update log_std_val for sampling
self.log_std_val = np.float64(self.log_std.to('cpu').data.numpy().ravel())
self.log_std_val = np.clip(self.log_std_val, self.min_log_std_val, self.max_log_std_val)
self.trainable_params = list(self.parameters())
def set_transformations(self, in_shift=None, in_scale=None,
out_shift=None, out_scale=None, *args, **kwargs):
in_shift = self.in_shift if in_shift is None else tensorize(in_shift)
in_scale = self.in_scale if in_scale is None else tensorize(in_scale)
out_shift = self.out_shift if out_shift is None else tensorize(out_shift)
out_scale = self.out_scale if out_scale is None else tensorize(out_scale)
self.in_shift, self.in_scale = in_shift.to(self.device), in_scale.to(self.device)
self.out_shift, self.out_scale = out_shift.to(self.device), out_scale.to(self.device)
# Main functions
# ============================================
def get_action(self, observation):
assert type(observation) == np.ndarray
if self.device != 'cpu':
print("Warning: get_action function should be used only for simulation.")
print("Requires policy on CPU. Changing policy device to CPU.")
self.to('cpu')
o = np.float32(observation.reshape(1, -1))
self.obs_var.data = torch.from_numpy(o)
mean = self.forward(self.obs_var).to('cpu').data.numpy().ravel()
noise = np.exp(self.log_std_val) * np.random.randn(self.action_dim)
action = mean + noise
return [action, {'mean': mean, 'log_std': self.log_std_val, 'evaluation': mean}]
def mean_LL(self, observations, actions, log_std=None, *args, **kwargs):
if type(observations) == np.ndarray: observations = torch.from_numpy(observations).float()
if type(actions) == np.ndarray: actions = torch.from_numpy(actions).float()
observations, actions = observations.to(self.device), actions.to(self.device)
log_std = self.log_std if log_std is None else log_std
mean = self.forward(observations)
        zs = (actions - mean) / torch.exp(log_std)
LL = - 0.5 * torch.sum(zs ** 2, dim=1) + \
- torch.sum(log_std) + \
- 0.5 * self.action_dim * np.log(2 * np.pi)
return mean, LL
def log_likelihood(self, observations, actions, *args, **kwargs):
mean, LL = self.mean_LL(observations, actions)
return LL.to('cpu').data.numpy()
def mean_kl(self, observations, *args, **kwargs):
new_log_std = self.log_std
old_log_std = self.log_std.detach().clone()
new_mean = self.forward(observations)
old_mean = new_mean.detach()
return self.kl_divergence(new_mean, old_mean, new_log_std, old_log_std, *args, **kwargs)
def kl_divergence(self, new_mean, old_mean, new_log_std, old_log_std, *args, **kwargs):
new_std, old_std = torch.exp(new_log_std), torch.exp(old_log_std)
Nr = (old_mean - new_mean) ** 2 + old_std ** 2 - new_std ** 2
Dr = 2 * new_std ** 2 + 1e-8
sample_kl = torch.sum(Nr / Dr + new_log_std - old_log_std, dim=1)
return torch.mean(sample_kl)
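# Illustrative usage sketch (the dimensions below are made up, not from this file):
#   policy = MLP(observation_dim=11, action_dim=3, hidden_sizes=(64, 64))
#   obs = np.zeros(11, dtype=np.float32)
#   action, info = policy.get_action(obs)   # info carries 'mean', 'log_std', 'evaluation'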
|
print("Allow the user to enter a series of integers. Sum the integers")
print("Ignore non-numeric input. End input with '.'")
theSum = 0
while True:
theNum = input("Number:")
if theNum == '.':
break
if not theNum.isdigit():
        print("Error, only numbers please")
continue
theSum += int(theNum)
print("The Sum is:", theSum)
|
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
import os
from cliboa.conf import env
class TestSqliteCreation(object):
def setup_method(self, method):
self._db_dir = os.path.join(env.BASE_DIR, "db")
|
# [129] Sum Root to Leaf Numbers
# https://leetcode-cn.com/problems/sum-root-to-leaf-numbers/description/
# * algorithms
# * Medium (66.16%)
# * Total Accepted: 61.1K
# * Total Submissions: 92K
# * Testcase Example: '[1,2,3]'
# Given a binary tree whose nodes each hold a digit 0-9, every root-to-leaf path represents a number.
# For example, the root-to-leaf path 1->2->3 represents the number 123.
# Compute the sum of all numbers generated from root-to-leaf paths.
# Note: a leaf node is a node with no children.
# Example 1:
# Input: [1,2,3]
#     1
#    / \
#   2   3
# Output: 25
# Explanation:
# The root-to-leaf path 1->2 represents the number 12.
# The root-to-leaf path 1->3 represents the number 13.
# Therefore, the sum of the numbers = 12 + 13 = 25.
# Example 2:
# Input: [4,9,0,5,1]
#       4
#      / \
#     9   0
#    / \
#   5   1
# Output: 1026
# Explanation:
# The root-to-leaf path 4->9->5 represents the number 495.
# The root-to-leaf path 4->9->1 represents the number 491.
# The root-to-leaf path 4->0 represents the number 40.
# Therefore, the sum of the numbers = 495 + 491 + 40 = 1026.
# class Node:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution(object):
def sumNumbers0(self, root):
res = [0]
if not root:
return 0
def dfs(root, s):
s = s * 10 + root.val
if root.left:
dfs(root.left, s)
if root.right:
dfs(root.right, s)
if not (root.left or root.right):
res[0] += s
dfs(root, 0)
return res[0]
def sumNumbers(self, root):
if not root:
return 0
res = 0
node_queue = deque([root])
num_queue = deque([root.val])
while node_queue:
node = node_queue.popleft()
num = num_queue.popleft()
if not (node.left or node.right):
res += num
else:
if node.left:
node_queue.append(node.left)
num_queue.append(num * 10 + node.left.val)
if node.right:
node_queue.append(node.right)
num_queue.append(num * 10 + node.right.val)
return res
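# Illustrative check (assumes a Node class like the commented-out one above):
#   root = Node(1, Node(2), Node(3))
#   Solution().sumNumbers(root)    # -> 25
#   Solution().sumNumbers0(root)   # -> 25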
|
#
# This file is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_support.smvbasetest import SmvBaseTest
from test_support.extrapath import ExtraPath
class SmvModelTest(SmvBaseTest):
@classmethod
def smvAppInitArgs(cls):
return ["--smv-props", "smv.stages=stage1:stage2"]
def test_SmvModelExec(self):
with ExtraPath("src/test/python/smv_model"):
model = self.smvApp.getModuleResult("stage1.modules.Model")
execDf = self.df("stage1.modules.ModelExec")
self.assertEqual(str(model), execDf.collect()[0][0])
def test_link_to_SmvResultModule(self):
"""Test that result of link to SmvModel is same as SmvModel's result
"""
with ExtraPath("src/test/python/smv_model"):
ModelRes = self.smvApp.getModuleResult("stage1.modules.Model")
ModelExecDf = self.smvApp.getModuleResult("stage2.modules.ModelExecWithLink")
self.assertEqual(str(ModelRes), ModelExecDf.collect()[0][0])
    def test_module_depends_on_model(self):
        """Test that a module can depend on a model and use it directly."""
with ExtraPath("src/test/python/smv_model"):
mod = self.df("stage1.modules.Model")
res = self.df("stage1.modules.ModuleUsesModel")
exp = self.createDF("a:String", "\"{}\"".format(mod))
self.should_be_same(res, exp)
|
# Write a program that reads an integer and says whether or not it is a prime number.
num = int(input('Digite um número: '))
total = 0
for c in range(1, num + 1):
if num % c == 0:
total += 1
if total == 2:
print(f'O numero {num} é primo')
else:
print(f'O número {num} não é primo')
|
# Import Configuration
from config import config
output_dir = config["OUTPUT"]["DIRECTORY"]
output_db = config["OUTPUT"]["DATABASE"]
crossref_email = config["API"]["CROSSREF_EMAIL"]
# Import Internal
from graph_utils import *
from db_utils import *
# Import External
from graph_tool.all import *
from halo import Halo
from habanero import Crossref
from requests.exceptions import HTTPError, ConnectionError
from pprint import pprint
from time import sleep
# MAIN
# Update progress on spinner
def update_progress(message, status, spinner):
if status == "inserted":
spinner.succeed(message)
elif status == "found":
spinner.info(message)
elif status == "fail":
spinner.fail(message)
spinner.start("Building network...")
# GRAPH CREATION
# Make structure graph
def create_structure_graph(directed = True):
graph = Graph(directed = directed)
# ID
# types: DOI, ISSN, ASJC code, ORCID
item_id = graph.new_vertex_property("string")
graph.vp.id = item_id
# Name
# types: paper title, journal name, subject name, author name
item_name = graph.new_vertex_property("string")
graph.vp.name = item_name
# Type
# types: author (3), paper (2), journal (1), subject (0)
item_type = graph.new_vertex_property("int")
graph.vp.type = item_type
# Author Type
# types: first (1), additional (0)
item_author_type = graph.new_edge_property("int")
graph.ep.author_type = item_author_type
# Author Info
# types: dictionary of author info from API
item_author_info = graph.new_vertex_property("object")
graph.vp.author_info = item_author_info
return graph
# Create user graph
def create_user_graph(directed = True):
graph = Graph(directed = directed)
# ID
# types: DOI, ISSN, ASJC code, ORCID, UNI
item_id = graph.new_vertex_property("string")
graph.vp.id = item_id
# Name
# types: paper title, journal name, subject name, author name, user name
item_name = graph.new_vertex_property("string")
graph.vp.name = item_name
# Type
# types: user (4), author (3), paper (2), journal (1), subject (0)
item_type = graph.new_vertex_property("int")
graph.vp.type = item_type
# Times Accessed
# types: count accessed
item_times_accessed = graph.new_edge_property("int")
graph.ep.times_accessed = item_times_accessed
# Vertex Dict
# For finding vertices by id
item_vertex_dict = graph.new_graph_property("object")
graph.gp.vertex_dict = item_vertex_dict
return graph
# ITEM PROCESSING
# Process the authors for a paper
def process_authors(graph, authors, spinner):
global vertex_dict
author_vertices = []
for author in authors:
if "given" in author and "family" in author:
author_name = author["given"] + " " + author["family"]
elif "given" in author:
author_name = author["given"]
elif "family" in author:
author_name = author["family"]
else:
continue
if author_name in vertex_dict["author"]:
author_index = vertex_dict["author"][author_name]
author_vertex = graph.vertex(author_index)
            author_vertices.append(author_vertex)
message = "Author " + author_name + " found in network."
update_progress(message, "found", spinner)
else:
author_vertex = graph.add_vertex()
if "ORCID" in author:
graph.vp.id[author_vertex] = author["ORCID"]
graph.vp.name[author_vertex] = author_name
graph.vp.type[author_vertex] = 3
graph.vp.author_info[author_vertex] = author
vertex_dict["author"][author_name] = int(author_vertex)
author_vertices.append(author_vertex)
message = "Author " + author_name + " inserted into network."
update_progress(message, "inserted", spinner)
return author_vertices
# Process the subjects for a journal
def process_subjects(graph, subjects, spinner):
global vertex_dict
subject_vertices = []
for subject in subjects:
if subject["ASJC"] in vertex_dict["subject"]:
subject_index = vertex_dict["subject"][subject["ASJC"]]
subject_vertex = graph.vertex(subject_index)
subject_vertices.append(subject_vertex)
message = "Subject " + str(subject["ASJC"]) + " found in network."
update_progress(message, "found", spinner)
else:
subject_vertex = graph.add_vertex()
graph.vp.id[subject_vertex] = subject["ASJC"]
graph.vp.name[subject_vertex] = subject["name"]
graph.vp.type[subject_vertex] = 0
vertex_dict["subject"][subject["ASJC"]] = int(subject_vertex)
subject_vertices.append(subject_vertex)
message = "Subject " + str(subject["ASJC"]) + " inserted into network."
update_progress(message, "inserted", spinner)
return subject_vertices
# Process the journal for a paper
def process_journal(graph, journal, spinner):
global vertex_dict
subjects = journal["subjects"]
subject_vertices = process_subjects(graph, subjects, spinner)
ISSN = journal["ISSN"][0]
if ISSN in vertex_dict["journal"]:
journal_index = vertex_dict["journal"][ISSN]
journal_vertex = graph.vertex(journal_index)
message = "Journal " + ISSN + " found in network."
update_progress(message, "found", spinner)
else:
journal_vertex = graph.add_vertex()
graph.vp.id[journal_vertex] = ISSN
if type(journal["title"]) == type(list()):
title = journal["title"][0]
else:
title = journal["title"]
graph.vp.name[journal_vertex] = title
graph.vp.type[journal_vertex] = 1
for subject_vertex in subject_vertices:
graph.add_edge(journal_vertex, subject_vertex)
vertex_dict["journal"][ISSN] = int(journal_vertex)
message = "Journal " + ISSN + " inserted into network."
update_progress(message, "inserted", spinner)
return journal_vertex
# Process a paper
def process_paper(graph, DOI, cr, mode, counter, total, spinner):
global vertex_dict
try:
item = cr.works(ids = DOI)
except HTTPError:
message = f"HTTPError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
except TimeoutError:
message = f"TimeoutError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
if not item["message"]["title"]:
message = f"Paper {DOI} no title found ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
title = item["message"]["title"][0]
if DOI in vertex_dict["paper"]:
message = f"Paper {DOI} found in network. ({counter} of {total})"
update_progress(message, "found", spinner)
return None
else:
try:
author_vertices = []
journal_vertex = None
if mode in ["author", "combined"]:
if not "author" in item["message"]:
message = f"Paper {DOI} no authors found ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
elif not item["message"]["author"]:
message = f"Paper {DOI} no authors found ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
authors = item["message"]["author"]
author_vertices = process_authors(graph, authors, spinner)
if mode in ["network", "combined"]:
if not "ISSN" in item["message"]:
return None
try:
journal = cr.journals(ids = item["message"]["ISSN"])
except HTTPError:
sleep(5)
message = f"HTTPError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
except ConnectionError:
sleep(5)
message = f"ConnectionError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
except TimeoutError:
sleep(5)
message = f"TimeoutError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
if "message" in journal:
journal = journal["message"]
elif type(journal) == type(list()):
journal = journal[0]["message"]
else:
message = f"No journal found for paper {DOI}. ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
journal_vertex = process_journal(graph, journal, spinner)
paper_vertex = graph.add_vertex()
graph.vp.id[paper_vertex] = DOI
graph.vp.name[paper_vertex] = title
graph.vp.type[paper_vertex] = 2
vertex_dict["paper"][DOI] = int(paper_vertex)
if journal_vertex:
graph.add_edge(paper_vertex, journal_vertex)
if author_vertices:
for author_vertex in author_vertices:
author_edge = graph.add_edge(author_vertex, paper_vertex)
author = graph.vp.author_info[author_vertex]
if author["sequence"] == "first":
graph.ep.author_type[author_edge] = 1
else:
graph.ep.author_type[author_edge] = 0
message = f"Paper {DOI} inserted into network. ({counter} of {total})"
update_progress(message, "inserted", spinner)
return paper_vertex
except HTTPError:
message = f"HTTPError ({counter} of {total})"
update_progress(message, "fail", spinner)
return None
# Process the citations for a paper
def process_citations(graph, DOI, cr):
global vertex_dict
try:
item = cr.works(ids = DOI)
except HTTPError:
print("HTTPError")
return
if "reference" in item["message"]:
if not item["message"]["reference"]:
print(f"No references for {DOI}")
return
else:
print(f"No references for {DOI}")
return
cited_by_vertex = graph.vertex(vertex_dict["paper"][DOI])
for reference in item["message"]["reference"]:
if "DOI" in reference:
if reference["DOI"] in vertex_dict["paper"]:
cited_vertex = graph.vertex(vertex_dict["paper"][reference["DOI"]])
graph.add_edge(cited_vertex, cited_by_vertex)
else:
continue
# Process user
def process_user(graph, uni, cr, counter, total, spinner):
global vertex_dict
global sqlite_cursor
sqlite_cursor.execute("SELECT ezproxy_user_id FROM ezproxy_users WHERE uni = ?", (uni,))
user_id = sqlite_cursor.fetchone()[0]
    sqlite_cursor.execute("SELECT ezproxy_doi_id FROM access_records WHERE ezproxy_user_id = ?", (user_id,))
records = [item[0] for item in sqlite_cursor.fetchall()]
if uni in vertex_dict["user"]:
message = f"User {uni} found in network. ({counter} of {total})"
        user_index = vertex_dict["user"][uni]
        user_vertex = graph.vertex(user_index)
update_progress(message, "found", spinner)
else:
user_vertex = graph.add_vertex()
graph.vp.id[user_vertex] = uni
graph.vp.name[user_vertex] = user_id
graph.vp.type[user_vertex] = 4
vertex_dict["user"][uni] = int(user_vertex)
message = f"User {uni} inserted into network. ({counter} of {total})"
update_progress(message, "inserted", spinner)
for record in records:
sqlite_cursor.execute("SELECT doi FROM ezproxy_doi WHERE ezproxy_doi_id = ?", (record,))
try:
DOI = sqlite_cursor.fetchone()[0]
except TypeError:
continue
paper_vertex = process_paper(graph, DOI, cr, "user", counter, total, spinner)
if paper_vertex:
prior_access_edge = graph.edge(user_vertex, paper_vertex)
if prior_access_edge:
graph.ep.times_accessed[prior_access_edge] += 1
else:
access_edge = graph.add_edge(user_vertex, paper_vertex)
graph.ep.times_accessed[access_edge] = 1
return
# GRAPH BUILDING
# Build a graph based on metadata structure
def build_structure_graph(graph, DOIs, mode, spinner):
global crossref_email
global vertex_dict
vertex_dict = {"paper" : {}, "journal" : {}, "subject" : {}, "author" : {}}
total = len(DOIs)
counter = 1
spinner.start()
cr = Crossref(mailto = crossref_email)
for DOI in DOIs:
process_paper(graph, DOI, cr, mode, counter, total, spinner)
counter += 1
spinner.succeed("All papers inserted")
if mode in ["citation", "combined"]:
spinner.start("Building citation edges...")
for DOI in vertex_dict["paper"]:
process_citations(graph, DOI, cr)
spinner.stop()
# Build a graph based on user access records
def build_user_graph(graph, users, spinner, cursor):
global crossref_email
global vertex_dict
global sqlite_cursor
sqlite_cursor = cursor
vertex_dict = {"paper" : {}, "journal" : {}, "subject" : {}, "author" : {}, "user" : {}}
total = len(users)
counter = 1
spinner.start()
cr = Crossref(mailto = crossref_email)
for uni in users:
process_user(graph, uni, cr, counter, total, spinner)
counter += 1
spinner.succeed("All users inserted")
# WRAPPERS
# Wrapper routine for different graph types
def network_routine():
options = ["network", "citation", "author", "user", "combined"]
print("Runtime Options Available")
for i in range(len(options)):
print(str(i) + " - " + options[i])
program = options[int(input("Enter option number: "))]
filename = input("Graph Filename [*].[gt, graphml, etc]: ")
spinner = Halo(text = "Building network...", spinner = "runner", text_color = "red")
add_json_to_output_db()
conn = connect_to_output_db()
sqlite_cursor = conn.cursor()
if program == "user":
sqlite_cursor.execute("SELECT uni FROM ezproxy_users WHERE uni IS NOT NULL")
data = [item[0] for item in sqlite_cursor.fetchall()]
print("Running user program.")
graph = create_user_graph()
build_user_graph(graph, data[:10:], spinner, sqlite_cursor)
else:
sqlite_cursor.execute("SELECT doi FROM ezproxy_doi WHERE doi IS NOT NULL")
data = [item[0] for item in sqlite_cursor.fetchall()]
graph = create_structure_graph()
if program == "network":
print("Running network program.")
elif program == "citation":
print("Running citation program.")
elif program == "author":
print("Running author program.")
elif program == "combined":
print("Running combined program.")
build_structure_graph(graph, data[:10:], program, spinner)
graph.save(output_dir + filename)
print("Graph saved.")
|
from conans import tools
import os
from conanfile_base import BaseLib
class xclockConan(BaseLib):
basename = "xclock"
name = basename.lower()
version = "1.0.9"
tags = ("conan", "xclock")
description = '{description}'
exports = ["conanfile_base.py"]
requires = [ 'libx11/1.6.8@bincrafters/stable',
'libxt/1.2.0@bincrafters/stable',
'libxaw/1.0.13@bincrafters/stable',
'libxmu/1.1.3@bincrafters/stable',
'xproto/7.0.31@bincrafters/stable',
'libxrender/0.9.10@bincrafters/stable',
'libxft/2.3.3@bincrafters/stable',
'libxkbfile/1.1.0@bincrafters/stable']
def source(self):
url = "https://www.x.org/archive/individual/app/xclock-1.0.9.tar.gz"
tools.get(url, sha256="4f0dd4d7d969b55c64f6e58242bca201d19e49eb8c9736dc099330bb0c5385b1")
extracted_dir = "xclock-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def build(self):
super(xclockConan, self).build()
self.run(os.path.join('source_subfolder', 'xclock'))
|
# -*- coding: utf-8 -*-
#con.execute_non_query(INSERT_EX_SQ.encode('your language encoder'))
#
__doc__='''
Usage:
'''
import os
from os import getenv
import sys
import datetime
import time
import locale
import psycopg2
import csv
from map_matching import map_matching as mm
from map_matching.utils import Edge, Measurement
version = u'1.0.0'
viewflg=False
logflg=False
def generate_placeholder(length, width):
"""
Generate "(%s, %s, %s, ...), ..." for placing parameters.
"""
return ','.join('(' + ','.join(['%s'] * width) + ')' for _ in range(length))
def create_sequence_subquery(length, columns):
"""Create a subquery for sequence."""
placeholder = generate_placeholder(length, len(columns))
subquery = 'WITH sequence {columns} AS (VALUES {placeholder})'.format(
columns='(' + ','.join(columns) + ')',
placeholder=placeholder)
return subquery
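# Illustrative outputs (not part of the original module):
#   generate_placeholder(2, 3)                 -> "(%s,%s,%s),(%s,%s,%s)"
#   create_sequence_subquery(2, ['id', 'way']) -> "WITH sequence (id,way) AS (VALUES (%s,%s),(%s,%s))"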
def query_edges_in_sequence_bbox(conn, road_table_name, sequence, search_radius):
"""
Query all road edges within the bounding box of the sequence
expanded by search_radius.
"""
if not sequence:
return
    # Fixed for testing
stmt = '''
-- NOTE the length unit is in km
SELECT edge.gid, edge.source, edge.target, edge.length * 1000, edge.length * 1000
FROM {road_table_name} AS edge
CROSS JOIN (SELECT ST_Extent(ST_MakePoint(ST_X({sequence_name}.way), ST_Y({sequence_name}.way)))::geometry AS extent FROM {sequence_name}) AS extent
WHERE edge.the_geom && ST_Envelope(ST_Buffer(extent.extent::geography, {search_radius})::geometry)
'''.format(road_table_name=road_table_name,sequence_name=sequence,search_radius=search_radius)
cur = conn.cursor()
cur.execute(stmt)
for gid, source, target, cost, reverse_cost in cur.fetchall():
edge = Edge(id=gid,
start_node=source,
end_node=target,
cost=cost,
reverse_cost=reverse_cost)
yield edge
cur.close()
def build_road_network(edges):
"""
Construct the bidirectional road graph given a list of edges.
"""
graph = {}
# Graph with bidirectional edges
for edge in edges:
graph.setdefault(edge.start_node, []).append(edge)
graph.setdefault(edge.end_node, []).append(edge.reversed_edge())
return graph
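# Illustrative structure of the returned graph (Edge constructed with the same fields
# used in query_edges_in_sequence_bbox above; values are made up):
#   e = Edge(id=1, start_node=10, end_node=20, cost=5.0, reverse_cost=5.0)
#   build_road_network([e])  # -> {10: [e], 20: [e.reversed_edge()]}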
# Subclass the native Candidate class to support more attributes
class Candidate(mm.Candidate):
def __init__(self, measurement, edge, location, distance):
super(Candidate, self).__init__(measurement=measurement, edge=edge, location=location, distance=distance)
self.lon = None
self.lat = None
self.mlon=None
self.mlat=None
self.ptime= None
self.edgeflg=None
def query_candidates(conn, road_table_name, sequence, search_radius):
"""
Query candidates of each measurement in a sequence within
search_radius.
"""
stmt = '''
WITH
--- WITH sequence AS (subquery here),
seq AS (SELECT *,
ST_SetSRID(ST_MakePoint(ST_X({sequence_name}.way), ST_Y({sequence_name}.way)), 4326) AS geom,
ST_SetSRID(ST_MakePoint(ST_X({sequence_name}.way), ST_Y({sequence_name}.way)), 4326)::geography AS geog
FROM {sequence_name})
SELECT seq.csv_id, ST_X(seq.way) as lon, ST_Y(seq.way) as lat, seq.ptime,
--- Edge information
edge.gid, edge.source, edge.target,
edge.length, edge.length,
--- Location, a float between 0 and 1 representing the location of the closest point on the edge to the measurement.
ST_LineLocatePoint(edge.the_geom, seq.geom) AS location,
--- Distance in meters from the measurement to its candidate's location
ST_Distance(seq.geog, edge.the_geom::geography) AS distance,
--- Candidate's location (a position along the edge)
ST_X(ST_ClosestPoint(edge.the_geom, seq.geom)) AS clon,
ST_Y(ST_ClosestPoint(edge.the_geom, seq.geom)) AS clat
FROM seq CROSS JOIN {road_table_name} AS edge
WHERE edge.the_geom && ST_Envelope(ST_Buffer(seq.geog, {search_radius})::geometry)
AND ST_DWithin(seq.geog, edge.the_geom::geography, {search_radius})
'''.format(road_table_name=road_table_name,sequence_name=sequence,search_radius=search_radius)
cur = conn.cursor()
cur.execute(stmt)
for mid, mlon, mlat, mdt, \
eid, source, target, cost, reverse_cost, \
location, distance, \
clon, clat in cur:
measurement = Measurement(id=mid, lon=mlon, lat=mlat)
edge = Edge(id=eid, start_node=source, end_node=target, cost=cost, reverse_cost=reverse_cost)
assert 0 <= location <= 1
candidate = Candidate(measurement=measurement, edge=edge, location=location, distance=distance)
# Coordinate along the edge (not needed by MM but might be
# useful info to users)
        candidate.lon = clon  # matched point X (longitude)
        candidate.lat = clat  # matched point Y (latitude)
        candidate.mlon = mlon  # probe point X (longitude)
        candidate.mlat = mlat  # probe point Y (latitude)
        candidate.ptime = mdt  # probe timestamp (TIMESTAMP)
candidate.edgeflg = 0
yield candidate
cur.close()
def map_match(conn, road_table_name, sequence, search_radius, max_route_distance):
    """Match the sequence table and return a list of candidates."""
start=time.time()
# Prepare the network graph and the candidates along the sequence
edges = query_edges_in_sequence_bbox(conn, road_table_name,sequence, search_radius)
print( 'edges:' ,time.time() - start)
start=time.time()
network = build_road_network(edges)
print('network:', time.time() - start)
start=time.time()
candidates = query_candidates(conn, road_table_name, sequence, search_radius)
print('candidates:', time.time() - start)
start=time.time()
    # If the route distance between two consecutive measurements is longer
    # than `max_route_distance` (in meters), treat it as a breakage.
matcher = mm.MapMatching(network.get, max_route_distance)
print( 'matcher:', time.time() - start)
# Match and return the selected candidates along the path
return list(matcher.offline_match(candidates))
def main(argv):
pguser='postgres'
pgport='5432'
pghost='localhost'
pgdbname ='evtest'
pgpassword='apptec'
# postgresql://{username}:{password}@{hostname}:{port}/{database}
dsn='postgresql://{0}:{1}@{2}:{3}/{4}'.format(pguser,pgpassword,pghost,pgport,pgdbname)
    # Generate the name of the OSM table to be created from the file name specified for the OSM data download
osmtbl='kakogawa_ways'
    # Upload the probe CSV file
csvtbl='probe_kaisen197_2016'
    # Run map matching using the probe table
    # NOTE: search_radius, max_route_distance and outputcsv are never defined in the
    # original script; the values below are assumed placeholders so that main() can run.
    search_radius = 50                    # meters (assumed)
    max_route_distance = 2000             # meters (assumed)
    outputcsv = 'map_match_result.csv'    # hypothetical output file name
    start=time.time()
conn = psycopg2.connect(dsn)
candidates = map_match(conn, osmtbl,csvtbl, search_radius, max_route_distance)
conn.close()
process_time = time.time() - start
print( 'process_time;',process_time )
    # Flag the first and last candidate of each edge in the candidate data
flg=0
cb=None
for candidate in candidates:
candidate.edgeflg=(0 if flg == candidate.edge.id else 1)
flg=candidate.edge.id
if cb is not None :
if candidate.edgeflg == 1 and cb.edgeflg==0 :
cb.edgeflg=2
cb=candidate
with open( outputcsv, "w" ) as f:
f.write(u'mid,ptime,mlon,mlat,clon,clat,cid,cloc,cdist,edgeflg\n')
for candidate in candidates:
a= \
'{0},'.format(candidate.measurement.id) +\
'{0},'.format(candidate.ptime)+\
'{0:.6f},{1:.6f},'.format(*map(float, (candidate.measurement.lon, candidate.measurement.lat))) +\
'{0:.6f},{1:.6f},'.format(*map(float, (candidate.lon, candidate.lat)))+\
'{0},'.format(candidate.edge.id) +\
'{0:.2f},'.format(candidate.location) +\
'{0:.2f},'.format(candidate.distance) +\
'{0}\n'.format(candidate.edgeflg)
f.write(a)
f.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
default_app_config = "request_log.apps.RequestLogConfig"
|
import atexit
from threading import Thread
from ..util.ipc import ipc_cleanup, ipc_send, start_ipc_server
from ..util.sdk import Singleton
class KeyboardButtonsListener(metaclass=Singleton):
def __init__(self):
self.buttons = {}
atexit.register(self.__clean_up)
self.listener_thread = Thread(
target=start_ipc_server, args=("keyevent", self.__on_key_event)
)
self.listener_thread.start()
def add_button(self, key, button):
self.buttons[key] = button
def __on_key_event(self, ipc_message):
key, event = ipc_message.split(" ")
button = self.buttons.get(key)
if button:
if event == "keydown":
button._on_press()
elif event == "keyup":
button._on_release()
def __clean_up(self):
ipc_cleanup("keyevent")
class KeyboardButton: # interface to match pitop.KeyboardButton
def __init__(self, key):
self.key = key
self.pressed_method = None
self.released_method = None
self.__key_pressed = False
listener = KeyboardButtonsListener()
listener.add_button(key, self)
ipc_send("keylisten", key)
def _on_press(self):
self.__key_pressed = True
if self.pressed_method is not None:
self.pressed_method()
def _on_release(self):
self.__key_pressed = False
if self.released_method is not None:
self.released_method()
@property
def when_pressed(self):
"""Get or set the 'when pressed' button state callback function. When
set, this callback function will be invoked when this event happens.
:type callback: Function
:param callback:
Callback function to run when a button is pressed.
"""
@when_pressed.setter
def when_pressed(self, method=None):
if method is None:
            raise ValueError("Error: no method assigned")
self.pressed_method = method
@property
def when_released(self):
"""Get or set the 'when released' button state callback function. When
set, this callback function will be invoked when this event happens.
:type callback: Function
:param callback:
Callback function to run when a button is released.
"""
@when_released.setter
def when_released(self, method=None):
if method is None:
            raise ValueError("Error: no method assigned")
self.released_method = method
@property
def is_pressed(self) -> bool:
"""Get or set the button state as a boolean value.
:rtype: bool
"""
        return self.__key_pressed
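# Illustrative usage sketch (assumes the IPC server delivers "<key> keydown" /
# "<key> keyup" messages, as handled by KeyboardButtonsListener above):
#   button = KeyboardButton("a")
#   button.when_pressed = lambda: print("'a' pressed")
#   button.when_released = lambda: print("'a' released")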
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 08:41:14 2018
@author: gcu
"""
import networkx as nx
import pickle
import pandas as pd
import numpy as np
import glob
import os
import re
from collections import Counter
#import pylab as plt
import matplotlib.pyplot as plt
data=pd.read_csv("./low_mem_data.csv")
# Remove rows with invalid dates
data.dropna(subset=['DAT'], inplace=True)
def getUsersTgT(Tbl=''):
cntr = Counter(Tbl['TGT'])
return cntr.most_common(n=None)
usr=getUsersTgT(Tbl=data)
# The first element is a blank target (no user recorded), so remove it.
del usr[0]
# We have a list of users. Now build a function that, given a user,
# returns a table with only that user's data.
def getTableForUser(U='', T=''):
tmp = T.set_index('TGT')
    # .ix was removed from pandas; .loc does the label-based selection intended here
    return tmp.loc[U]
def validateHour(T=''):
err=[]
count=0
for t in T.DAT:
if int(t[:2]) > 23:
print("Error",t)
err.append(count)
count=count+1
    if len(err) > 0:
        # Drop all offending rows at once; dropping one at a time by position
        # would shift the remaining positional indices.
        print("Dropping rows", err)
        T.drop(T.index[err], inplace=True)
return T
def acceptanceRatio(T=''):
allElem=[]
acc=[]
neu=[]
rej=[]
accR=[]
neuR=[]
rejR=[]
succR=[]
cumSum=[]
cumAcc=[]
cumNeu=[]
cumRej=[]
fRes=0
fResR=[]
for i in T.index:
elemV = T['VOT'][i]
elemR = T['RES'][i]
if int(elemV) == 1:
acc.append(elemV)
if int(elemV) == 0:
print("Neutral")
neu.append(elemV)
if int(elemV) == -1:
rej.append(elemV)
#fRes=fRes+int(elemR)
#fResR.append(fRes/len(allElem+1))
allElem.append(elemV)
accR.append(len(acc)/len(allElem))
neuR.append(len(neu)/len(allElem))
rejR.append(len(rej)/len(allElem))
cumSum.append(len(allElem))
cumAcc.append(len(acc))
cumNeu.append(len(neu))
cumRej.append(len(rej))
T["accRatio"]=accR
T["neuRatio"]=neuR
T["rejRatio"]=rejR
T["cumSumVot"]=cumSum
T["cumAcc"]=cumAcc
T["cumNeu"]=cumNeu
T["cumRej"]=cumRej
#T["resRatio"]=fResR
return T
#usrTable=getTableForUser(U='Werdna',T=data)
usrTable=getTableForUser(U='Wikiwoohoo',T=data)
usrTable.reset_index(inplace=True)
usrTable = validateHour(T=usrTable)
usrTable['date']=pd.to_datetime(usrTable.DAT)
usrTable.sort_index(inplace=True)
acceptanceRatio(T=usrTable)
usrTable.set_index('date',inplace=True)
r=usrTable[['accRatio','neuRatio','rejRatio']]
#plt.figure();
#r.plot();
ft=usrTable.reset_index()
#x=ft['date']
#y=ft['accRatio']
#z=ft['NUM_WORDS']
#x=x.values
#y=y.values
#z=z.values
#fig, ax = plt.subplots()
#ax.fill(x, y,z, zorder=10)
#ax.grid(True, zorder=5)
#plt.show()
#from sklearn.decomposition import PCA
#pca = PCA(n_components=2)
#pca.fit(r)
#X_ = pca.transform(r)
#dfPCA = pd.DataFrame({'x1': X_[:,0], 'x2': X_[:,1]})
#plt.scatter(dfPCA['x1'], dfPCA['x2'])
#ft=usrTable.reset_index()
#x=(ft['date']).values
#y=(ft['accRatio']).values
#ssplt.scatter(x,y)
|
# -- encoding: UTF-8 --
from django.forms import TypedChoiceField, CharField
from django.utils.text import capfirst
__all__ = ["formfield"]
# This is a copy of Django 1.8's (78d43a5e1064b63db1c486516c4263ef1c4c975c)
# `Field.formfield()`, for compatibility with Django 1.5.x, which does not
# support `choices_form_class` in a sane way.
# The commit b6f4a92ff45d98a63dc29402d8ad86b88e6a6697
# would make this compatible with our enums,
# but it's best to go all the way to the freshest code, I think.
def formfield(db_field, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not db_field.blank,
'label': capfirst(db_field.verbose_name),
'help_text': db_field.help_text}
if db_field.has_default():
if callable(db_field.default):
defaults['initial'] = db_field.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = db_field.get_default()
if db_field.choices:
# Fields with choices get special treatment.
include_blank = (db_field.blank or
not (db_field.has_default() or 'initial' in kwargs))
defaults['choices'] = db_field.get_choices(include_blank=include_blank)
defaults['coerce'] = db_field.to_python
if db_field.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = CharField
return form_class(**defaults)
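# Illustrative usage sketch (the model and field name below are hypothetical, not
# part of this module):
#   status_field = MyModel._meta.get_field("status")
#   form_field = formfield(status_field)  # yields a TypedChoiceField when choices are set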
# This is a bare-bones implementation of `import_string`, as
# implemented in Django commit f95122e541df5bebb9b5ebb6226b0013e5edc893.
try:
try:
from django.utils.module_loading import import_string
except ImportError:
from django.utils.module_loading import import_by_path as import_string
except ImportError:
from django.utils.importlib import import_module
def import_string(dotted_path):
module_path, class_name = dotted_path.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
|
import os
import streamlit.components.v1 as components
# Create a _RELEASE constant. We'll set this to False while we're developing
# the component, and True when we're ready to package and distribute it.
# (This is, of course, optional - there are innumerable ways to manage your
# release process.)
_RELEASE = True
# Declare a Streamlit component. `declare_component` returns a function
# that is used to create instances of the component. We're naming this
# function "_component_func", with an underscore prefix, because we don't want
# to expose it directly to users. Instead, we will create a custom wrapper
# function, below, that will serve as our component's public API.
# It's worth noting that this call to `declare_component` is the
# *only thing* you need to do to create the binding between Streamlit and
# your component frontend. Everything else we do in this file is simply a
# best practice.
if not _RELEASE:
_streamlit_navbar001 = components.declare_component(
"streamlit_navbar001",
url="http://localhost:3001",
)
else:
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
_streamlit_navbar001 = components.declare_component("streamlit_navbar001", path=build_dir)
def streamlit_navbar001(navbar_buttons):
component_value = _streamlit_navbar001(navbar_buttons=navbar_buttons, default=0)
# We could modify the value returned from the component if we wanted.
# There's no need to do this in our simple example - but it's an option.
return component_value
# Add some test code to play with the component while it's in development.
# During development, we can run this just as we would any other Streamlit
# app: `$ streamlit run my_component/__init__.py`
if not _RELEASE:
import streamlit as st
st.subheader("Component with constant args")
# Create an instance of our component with a constant `name` arg, and
# print its output value.
button_id = streamlit_navbar001(navbar_buttons=[{'name':'home','id':'home'},{'name':'home2','id':'home2'},{'name':'home3','id':'home3'},{'name':'home4','id':'home4'}])
st.markdown("You've clicked the button with id: {}".format(button_id))
|
import os
import logging
import sys
import shutil
import json
import pkg_resources
import pandas as pd
from Bio import SeqIO
class Controller(object):
def __init__(self, args):
self.fasta = args.input
self.out = args.output
self.threads = args.threads
self.dist = args.dist
self.prod = args.prodigal
self.db = args.db
self.circular = args.circular
self.oev = args.overall_eval
self.ocs = args.overall_cov_seq
self.och = args.overall_cov_hmm
self.check_inp = args.skip_check
self.keep_tmp = args.keep_tmp
self.lvl = args.log_lvl
self.redo = args.redo_typing
self.kmer = args.kmer
self.crispr_cas_dist = args.ccd
self.pred_prob = args.pred_prob
self.noplot = args.no_plot
self.scale = args.scale
self.nogrid = args.no_grid
self.expand = args.expand
self.simplelog = args.simplelog
self.customhmm = args.custom_hmm
self.repeat_id = args.repeat_id
self.spacer_id = args.spacer_id
self.spacer_sem = args.spacer_sem
self.any_cas = False
self.any_operon = False
self.any_crispr = False
# Logger
if self.simplelog:
logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=self.lvl)
else:
logging.basicConfig(format='\033[36m'+'[%(asctime)s] %(levelname)s:'+'\033[0m'+' %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=self.lvl)
logging.info('Running CRISPRCasTyper version {}'.format(pkg_resources.require("cctyper")[0].version))
# Force consistency
self.out = os.path.join(self.out, '')
if self.redo:
self.check_inp = True
self.prot_path = self.out+'proteins.faa'
# Check databases
self.check_db()
# Check input and output
self.check_input()
self.check_out()
# If redo check if any crisprs and operons
if self.redo:
if os.path.exists(self.out+'cas_operons.tab') or os.path.exists(self.out+'cas_operons_putative.tab'):
self.any_operon = True
if os.path.exists(self.out+'crisprs_all.tab'):
self.any_crispr = True
# Write arguments
da = vars(args)
f = open(self.out+'arguments.tab', 'w')
for k, v in da.items():
f.write('{}:\t{}\n'.format(k, v))
f.close()
# Get lengths
self.get_length()
def check_out(self):
if not self.redo:
try:
os.mkdir(self.out)
except FileExistsError:
logging.error('Directory '+self.out+' already exists')
sys.exit()
def check_input(self):
if not self.check_inp:
if os.path.isfile(self.fasta):
if not self.is_fasta():
logging.error('Input file is not in fasta format')
sys.exit()
else:
logging.error('Could not find input file')
sys.exit()
def is_fasta(self):
try:
with open(self.fasta, 'r') as handle:
fa = SeqIO.parse(handle, 'fasta')
[float(x.id) for x in fa]
logging.error('Numeric fasta headers not supported')
return False
except:
with open(self.fasta, 'r') as handle:
fa = SeqIO.parse(handle, 'fasta')
return any(fa)
def clean(self):
if not self.redo:
if os.stat(self.out+'hmmer.log').st_size == 0:
os.remove(self.out+'hmmer.log')
if self.customhmm != '':
if os.stat(self.out+'hmmer_custom.log').st_size == 0:
os.remove(self.out+'hmmer_custom.log')
if not self.keep_tmp:
logging.info('Removing temporary files')
shutil.rmtree(self.out+'hmmer')
os.remove(self.out+'minced.out')
os.remove(self.out+'prodigal.log')
os.remove(self.out+'proteins.faa')
def check_db(self):
if self.db == '':
try:
self.db = os.environ['CCTYPER_DB']
except:
logging.error('Could not find database directory')
sys.exit()
self.scoring = os.path.join(self.db, 'CasScoring.csv')
self.pdir = os.path.join(self.db, 'Profiles', '')
self.xgb = os.path.join(self.db, "xgb_repeats.model")
self.typedict = os.path.join(self.db, "type_dict.tab")
self.cutoffdb = os.path.join(self.db, "cutoffs.tab")
self.ifdb = os.path.join(self.db, "interference.json")
self.addb = os.path.join(self.db, "adaptation.json")
# Try to load CasScoring table
if os.path.isfile(self.scoring):
try:
dump = pd.read_csv(self.scoring, sep=",")
except:
logging.error('CasScoring table could not be loaded')
sys.exit()
else:
logging.error('CasScoring table could not be found')
sys.exit()
# Look if HMM profile dir exists
if os.path.isdir(self.pdir):
for i in os.listdir(self.pdir):
if not i.lower().endswith('.hmm'):
logging.error('There are non-HMM profiles in the HMM profile directory')
sys.exit()
else:
logging.error('Could not find HMM profile directory')
sys.exit()
# Load specific cutoffs
with open(self.cutoffdb, 'r') as f:
rs = (ll.rstrip().split(':') for ll in f)
self.cutoffs = {r[0].lower():r[1].split(',') for r in rs}
# Load mandatory gene files
with open(self.ifdb, 'r') as f:
self.compl_interf = json.load(f)
with open(self.addb, 'r') as f:
self.compl_adapt = json.load(f)
def get_length(self):
with open(self.fasta, 'r') as handle:
self.len_dict = {}
for fa in SeqIO.parse(handle, 'fasta'):
self.len_dict[fa.id] = len(fa.seq)
|
"""
Plot training/validation curves for multiple models.
"""
from __future__ import division
from __future__ import print_function
import argparse
import matplotlib
import numpy as np
import os
matplotlib.use('Agg') # This must be called before importing pyplot
import matplotlib.pyplot as plt
COLORS_RGB = [
(228, 26, 28), (55, 126, 184), (77, 175, 74),
(152, 78, 163), (255, 127, 0), (255, 255, 51),
(166, 86, 40), (247, 129, 191), (153, 153, 153)
]
# Scale the RGB values to the [0, 1] range, which is the format
# matplotlib accepts.
colors = [(r / 255, g / 255, b / 255) for r, g, b in COLORS_RGB]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dirs', nargs='+', required=True,
help='Directories where the model and costs are saved')
parser.add_argument('-s', '--save_file', type=str, required=True,
help='Filename of the output plot')
return parser.parse_args()
def graph(dirs, save_file, average_window=100):
""" Plot the training and validation costs over iterations
Params:
dirs (list(str)): Directories where the model and costs are saved
save_file (str): Filename of the output plot
average_window (int): Window size for smoothening the graphs
"""
fig, ax = plt.subplots()
ax.set_xlabel('Iters')
ax.set_ylabel('Loss')
average_filter = np.ones(average_window) / float(average_window)
for i, d in enumerate(dirs):
name = os.path.basename(os.path.abspath(d))
color = colors[i % len(colors)]
costs = np.load(os.path.join(d, 'costs.npz'))
train_costs = costs['train']
valid_costs = costs['validation'].tolist()
iters = train_costs.shape[0]
        # Validation loss is assumed to be recorded once every 500 iterations
        valid_range = [500 * (k + 1) for k in range(iters // 500)]
        if len(valid_range) != len(valid_costs):
            valid_range.append(iters)
if train_costs.ndim == 1:
train_costs = np.convolve(train_costs, average_filter,
mode='valid')
ax.plot(train_costs, color=color, label=name + '_train', lw=1.5)
ax.plot(valid_range, valid_costs[:len(valid_range)],
'-o', color=color, label=name + '_valid')
ax.grid(True)
ax.legend(loc='best')
plt.savefig(save_file)
if __name__ == '__main__':
args = parse_args()
graph(args.dirs, args.save_file)
|
from .testmapgen import TestMapGen
from .testwalk import TestWalk
|
"""
Module for generation of plots.
"""
# Import Python standard libraries
import math
import statistics
# Import 3rd-party libraries
from matplotlib import pyplot as plt
import numpy as np
def graph_word_distribution_entropies(entropies1, entropies2, output_path, **kwargs):
    """
    Plot overlaid histograms of two word-entropy distributions.
    Keyword arguments:
        title - title for the graph.
        label1, label2 - legend labels for the first and second distribution.
        graph_limit - upper graph limit for the histogram bins.
    """
    title = kwargs.get("title", "")
    label1 = kwargs.get("label1", None)
    label2 = kwargs.get("label2", None)
    graph_limit = kwargs.get("graph_limit", None)
cnt1 = f"{len(entropies1):6d}"
avg1 = f"{statistics.mean(entropies1):6.3f}"
std1 = f"{statistics.stdev(entropies1):6.3f}"
cnt2 = f"{len(entropies2):6d}"
avg2 = f"{statistics.mean(entropies2):6.3f}"
std2 = f"{statistics.stdev(entropies2):6.3f}"
    entropies = sorted(entropies1 + entropies2)
    # Derive axis limits from near-extreme values so a couple of outliers do not stretch the range
    upper_limit = graph_limit if graph_limit is not None else math.ceil(entropies[-3])
    lower_limit = min(0, math.floor(entropies[3]))
# Set frame horizontal for this measure.
bins = np.linspace(lower_limit, upper_limit, 60)
plt.figure(figsize=(8, 5))
plt.hist(
entropies1,
bins,
alpha=0.65,
label=f"{label1}$(n={cnt1}, \\mu={avg1}, \\sigma={std1})$",
color="blue",
)
plt.hist(
entropies2,
bins,
alpha=0.65,
label=f"{label2}$(n={cnt2}, \\mu={avg2}, \\sigma={std2})$",
color="red",
)
plt.grid(axis="y", alpha=0.8)
plt.legend(loc="upper right")
plt.xlabel("Entropies")
plt.ylabel("Frequency")
plt.title(title)
# Build file output and write
plt.savefig(output_path, dpi=600)
plt.close()
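# Example call (hypothetical data and labels):
#   graph_word_distribution_entropies(
#       native_entropies, loan_entropies, "entropy_hist.png",
#       title="Native vs. borrowed words", label1="native ", label2="borrowed ")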
# def draw_dist(x, output_path, title="Distribution of Statistic"):
# cnt = f"{len(x):6d}"
# avg = f"{np.mean(x):9.4f}"
# std = f"{np.std(x):9.4f}"
# # An "interface" to matplotlib.axes.Axes.hist() method
# plt.figure(figsize=(8, 5))
# n, bins, patches = plt.hist(
# x=x, bins="auto", color="#0504aa", alpha=0.75, rwidth=0.85
# )
# plt.grid(axis="y", alpha=0.75)
# plt.xlabel("Statistic")
# plt.ylabel("Frequency")
# plt.title(
# title + r" $(n=" + cnt + ", \mu=" + avg + ", \sigma=" + std + ")$"
# )
# maxfreq = n.max()
# # Set a clean upper y-axis limit.
# plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
# # Build file output and write
# plt.savefig(output_path, dpi=600)
# plt.close()
|
"""
Copyright (c) 2018-2019 ARM Limited. All rights reserved.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from io import open
from os import sep
from os.path import isfile, join, dirname
import json
import pytest
from tools.memap import MemapParser, _ArmccParser
from copy import deepcopy
PARSED_ARM_DATA = {
"startup/startup.o": {".text": 0xc0},
"[lib]/c_p.l/__main.o": {".text": 8},
"irqs/irqs.o": {".text": 0x98},
"data/data.o": {".data": 0x18, ".bss": 0x198},
"main.o": {".text": 0x36},
}
def test_parse_armcc():
memap = MemapParser()
memap.parse(join(dirname(__file__), "arm.map"), "ARM")
parsed_data_os_agnostic = dict()
for k in PARSED_ARM_DATA:
parsed_data_os_agnostic[k.replace('/', sep)] = PARSED_ARM_DATA[k]
assert memap.modules == parsed_data_os_agnostic
PARSED_IAR_DATA = {
"startup/startup.o": {".text": 0xc0},
"[lib]/d16M_tlf.a/__main.o": {".text": 8},
"irqs/irqs.o": {".text": 0x98},
"data/data.o": {".data": 0x18, ".bss": 0x198},
"main.o": {".text": 0x36},
}
def test_parse_iar():
memap = MemapParser()
memap.parse(join(dirname(__file__), "iar.map"), "IAR")
parsed_data_os_agnostic = dict()
for k in PARSED_IAR_DATA:
parsed_data_os_agnostic[k.replace('/', sep)] = PARSED_IAR_DATA[k]
assert memap.modules == parsed_data_os_agnostic
PARSED_GCC_DATA = {
"startup/startup.o": {".text": 0xc0},
"[lib]/d16M_tlf.a/__main.o": {".text": 8},
"[lib]/misc/foo.o": {".text": 8},
"irqs/irqs.o": {".text": 0x98},
"data/data.o": {".data": 0x18, ".bss": 0x198},
"main.o": {".text": 0x36},
}
def test_parse_gcc():
memap = MemapParser()
memap.parse(join(dirname(__file__), "gcc.map"), "GCC_ARM")
parsed_data_os_agnostic = dict()
for k in PARSED_GCC_DATA:
parsed_data_os_agnostic[k.replace('/', sep)] = PARSED_GCC_DATA[k]
assert memap.modules == parsed_data_os_agnostic
def test_add_empty_module():
memap = _ArmccParser()
old_modules = deepcopy(memap.modules)
memap.module_add("", 8, ".data")
assert(old_modules == memap.modules)
memap.module_add("main.o", 0, ".text")
assert(old_modules == memap.modules)
memap.module_add("main.o", 8, "")
assert(old_modules == memap.modules)
def test_add_full_module():
memap = _ArmccParser()
old_modules = deepcopy(memap.modules)
memap.module_add("main.o", 8, ".data")
assert(old_modules != memap.modules)
assert("main.o" in memap.modules)
assert(".data" in memap.modules["main.o"])
assert(memap.modules["main.o"][".data"] == 8)
|
from rest_framework import serializers
from users.models import User
from django.contrib.auth.models import Group
from django.contrib.auth.hashers import make_password
class AdminSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = [
'id',
'username',
'email',
'mobile',
'password',
'groups',
'user_permissions'
]
extra_kwargs = {
'password': {
"write_only": True,
},
}
def create(self, validated_data):
        # validated_data['password'] = make_password(validated_data['password'])
        # validated_data['is_staff'] = True
        # # password would not be hashed
        # return super().create(validated_data)
        # 1. Pop the many-to-many fields
        groups = validated_data.pop('groups')  # [5]
        user_permissions = validated_data.pop('user_permissions')  # [79, 80]
        # 2. Create the main user row
        admin_user = User.objects.create_superuser(**validated_data)
        # 3. Populate the through tables
        admin_user.groups.set(groups)
        admin_user.user_permissions.set(user_permissions)
return admin_user
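    # Example validated_data handled by create() (hypothetical values):
    #   {"username": "admin2", "password": "...", "email": "a@example.com",
    #    "mobile": "13800000000", "groups": [5], "user_permissions": [79, 80]}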
def update(self, instance, validated_data):
        # If a password was supplied, hash it; otherwise keep the existing hash
password = validated_data.get("password")
if password:
validated_data['password'] = make_password(password)
else:
validated_data['password'] = instance.password
return super().update(instance, validated_data)
class AdminGroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ['id', 'name']
|
#Votemain module
"""
Votelib module by Blake Cretney
This work is distributed AS IS. It is up to you to
determine if it is useful and safe. In particular,
NO WARRANTY is expressed or implied.
I permanently give everyone the rights to use, modify,
copy, distribute, re-distribute, and perform this work,
and all derived works, to the extent that I hold copyright
in them. My intent is to have this work treated as
public domain.
This module contains the heart of the program.
"""
from string import *
import re
import numpy
import sys
from sys import maxsize
from votelib import *
import votemethod
class Options:
cand_l = None # list of candidate names
zero_def=0 # zero out the defeats
method_nm=None # selected method
n_votes=0
record_pw=0 # do I have to record pairwise information
pw_tbl=None
record_ballots=0 # do I have to record complete ballots
ballot_tbl=None
tiebreaker=None
# order of candidates used by some methods to break ties
class Ballot:
votes=0
ballot=None
lineno=0 # current line being read (for error information)
def failure(x):
raise RuntimeError("Failure: %s\nLine %d" % (x,lineno))
def bug(x):
raise RuntimeError("Internal Error: %s\nLine %d" % (x,lineno))
def input_line():
global lineno
while 1:
rawline = input()
lineno=lineno+1
        comment=rawline.find('#') # filter out comments
        if comment!=-1:
            rawline=rawline[:comment]
        rawline=rawline.lstrip()
        while rawline and rawline[0]==">":
            rawline=rawline[1:]
            rawline=rawline.lstrip()
if rawline!="": break
return(rawline)
def read_table(x): # reads a directly entered table
n=x.shape[0]
try:
for i in range(n):
rawline=input_line()
            sline=rawline.split()[-n:]
for j in range(n):
if i!=j: x[i,j]=x[i,j]+int(sline[j])
except ValueError: failure('Bad Table Value')
except IndexError: failure('Malformed Table')
except EOFError: failure('EOF during table')
def get_options(optlist,o): # gets command line options
    for x in optlist:
        x=x.split(None,1)
        opt= x[0].lower()
if len(x)>1:
param= x[1]
else:
param= None
if opt != 'm' and o.method_nm==None:
failure('-m must be first option')
if opt == 'cands':
if param==None:
failure('Missing parameter')
if o.cand_l!=None:
failure('Redefinition of candidate list')
o.cand_l=[]
            for cand in param.split():
                if cand.find('-')==-1:
                    o.cand_l = o.cand_l + [cand]
                else:
                    crange=cand.split('-',1)
                    o.cand_l=o.cand_l + candRange(crange[0],crange[1])
n=len(o.cand_l)
if o.record_pw:
o.pw_tbl=numpy.zeros((n,n),numpy.int32) # pairwise table
if o.record_ballots:
o.ballot_tbl=[] # storage for ballots
elif opt=='m':
if o.method_nm!=None:
failure('Multiple methods selected')
if param==None:
failure('Missing parameter')
if o.n_votes>0: failure('-m must precede ballots')
            o.method_nm=param.lower()
if o.method_nm=="borda":
o.record_pw=1
elif o.method_nm=="bucklin":
o.record_ballots=1
elif o.method_nm=="c//irv":
o.method_nm="c_irv"
o.record_pw=1
o.record_ballots=1
elif o.method_nm=="copeland":
o.record_pw=1
elif o.method_nm=="irv":
o.record_ballots=1
elif o.method_nm=="minmax":
o.record_pw=1
elif o.method_nm=="borda-elim":
o.method_nm="borda_elim"
o.record_pw=1
elif o.method_nm=="nanson":
o.record_pw=1
elif o.method_nm=="pw-elim":
o.method_nm="pw_elim"
o.record_pw=1
elif o.method_nm=="s//irv":
o.method_nm="s_irv"
o.record_pw=1
o.record_ballots=1
elif o.method_nm=="s//minmax":
o.method_nm="s_minmax"
o.record_pw=1
elif o.method_nm=="schulze":
o.record_pw=1
elif o.method_nm=="smith":
o.record_pw=1
elif o.method_nm=="table":
o.record_pw=1
elif o.method_nm=="rp":
o.record_pw=1
elif o.method_nm=="ukvt":
o.record_pw=1
elif o.method_nm=="nrp":
o.record_pw=1
else: failure('unknown method: ' + o.method_nm)
elif opt== 'table':
if o.cand_l==None:
failure('-cands must precede -table')
if o.record_pw==0: failure('-table needs pairwise method')
if o.record_ballots!=0: failure('-table requires purely pairwise method')
if o.n_votes>0: failure('Tables must precede ballots')
read_table(o.pw_tbl)
elif opt=='tie':
if o.cand_l==None:
failure('-cands must precede -tie')
if param==None:
failure('Missing parameter')
if o.n_votes>0: failure('-tie must precede ballots')
if o.tiebreaker!=None:
failure('Multiple tiebreaker selected')
            tb=param.split()
o.tiebreaker=[]
try:
for cand in tb:
o.tiebreaker=o.tiebreaker + [o.cand_l.index(cand)]
except ValueError: failure('Unknown candidate used in -tie')
            if len(o.tiebreaker)!=len(o.cand_l):
failure("Tiebreaker must list all candidates")
elif opt=='zd':
if not o.record_pw:
failure('zero-defeats only works on pairwise')
o.zero_def=1
else:
failure('Unable to process option:' + repr(opt))
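# Example option string (hypothetical; assumes candRange expands letter ranges):
#   "-m schulze -cands A-D -tie A B C D"
# selects the Schulze method, defines candidates A through D and a tiebreak order.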
def vote_main():
o=Options()
if len(sys.argv)>1: # process the command line for options
        command=' '.join(sys.argv[1:])
        command=command.strip()
if command:
if command[0]!='-': failure('option must use hyphen')
get_options(re.split(r'\s+-',command[1:]),o)
try:
while o.cand_l==None:
rawline=input_line()
if rawline[0]=='-': # process argument lines
get_options(re.split(r'\s+-',rawline[1:]),o)
else:
failure('Some options must precede data')
n=len(o.cand_l)
while 1:
rawline = input_line()
if rawline[0]=='-': # process argument lines
get_options(re.split(r'\s+-',rawline[1:]),o)
continue
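            # Ballot line format (inferred from the parsing below): "<count>: <ranking>",
            # e.g. "3: A>B=C D" means 3 ballots ranking A first, B and C tied, then D.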
            bltsvote=rawline.split(":",1)
if len(bltsvote)==1: #check for number of ballots
ballots=1
rawline=bltsvote[0]
else:
try:
ballots=int(bltsvote[0])
rawline=bltsvote[1]
except ValueError: failure('illegal number of ballots')
            rawline=rawline.strip()
if len(rawline)==0: failure('missing ballot')
if ballots<=0: failure('Number of ballots must be positive')
o.n_votes=o.n_votes+ballots
            rawline=rawline.strip()
            rawline=re.sub(r'\s*=\s*','=',rawline) # remove whitespace around '='
            line=re.split(r'[\s>]+',rawline) # '>' and/or any remaining whitespace means '>'
#give each candidate a score based on where it appears on the ballot. n is best, 0 worst
working=numpy.zeros((n),numpy.int32)
level=n
for eqcands in line:
                cands= eqcands.split("=")
for cand in cands:
try:
x=o.cand_l.index(cand)
except ValueError: failure('Unknown candidate: ' + cand)
working[x]=level
level=level-1
if o.record_pw:
for i in range(n):
for j in range(n):
if working[i]>working[j]:
o.pw_tbl[i,j]=o.pw_tbl[i,j]+ballots
if o.record_ballots:
b=Ballot()
b.votes=ballots
b.ballot=working
o.ballot_tbl=o.ballot_tbl+[b]
except EOFError:
if o.cand_l==None:
print("Empty File. Nothing to do.")
return
global lineno
lineno=-1
print('VOTES ' , o.n_votes)
if o.record_pw:
if o.zero_def:
zero_defeats(o.pw_tbl)
print("Defeats Zero'd out")
else:
to_margins(o.pw_tbl)
print("Margins")
print_scores(o.pw_tbl,o.cand_l)
if o.method_nm=="table":
return
# choose which method to use on the data
    getattr(votemethod, o.method_nm)(o)
def vote_engine(fin=None,fout=None,opts=None):
old_in=sys.stdin
old_out=sys.stdout
old_argv=sys.argv
if fin: sys.stdin=fin
if fout: sys.stdout=fout
if opts: sys.argv=opts
    try:
        vote_main()
    except RuntimeError as e:
        print(e.args[0])
    finally:
        sys.stdin=old_in
        sys.stdout=old_out
        sys.argv=old_argv
|
from flask import json
from werkzeug.exceptions import HTTPException
def register_error_handler(flask_app):
flask_app.register_error_handler(HTTPException, __handle_exception)
def __handle_exception(e):
"""Return JSON instead of HTML for HTTP errors."""
# start with the correct headers and status code from the error
response = e.get_response()
# replace the body with JSON
response.data = json.dumps({
"code": e.code,
"name": e.name,
"description": e.description,
})
response.content_type = "application/json"
return response
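# Example response body for a 404 (fields follow the handler above; the description
# text is whatever Werkzeug attaches to the error):
#   {"code": 404, "name": "Not Found", "description": "..."}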
|
import gzip, shutil
def decompress(n):
with gzip.open(n, 'r') as f_in, open('farm_0.uc', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def compress(n):
with open(n, 'rb') as f_in:
with gzip.open('Events.json', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
decompress('farm_0.data')
|
__version_tuple__ = (2, 6, 0, "dev")
__version__ = '2.6.0-dev'
|
# -*- coding: utf-8 -*-
import unittest
from datetime import date
from skyscraper.utils.constants import POWER_KEY, TRUNK_KEY, MASS_KEY, PRICE_KEY, AGE_KEY, CURRENCY_KEY
from skyscraper.utils.constants import SPEEDOMETER_KEY, CAR_KEY, CONDITION_KEY
from skyscraper.utils.value_parser import ValueParser
class TestBasicPaths(unittest.TestCase):
default_input = {}
value_parser = ValueParser(default_input)
@staticmethod
def date_to_age(years, months):
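        # Returns a "YYYY/MM" string for the month lying `years` years and `months` months before today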
today = date.today()
month_diff = today.month - months
if month_diff <= 0:
years += 1
months = 12 + month_diff
else:
months = month_diff
return date(today.year - years, months, 1).strftime("%Y/%m")
def setUp(self):
self.default_input = {
CAR_KEY: 'http://hasznaltauto.hu/auto',
CONDITION_KEY: 'Újszerű',
SPEEDOMETER_KEY: '0 km',
AGE_KEY: date.today().strftime("%Y/%m")
}
self.value_parser = ValueParser(self.default_input)
def test_power_worth(self):
car = self.default_input
car[POWER_KEY] = '42 kW'
power_worth = self.value_parser.get_power_value()
self.assertEqual(power_worth, 3)
def test_condition_worth(self):
condition_worth = self.value_parser.get_condition_value()
self.assertEqual(condition_worth, 0)
car = self.default_input
car[CONDITION_KEY] = ''
condition_worth = self.value_parser.get_condition_value()
self.assertEqual(condition_worth, -20)
def test_trunk_worth(self):
car = self.default_input
car[TRUNK_KEY] = '290 l'
trunk_worth = self.value_parser.get_trunk_value()
self.assertEqual(trunk_worth, 2)
def test_mass_worth(self):
car = self.default_input
car[MASS_KEY] = '1600 kg'
mass_worth = self.value_parser.get_mass_value()
self.assertEqual(mass_worth, 3)
def test_speedometer_worth(self):
self.assert_speedo('0km', 0)
self.assert_speedo('0 km', 0)
self.assert_speedo('92,000 km', -9)
self.assert_speedo('140 000 km', -12)
self.assert_speedo('240 000 km', -16)
def test_price_worth(self):
car = self.default_input
# no power, no price
price_worth = self.value_parser.get_price_value()
self.assertEqual(price_worth, 0)
# no power
car[PRICE_KEY] = '6000000'
price_worth = self.value_parser.get_price_value()
self.assertEqual(price_worth, 0)
# no price
del car[PRICE_KEY]
car[POWER_KEY] = '100 kW'
price_worth = self.value_parser.get_price_value()
self.assertEqual(price_worth, 0)
# price and power
car[PRICE_KEY] = '26535'
car[CURRENCY_KEY] = 'EUR'
price_worth = self.value_parser.get_price_value()
self.assertEqual(price_worth, 10)
def test_age_worth(self):
self.assert_age(0, 3, -3)
self.assert_age(1, 0, -10)
self.assert_age(10, 0, -31)
self.assert_age(30, 0, -35)
self.assert_age(50, 0, 37)
'''ASSERTIONS'''
def assert_speedo(self, kilometers, expected):
car = self.default_input
car[SPEEDOMETER_KEY] = kilometers
speedo_worth = self.value_parser.get_speedometer_value()
self.assertEqual(speedo_worth, expected)
def assert_age(self, years, months, expected):
car = self.default_input
car[AGE_KEY] = TestBasicPaths.date_to_age(years, months)
age_worth = self.value_parser.get_age_value()
self.assertEqual(expected, age_worth)
|
import pytest
import requests
from schema_registry.client import SchemaRegistryClient, schema
from tests import data_gen
def test_context(client):
with client as c:
parsed = schema.AvroSchema(data_gen.BASIC_SCHEMA)
schema_id = c.register("test-basic-schema", parsed)
assert schema_id > 0
assert len(c.id_to_schema) == 1
def test_cert_no_key():
with pytest.raises(AssertionError):
SchemaRegistryClient(url="https://127.0.0.1:65534", cert_location="/path/to/cert")
def test_cert_with_key():
client = SchemaRegistryClient(
url="https://127.0.0.1:65534", cert_location="/path/to/cert", key_location="/path/to/key"
)
assert ("/path/to/cert", "/path/to/key") == client.cert
def test_custom_headers():
extra_headers = {"custom-serialization": "application/x-avro-json"}
client = SchemaRegistryClient(url="https://127.0.0.1:65534", extra_headers=extra_headers)
assert extra_headers == client.extra_headers
def test_override_headers(client, deployment_schema, mocker, response_klass):
extra_headers = {"custom-serialization": "application/x-avro-json"}
client = SchemaRegistryClient("https://127.0.0.1:65534", extra_headers=extra_headers)
assert client.prepare_headers().get("custom-serialization") == "application/x-avro-json"
subject = "test"
override_header = {"custom-serialization": "application/avro"}
request_patch = mocker.patch.object(
requests.sessions.Session, "request", return_value=response_klass(200, content={"id": 1})
)
client.register(subject, deployment_schema, headers=override_header)
prepare_headers = client.prepare_headers(body="1")
prepare_headers["custom-serialization"] = "application/avro"
request_patch.assert_called_once_with("POST", mocker.ANY, headers=prepare_headers, json=mocker.ANY)
def test_cert_path():
client = SchemaRegistryClient(url="https://127.0.0.1:65534", ca_location="/path/to/ca")
assert "/path/to/ca" == client.verify
def test_init_with_dict():
client = SchemaRegistryClient(
{
"url": "https://127.0.0.1:65534",
"ssl.certificate.location": "/path/to/cert",
"ssl.key.location": "/path/to/key",
}
)
assert "https://127.0.0.1:65534/" == client.url_manager.url
def test_empty_url():
with pytest.raises(AssertionError):
SchemaRegistryClient({"url": ""})
def test_invalid_type_url():
with pytest.raises(AttributeError):
SchemaRegistryClient(url=1)
def test_invalid_type_url_dict():
with pytest.raises(AttributeError):
SchemaRegistryClient({"url": 1})
def test_invalid_url():
with pytest.raises(AssertionError):
SchemaRegistryClient({"url": "example.com:65534"})
def test_basic_auth_url():
client = SchemaRegistryClient({"url": "https://user_url:secret_url@127.0.0.1:65534"})
assert ("user_url", "secret_url") == client.auth
def test_basic_auth_userinfo():
client = SchemaRegistryClient(
{
"url": "https://user_url:secret_url@127.0.0.1:65534",
"basic.auth.credentials.source": "user_info",
"basic.auth.user.info": "user_userinfo:secret_userinfo",
}
)
assert ("user_userinfo", "secret_userinfo") == client.auth
def test_basic_auth_sasl_inherit():
client = SchemaRegistryClient(
{
"url": "https://user_url:secret_url@127.0.0.1:65534",
"basic.auth.credentials.source": "SASL_INHERIT",
"sasl.mechanism": "PLAIN",
"sasl.username": "user_sasl",
"sasl.password": "secret_sasl",
}
)
assert ("user_sasl", "secret_sasl") == client.auth
def test_basic_auth_invalid():
with pytest.raises(ValueError):
SchemaRegistryClient(
{"url": "https://user_url:secret_url@127.0.0.1:65534", "basic.auth.credentials.source": "VAULT"}
)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'vars.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(390, 300)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.formLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(0, 0, 351, 411))
self.formLayoutWidget.setObjectName("formLayoutWidget")
self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.formLayoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.label_6 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_6)
self.label_7 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_7)
self.lineEdit = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
self.lineEdit_2 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_2.setObjectName("lineEdit_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
self.lineEdit_3 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_3.setObjectName("lineEdit_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEdit_3)
self.lineEdit_4 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_4.setObjectName("lineEdit_4")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_4)
self.lineEdit_5 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_5.setObjectName("lineEdit_5")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.lineEdit_5)
self.lineEdit_6 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_6.setObjectName("lineEdit_6")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.lineEdit_6)
self.lineEdit_7 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_7.setObjectName("lineEdit_7")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.lineEdit_7)
self.label_8 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_8.setObjectName("label_8")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_8)
self.label_9 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_9.setObjectName("label_9")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_9)
self.label_10 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_10.setObjectName("label_10")
self.formLayout.setWidget(9, QtWidgets.QFormLayout.LabelRole, self.label_10)
self.label_11 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_11.setObjectName("label_11")
self.formLayout.setWidget(10, QtWidgets.QFormLayout.LabelRole, self.label_11)
self.label_12 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_12.setObjectName("label_12")
self.formLayout.setWidget(11, QtWidgets.QFormLayout.LabelRole, self.label_12)
self.lineEdit_8 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_8.setObjectName("lineEdit_8")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.lineEdit_8)
self.lineEdit_9 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_9.setObjectName("lineEdit_9")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.lineEdit_9)
self.lineEdit_10 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_10.setObjectName("lineEdit_10")
self.formLayout.setWidget(9, QtWidgets.QFormLayout.FieldRole, self.lineEdit_10)
self.lineEdit_11 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_11.setObjectName("lineEdit_11")
self.formLayout.setWidget(10, QtWidgets.QFormLayout.FieldRole, self.lineEdit_11)
self.lineEdit_12 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_12.setObjectName("lineEdit_12")
self.formLayout.setWidget(11, QtWidgets.QFormLayout.FieldRole, self.lineEdit_12)
self.label_13 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_13.setObjectName("label_13")
self.lineEdit_13 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_13.setObjectName("lineEdit_13")
self.formLayout.setWidget(12, QtWidgets.QFormLayout.FieldRole, self.lineEdit_13)
self.formLayout.setWidget(12, QtWidgets.QFormLayout.LabelRole, self.label_13)
self.label_14 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_14.setObjectName("label_14")
self.lineEdit_14 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_14.setObjectName("lineEdit_14")
self.formLayout.setWidget(13, QtWidgets.QFormLayout.FieldRole, self.lineEdit_14)
self.formLayout.setWidget(13, QtWidgets.QFormLayout.LabelRole, self.label_14)
self.label_15 = QtWidgets.QLabel(self.formLayoutWidget)
self.label_15.setObjectName("label_15")
self.lineEdit_15 = QtWidgets.QLineEdit(self.formLayoutWidget)
self.lineEdit_15.setObjectName("lineEdit_15")
self.formLayout.setWidget(14, QtWidgets.QFormLayout.FieldRole, self.lineEdit_15)
self.formLayout.setWidget(14, QtWidgets.QFormLayout.LabelRole, self.label_15)
self.pushButton = QtWidgets.QPushButton(self.formLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.formLayout.setWidget(15, QtWidgets.QFormLayout.FieldRole, self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 390, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.centralwidget.setLayout(self.formLayout)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Переменные"))
self.label.setText(_translate("MainWindow", "Начальная скорость"))
self.label_2.setText(_translate("MainWindow", "Начальная скорость по X"))
self.label_3.setText(_translate("MainWindow", "Начальная скорость по Y"))
self.label_4.setText(_translate("MainWindow", "Угол к горизонту"))
self.label_5.setText(_translate("MainWindow", "Масса тела"))
self.label_6.setText(_translate("MainWindow", "Время полета"))
self.label_7.setText(_translate("MainWindow", "Длина полета по X"))
self.label_8.setText(_translate("MainWindow", "Макс. высота"))
self.label_9.setText(_translate("MainWindow", "Сила броска"))
self.label_10.setText(_translate("MainWindow", "Начальная координата X"))
self.label_11.setText(_translate("MainWindow", "Начальная координата Y"))
self.label_12.setText(_translate("MainWindow", "Момент времени t"))
self.label_13.setText(_translate("MainWindow", "X в момент времени t"))
self.label_14.setText(_translate("MainWindow", "Y в момент времени t"))
self.label_15.setText(_translate("MainWindow", "Vy в момент времени t"))
self.pushButton.setText(_translate("MainWindow", "Ok"))
|
class ProgressTracker:
def __init__(self):
self.set_skipped_paths([])
def skip_file(self, file_path: str):
return not all([file_path.startswith(path) for path in self._skipped])
def set_skipped_paths(self, skipped_paths):
self._skipped = skipped_paths
|
#!/usr/bin/env python
import zmq
import sys
import time
import pickle
# Socket to talk to server
context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.setsockopt(zmq.RCVHWM, 2) # This line added.
sub.setsockopt(zmq.SUBSCRIBE, b'')
# sub.setsockopt(zmq.CONFLATE, True)
USE_IPC = False
if USE_IPC:
    sub.connect("ipc:///tmp/zmq")
else:
    sub.connect("tcp://0.0.0.0:5558")
while True:
#msg = sub.recv_multipart()
topic = sub.recv()
# data = sub.recv()
data = sub.recv_pyobj()
print(data)
#print(pickle.loads(msg[1]))
time.sleep(0.5)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains implementation of RPC client for Decapod API.
Decapod client :py:class:`Client` is a simple RPC client and thin wrapper
for the `requests <http://docs.python-requests.org/en/master/>`_ library
which allows end user to work with remote API without worrying about
connections and endpoints.
The RPC client manages authorization itself (therefore you have to supply
it with a user/password pair on initialization), so there is no need for
explicit session objects; if you do not like that behavior, you may always
re-login explicitly.
Usage example:
.. code-block:: python
client = Client(url="http://localhost", login="root", password="root")
This will initialize a new client. Initialization does not imply an immediate
login; the login happens (thread-safely) on the first real method execution.
.. code-block:: python
users = client.get_users()
This will return a list of active users in Decapod.
.. code-block:: json
[
{
"data": {
"email": "noreply@example.com",
"full_name": "Root User",
"login": "root",
"role_id": "37fb532f-2620-4e0d-80e6-b68ed6988a6d"
},
"id": "6567c2ab-54cc-40b7-a811-6147a3f3ea83",
"initiator_id": null,
"model": "user",
"time_deleted": 0,
"time_updated": 1478865388,
"version": 1
}
]
Incoming JSON will be parsed. If it is not possible,
:py:exc:`decapodlib.exceptions.DecapodError` will be raised.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import abc
import inspect
import logging
import socket
import warnings
import pkg_resources
import requests
import requests.adapters
import six
from decapodlib import auth
from decapodlib import exceptions
try:
import simplejson as json
except ImportError:
import json
LOG = logging.getLogger(__name__)
"""Logger."""
try:
VERSION = pkg_resources.get_distribution("decapodlib").version
except pkg_resources.DistributionNotFound as exc:
warnings.warn("Module is imported outside of distribution.", ImportWarning)
VERSION = "unknown"
__all__ = "VERSION", "Client", "V1Client"
def json_dumps(data):
"""Makes compact JSON dumps.
:param data: Data which should be encoded to JSON.
:type data: Any data, suitable for :py:func:`json.dumps`
:return: Data, encoded to JSON.
:rtype: str
:raises ValueError: if data cannot be encoded to JSON.
"""
return json.dumps(data, separators=(",", ":"))
def make_query_params(**request_params):
"""Makes query string parameters for request.
The reason to have this function is to exclude parameters which value
is ``None``.
:param request_params: Keyword arguments to be used as GET query
params later.
:return: Parameters to be encoded for GET query.
:rtype: dict
"""
params = {}
for key, value in six.iteritems(request_params):
if value is not None:
params[key] = value
return params
def json_response(func):
"""Decorator which parses :py:class:`requests.Response` and
returns unpacked JSON. If ``Content-Type`` of response is not
``application/json``, then it returns text.
:return: Data of :py:class:`requests.Response` from decorated
function.
:raises decapodlib.exceptions.DecapodAPIError: if decoding is not possible
or response status code is not ``200``.
"""
@six.wraps(func)
def decorator(*args, **kwargs):
raw_response = kwargs.pop("raw_response", False)
response = func(*args, **kwargs)
if raw_response:
return response
if isinstance(response, dict):
return response
if response.ok:
content_type = response.headers.get("Content-Type")
content_type = content_type or "application/json"
if content_type == "application/json":
return response.json()
return response.text
raise exceptions.DecapodAPIError(response)
return decorator
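# Note: any wrapped call accepts raw_response=True to get the underlying
# requests.Response object instead of the decoded JSON (see json_response above).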
def inject_timeout(func):
"""Decorator which injects ``timeout`` parameter into request.
    On client initialization, a default timeout is set. This timeout will be
    injected into any request if no explicit parameter is set.
:return: Value of decorated function.
"""
@six.wraps(func)
def decorator(self, *args, **kwargs):
kwargs.setdefault("timeout", self._timeout)
return func(self, *args, **kwargs)
return decorator
def inject_pagination_params(func):
"""Decorator which injects pagination params into function.
This decorator pops out such parameters as ``page``, ``per_page``,
``all_items``, ``filter`` and ``sort_by`` and prepares correct
``query_params`` unified parameter which should be used for
as a parameter of decorated function.
:return: Value of decorated function.
"""
@six.wraps(func)
def decorator(*args, **kwargs):
params = make_query_params(
page=kwargs.pop("page", None),
per_page=kwargs.pop("per_page", None),
all=kwargs.pop("all_items", None),
filter=kwargs.pop("filter", None),
sort_by=kwargs.pop("sort_by", None)
)
if "all" in params:
params["all"] = str(int(bool(params["all"])))
if "filter" in params:
params["filter"] = json_dumps(params["filter"])
if "sort_by" in params:
params["sort_by"] = json_dumps(params["sort_by"])
kwargs["query_params"] = params
return func(*args, **kwargs)
return decorator
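# Example (hypothetical values): client.get_clusters(page=2, per_page=20, sort_by={"name": 1})
# is turned by inject_pagination_params into query_params
# {"page": 2, "per_page": 20, "sort_by": '{"name":1}'} before the request is sent.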
def no_auth(func):
"""Decorator which injects mark that no authentication should
be performed for this API call.
:return: Value of decorated function.
"""
@six.wraps(func)
def decorator(*args, **kwargs):
kwargs["auth"] = auth.no_auth
return func(*args, **kwargs)
return decorator
def wrap_errors(func):
"""Decorator which logs and catches all errors of decorated function.
Also wraps all possible errors into :py:exc:`DecapodAPIError` class.
:return: Value of decorated function.
:raises decapodlib.exceptions.DecapodError: on any exception in
decorated function.
"""
@six.wraps(func)
def decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
if isinstance(exc, exceptions.DecapodError):
LOG.error("Error on access to API: %s", exc)
raise
LOG.exception("Exception in decapodlib: %s", exc)
raise exceptions.DecapodAPIError(exc)
return decorator
def client_metaclass(name, bases, attrs):
"""A client metaclass to create client instances.
Basically, it just wraps all public methods with
:py:func:`wrap_errors`/:py:func:`json_response` decorator pair so no
need to explicitly define those decorators for every method.
"""
new_attrs = {}
for key, value in six.iteritems(attrs):
if not key.startswith("_") and inspect.isfunction(value):
value = json_response(value)
value = wrap_errors(value)
value = inject_timeout(value)
new_attrs[key] = value
return type(name, bases, new_attrs)
class HTTPAdapter(requests.adapters.HTTPAdapter):
"""HTTP adapter for client's :py:class:`requests.Session` which injects
correct User-Agent header for request."""
USER_AGENT = "decapodlib/{0}".format(VERSION)
"""User agent for :py:class:`decapodlib.client.Client` instance.
As a rule, it is just ``decapodlib/{version}`` string.
"""
def add_headers(self, request, **kwargs):
request.headers["User-Agent"] = self.USER_AGENT
super(HTTPAdapter, self).add_headers(request, **kwargs)
@six.add_metaclass(abc.ABCMeta)
@six.python_2_unicode_compatible
class Client(object):
"""A base RPC client model.
:param str url: URL of Decapod API (*without* version prefix like ``/v1``).
:param str login: Login of user in Decapod.
:param str password: Password of user in Decapod.
:param timeout: Timeout for remote requests. If ``None`` is set,
default socket timeout (e.g which is set by
:py:func:`socket.setdefaulttimeout`) will be used.
:param bool verify: If remote URL implies SSL, then using this option
client will check SSL certificate for validity.
:param certificate_file: If SSL works with client certificate, this
option sets the path to such certificate. If ``None`` is set,
then it implies that no client certificate should be used.
:type timeout: :py:class:`int` or ``None``
:type certificate_file: :py:class:`str` or ``None``
"""
AUTH_CLASS = None
"""Base class for authenication."""
@staticmethod
def _prepare_base_url(url):
"""Prepares base url to be used further."""
url = url.strip().rstrip("/")
if not url.startswith("http"):
url = "http://{0}".format(url)
return url
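    # e.g. " example.com/api/ " -> "http://example.com/api" (whitespace and the
    # trailing slash are stripped, a scheme is added when missing)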
def __init__(self, url, login, password, timeout=None, verify=True,
certificate_file=None):
self._url = self._prepare_base_url(url)
self._login = login
self._password = password
self._session = requests.Session()
self._timeout = timeout or socket.getdefaulttimeout() or None
adapter = HTTPAdapter()
self._session.mount("http://", adapter)
self._session.mount("https://", adapter)
self._session.verify = bool(verify)
if verify and certificate_file:
self._session.cert = certificate_file
if self.AUTH_CLASS:
self._session.auth = self.AUTH_CLASS(self)
def _make_url(self, endpoint):
"""Concatenates base url and endpoint."""
url = "{0}{1}".format(self._url, endpoint)
if not url.endswith("/"):
url += "/"
return url
@abc.abstractmethod
def login(self, **kwargs):
raise NotImplementedError()
def __str__(self):
return "DecapodAPI: url={0!r}, login={1!r}, password={2!r}".format(
self._url, self._login, "*" * len(self._password)
)
def __repr__(self):
return "<{0}(url={1!r}, login={2!r}, password={3!r})>".format(
self.__class__.__name__,
self._url,
self._login,
"*" * len(self._password)
)
@six.add_metaclass(client_metaclass)
class V1Client(Client):
"""Implemetation of :py:class:`decapodlib.client.Client`
which works with API version 1.
Please check parameters for :py:class:`decapodlib.client.Client` class.
.. note::
All ``**kwargs`` keyword arguments here are the same as
:py:meth:`requests.Session.request` takes.
"""
AUTH_CLASS = auth.V1Auth
def login(self, **kwargs):
"""This methods logins users into API.
Basically, you do not need to execute this method by yourself,
client will implicitly execute it when needed.
This method does ``POST /v1/auth`` endpoint call.
:return: Model of the Token.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(self.AUTH_CLASS.AUTH_URL)
payload = {
"username": self._login,
"password": self._password
}
response = self._session.post(url, json=payload, **kwargs)
return response
def logout(self, **kwargs):
"""This method logouts users from API (after that security token
will be deleted).
Basically, you do not need to execute this method by yourself,
client will implicitly execute it when needed.
This method does ``DELETE /v1/auth`` endpoint call.
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
if not self._session.auth.token:
return {}
url = self._make_url(self.AUTH_CLASS.AUTH_URL)
try:
return self._session.delete(url, **kwargs)
except Exception:
return {}
finally:
self._session.auth.revoke_token()
@inject_pagination_params
def get_clusters(self, query_params, **kwargs):
"""This method fetches a list of latest cluster models from API.
By default, only active clusters will be listed.
This method does ``GET /v1/cluster`` endpoint call.
:return: List of latest cluster models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/")
return self._session.get(url, params=query_params, **kwargs)
def get_cluster(self, cluster_id, **kwargs):
"""This method fetches a single cluster model (latest version)
from API.
This method does ``GET /v1/cluster/{cluster_id}`` endpoint call.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:return: Cluster model of latest available version
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/{0}/".format(cluster_id))
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_cluster_versions(self, cluster_id, query_params, **kwargs):
"""This method fetches a list of all versions for a certain cluster
model.
This method does ``GET /v1/cluster/{cluster_id}/version/`` endpoint
call.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:return: List of cluster versions for cluster with ID ``cluster_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/{0}/version/".format(cluster_id))
return self._session.get(url, params=query_params, **kwargs)
def get_cluster_version(self, cluster_id, version, **kwargs):
"""This method fetches a certain version of particular cluster model.
This method does ``GET /v1/cluster/{cluster_id}/version/{version}``
endpoint call.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:param int version: The number of version to fetch.
:return: Cluster model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/cluster/{0}/version/{1}/".format(cluster_id, version))
return self._session.get(url, **kwargs)
def create_cluster(self, name, **kwargs):
"""This method creates new cluster model.
This method does ``POST /v1/cluster/`` endpoint call.
:param str name: Name of the cluster.
:return: New cluster model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/")
payload = {
"name": name
}
return self._session.post(url, json=payload, **kwargs)
def update_cluster(self, model_data, **kwargs):
"""This methods updates cluster model.
Please be noticed that no real update is performed, just a new
version of the same cluster is created.
This method does ``PUT /v1/cluster/`` endpoint call.
:param dict model_data: Updated model of the cluster.
:return: Updated cluster model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/{0}/".format(model_data["id"]))
return self._session.put(url, json=model_data, **kwargs)
def delete_cluster(self, cluster_id, **kwargs):
"""This methods deletes cluster model.
Please be noticed that no real delete is performed, cluster
model is marked as deleted (``time_deleted > 0``) and model will
be skipped from listing, updates are forbidden.
This method does ``DELETE /v1/cluster/`` endpoint call.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:return: Deleted cluster model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/cluster/{0}/".format(cluster_id))
return self._session.delete(url, **kwargs)
@inject_pagination_params
def get_executions(self, query_params, **kwargs):
"""This method fetches a list of latest execution models from API.
This method does ``GET /v1/execution`` endpoint call.
:return: List of latest execution models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/")
return self._session.get(url, params=query_params, **kwargs)
def get_execution(self, execution_id, **kwargs):
"""This method fetches a single execution model (latest version)
from API.
This method does ``GET /v1/execution/{execution_id}`` endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form
of execution's ID
:return: Execution model of latest available version
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/{0}/".format(execution_id))
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_execution_versions(self, execution_id, query_params, **kwargs):
"""This method fetches a list of all versions for a certain execution
model.
This method does ``GET /v1/execution/{execution_id}/version/``
endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form
of execution's ID
:return: List of execution versions for execution with
ID ``execution_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/{0}/version/".format(execution_id))
return self._session.get(url, params=query_params, **kwargs)
def get_execution_version(self, execution_id, version, **kwargs):
"""This method fetches a certain version of particular execution model.
This method does ``GET
/v1/execution/{execution_id}/version/{version}`` endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form
of execution's ID
:param int version: The number of version to fetch.
:return: Execution model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/execution/{0}/version/{1}/".format(execution_id, version))
return self._session.get(url, **kwargs)
def create_execution(self, playbook_configuration_id,
playbook_configuration_version, **kwargs):
"""This method creates new execution model.
This method does ``POST /v1/execution/`` endpoint call.
:param str playbook_configuration_id: UUID4 (:rfc:`4122`) in
string form of playbook configuration's ID.
:param int playbook_configuration_version: Version of playbook
configuration model.
:return: New execution model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/")
payload = {
"playbook_configuration": {
"id": playbook_configuration_id,
"version": playbook_configuration_version
}
}
return self._session.post(url, json=payload, **kwargs)
def cancel_execution(self, execution_id, **kwargs):
"""This method cancels existing execution.
This method does ``DELETE /v1/execution/`` endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form of
execution's ID.
:return: Canceled execution model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/{0}/".format(execution_id))
return self._session.delete(url, **kwargs)
@inject_pagination_params
def get_execution_steps(self, execution_id, query_params, **kwargs):
"""This method fetches step models of the execution.
This method does ``GET /v1/execution/{execution_id}/steps``
endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form of
execution's ID.
:return: List of execution steps.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/execution/{0}/steps/".format(execution_id))
return self._session.get(url, params=query_params, **kwargs)
def get_execution_log(self, execution_id, **kwargs):
"""This method fetches text execution log for a certain execution.
Execution log is a raw Ansible execution log, that one, which
is generated by :program:`ansible-playbook` program.
This method does ``GET /v1/execution/{execution_id}/log``
endpoint call.
:param str execution_id: UUID4 (:rfc:`4122`) in string form of
execution's ID.
        :return: Raw text of the execution log.
        :rtype: str
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
kwargs.setdefault("headers", {}).setdefault(
"Content-Type", "application/json"
)
url = self._make_url("/v1/execution/{0}/log/".format(execution_id))
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_playbook_configurations(self, query_params, **kwargs):
"""This method fetches a list of latest playbook configuration models
from API.
By default, only active playbook configurations will be listed.
This method does ``GET /v1/playbook_configuration`` endpoint call.
:return: List of latest playbook configuration models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/playbook_configuration/")
return self._session.get(url, params=query_params, **kwargs)
def get_playbook_configuration(self, playbook_configuration_id, **kwargs):
"""This method fetches a single playbook configuration model
(latest version) from API.
This method does ``GET
/v1/playbook_configuration/{playbook_configuration_id}``
endpoint call.
:param str playbook_configuration_id: UUID4 (:rfc:`4122`) in
string form of playbook configuration's ID.
:return: Playbook configuration model of latest available version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/playbook_configuration/{0}/".format(playbook_configuration_id)
)
return self._session.get(url, **kwargs)
def get_playbook_configuration_versions(self, playbook_configuration_id,
query_params, **kwargs):
"""This method fetches a list of all versions for a certain
playbook configuration model.
This method does ``GET
/v1/playbook_configuration/{playbook_configuration_id}/version/``
endpoint call.
:param str playbook_configuration_id: UUID4 (:rfc:`4122`) in
string form of playbook configuration's ID.
:return: List of playbook configuration versions for playbook
configuration with ID ``playbook_configuration_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/playbook_configuration/{0}/version/".format(
playbook_configuration_id))
return self._session.get(url, params=query_params, **kwargs)
def get_playbook_configuration_version(self, playbook_configuration_id,
version, **kwargs):
"""This method fetches a certain version of particular playbook
configuration model.
This method does ``GET
/v1/playbook_configuration/{playbook_configuration_id}/version/{version}``
endpoint call.
:param str playbook_configuration_id: UUID4 (:rfc:`4122`) in
string form of playbook configuration's ID
:param int version: The number of version to fetch.
:return: Playbook configuration model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/playbook_configuration/{0}/version/{1}/".format(
playbook_configuration_id, version))
return self._session.get(url, **kwargs)
def create_playbook_configuration(self, name, cluster_id, playbook_id,
server_ids, hints=None, run_after=False,
**kwargs):
"""This method creates new playbook configuration model.
This method does ``POST /v1/playbook_configuration/`` endpoint
call.
Hints for playbook configuration are the list of optional
parameters for creating playbook configuration. It
has to be the list key/value parameters obtained from
:py:meth:`decapodlib.client.V1Client.get_playbooks`.
.. code-block:: json
[
{
"id": "dmcrypt",
"value": true
}
]
:param str name: Name of the playbook configuration.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:param str playbook_id: ID of playbook to use.
:param server_ids: List of server UUID4 (:rfc:`4122`) in string
form of server model IDs.
:type server_ids: [:py:class:`str`, ...]
:param list hints: List of hints for playbook configuration.
:param bool run_after: Run playbook configuration after create.
:return: New cluster model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/playbook_configuration/")
payload = {
"name": name,
"cluster_id": cluster_id,
"playbook_id": playbook_id,
"server_ids": list(set(server_ids)),
"hints": hints or [],
"run": run_after
}
return self._session.post(url, json=payload, **kwargs)
def update_playbook_configuration(self, model_data, **kwargs):
"""This method updates playbook configuration model.
Please note that no real update is performed; a new version
of the same playbook configuration is created instead.
This method does ``PUT /v1/playbook_configuration/`` endpoint
call.
:param dict model_data: Updated model of the playbook configuration.
:return: Updated playbook configuration model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/playbook_configuration/{0}/".format(model_data["id"]))
return self._session.put(url, json=model_data, **kwargs)
def delete_playbook_configuration(self, playbook_configuration_id,
**kwargs):
"""This method deletes playbook configuration model.
Please note that no real delete is performed; the playbook
configuration model is marked as deleted (``time_deleted > 0``),
skipped from listings, and further updates are forbidden.
This method does ``DELETE /v1/playbook_configuration/`` endpoint
call.
:param str playbook_configuration_id: UUID4 (:rfc:`4122`) in
string form of playbook configuration's ID
:return: Deleted playbook configuration model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/playbook_configuration/{0}/".format(playbook_configuration_id)
)
return self._session.delete(url, **kwargs)
@inject_pagination_params
def get_servers(self, query_params, **kwargs):
"""This method fetches a list of latest server models from API.
By default, only active servers will be listed.
This method does ``GET /v1/server`` endpoint call.
:return: List of latest server models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/")
return self._session.get(url, params=query_params, **kwargs)
def get_server(self, server_id, **kwargs):
"""This method fetches a single server model (latest version)
from API.
This method does ``GET /v1/server/{server_id}`` endpoint call.
:param str server_id: UUID4 (:rfc:`4122`) in string form
of server's ID
:return: Server model of latest available version
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/{0}/".format(server_id))
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_server_versions(self, server_id, query_params, **kwargs):
"""This method fetches a list of all versions for a certain server
model.
This method does ``GET /v1/server/{server_id}/version/``
endpoint call.
:param str server_id: UUID4 (:rfc:`4122`) in string form
of server's ID
:return: List of server versions for server with ID ``server_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/{0}/version/".format(server_id))
return self._session.get(url, params=query_params, **kwargs)
def get_server_version(self, server_id, version, **kwargs):
"""This method fetches a certain version of particular server model.
This method does ``GET /v1/server/{server_id}/version/{version}``
endpoint call.
:param str server_id: UUID4 (:rfc:`4122`) in string form
of server's ID
:param int version: The number of version to fetch.
:return: Server model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/server/{0}/version/{1}/".format(server_id, version))
return self._session.get(url, **kwargs)
def create_server(self, server_id, host, username, **kwargs):
"""This method creates new server model.
This method does ``POST /v1/server/`` endpoint call.
.. warning::
You should avoid using this method manually.
Servers must be discovered using `cloud-init
<https://cloudinit.readthedocs.io/en/latest/>`_ based
discovery mechanism.
:param str server_id: Unique ID of server.
:param str host: Hostname of the server (should be accessible by
Decapod). It is better to use an FQDN here.
:param str username: The name of the user for Ansible on this server.
    Decapod runs Ansible, which connects over SSH to the machine with
    the hostname given in the ``host`` parameter using that username.
:return: New server model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/")
payload = {
"id": server_id,
"host": host,
"username": username
}
return self._session.post(url, json=payload, **kwargs)
def put_server(self, model_data, **kwargs):
"""This methods updates server model.
Please be noticed that no real update is performed, just a new
version of the same server is created.
This method does ``PUT /v1/server/`` endpoint call.
:param dict model_data: Updated model of the server.
:return: Updated server model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/{0}/".format(model_data["id"]))
return self._session.put(url, json=model_data, **kwargs)
def delete_server(self, server_id, **kwargs):
"""This methods deletes server model.
Please be noticed that no real delete is performed, server
model is marked as deleted (``time_deleted > 0``) and model will
be skipped from listing, updates are forbidden.
This method does ``DELETE /v1/server/`` endpoint call.
:param str server_id: UUID4 (:rfc:`4122`) in string form
of server's ID
:return: Deleted server model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/server/{0}/".format(server_id))
return self._session.delete(url, **kwargs)
@inject_pagination_params
def get_users(self, query_params, **kwargs):
"""This method fetches a list of latest user models from API.
By default, only active users will be listed.
This method does ``GET /v1/user`` endpoint call.
:return: List of latest user models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/")
return self._session.get(url, params=query_params, **kwargs)
def get_user(self, user_id, **kwargs):
"""This method fetches a single user model (latest version)
from API.
This method does ``GET /v1/user/{user_id}`` endpoint call.
:param str user_id: UUID4 (:rfc:`4122`) in string form
of user's ID
:return: User model of latest available version
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/{0}/".format(user_id))
return self._session.get(url, **kwargs)
def get_user_self(self, **kwargs):
"""This methods requests model of current user.
This method does ``GET /v1/user/self/`` endpoint call.
:return: User model of current user.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/self/")
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_user_versions(self, user_id, query_params, **kwargs):
"""This method fetches a list of all versions for a certain user
model.
This method does ``GET /v1/user/{user_id}/version/`` endpoint
call.
:param str user_id: UUID4 (:rfc:`4122`) in string form
of user's ID
:return: List of user versions for user with ID ``user_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/{0}/version/".format(user_id))
return self._session.get(url, params=query_params, **kwargs)
def get_user_version(self, user_id, version, **kwargs):
"""This method fetches a certain version of particular user model.
This method does ``GET /v1/user/{user_id}/version/{version}``
endpoint call.
:param str user_id: UUID4 (:rfc:`4122`) in string form
of user's ID
:param int version: The number of version to fetch.
:return: User model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/user/{0}/version/{1}/".format(user_id, version))
return self._session.get(url, **kwargs)
def create_user(self, login, email, full_name="", role_id=None, **kwargs):
"""This method creates new user model.
This method does ``POST /v1/user/`` endpoint call.
:param str login: Login of the user.
:param str email: Email of the user.
:param str full_name: Full name of the user.
:param role_id: ID of the role to assign to the user (may be ``None``).
:return: New user model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/")
payload = {
"login": login,
"email": email,
"full_name": full_name,
"role_id": role_id
}
return self._session.post(url, json=payload, **kwargs)
def update_user(self, model_data, **kwargs):
"""This methods updates user model.
Please be noticed that no real update is performed, just a new
version of the same user is created.
This method does ``PUT /v1/user/`` endpoint call.
:param dict model_data: Updated model of the user.
:return: Updated user model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/{0}/".format(model_data["id"]))
return self._session.put(url, json=model_data, **kwargs)
def delete_user(self, user_id, **kwargs):
"""This methods deletes user model.
Please be noticed that no real delete is performed, user model
is marked as deleted (``time_deleted > 0``) and model will be
skipped from listing, updates are forbidden.
This method does ``DELETE /v1/user/`` endpoint call.
:param str user_id: UUID4 (:rfc:`4122`) in string form
of user's ID
:return: Deleted user model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/user/{0}/".format(user_id))
return self._session.delete(url, **kwargs)
@inject_pagination_params
def get_roles(self, query_params, **kwargs):
"""This method fetches a list of latest role models from API.
By default, only active roles will be listed.
This method does ``GET /v1/role`` endpoint call.
:return: List of latest role models.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/")
return self._session.get(url, params=query_params, **kwargs)
def get_role(self, role_id, **kwargs):
"""This method fetches a single role model (latest version)
from API.
This method does ``GET /v1/role/{role_id}`` endpoint call.
:param str role_id: UUID4 (:rfc:`4122`) in string form
of role's ID
:return: Role model of latest available version
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/{0}/".format(role_id))
return self._session.get(url, **kwargs)
def get_role_self(self, **kwargs):
"""This methods requests model of role of current user.
This method does ``GET /v1/role/self/`` endpoint call.
:return: Role model of current user.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/self/")
return self._session.get(url, **kwargs)
@inject_pagination_params
def get_role_versions(self, role_id, query_params, **kwargs):
"""This method fetches a list of all versions for a certain role
model.
This method does ``GET /v1/role/{role_id}/version/`` endpoint
call.
:param str role_id: UUID4 (:rfc:`4122`) in string form
of role's ID
:return: List of role versions for role with ID ``role_id``.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/{0}/version/".format(role_id))
return self._session.get(url, params=query_params, **kwargs)
def get_role_version(self, role_id, version, **kwargs):
"""This method fetches a certain version of particular role model.
This method does ``GET /v1/role/{role_id}/version/{version}``
endpoint call.
:param str role_id: UUID4 (:rfc:`4122`) in string form
of role's ID
:param int version: The number of version to fetch.
:return: Role model of certain version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url(
"/v1/role/{0}/version/{1}/".format(role_id, version))
return self._session.get(url, **kwargs)
def create_role(self, name, permissions, **kwargs):
"""This method creates new role model.
This method does ``POST /v1/role`` endpoint call.
This method accepts parameter ``permissions``. This is a list
of permissions like that:
.. code-block:: json
[
{
"name": "playbook",
"permissions": [
"add_osd",
"cluster_deploy",
"hello_world",
"purge_cluster",
"remove_osd"
]
},
{
"name": "api",
"permissions": [
"create_cluster",
"create_execution",
"create_playbook_configuration",
"create_role",
"create_server",
"create_user",
"delete_cluster",
"delete_execution",
"delete_playbook_configuration",
"delete_role",
"delete_server",
"delete_user",
"edit_cluster",
"edit_playbook_configuration",
"edit_role",
"edit_server",
"edit_user",
"view_cluster",
"view_cluster_versions",
"view_execution",
"view_execution_steps",
"view_execution_version",
"view_playbook_configuration",
"view_playbook_configuration_version",
"view_role",
"view_role_versions",
"view_server",
"view_server_versions",
"view_user",
"view_user_versions"
]
}
]
So, each element is a dict with ``name`` and ``permissions``
fields.
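For example, a role could be created like this (a minimal sketch;
the role name and permission names below are illustrative):
.. code-block:: python
    client.create_role(
        "viewer",
        [
            {"name": "api", "permissions": ["view_cluster", "view_user"]},
            {"name": "playbook", "permissions": ["hello_world"]},
        ],
    )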
:param str name: Name of the role.
:param list permissions: A list of permissions. Please
check example above.
:return: New role model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/")
payload = {
"name": name,
"permissions": permissions
}
return self._session.post(url, json=payload, **kwargs)
def update_role(self, model_data, **kwargs):
"""This methods updates role model.
Please be noticed that no real update is performed, just a new
version of the same role is created.
This method does ``PUT /v1/role/`` endpoint call.
:param dict model_data: Updated model of the role.
:return: Updated role model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/{0}/".format(model_data["id"]))
return self._session.put(url, json=model_data, **kwargs)
def delete_role(self, role_id, **kwargs):
"""This methods deletes role model.
Please be noticed that no real delete is performed, role model
is marked as deleted (``time_deleted > 0``) and model will be
skipped from listing, updates are forbidden.
This method does ``DELETE /v1/role/`` endpoint call.
:param str role_id: UUID4 (:rfc:`4122`) in string form
of role's ID
:return: Deleted role model.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/role/{0}/".format(role_id))
return self._session.delete(url, **kwargs)
def get_permissions(self, **kwargs):
"""This method lists exisiting permissions in system. Not those,
which available for current user, but overall ones. This is
mostly required if you compose new role.
This method does ``GET /v1/permission`` endpoint call.
*Example of result*:
.. code-block:: json
{
"items": [
{
"name": "api",
"permissions": [
"create_cluster",
"create_execution",
"create_playbook_configuration",
"create_role",
"create_server",
"create_user",
"delete_cluster",
"delete_execution",
"delete_playbook_configuration",
"delete_role",
"delete_server",
"delete_user",
"edit_cluster",
"edit_playbook_configuration",
"edit_role",
"edit_server",
"edit_user",
"view_cluster",
"view_cluster_versions",
"view_execution",
"view_execution_steps",
"view_execution_version",
"view_playbook_configuration",
"view_playbook_configuration_version",
"view_role",
"view_role_versions",
"view_server",
"view_server_versions",
"view_user",
"view_user_versions"
]
},
{
"name": "playbook",
"permissions": [
"add_osd",
"cluster_deploy",
"hello_world",
"purge_cluster",
"remove_osd"
]
}
]
}
.. note::
As you can see, there are 2 types of permissions in Decapod:
1. api
2. playbook
*api* permissions are responsible for accessing API
endpoints. If a user wants to access some API endpoint, their
role has to include the appropriate permission. Some endpoints
require several permissions, and the rule of thumb here is
common sense: if a user wants to *update* a role, they also
need the permission to *view* it.
*playbook* permissions are slightly different beasts. Each
permission allows a user to execute a certain playbook.
:return: A list of permissions like those mentioned above.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/permission/")
return self._session.get(url, **kwargs)
def get_playbooks(self, **kwargs):
"""This method returns a list of playbooks avaialble for execution.
This method does ``GET /v1/playbook`` endpoint call.
*Example of result*:
.. code-block:: json
{
"items": [
{
"description": "Adding new OSD to the cluster.",
"id": "add_osd",
"name": "Add OSD to Ceph cluster",
"required_server_list": true,
"hints": []
},
{
"description": "Ceph cluster deployment playbook.",
"id": "cluster_deploy",
"name": "Deploy Ceph cluster",
"required_server_list": true,
"hints": [
{
"description": "Setup OSDs with dmcrypt",
"id": "dmcrypt",
"type": "boolean",
"values": []
}
]
},
{
"description": "Example plugin for playbook.",
"id": "hello_world",
"name": "Hello World",
"required_server_list": false
"hints": []
},
{
"description": "Purge whole Ceph cluster.",
"id": "purge_cluster",
"name": "Purge cluster",
"required_server_list": false,
"hints": []
},
{
"description": "Remove OSD host from cluster.",
"id": "remove_osd",
"name": "Remove OSD host from Ceph cluster",
"required_server_list": true,
"hints": []
}
]
}
.. note::
Please remember that the playbook identifier passed to ``POST
/v1/playbook_configuration`` is the ``id`` field here.
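A minimal sketch of how this relates to playbook configuration
creation (``client``, ``cluster_id`` and ``server_ids`` below are
illustrative):
.. code-block:: python
    playbooks = client.get_playbooks()
    playbook_id = playbooks["items"][0]["id"]
    client.create_playbook_configuration(
        "my configuration", cluster_id, playbook_id, server_ids)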
:return: A list of playbook data.
:rtype: list
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/playbook/")
return self._session.get(url, **kwargs)
@no_auth
def get_info(self, **kwargs):
"""This method fetches basic data from Decapod API.
It is mostly useful as a healthcheck that the service actually
works.
*Example of result*:
.. code-block:: json
{
"time": {
"local": "2016-11-16T12:46:55.868153",
"unix": 1479300415,
"utc": "2016-11-16T12:46:55.868220"
},
"version": "0.1.0"
}
.. important::
This method is basically the only one you may access without
being logged in.
:return: Basic information about the Decapod API: current server
    time and API version.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/info/")
return self._session.get(url, **kwargs)
@no_auth
def request_password_reset(self, login, **kwargs):
"""This method requests password resetting for a user.
Please be noticed that no real password resetting is occured, it
just *requesting* password reset. After that, user will receive
secret link on his email. If user will proceed that link, he can
*actually* reset her password.
This method does ``POST /v1/password_reset`` endpoint call.
*Example of result*:
.. code-block:: json
{
"message": "Password reset was requested."
}
:param str login: Login of user who is required to reset password.
:return: A message that password reset was requested.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/password_reset/")
payload = {"login": login}
return self._session.post(url, json=payload, **kwargs)
@no_auth
def peek_password_reset(self, reset_token, **kwargs):
"""This method checks if password reset with given token is
still requested. It does not consume token, it just checks if
it is possible or not.
*Example of result*:
.. code-block:: json
{
"message": "Password reset was requested."
}
:param str reset_token: Password reset token from email.
:return: A message that password reset was requested.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
url = self._make_url("/v1/password_reset/{0}/".format(reset_token))
return self._session.get(url, **kwargs)
@no_auth
def reset_password(self, reset_token, new_password, **kwargs):
"""This method does actual password resetting.
*Example of result*:
.. code-block:: json
{
"message": "Password has been reset."
}
:param str reset_token: Password reset token from email.
:param str new_password: New password for user.
:return: A message that password was reset.
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
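A minimal sketch of the full reset flow (the token value below is
illustrative; in reality it arrives by email):
.. code-block:: python
    client.request_password_reset("johndoe")
    client.peek_password_reset("secret-token-from-email")
    client.reset_password("secret-token-from-email", "new-password")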
"""
url = self._make_url("/v1/password_reset/{0}/".format(reset_token))
payload = {"password": new_password}
return self._session.post(url, json=payload, **kwargs)
def get_cinder_integration(self, cluster_id, root="/etc/ceph", **kwargs):
"""This method fetches data for integration with Cinder.
This method does ``GET /v1/cinder_integration/{cluster_id}``
endpoint call.
:param str cluster_id: UUID4 (:rfc:`4122`) in string form
of cluster's ID
:param str root: Root on file system where files should be stored.
:return: Integration data
:rtype: dict
:raises decapodlib.exceptions.DecapodError: if not possible to
connect to API.
:raises decapodlib.exceptions.DecapodAPIError: if API returns error
response.
"""
params = make_query_params(root=root or None)
url = self._make_url("/v1/cinder_integration/{0}/".format(cluster_id))
return self._session.get(url, params=params, **kwargs)
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import commonware.log
from amo.utils import find_language
import mkt
log = commonware.log.getLogger('z.webapps')
def get_locale_properties(manifest, property, default_locale=None):
locale_dict = {}
for locale in manifest.get('locales', {}):
if property in manifest['locales'][locale]:
locale_dict[locale] = manifest['locales'][locale][property]
# Add in the default locale name.
default = manifest.get('default_locale') or default_locale
root_property = manifest.get(property)
if default and root_property:
locale_dict[default] = root_property
return locale_dict
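# Illustrative example for get_locale_properties() above (the manifest
# contents are hypothetical):
#
#   manifest = {
#       'name': 'App',
#       'default_locale': 'en-US',
#       'locales': {'es': {'name': 'Aplicación'}},
#   }
#   get_locale_properties(manifest, 'name')
#   # -> {'es': 'Aplicación', 'en-US': 'App'}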
def get_supported_locales(manifest):
"""
Returns a list of locales found in the "locales" property of the manifest.
This will convert locales found in the SHORTER_LANGUAGES setting to their
full locale. It will also remove locales not found in AMO_LANGUAGES.
Note: The default_locale is not included.
"""
return sorted(filter(None, map(find_language, set(
manifest.get('locales', {}).keys()))))
def dehydrate_content_rating(rating):
"""
{body.id, rating.id} to translated rating.label.
"""
try:
body = mkt.ratingsbodies.dehydrate_ratings_body(
mkt.ratingsbodies.RATINGS_BODIES[int(rating['body'])])
except TypeError:
# Legacy ES format (bug 943371).
return {}
rating = mkt.ratingsbodies.dehydrate_rating(
body.ratings[int(rating['rating'])])
return rating.label
def dehydrate_content_ratings(content_ratings):
"""Dehydrate an object of content ratings from rating IDs to dict."""
for body in content_ratings or {}:
# Dehydrate all content ratings.
content_ratings[body] = dehydrate_content_rating(content_ratings[body])
return content_ratings
def dehydrate_descriptors(keys, body=None):
"""
List of keys to lists of descriptor slugs by body.
['ESRB_BLOOD', ...] to {'esrb': ['blood'], ...}.
"""
results = defaultdict(list)
for key in keys:
obj = mkt.ratingdescriptors.RATING_DESCS.get(key)
if obj:
# Slugify and remove body prefix.
body, label = key.lower().replace('_', '-').split('-', 1)
if label != 'no-descs':
results[body].append(label)
return dict(results)
def dehydrate_interactives(keys):
"""
List of keys to list of interactive slugs.
['SOCIAL_NETWORKING', ...] to ['social-networking', ...].
"""
results = []
for key in keys:
obj = mkt.ratinginteractives.RATING_INTERACTIVES.get(key)
if obj:
results.append(key.lower().replace('_', '-'))
return results
|
# Generated by Django 3.1.3 on 2020-11-28 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0016_messages_author_id'),
]
operations = [
migrations.AddField(
model_name='request',
name='likes',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Лайки'),
),
]
|
# -*- coding: utf-8 -*-
"""
Public API: Version 1
"""
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse, resolve
from django.conf.urls import url
from test_plus.test import TestCase
from ...users.tests.factories import UserFactory
from .factories import ReviewFactory
class TestReviewURLs(TestCase):
def setUp(self):
self.user = UserFactory(username='bobby')
self.review = ReviewFactory(id=1)
#Let's test the homepage, briefly
def test_home_reverse(self):
"""'reviews:home' should reverse to '/'"""
self.assertEqual(reverse('homepage'), '/')
def test_home_resolve(self):
"""'/' should resolve to 'reviews:home'"""
self.assertEqual(resolve('/').view_name, 'homepage')
def test_new_review_reverse(self):
"""'reviews:new_review' should reverse to '/new_review/'"""
self.assertEqual(reverse('reviews:new_review'), '/reviews/new_review/')
def test_new_review(self):
"""'/new_review/' should resolve to 'reviews:new_review'"""
self.assertEqual(resolve('/reviews/new_review/').view_name, 'reviews:new_review')
def test_user_review_list(self):
"""'reviews:user_review_list username' should reverse to '/reviews/review/user/bobby/'"""
self.assertEqual(self.reverse('reviews:user_review_list', username=self.user.username), '/reviews/review/user/bobby/')
def test_edit_review_form(self):
"""'reviews:edit_review_form review.id' should reverse to '/reviews/review/user/1/'"""
self.assertEqual(self.reverse('reviews:edit_review_form', review_id=self.review.id), '/reviews/review/edit/1/')
def test_edit_review(self):
"""'reviews:edit_review review.id' should reverse to '/reviews/review/review/user/1/'"""
self.assertEqual(self.reverse('reviews:edit_review', review_id=self.review.id), '/reviews/review/edit_review/1/')
def test_review_detail(self):
"""'reviews:wine_detail review.id' should reverse to '/reviews/detail/1/'"""
self.assertEqual(self.reverse('reviews:wine_detail', review_id=self.review.id), '/reviews/detail/1/')
def test_delete_review(self):
"""'reviews:delete_review review.id' should reverse to '/reviews/review/delete_review/1/'"""
self.assertEqual(self.reverse('reviews:delete_review', review_id=self.review.id), '/reviews/review/delete_review/1/')
|
from .landmark import landmark_mesh, get_landmark_points, LANDMARK_MASK
from .visualize import visualize_nicp_result
from .correspond import correspond_mesh, build_correspondence_matrix
from .data.basel import load_basel_template_metadata
from .data import prepare_mesh_as_template, load_template
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def landmark_and_correspond_mesh(mesh, verbose=False):
mesh = mesh.copy()
lms = landmark_mesh(mesh, verbose=verbose)
mesh.landmarks['__lsfm_masked'] = lms['landmarks_3d_masked']
# correspond_mesh is expected to return (shape_nicp, U, tri_indices)
shape = correspond_mesh(mesh, mask=lms['occlusion_mask'],
                        verbose=verbose)
return_dict = {
'shape_nicp': shape[0],
'landmarked_image': lms['landmarked_image'],
'U': shape[1],
'tri_indices': shape[2]
}
return_dict['shape_nicp_visualization'] = visualize_nicp_result(
return_dict['shape_nicp'])
return return_dict
def correspondence_meshes(source_mesh, target_mesh, verbose=False):
target_mesh = target_mesh.copy()
# Detect landmark for source mesh
if source_mesh != "template":
texture_mesh, color_mesh = source_mesh
lmpts = get_landmark_points(texture_mesh)
meta = load_basel_template_metadata()
ibug68 = meta['landmarks']['ibug68']
ibug68 = ibug68.from_mask(LANDMARK_MASK)
ibug68.points = lmpts.points
nosetip = meta['landmarks']['nosetip']
nosetip.points = ((2*lmpts.points[30] + 1*lmpts.points[33])/3).reshape(1, -1)
color_mesh.landmarks['ibug68'] = ibug68
color_mesh.landmarks['nosetip'] = nosetip
color_mesh = prepare_mesh_as_template(color_mesh)
source_mesh = color_mesh.copy()
else:
source_mesh = load_template().copy()
lms = landmark_mesh(target_mesh, verbose=verbose)
target_mesh.landmarks['__lsfm_masked'] = lms['landmarks_3d_masked']
#import pdb; pdb.set_trace()
mat = build_correspondence_matrix(source_mesh, target_mesh, lms['occlusion_mask'], verbose=verbose)
return mat
|
__copyright__ = "Copyright (C) 2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from functools import reduce
import numpy as np
import numpy.linalg as la
from math import sqrt
from pytools import memoize_method, MovedFunctionDeprecationWrapper
try:
# Python 2.7 and newer
from math import gamma
except ImportError:
_have_gamma = False
else:
_have_gamma = True
if not _have_gamma:
try:
from scipy.special import gamma # noqa
except ImportError:
pass
else:
_have_gamma = True
if not _have_gamma:
def gamma(z): # noqa
from warnings import warn
warn("Using makeshift gamma function that only works for integers. "
"No better one was found.")
if z != int(z):
raise RuntimeError("makeshift gamma function doesn't work "
"for non-integers")
g = 1
for i in range(1, int(z)):
g = g*i
return g
class Monomial:
r"""A monomial
.. math::
\alpha \prod_{i=1}^d \xi_i^{e_i}
where :math:`e` is the vector *exponents*,
:math:`\alpha` is the scalar *factor*,
and :math:`\xi` is zero at :math:`(-1,\dots,-1)`
and one at :math:`(1,\dots,1)`.
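A minimal usage sketch (values below are illustrative):
.. code-block:: python
    m = Monomial((2, 1))                # xi_1**2 * xi_2
    value = m(np.array([0.5, 0.0]))     # evaluate at a single point
    integral = m.simplex_integral()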
"""
def __init__(self, exponents, factor=1):
self.exponents = exponents
self.ones = np.ones((len(self.exponents),))
self.factor = factor
def __call__(self, xi):
"""Evaluate the monomial at *xi*.
:arg xi: has shape *(d, ...)*.
"""
from operator import mul
x = (xi+1)/2
return self.factor * \
reduce(mul, (x[i]**expn
for i, expn in enumerate(self.exponents)))
def simplex_integral(self):
r"""Integral over the simplex
:math:`\{\mathbf{x} \in [0, 1]^n: \sum x_i \le 1 \}`."""
from pytools import factorial
from operator import mul
return (self.factor * 2**len(self.exponents)
* reduce(mul, (factorial(alpha) for alpha in self.exponents))
/ factorial(len(self.exponents)+sum(self.exponents)))
def hypercube_integral(self):
"""Integral over the hypercube :math:`[0, 1]^n`."""
from functools import reduce
return reduce(
lambda integral, n: integral * 1 / (n + 1),
self.exponents, 1.0)
def diff(self, coordinate):
diff_exp = list(self.exponents)
orig_exp = diff_exp[coordinate]
if orig_exp == 0:
return Monomial(diff_exp, 0)
diff_exp[coordinate] = orig_exp-1
return Monomial(diff_exp, self.factor*orig_exp)
# {{{ coordinate mapping
class AffineMap:
def __init__(self, a, b):
self.a = np.asarray(a, dtype=np.float64)
self.b = np.asarray(b, dtype=np.float64)
def __call__(self, x):
"""Apply the map *self* to a batch of vectors *x*.
:arg x: has shape *(d, npts)* where *d* is the number of dimensions.
A (1D) array of shape *(npts,)* is also allowed.
"""
# This .T goofiness allows both the nD and the 1D case.
return (np.dot(self.a, x).T + self.b).T
@property
@memoize_method
def jacobian(self):
return la.det(self.a)
@property
@memoize_method
def inverse(self):
"""The inverse :class:`AffineMap` of *self*."""
return AffineMap(la.inv(self.a), -la.solve(self.a, self.b))
EQUILATERAL_TO_UNIT_MAP = {
1: AffineMap([[1]], [0]),
2: AffineMap([
[1, -1/sqrt(3)],
[0, 2/sqrt(3)]],
[-1/3, -1/3]),
3: AffineMap([
[1, -1/sqrt(3), -1/sqrt(6)],
[0, 2/sqrt(3), -1/sqrt(6)],
[0, 0, sqrt(6)/2]],
[-1/2, -1/2, -1/2])
}
def equilateral_to_unit(equi):
return EQUILATERAL_TO_UNIT_MAP[len(equi)](equi)
def unit_vertices(dim):
result = np.empty((dim+1, dim), np.float64)
result.fill(-1)
for i in range(dim):
result[i+1, i] = 1
return result
# this should go away
UNIT_VERTICES = {
0: unit_vertices(0),
1: unit_vertices(1),
2: unit_vertices(2),
3: unit_vertices(3),
}
def barycentric_to_unit(bary):
"""
:arg bary: shaped ``(dims+1,npoints)``
"""
dims = len(bary)-1
return np.dot(unit_vertices(dims).T, bary)
def unit_to_barycentric(unit):
"""
:arg unit: shaped ``(dims,npoints)``
"""
last_bary = 0.5*(unit+1)
first_bary = 1-np.sum(last_bary, axis=0)
return np.vstack([first_bary, last_bary])
# /!\ do not reorder these, stuff (node generation) *will* break.
EQUILATERAL_VERTICES = {
1: np.array([
[-1],
[1],
]),
2: np.array([
[-1, -1/sqrt(3)],
[1, -1/sqrt(3)],
[0, 2/sqrt(3)],
]),
3: np.array([
[-1, -1/sqrt(3), -1/sqrt(6)],
[1, -1/sqrt(3), -1/sqrt(6)],
[0, 2/sqrt(3), -1/sqrt(6)],
[0, 0, 3/sqrt(6)],
])
}
def barycentric_to_equilateral(bary):
dims = len(bary)-1
return np.dot(EQUILATERAL_VERTICES[dims].T, bary)
# }}}
def pick_random_simplex_unit_coordinate(rng, dims):
offset = 0.05
base = -1 + offset
remaining = 2 - dims*offset
r = np.zeros(dims, np.float64)
for j in range(dims):
rn = rng.uniform(0, remaining)
r[j] = base + rn
remaining -= rn
return r
def pick_random_hypercube_unit_coordinate(rng, dims):
return np.array([rng.uniform(-1.0, 1.0) for _ in range(dims)])
# {{{ accept_scalar_or_vector decorator
class accept_scalar_or_vector: # noqa
def __init__(self, arg_nr, expected_rank):
"""
:arg arg_nr: The argument number which may be a scalar or a vector,
one-based.
"""
self.arg_nr = arg_nr - 1
self.expected_rank = expected_rank
def __call__(self, f):
def wrapper(*args, **kwargs):
controlling_arg = args[self.arg_nr]
try:
shape = controlling_arg.shape
except AttributeError:
has_shape = False
else:
has_shape = True
if not has_shape:
if not self.expected_rank == 1:
raise ValueError("cannot pass a scalar to %s" % f)
controlling_arg = np.array([controlling_arg])
new_args = args[:self.arg_nr] \
+ (controlling_arg,) + args[self.arg_nr+1:]
result = f(*new_args, **kwargs)
if isinstance(result, tuple):
return tuple(r[0] for r in result)
else:
return result[0]
if len(shape) == self.expected_rank:
return f(*args, **kwargs)
elif len(shape) < self.expected_rank:
controlling_arg = controlling_arg[..., np.newaxis]
new_args = args[:self.arg_nr] \
+ (controlling_arg,) + args[self.arg_nr+1:]
result = f(*new_args, **kwargs)
if isinstance(result, tuple):
return tuple(r[..., 0] for r in result)
else:
return result[..., 0]
else:
raise ValueError("argument rank is too large: got %d, expected %d"
% (len(shape), self.expected_rank))
from functools import wraps
try:
wrapper = wraps(f)(wrapper)
except AttributeError:
pass
return wrapper
# }}}
# {{{ submeshes, plotting helpers
def simplex_submesh(node_tuples):
"""Return a list of tuples of indices into the node list that
generate a tesselation of the reference element.
:arg node_tuples: A list of tuples *(i, j, ...)* of integers
indicating node positions inside the unit element. The
returned list references indices in this list.
:func:`pytools.generate_nonnegative_integer_tuples_summing_to_at_most`
may be used to generate *node_tuples*.
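A small illustrative example:
.. code-block:: python
    from pytools import \
            generate_nonnegative_integer_tuples_summing_to_at_most as gnitstam
    tri_indices = simplex_submesh(list(gnitstam(3, 2)))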
"""
from pytools import single_valued, add_tuples
dims = single_valued(len(nt) for nt in node_tuples)
node_dict = {
ituple: idx
for idx, ituple in enumerate(node_tuples)}
if dims == 1:
result = []
def try_add_line(d1, d2):
try:
result.append((
node_dict[add_tuples(current, d1)],
node_dict[add_tuples(current, d2)],
))
except KeyError:
pass
for current in node_tuples:
try_add_line((0,), (1,),)
return result
elif dims == 2:
# {{{ triangle sub-mesh
result = []
def try_add_tri(d1, d2, d3):
try:
result.append((
node_dict[add_tuples(current, d1)],
node_dict[add_tuples(current, d2)],
node_dict[add_tuples(current, d3)],
))
except KeyError:
pass
for current in node_tuples:
# this is a tesselation of a square into two triangles.
# subtriangles that fall outside of the master triangle are
# simply not added.
# positively oriented
try_add_tri((0, 0), (1, 0), (0, 1))
try_add_tri((1, 0), (1, 1), (0, 1))
return result
# }}}
elif dims == 3:
# {{{ tet sub-mesh
def try_add_tet(d1, d2, d3, d4):
try:
result.append((
node_dict[add_tuples(current, d1)],
node_dict[add_tuples(current, d2)],
node_dict[add_tuples(current, d3)],
node_dict[add_tuples(current, d4)],
))
except KeyError:
pass
result = []
for current in node_tuples:
# this is a tesselation of a cube into six tets.
# subtets that fall outside of the master tet are simply not added.
# positively oriented
try_add_tet((0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1))
try_add_tet((1, 0, 1), (1, 0, 0), (0, 0, 1), (0, 1, 0))
try_add_tet((1, 0, 1), (0, 1, 1), (0, 1, 0), (0, 0, 1))
try_add_tet((1, 0, 0), (0, 1, 0), (1, 0, 1), (1, 1, 0))
try_add_tet((0, 1, 1), (0, 1, 0), (1, 1, 0), (1, 0, 1))
try_add_tet((0, 1, 1), (1, 1, 1), (1, 0, 1), (1, 1, 0))
return result
# }}}
else:
raise NotImplementedError("%d-dimensional sub-meshes" % dims)
submesh = MovedFunctionDeprecationWrapper(simplex_submesh)
def hypercube_submesh(node_tuples):
"""Return a list of tuples of indices into the node list that
generate a tesselation of the reference element.
:arg node_tuples: A list of tuples *(i, j, ...)* of integers
indicating node positions inside the unit element. The
returned list references indices in this list.
:func:`pytools.generate_nonnegative_integer_tuples_below`
may be used to generate *node_tuples*.
See also :func:`simplex_submesh`.
.. versionadded:: 2020.2
"""
from pytools import single_valued, add_tuples
dims = single_valued(len(nt) for nt in node_tuples)
node_dict = {
ituple: idx
for idx, ituple in enumerate(node_tuples)}
from pytools import generate_nonnegative_integer_tuples_below as gnitb
result = []
for current in node_tuples:
try:
result.append(tuple(
node_dict[add_tuples(current, offset)]
for offset in gnitb(2, dims)))
except KeyError:
pass
return result
@accept_scalar_or_vector(2, 2)
def plot_element_values(n, nodes, values, resample_n=None,
node_tuples=None, show_nodes=False):
dims = len(nodes)
orig_nodes = nodes
orig_values = values
if resample_n is not None:
import modepy as mp
basis = mp.simplex_onb(dims, n)
fine_nodes = mp.equidistant_nodes(dims, resample_n)
values = np.dot(mp.resampling_matrix(basis, fine_nodes, nodes), values)
nodes = fine_nodes
n = resample_n
from pytools import generate_nonnegative_integer_tuples_summing_to_at_most \
as gnitstam
if dims == 1:
import matplotlib.pyplot as pt
pt.plot(nodes[0], values)
if show_nodes:
pt.plot(orig_nodes[0], orig_values, "x")
pt.show()
elif dims == 2:
import mayavi.mlab as mlab
mlab.triangular_mesh(
nodes[0], nodes[1], values, submesh(list(gnitstam(n, 2))))
if show_nodes:
mlab.points3d(orig_nodes[0], orig_nodes[1], orig_values,
scale_factor=0.05)
mlab.show()
else:
raise RuntimeError("unsupported dimensionality %d" % dims)
# }}}
# {{{ lebesgue constant
def _evaluate_lebesgue_function(n, nodes, domain):
dims = len(nodes)
huge_n = 30*n
if domain == "simplex":
from modepy.modes import simplex_onb as domain_basis_onb
from pytools import (
generate_nonnegative_integer_tuples_summing_to_at_most
as generate_node_tuples)
elif domain == "hypercube":
from modepy.modes import (
legendre_tensor_product_basis as domain_basis_onb)
from pytools import (
generate_nonnegative_integer_tuples_below
as generate_node_tuples)
else:
raise ValueError(f"unknown domain: '{domain}'")
basis = domain_basis_onb(dims, n)
equi_node_tuples = list(generate_node_tuples(huge_n, dims))
equi_nodes = (np.array(equi_node_tuples, dtype=np.float64)/huge_n*2 - 1).T
from modepy.matrices import vandermonde
vdm = vandermonde(basis, nodes)
eq_vdm = vandermonde(basis, equi_nodes)
eq_to_out = la.solve(vdm.T, eq_vdm.T).T
lebesgue_worst = np.sum(np.abs(eq_to_out), axis=1)
return lebesgue_worst, equi_node_tuples, equi_nodes
def estimate_lebesgue_constant(n, nodes, domain=None, visualize=False):
"""Estimate the
`Lebesgue constant
<https://en.wikipedia.org/wiki/Lebesgue_constant_(interpolation)>`_
of the *nodes* at polynomial order *n*.
:arg nodes: an array of shape *(dims, nnodes)* as returned by
:func:`modepy.warp_and_blend_nodes`.
:arg domain: represents the domain of the reference element and can be
either ``"simplex"`` or ``"hypercube"``.
:arg visualize: visualize the function that gives rise to the
returned Lebesgue constant. (2D only for now)
:return: the Lebesgue constant, a scalar.
.. versionadded:: 2013.2
.. versionchanged:: 2020.2
*domain* parameter was added with support for nodes on the unit
hypercube (i.e. unit square in 2D and unit cube in 3D).
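A minimal usage sketch (the order and node set below are illustrative):
.. code-block:: python
    import modepy as mp
    nodes = mp.warp_and_blend_nodes(2, 5)
    lebesgue = estimate_lebesgue_constant(5, nodes, domain="simplex")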
"""
if domain is None:
domain = "simplex"
dims = len(nodes)
lebesgue_worst, equi_node_tuples, equi_nodes = \
_evaluate_lebesgue_function(n, nodes, domain)
lebesgue_constant = np.max(lebesgue_worst)
if not visualize:
return lebesgue_constant
if dims == 2:
print(f"Lebesgue constant: {lebesgue_constant}")
if domain == "simplex":
triangles = simplex_submesh(equi_node_tuples)
elif domain == "hypercube":
triangles = hypercube_submesh(equi_node_tuples)
else:
triangles = None
try:
import mayavi.mlab as mlab
mlab.figure(bgcolor=(1, 1, 1))
mlab.triangular_mesh(
equi_nodes[0], equi_nodes[1], lebesgue_worst / lebesgue_constant,
triangles)
x, y = np.mgrid[-1:1:20j, -1:1:20j]
mlab.mesh(x, y, 0*x,
representation="wireframe",
color=(0.4, 0.4, 0.4),
line_width=0.6)
cb = mlab.colorbar()
cb.label_text_property.color = (0, 0, 0)
mlab.show()
except ImportError:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca()
ax.grid()
ax.plot(nodes[0], nodes[1], "ko")
# NOTE: might be tempted to use `plot_trisurf` here to get a plot
# like mayavi, but that will be horrendously slow
p = ax.tricontourf(
equi_nodes[0], equi_nodes[1], lebesgue_worst / lebesgue_constant,
triangles=triangles,
levels=16)
fig.colorbar(p)
ax.set_aspect("equal")
plt.show()
else:
raise ValueError(f"visualization is not supported in {dims}D")
return lebesgue_constant
# }}}
# vim: foldmethod=marker
|
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from accounts import views
urlpatterns = [
url(r'^auth/register/$',
views.RegistrationView.as_view(), name='user-registration'),
url(r'^auth/activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.ActivationView.as_view(), name='activate'),
url(r'^auth/login/', obtain_jwt_token, name='user-login'),
url(r'^auth/api-token-refresh/', refresh_jwt_token, name='refresh-token'),
url(r'^profile/$', views.ProfileDetail.as_view(), name='profile'),
]
|
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import structlog
from voltha.protos.common_pb2 import OperStatus, AdminState
from voltha.protos.device_pb2 import Port
from voltha.protos.openflow_13_pb2 import OFPPF_10GB_FD
from voltha.core.logical_device_agent import mac_str_to_tuple
from voltha.protos.logical_device_pb2 import LogicalPort
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER
from voltha.protos.openflow_13_pb2 import ofp_port
class UniPort(object):
"""Wraps southbound-port(s) support for ONU"""
def __init__(self, handler, name, port_no, control_vlan=None):
self.log = structlog.get_logger(device_id=handler.device_id,
port_no=port_no)
self._enabled = False
self._handler = handler
self._name = name
self._port = None
self._port_number = port_no
self._logical_port_number = None
self._control_vlan = control_vlan
self._admin_state = AdminState.ENABLED
self._oper_status = OperStatus.ACTIVE
# TODO Add state, stats, alarm reference, ...
pass
def __str__(self):
return "UniPort: {}:{}".format(self.name, self.port_number)
@staticmethod
def create(handler, name, port_no, control_vlan):
port = UniPort(handler, name, port_no, control_vlan)
return port
def _start(self):
self._cancel_deferred()
self._admin_state = AdminState.ENABLED
self._oper_status = OperStatus.ACTIVE
self._update_adapter_agent()
# TODO: start h/w sync
# TODO: Enable the actual physical port?
pass
def _stop(self):
self._cancel_deferred()
self._admin_state = AdminState.DISABLED
self._oper_status = OperStatus.UNKNOWN
self._update_adapter_agent()
# TODO: Disable/power-down the actual physical port?
pass
def delete(self):
self.enabled = False
self._handler = None
# TODO: anything else
def _cancel_deferred(self):
pass
@property
def name(self):
return self._name
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
if self._enabled != value:
self._enabled = value
if value:
self._start()
else:
self._stop()
@property
def port_number(self):
"""
Physical device port number
:return: (int) port number
"""
return self._port_number
@property
def logical_port_number(self):
"""
Logical device port number (used as OpenFlow port for UNI)
:return: (int) port number
"""
return self._logical_port_number
def _update_adapter_agent(self):
# TODO: Currently does the adapter_agent allow 'update' of port status
# self.adapter_agent.update_port(self.olt.device_id, self.get_port())
pass
def decode_openflow_port_and_control_vlan(self, venet_info):
try:
# Allow spaces or dashes as separator, select last as
# the port number
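# e.g. a hypothetical name 'uni-0-3' (or 'uni 0 3') yields port_no == 3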
port_no = int(venet_info['name'].replace(' ', '-').split('-')[-1:][0])
cntl_vlan = port_no
return port_no, cntl_vlan
except ValueError:
self.log.error('invalid-uni-port-name', name=venet_info['name'])
except KeyError:
self.log.error('invalid-venet-data', data=venet_info)
def get_port(self):
"""
Get the VOLTHA PORT object for this port
:return: VOLTHA Port object
"""
if self._port is None:
self._port = Port(port_no=self.port_number,
label='Ethernet port',
type=Port.ETHERNET_UNI,
admin_state=self._admin_state,
oper_status=self._oper_status)
return self._port
def add_logical_port(self, openflow_port_no, control_vlan=None,
capabilities=OFPPF_10GB_FD | OFPPF_FIBER,
speed=OFPPF_10GB_FD):
if self._logical_port_number is None:
self._logical_port_number = openflow_port_no
self._control_vlan = control_vlan
device = self._handler.adapter_agent.get_device(self._handler.device_id)
if control_vlan is not None and device.vlan != control_vlan:
device.vlan = control_vlan
self._handler.adapter_agent.update_device(device)
openflow_port = ofp_port(
port_no=openflow_port_no,
hw_addr=mac_str_to_tuple('08:00:%02x:%02x:%02x:%02x' %
((device.parent_port_no >> 8 & 0xff),
device.parent_port_no & 0xff,
(openflow_port_no >> 8) & 0xff,
openflow_port_no & 0xff)),
name='uni-{}'.format(openflow_port_no),
config=0,
state=OFPPS_LIVE,
curr=capabilities,
advertised=capabilities,
peer=capabilities,
curr_speed=speed,
max_speed=speed
)
self._handler.adapter_agent.add_logical_port(self._handler.logical_device_id,
LogicalPort(
id='uni-{}'.format(openflow_port_no),
ofp_port=openflow_port,
device_id=device.id,
device_port_no=self._port_number))
# TODO: Should we use the UNI object 'name' as the id for OpenFlow?
|
'''OpenGL extension ARB.debug_label
This module customises the behaviour of the
OpenGL.raw.GL.ARB.debug_label to provide a more
Python-friendly API
Overview (from the spec)
This extension defines a mechanism for OpenGL applications to label their
objects (textures, buffers, shaders, etc.) with a descriptive string.
When profiling or debugging an OpenGL application within an external or
built-in (debug output API) debugger or profiler it is difficult to
identify objects from their object names.
Even when the object itself is viewed it can be problematic to
differentiate between similar objects. Attaching a label to an object
helps obviate this difficulty.
The intended purpose of this is purely to improve the user experience
within OpenGL development tools and application built-in profilers and
debuggers.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/debug_label.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.debug_label import *
### END AUTOGENERATED SECTION
|
from typing import Dict, List, Tuple, Union
from web3.main import Web3
from ..utils.utils import calculate_lp_token_price, open_contract, blockchain_urls, get_token_price_from_dexs, symbol_mapping, decimals_mapping
from ..masterchef_apr_fetcher import MasterchefAPRFetcher
from pprint import pprint
class TraderjoeAPRFetcher(MasterchefAPRFetcher):
"""
Interface for apr fetcher
"""
def __init__(self):
super().__init__("avalanche", Web3(Web3.HTTPProvider(blockchain_urls["avalanche"])))
def masterchef_address(self) -> str:
return "0x188bED1968b795d5c9022F6a0bb5931Ac4c18F00"
def dapp_token_address_field(self) -> str:
return "JOE"
def dapp_token_per_block_or_per_second_field(self, per_block: bool) -> str:
return "" if per_block else "joePerSec"
def _total_staked(self, i, pool_info) -> float:
pool_contract = open_contract(self._web3, self._blockchain, self._pool_address(i, pool_info))
decimals = pool_contract.functions.decimals().call()
return open_contract(self._web3, self._blockchain, self._pool_address(i, pool_info)).functions.balanceOf(self._web3.toChecksumAddress(self.masterchef_address())).call() * 10**(-decimals)
def _pool_address(self, i, pool_info) -> str:
return pool_info[0]
def _alloc_point(self, i, pool_info) -> int:
return pool_info[3]
def additional_aprs(self, i: int, pool_info: Dict[str, Union[float, int, str]]) -> List[Tuple[str, float]]:
masterchef_contract = open_contract(self._web3, self._blockchain, self.masterchef_address())
pool_info_complete = masterchef_contract.functions.poolInfo(i).call()
rewarder = pool_info_complete[4]
rewarder_contract = open_contract(self._web3, self._blockchain, rewarder)
if rewarder_contract is None:
return []
if rewarder_contract.functions.tokenPerSec().call() == 0:
return []
reward_token = rewarder_contract.functions.rewardToken().call()
reward_contract = open_contract(self._web3, self._blockchain, reward_token)
if "symbol" in dir(reward_contract.functions):
symbol = reward_contract.functions.symbol().call()
else:
symbol = symbol_mapping.get(reward_token.lower(), reward_token.lower())
if "decimals" in dir(reward_contract.functions):
decimals = reward_contract.functions.decimals().call()
else:
decimals = decimals_mapping.get(reward_token.lower(), 18)
annual_token_emission = rewarder_contract.functions.tokenPerSec().call() * 10**-decimals * 3600 * 24 * 365
price_token = calculate_lp_token_price(self._web3, self._blockchain, reward_token)
lp_token_price = calculate_lp_token_price(self._web3, self._blockchain, self._pool_address(i, pool_info_complete))
total_staked = self._total_staked(i, pool_info_complete)
pool_reward_amount_per_year = annual_token_emission
pool_reward_value_per_year = price_token * pool_reward_amount_per_year
total_value_locked = max(1, total_staked * lp_token_price)
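# APR in percent: yearly reward value relative to the total value locked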
apr = (pool_reward_value_per_year / total_value_locked) * 100
return [(symbol, apr)]
|
#!/usr/bin/env python3
# =============================================================================
# Created On : MAC OSX High Sierra 10.13.6 (17G65)
# Created On : Python 3.7.0
# Created By : Jeromie Kirchoff
# Created Date: Mon May 14 21:46:03 PDT 2018
# =============================================================================
"""THE MODULE HAS BEEN BUILD FOR CONVERTING ALL CHARACTERS TO HTML UNICODE."""
# =============================================================================
import re
def cleantext(text):
"""
THE MODULE HAS BEEN BUILT to replace non-ASCII characters with
printable ASCII.
Use HTML entities when possible.
started from
https://secure.hens-teeth.net/orders/knowledgebase/74/Cleaning-Special-Characters-from-Product-Text-Files.html
https://www.toptal.com/designers/htmlarrows/
http://www.thepunctuationguide.com/hyphen-and-dashes.html
"""
# text = re.sub(r'[\x00-\x1f\x80-\xff]', ' ', text)
# The line above is a hard-core line that strips everything else.
text = re.sub(r'\x85', 'U+02026', text) # replace ellipses
text = re.sub(r'\x91', "‘", text) # replace left single quote
text = re.sub(r'\x92', "’", text) # replace right single quote
text = re.sub(r'\x93', '“', text) # replace left double quote
text = re.sub(r'\x94', '”', text) # replace right double quote
text = re.sub(r'\x95', '•', text) # replace bullet
text = re.sub(r'\x96', '-', text) # replace en dash
text = re.sub(r'\x99', 'U+02122', text) # replace TM
text = re.sub(r'\xae', 'U+000AE', text) # replace (R)
text = re.sub(r'\xb0', 'U+000B0', text) # replace degree symbol
text = re.sub(r'\xba', 'U+000B0', text) # replace degree symbol
text = re.sub(r'[\n|\r]+', ' ', text) # remove embedded \n and \r
return text
if __name__ == '__main__':
cleantext("\n")
|
#!/usr/bin/env python2
import socket
import threading
import time
import SocketServer
import random
HOST = "0.0.0.0"
PORT = 11071
WELCOME_MSG = "Hi, I like math and cryptography. Can you talk to me?!\n"
ERROR_MSG = "Ooops, something went wrong here. Please check your input!\n"
CORRECT_MSG = "Yay, that's right!\n"
WRONG_MSG = "Nope, that's not the right solution. Try again later!\n"
FLAG = "IW{Crypt0_c0d3}\n"
MAX_TO_SOLVE = 100
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
self.request.sendall(WELCOME_MSG)
num_solved = 0
for level in range(1,MAX_TO_SOLVE+1):
eq, res = self.rand_equation(level)
self.request.sendall("Level {}.: {}\n".format(str(level), eq))
try:
answer = self.request.recv(1024)
answer = int(self.decode(answer.strip()))
except:
self.request.sendall(ERROR_MSG)
return
if answer == res:
num_solved += 1
self.request.sendall(CORRECT_MSG)
else:
self.request.sendall(WRONG_MSG)
return
if num_solved == MAX_TO_SOLVE:
self.request.sendall(FLAG)
except:
return
def rand_equation(self, level):
num1 = num2 = 0
operators = ["*","+","-"]
num_range = [2, 20*level]
op = operators[random.randint(0, len(operators) -1)]
while (num1 in [0,1]) or (num2 in [0,1]):
num1 = random.randint(num_range[0], num_range[1])
num2 = random.randint(num_range[0], num_range[1])
res = eval(str(num1) + " " + op + " " + str(num2))
return self.encode("x " + op + " " + str(num2) + " = " + str(res)), num1
def _xor(self, a, b):
return a ^ b
def encode(self, eq):
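        # Added note on the scheme: XOR each character with 32 (2<<4), render
        # it as 8 bits, concatenate all bits, then re-read the bitstream in
        # 2-bit chunks, add 51 to each chunk value and join the chars with '.'.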
out = []
for c in eq:
q = bin(self._xor(ord(c),(2<<4))).lstrip("0b")
q = "0" * ((2<<2)-len(q)) + q
out.append(q)
b = ''.join(out)
pr = []
for x in range(0,len(b),2):
c = chr(int(b[x:x+2],2)+51)
pr.append(c)
s = '.'.join(pr)
return s
def decode(self, answer):
try:
nums = answer.split(".")
out = []
for num in nums:
o = ord(num)-51
b = bin(o).lstrip("0b")
b = "0" * (2-len(b)) + b
out.append(b)
bs = ''.join(out)
cs = []
for c in range(0,len(bs),8):
b = bs[c:c+8]
x = chr(int(b,2) ^ (2<<4))
cs.append(x)
s = ''.join(cs)
return s
except:
return None
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
if __name__ == "__main__":
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = False
server_thread.start()
while True:
try:
time.sleep(1)
except:
break
server.shutdown()
server.server_close()
|
"""Serveradmin
Copyright (c) 2019 InnoGames GmbH
"""
from django.conf import settings
def base(request):
return {'MENU_TEMPLATES': settings.MENU_TEMPLATES}
|
from ..utils import Object
class MessagePassportDataReceived(Object):
"""
Telegram Passport data has been received; for bots only
Attributes:
ID (:obj:`str`): ``MessagePassportDataReceived``
Args:
elements (List of :class:`telegram.api.types.encryptedPassportElement`):
List of received Telegram Passport elements
credentials (:class:`telegram.api.types.encryptedCredentials`):
Encrypted data credentials
Returns:
MessageContent
Raises:
:class:`telegram.Error`
"""
ID = "messagePassportDataReceived"
def __init__(self, elements, credentials, **kwargs):
self.elements = elements # list of encryptedPassportElement
self.credentials = credentials # EncryptedCredentials
@staticmethod
def read(q: dict, *args) -> "MessagePassportDataReceived":
elements = [Object.read(i) for i in q.get('elements', [])]
credentials = Object.read(q.get('credentials'))
return MessagePassportDataReceived(elements, credentials)
|
from collections import namedtuple
from base58 import b58decode
from sovtokenfees.serializers import txn_root_serializer
def test_utxo_batch_handler_commit_batch(utxo_batch_handler, utxo_cache):
utxo_cache.set('1', '2')
ThreePcBatch = namedtuple("ThreePcBatch", "state_root valid_digests txn_root")
three_ps_batch = ThreePcBatch(state_root=b58decode("1".encode()), valid_digests=["1"],
txn_root=txn_root_serializer.serialize("1"))
utxo_batch_handler.post_batch_applied(three_pc_batch=three_ps_batch)
utxo_batch_handler.commit_batch(three_ps_batch, None)
assert not len(utxo_cache.current_batch_ops)
assert not len(utxo_cache.un_committed)
|
r"""
Support for monitoring loss in Megatron
"""
import torch
from fmoe.balance import reset_balance_profile
from fmoe.balance import update_balance_profile
from fmoe.utils import get_torch_default_comm
balance_dict = {}
num_layers = 0
def reset_gate_hook(_num_layers=None):
from megatron import get_args
global balance_dict, num_layers
if _num_layers is not None:
num_layers = _num_layers
reset_balance_profile(balance_dict, num_layers, get_args().balance_strategy)
def get_balance_profile():
global balance_dict
return balance_dict
def generate_megatron_gate_hook(layer_idx, num_expert_global):
from megatron import get_args
balance_strategy = get_args().balance_strategy
def megatron_gate_hook(gate_top_k_idx, gate_score_top_k, gate_context):
global balance_dict
update_balance_profile(
balance_dict,
gate_top_k_idx,
gate_score_top_k,
gate_context,
layer_idx,
num_expert_global,
balance_strategy,
)
return megatron_gate_hook
def add_balance_log(writer, iteration):
from megatron import is_last_rank
balance_dict_tensor = torch.vstack(
[torch.tensor(item, device=item[0].device) for item in balance_dict.values()]
).detach()
world_group = get_torch_default_comm()
world_size = torch.distributed.get_world_size(group=world_group)
torch.distributed.all_reduce(balance_dict_tensor, group=world_group)
balance_dict_tensor /= world_size
if writer and is_last_rank():
for idx, metric_name in enumerate(balance_dict):
for layer_id, val in enumerate(balance_dict_tensor[idx]):
writer.add_scalar(
f"balance-{metric_name}/layer-{layer_id}", val.item(), iteration
)
writer.add_scalar(
f"balance-{metric_name}/all",
balance_dict_tensor[idx].mean().item(),
iteration,
)
reset_gate_hook()
def patch_forward_step(forward_step_func):
r"""
Patch model's forward_step_func to support balance loss
"""
from megatron.mpu import is_pipeline_last_stage
from megatron import get_args
if not get_args().balance_strategy:
return forward_step_func
def forward_step_with_balance_loss(data_iterator, model, input_tensor):
args = get_args()
output = forward_step_func(data_iterator, model, input_tensor)
if not is_pipeline_last_stage():
return output
loss_name = args.balance_strategy + "_loss"
(loss, state_dict), bal_loss = (
output,
(
torch.tensor(
balance_dict[loss_name],
device=balance_dict[loss_name][0].device,
).mean()
* args.balance_loss_weight
).float(),
)
        # average across the world group
world_group = get_torch_default_comm()
world_size = torch.distributed.get_world_size(group=world_group)
averaged_bal_loss = bal_loss.clone().detach()
torch.distributed.all_reduce(averaged_bal_loss, group=world_group)
averaged_bal_loss /= world_size
loss += bal_loss
state_dict[loss_name] = averaged_bal_loss
return loss, state_dict
return forward_step_with_balance_loss
def patch_model_provider(model_provider):
from megatron import get_args
def fmoefied_model_provider():
from .layers import fmoefy
args = get_args()
return fmoefy(
model_provider(),
num_experts=args.num_experts,
hidden_hidden_size=4 * args.hidden_size // args.top_k,
top_k=args.top_k,
)
return fmoefied_model_provider
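# Hedged usage sketch (added, not from the original): both patchers wrap
# callables that a Megatron training script already owns, so they would
# typically be applied once before training starts, e.g.
#
#   forward_step = patch_forward_step(forward_step)
#   model_provider = patch_model_provider(model_provider)
#
# with forward_step/model_provider being the script's own functions.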
|
from pytest import fixture
from typing import List
from moodle import Moodle
from moodle.core.course import Course
@fixture
def domain() -> str:
return "https://school.moodledemo.net"
@fixture
def moodle(domain: str) -> Moodle:
username = "manager"
password = "moodle"
return Moodle.login(domain, username, password)
@fixture
def user_id(moodle: Moodle) -> int:
site_info = moodle.core.webservice.get_site_info()
return site_info.userid
@fixture
def courses(moodle: Moodle) -> List[Course]:
return moodle.core.course.get_courses()
|
# import pytest
from yaost.base import Node
def test_serialization():
n = Node('x', None, int_value=1)
assert 'x(int_value=1);' == n.to_string()
n = Node('x', None, bool_value=True)
assert 'x(bool_value=true);' == n.to_string()
n = Node('x', None, str_value='abc')
assert 'x(str_value="abc");' == n.to_string()
n = Node('x', None, float_value=0.00001)
assert 'x(float_value=0.000010);' == n.to_string()
n = Node('x', None, array_value=[1, 2, 3, 'x'])
assert 'x(array_value=[1,2,3,"x"]);' == n.to_string()
n = Node('x', None, fn=1)
assert 'x($fn=1);' == n.to_string()
n = Node('x', None, 1, 2, 3, 4)
assert 'x(1,2,3,4);' == n.to_string()
n = Node('x', None, 1, a=2)
assert 'x(1,a=2);' == n.to_string()
def test_union_collapse():
x = Node('x', None)
y = Node('y', None)
z = Node('z', None)
xy = x + y
xyz = xy + z
assert 'union(){x();y();}' == xy.to_string()
assert 'union(){x();y();z();}' == xyz.to_string()
|
x = int(input())
a = int(input())
b = int(input())
x -= a
print(x % b)
|
# Licensed under the Upwork's API Terms of Use;
# you may not use this file except in compliance with the Terms.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author:: Maksym Novozhylov (mnovozhilov@upwork.com)
# Copyright:: Copyright 2020(c) Upwork.com
# License:: See LICENSE.txt and TOS - https://developers.upwork.com/api-tos.html
class Config:
"""Configuration container"""
verify_ssl = True
def __init__(self, config):
self.consumer_key, self.consumer_secret = (
config["consumer_key"],
config["consumer_secret"],
)
if "access_token" in config:
self.access_token = config["access_token"]
if "access_token_secret" in config:
self.access_token_secret = config["access_token_secret"]
if "verify_ssl" in config:
self.verify_ssl = config["verify_ssl"]
|
#!/usr/bin/python3
import sys
import operator
set = {}
result = {}
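# each stdin line appears to be "<player1>&<player2>$<wicket>;<ball>"
# (inferred from the split(';'), split('$') and split('&') calls below)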
for a in sys.stdin:
bbw,ball = a.split(';')
batsmanbolwer,wicket = bbw.split('$')
if batsmanbolwer not in set:
set[batsmanbolwer] = [int(wicket),int(ball)]
else:
        set[batsmanbolwer][0] = set[batsmanbolwer][0] + int(wicket)
        set[batsmanbolwer][1] = set[batsmanbolwer][1] + int(ball)
a = [(key,value) for key,value in set.items() if (value[1]>5)]
result=dict(a)
b=sorted(result.items(), key = lambda i: i[1] ,reverse = True)
result=dict(b)
m = -1
for bat in result:
if(result[bat][0]!=m):
q = [(key,value[1]) for key,value in result.items() if (value[0] == result[bat][0])]
q = sorted(q,key = lambda i: i[0])
r = sorted(q,key = lambda i: i[1])
m = result[bat][0]
for j in r:
bo,b = j[0].split('&')
print("%s,%s,%d,%d"%(bo,b,result[j[0]][0],result[j[0]][1]))
|
import pytest
from os.path import join
def test_confgen_tree_build(confgen_single_service):
'''
Based on:
hierarchy:
- GLOBAL
- STAGE
- CLUSTER
infra:
prod: # stage
- main # cluster
- multiapp # cluster
- staging # cluster
dev: # stage
- qa1 # cluster
- qa2 # cluster
'''
t = confgen_single_service.root
# test Nodes
assert t.name == "/"
assert t.level == "GLOBAL"
assert t.parent is None
assert set([str(c) for c in t]) == {'dev', 'prod'}
assert t['prod'].name == "prod"
assert t['prod'].level == "STAGE"
assert t['prod'].parent is t
assert set([str(c) for c in t['prod']]) == {'main', 'multiapp', 'staging'}
assert t['prod']['main'].name == "main"
assert t['prod']['main'].level == "CLUSTER"
assert t['prod']['main'].parent is t['prod']
assert t['prod']['multiapp'].name == "multiapp"
assert t['prod']['multiapp'].level == "CLUSTER"
assert t['prod']['multiapp'].parent is t['prod']
assert t['prod']['staging'].name == "staging"
assert t['prod']['staging'].level == "CLUSTER"
assert t['prod']['staging'].parent is t['prod']
assert t['dev'].name == "dev"
assert t['dev'].level == "STAGE"
assert t['dev'].parent is t
assert t['dev']['qa1'].name == "qa1"
assert t['dev']['qa1'].level == "CLUSTER"
assert t['dev']['qa1'].parent is t['dev']
assert t['dev']['qa2'].name == "qa2"
assert t['dev']['qa2'].level == "CLUSTER"
assert t['dev']['qa2'].parent is t['dev']
def test_confgen_tree_path(confgen_single_service):
confgen = confgen_single_service
assert confgen.root['prod']['main'].path == "/prod/main"
assert confgen.root['dev']['qa1'].path == "/dev/qa1"
assert confgen.root.path == "/"
def test_confgen_paths(confgen_single_service):
confgen = confgen_single_service
assert confgen.root.path == '/'
assert confgen.root['prod'].path == "/prod"
assert confgen.root['dev'].path == "/dev"
assert confgen.root['prod']['main'].path == "/prod/main"
assert confgen.root['dev']['qa1'].path == "/dev/qa1"
assert confgen.root['dev']['qa2'].path == '/dev/qa2'
@pytest.mark.parametrize('path,expected', (
('', []),
('/', []),
('/prod', ['prod']),
('/prod/main', ['prod', 'main'])
))
def test_path_to_list(confgen_single_service, path, expected):
confgen = confgen_single_service
assert confgen.root.path_to_list(path) == expected
def test_confgen_tree_by_path(confgen_single_service):
confgen = confgen_single_service
assert confgen.root.by_path("/") is confgen.root
assert confgen.root.by_path("") is confgen.root
assert confgen.root.by_path("/dev/qa1") is confgen.root['dev']['qa1']
def test_confgen_tree_leaves(confgen_single_service):
assert set([i.path for i in confgen_single_service.root.leaves]) == {
'/prod/main',
'/prod/multiapp',
'/prod/staging',
'/dev/qa1',
'/dev/qa2',
}
def test_confgen_build(confgen_single_service):
confgen = confgen_single_service
confgen.build()
def f(p): return open(join(confgen.home, confgen.build_dir, p)).read()
assert f('dev/qa1/my.cnf') == "/ dev qa1"
assert f('dev/qa1/production.ini') == "4.0 password qa1 qa2"
assert f('dev/qa2/my.cnf') == "/ dev qa2"
assert f('dev/qa2/production.ini') == "9.0 password qa1 qa2"
assert f('prod/main/my.cnf') == "/ prod main"
assert f('prod/main/production.ini') == "3.0 password main multiapp staging"
assert f('prod/multiapp/my.cnf') == "/ prod multiapp"
assert f('prod/multiapp/production.ini') == "2.0 password main multiapp staging"
assert f('prod/staging/my.cnf') == "/ prod staging"
assert f('prod/staging/production.ini') == "2.0 password main multiapp staging"
|
#!/usr/bin/env python3
import sys, csv, os
try:
isoforms = open(sys.argv[1])
isbed = sys.argv[1][-3:].lower() != 'psl'
alignment = open(sys.argv[2])
minsupport = int(sys.argv[3])
outfilename = sys.argv[4]
if len(sys.argv) > 5:
outfilename2 = sys.argv[5]
else:
outfilename2 = ''
calculate_all = len(sys.argv) > 6
except:
sys.stderr.write('usage: script.py isoforms.psl alignment.sam.psl minsupport out_isoforms.psl [out_assignments.txt] [calculate_all]\n')
sys.exit(1)
isoform_info = {}
for line in isoforms:
line = line.rstrip().split('\t')
if isbed:
blocksizes = [int(n) for n in line[10].split(',')[:-1]]
name = line[3]
else:
blocksizes = [float(n) for n in line[18].split(',')[:-1]]
name = line[9]
isoform_info[name] = [sum(blocksizes), blocksizes[0], blocksizes[-1], line]
iso_read = {} # isoform-read assignments for reads that span 25bp of the first and last exon
for line in alignment: # reads aligned to the isoforms sam-turned-psl
line = line.rstrip().split('\t')
read, isoform = line[9], line[13] # names
if isoform not in iso_read:
iso_read[isoform] = []
elif len(iso_read[isoform]) > minsupport and not calculate_all:
continue
blocksizes = [int(n) for n in line[18].split(',')[:-1]]
blockstarts = [int(n) for n in line[20].split(',')[:-1]]
read_start, read_end = blockstarts[0], blockstarts[-1]+blocksizes[-1]
info = isoform_info[isoform]
isoform_length, first_blocksize, last_blocksize = info[0:3]
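    # a read counts as supporting this isoform only if it reaches both ends
    # (left_coverage/right_coverage below) and covers >80% of the isoform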
right_coverage = left_coverage = False
if len(blocksizes) == 1: # single exon transcript
if read_start < 25 and read_end > isoform_length - 25:
right_coverage = left_coverage = True
else:
if first_blocksize < 25:
if read_start < 2:
left_coverage = True
elif read_start <= (first_blocksize - 25):
left_coverage = True
if last_blocksize < 25:
if (isoform_length - read_end) < 2:
right_coverage = True
if (isoform_length-last_blocksize + 25) <= read_end:
right_coverage = True
coverage = sum(blocksizes) / isoform_length
# coverage = proportion of bases of the isoform that the read covers
if right_coverage and left_coverage and coverage > 0.8:
iso_read[isoform] += [[read, isoform, coverage]]
with open(outfilename, 'wt') as outfile:
writer = csv.writer(outfile, delimiter='\t', lineterminator=os.linesep)
for iso in iso_read:
supporting = iso_read[iso] # supporting reads
if len(supporting) >= minsupport:
writer.writerow(isoform_info[iso][3])
if outfilename2: # map file
with open(outfilename2, 'wt') as outfile:
writer = csv.writer(outfile, delimiter='\t', lineterminator=os.linesep)
for iso in iso_read:
supporting = iso_read[iso]
if len(supporting) >= minsupport:
for s in supporting:
writer.writerow(s)
|
from library import keyword_map
key_map = keyword_map.Keyword_map()
non_k_map = ["{", "}", "(", ")"]
def code_parser(code):
k_map = key_map.getMaps()
    for rpl in non_k_map:
        code = code.replace(rpl, " " + rpl + " ")
    for rpl in k_map:
        # NOTE (added): this replaces a keyword plus a trailing space with
        # itself, so it is currently a no-op; it reads like a placeholder for
        # padding keywords the same way the brackets above are padded.
        code = code.replace(rpl + " ", rpl + " ")
    # replace line breaks with an explicit token (this only needs to run once)
    code = code.replace('\n', " " + "___nextline___" + " ")
parsed_code = code.split()
#print(parsed_code)
return parsed_code
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from eve import ISSUES, STATUS
from eve.tests.methods import post as eve_post_tests
from eve_sqlalchemy.tests import TestBase, test_sql_tables
class TestPost(eve_post_tests.TestPost, TestBase):
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_auto_create_lists(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_auto_collapse_multiple_keys(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_auto_collapse_media_list(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_dbref_post_referential_integrity(self):
pass
@pytest.mark.xfail(True, run=False, reason='not implemented yet')
def test_post_duplicate_key(self):
"""POSTing an already existing key should result in 409, not 422.
        Eve (on MongoDB) does this by not enforcing uniqueness at the
        validation level, but waiting until the MongoDB insert fails. It can
        then easily distinguish between a validation error and a duplicate
        key error.
"""
def test_post_integer(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = 'prog'
test_value = 1
data = {test_field: test_value,
'ref': 'test_post_integer_1234567'}
self.assertPostItem(data, test_field, test_value)
def test_post_list_as_array(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "role"
test_value = ["vendor", "client"]
data = {test_field: test_value,
'ref': 'test_post_list_as_array_1'}
self.assertPostItem(data, test_field, test_value)
def test_post_rows(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
data = {test_field: test_value,
'ref': 'test_post_rows_1234567890'}
self.assertPostItem(data, test_field, test_value)
def test_post_list(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "alist"
test_value = ["a_string", 99]
data = {test_field: test_value,
'ref': 'test_post_list_1234567890'}
self.assertPostItem(data, test_field, test_value)
def test_post_integer_zero(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "aninteger"
test_value = 0
data = {test_field: test_value,
'ref': 'test_post_integer_zero_12'}
self.assertPostItem(data, test_field, test_value)
def test_post_float_zero(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "afloat"
test_value = 0.0
data = {test_field: test_value,
'ref': 'test_post_float_zero_1234'}
self.assertPostItem(data, test_field, test_value)
def test_post_dict(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
data = {test_field: test_value,
'ref': 'test_post_dict_1234567890'}
self.assertPostItem(data, test_field, test_value)
def test_post_datetime(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
data = {test_field: test_value,
'ref': 'test_post_datetime_123456'}
self.assertPostItem(data, test_field, test_value)
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_objectid(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_null_objectid(self):
pass
def test_post_default_value_none(self):
# Eve test manipulates schema and changes type of 'title'. We decided
# to use different fields for each test.
# default values that assimilate to None (0, '', False) were ignored
# prior to 0.1.1
self.domain['contacts']['schema']['title']['default'] = ''
self.app.set_defaults()
data = {"ref": "UUUUUUUUUUUUUUUUUUUUUUUUU"}
self.assertPostItem(data, 'title', '')
self.domain['contacts']['schema']['aninteger']['default'] = 0
self.app.set_defaults()
data = {"ref": "TTTTTTTTTTTTTTTTTTTTTTTTT"}
self.assertPostItem(data, 'aninteger', 0)
self.domain['contacts']['schema']['abool']['default'] = False
self.app.set_defaults()
data = {"ref": "QQQQQQQQQQQQQQQQQQQQQQQQQ"}
self.assertPostItem(data, 'abool', False)
def test_multi_post_valid(self):
# Eve test uses mongo layer directly.
data = [
{"ref": "9234567890123456789054321"},
{"ref": "5432112345678901234567890", "role": ["agent"]},
]
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
results = r['_items']
self.assertEqual(results[0]['_status'], 'OK')
self.assertEqual(results[1]['_status'], 'OK')
r, status = self.get('contacts',
'?where={"ref": "9234567890123456789054321"}')
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
r, status = self.get('contacts',
'?where={"ref": "5432112345678901234567890"}')
self.assert200(status)
self.assertEqual(len(r['_items']), 1)
def test_multi_post_invalid(self):
# Eve test uses mongo layer directly and 'tid' is an integer instead of
# ObjectId for Eve-SQLAlchemy.
data = [
{"ref": "9234567890123456789054321"},
{"prog": 9999},
{"ref": "5432112345678901234567890", "role": ["agent"]},
{"ref": self.item_ref},
{"ref": "9234567890123456789054321", "tid": "foo"},
]
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
results = r['_items']
self.assertEqual(results[0]['_status'], 'OK')
self.assertEqual(results[2]['_status'], 'OK')
self.assertValidationError(results[1], {'ref': 'required'})
self.assertValidationError(results[3], {'ref': 'unique'})
self.assertValidationError(results[4], {'tid': 'integer'})
id_field = self.domain[self.known_resource]['id_field']
self.assertTrue(id_field not in results[0])
self.assertTrue(id_field not in results[1])
self.assertTrue(id_field not in results[2])
self.assertTrue(id_field not in results[3])
r, status = self.get('contacts', '?where={"prog": 9999}')
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
r, status = self.get('contacts',
'?where={"ref": "9234567890123456789054321"}')
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
def test_post_x_www_form_urlencoded_number_serialization(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
test_field = "anumber"
test_value = 34
data = {test_field: test_value,
'ref': 'test_post_x_www_num_ser_1'}
r, status = self.parse_response(self.test_client.post(
self.known_resource_url, data=data))
self.assert201(status)
self.assertTrue('OK' in r[STATUS])
self.assertPostResponse(r)
def test_post_referential_integrity_list(self):
data = {"invoicing_contacts": [self.item_id, self.unknown_item_id]}
r, status = self.post('/invoices/', data=data)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.domain['contacts']['id_field']))
self.assertValidationError(r, {'invoicing_contacts': expected})
# Eve test posts a list with self.item_id twice, which can't be handled
# for our case because we use (invoice_id, contact_id) as primary key
# in the association table.
data = {"invoicing_contacts": [self.item_id]}
r, status = self.post('/invoices/', data=data)
self.assert201(status)
self.assertPostResponse(r)
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_allow_unknown(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_write_concern(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_list_of_objectid(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_nested_dict_objectid(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_valueschema_with_objectid(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_post_list_fixed_len(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_custom_etag_update_date(self):
pass
@pytest.mark.xfail(True, run=False, reason='not applicable to SQLAlchemy')
def test_custom_date_updated(self):
pass
def test_post_with_relation_to_custom_idfield(self):
# Eve test uses mongo layer directly.
# TODO: Fix directly in Eve and remove this override
id_field = 'sku'
r, _ = self.get('products')
existing_product = r['_items'][0]
product = {
id_field: 'BAR',
'title': 'Foobar',
'parent_product': existing_product[id_field]
}
r, status = self.post('products', data=product)
self.assert201(status)
self.assertTrue(id_field in r)
self.assertItemLink(r['_links'], r[id_field])
r, status = self.get('products', item='BAR')
self.assertEqual(r['parent_product'], existing_product[id_field])
def test_post_dependency_fields_with_default(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
# test that default values are resolved before validation. See #353.
test_field = 'dependency_field2'
test_value = 'a value'
data = {test_field: test_value,
'ref': 'test_post_dep_fields_defa'}
self.assertPostItem(data, test_field, test_value)
def test_post_dependency_required_fields(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
schema = self.domain['contacts']['schema']
schema['dependency_field3']['required'] = True
data = {'ref': 'test_post_dep_req_fields1'}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'dependency_field3': 'required'})
        # required field dependency value matches the dependent field's default
# value. validation still fails since required field is still missing.
# See #665.
schema['dependency_field3']['dependencies'] = {'dependency_field1':
'default'}
r, status = self.post(self.known_resource_url, data={})
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'dependency_field3': 'required'})
data = {'dependency_field3': 'hello',
'ref': 'test_post_dep_req_fields2'}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
def test_post_dependency_fields_with_values(self):
# Eve test dynamically registers a resource. This is more difficult for
# SQLAlchemy, so we just use an existing one.
schema = self.domain['contacts']['schema']
schema['dependency_field1']['default'] = 'one'
schema['dependency_field2']['required'] = True
schema['dependency_field2']['dependencies'] = \
{'dependency_field1': ['one', 'two']}
data = {"dependency_field1": "three", "dependency_field2": "seven",
'ref': 'test_post_dep_fields_val1'}
r, s = self.post(self.known_resource_url, data=data)
self.assert422(s)
data = {"dependency_field2": "seven",
'ref': 'test_post_dep_fields_val2'}
r, s = self.post(self.known_resource_url, data=data)
self.assert201(s)
data = {"dependency_field1": "one", "dependency_field2": "seven",
'ref': 'test_post_dep_fields_val3'}
r, s = self.post(self.known_resource_url, data=data)
self.assert201(s)
data = {"dependency_field1": "two", "dependency_field2": "seven",
'ref': 'test_post_dep_fields_val4'}
r, s = self.post(self.known_resource_url, data=data)
self.assert201(s)
def test_post_dependency_fields_with_subdocuments(self):
# Eve test dynamically registers a resource. This is more difficult for
# SQLAlchemy, so we just use an existing one.
schema = self.domain['contacts']['schema']
schema['dependency_field2']['dependencies'] = \
{'location.city': ['Berlin', 'Rome']}
data = {"location": {"city": "Paris"}, "dependency_field2": "seven",
'ref': 'test_post_dep_fields_sub1'}
r, s = self.post(self.known_resource_url, data=data)
self.assert422(s)
data = {"location": {"city": "Rome"}, "dependency_field2": "seven",
'ref': 'test_post_dep_fields_sub2'}
r, s = self.post(self.known_resource_url, data=data)
self.assert201(s)
data = {"location": {"city": "Berlin"}, "dependency_field2": "seven",
'ref': 'test_post_dep_fields_sub3'}
r, s = self.post(self.known_resource_url, data=data)
self.assert201(s)
def test_post_valueschema_dict(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
data = {'valueschema_dict': {'k1': '1'},
'ref': 'test_post_valueschema_123'}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
issues = r[ISSUES]
self.assertTrue('valueschema_dict' in issues)
self.assertEqual(issues['valueschema_dict'],
{'k1': 'must be of integer type'})
data['valueschema_dict']['k1'] = 1
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
def test_post_propertyschema_dict(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
data = {'propertyschema_dict': {'aaa': 1},
'ref': 'test_post_propertyschema1'}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
data = {'propertyschema_dict': {'AAA': '1'},
'ref': 'test_post_propertyschema2'}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
issues = r[ISSUES]
self.assertTrue('propertyschema_dict' in issues)
self.assertEqual(issues['propertyschema_dict'],
'propertyschema_dict')
def test_post_nested(self):
# Eve test manipulates schema and removes required constraint on 'ref'.
# We decided to include 'ref' as it is not easy to manipulate
# nullable-constraints during runtime.
data = {'location.city': 'a nested city',
'location.address': 'a nested address',
'ref': 'test_post_nested_12345678'}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
values = self.compare_post_with_get(
r[self.domain[self.known_resource]['id_field']],
['location']).pop()
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def test_id_field_included_with_document(self):
# Eve test uses ObjectId, we have to use an integer instead.
# since v0.6 we also allow the id field to be included with the POSTed
# document
id_field = self.domain[self.known_resource]['id_field']
id = 4242
data = {"ref": "1234567890123456789054321", id_field: id}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
self.assertEqual(r['_id'], id)
class TestEvents(eve_post_tests.TestEvents, TestBase):
def before_insert(self):
        # Eve test code uses mongo layer directly.
session = self.app.data.driver.session
model = test_sql_tables.Contacts
return session.query(model).filter(model.ref == self.new_contact_id) \
.first() is None
|
import numpy as np
import torch
N_CLASSES = 150
def mask_to_subgrids(mask, cell_scale):
"""
break WxH annotation array into a cell_scale x cell_scale vectors
"""
num_elem_row, num_elem_col = int(mask.shape[0] / cell_scale), int(mask.shape[1] / cell_scale)
res = []
for h in range(cell_scale):
for w in range(cell_scale):
start_h = h * num_elem_row
start_w = w * num_elem_col
end_h = min((h+1)*num_elem_row, mask.shape[0])
end_w = min((w+1)*num_elem_col, mask.shape[1])
section = mask[start_h:end_h, start_w:end_w]
res.append(section)
return res
def unique_to_sparse(unique):
"""
list of unique classes --> onehot sparse matrix
"""
sparse = np.zeros((N_CLASSES))
for num in unique:
if num != 255:
sparse[num] = 1
return sparse
def arr_to_dist(onehot_mask):
vec = onehot_mask.reshape(-1, N_CLASSES)
dist = vec.sum(axis=0) / (vec.sum() + 1e-10)
return dist
def vector_list_to_mat(vectors):
"""
take list of vectors and stack them to a square matrix
"""
n_rows = int(np.sqrt(len(vectors)))
rows = []
count = 0
curr_row = []
for i in range(len(vectors)):
if count < n_rows:
curr_row.append(vectors[i])
if count == n_rows:
count = 0
rows.append(curr_row)
curr_row = [vectors[i]]
count += 1
rows.append(curr_row)
return np.asarray(rows)
def extract_mask_distributions(mask, head_sizes=[1], top_k=150):
"""
mask: ground truth annotation (either BxWxH or WxH)
head_sizes: list of scales at which to extract the distribution of pixels for each class
top_k: limit # of classes, note even with k < C the distribution will add up to 1
    (to fold a predicted segmentation mask into the background pixels, see
    extract_adjusted_distribution below)
"""
if len(mask.size()) == 3: # if [B x W x H] rather than single sample [ W x H ]
return [ extract_mask_distributions(mask[i], top_k=top_k, head_sizes=head_sizes) for i in range(mask.size()[0]) ]
dist_labels = []
for s in head_sizes:
mat = extract_mask_distribution(mask, s)
class_order = (-mat.flatten()).argsort()
class_mask = np.where(np.in1d(np.arange(150), class_order[:top_k]), np.ones(150), np.zeros(150))
class_mask = np.expand_dims(np.expand_dims(class_mask, -1), -1)
masked_dist = class_mask * mat
masked_dist /= (np.sum(masked_dist, axis=None) + 1e-10)
dist_labels.append(masked_dist)
return dist_labels
def extract_mask_distribution(mask, scale=1):
"""
Input: WxH integer-encoded label
annotation --> pixel distribution at specified scales
ignores background pixels (255)
"""
onehot = (np.arange(255+1) == mask.numpy()[...,None]).astype(int)
onehot_ignore = onehot[:,:,:N_CLASSES]
if scale == 1: # special case
mat = arr_to_dist(onehot_ignore)
mat = np.expand_dims(mat, -1)
mat = np.expand_dims(mat, -1)
else:
quadrants = mask_to_subgrids(onehot_ignore, scale)
mat_vecs = [ arr_to_dist(m) for m in quadrants ]
mat = vector_list_to_mat(mat_vecs).astype(np.float32)
mat = mat.transpose(2, 0, 1)
return mat
def extract_adjusted_distribution(gt_mask, predicted_mask, head_sizes=[1], top_k=150):
"""
given ground truth annotation mask, and a trained segmentation network prediction,
compute the distribution of the 'corrected' mask, s.t. pixels are equal to the
ground truth label if non-background, and predicted label if background
this may offer a better training objective for the distribution of pixels for images
with large portions of background class
"""
corrected_mask = torch.where(gt_mask == 255, predicted_mask, gt_mask).cpu()
corrected_distributions = [ extract_mask_distributions(corrected_mask[i], head_sizes=head_sizes) for i in range(corrected_mask.size()[0]) ]
return corrected_distributions
def extract_mask_classes(mask, head_sizes=[1, 2, 3, 6]):
"""
annotation mask --> set of head_sizes x head_sizes matrices with one-hot class labels
encoding which classes are present in that region
"""
classification_head_labels = []
for s in head_sizes:
if s == 1: # special case
uniq = np.unique(mask)
mat = unique_to_sparse(uniq)
mat = np.expand_dims(mat, -1)
mat = np.expand_dims(mat, -1)
else:
quadrants = mask_to_subgrids(mask, s)
uniq_vectors = [ unique_to_sparse(np.unique(m)) for m in quadrants ]
mat = vector_list_to_mat(uniq_vectors).astype(np.float32)
mat = mat.transpose(2, 0, 1)
classification_head_labels.append(mat)
return classification_head_labels
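# Minimal sanity-check sketch (added, not part of the original module):
# extract per-scale class-pixel distributions from a random fake annotation.
if __name__ == "__main__":
    fake_mask = torch.randint(0, N_CLASSES, (64, 64))
    dists = extract_mask_distributions(fake_mask, head_sizes=[1, 2], top_k=10)
    print([d.shape for d in dists])  # expected: [(150, 1, 1), (150, 2, 2)]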
|
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, Dense, Concatenate, GlobalAveragePooling1D
from tensorflow.keras import Input, Model
from tcn import TCN
from tensorflow.keras.callbacks import EarlyStopping  # keep callbacks on tf.keras to match the models below
import tensorflow as tf
import numpy as np
import time
import optuna
from optuna.integration import TFKerasPruningCallback
from optuna.trial import TrialState
from utils_for_ds import data_utils
from utils_for_ds import model_customize
# ------------ Optuna ----------------
def time_model_with_data_split(df, label_column, train_start, train_end, look_back, look_forward, column_set_index = 0, split_n = 30, n_neurons = [128],
transformer_args = [5, 256, 256, 256], print_model_summary = True, dropout = 0.5, epochs = 30, patience = 5, early_stop = True,
save_model = False, model_path = 'model.hdf5', save_weight = False, checkpoint_path = '', model_name = 'lstm', enable_optuna = False, epochs_each_try = 10,
n_trials = 10, show_loss = True):
start_time = time.time()
tf.random.set_seed(1)
df = data_utils.switch_y_column(df, column_name=label_column)
if column_set_index:
df.set_index(column_set_index, inplace=True)
train_data = df[train_start : train_end]
X_train_seq, y_train_seq = data_utils.split_sequence(train_data.values, look_back = look_back, look_forward = look_forward)
X_train_seq, y_train_seq, X_val_seq, y_val_seq = data_utils.time_split_dataset(X_train_seq, y_train_seq, split_n = split_n)
n_features = X_train_seq.shape[2]
def create_lstm_model(trial):
n_layers = trial.suggest_int("n_layers", 1, 5)
model = Sequential()
n_units = np.zeros(n_layers, dtype=np.int64)
n_units[0] = trial.suggest_int("units_L1", 32, 256)
dropout = trial.suggest_uniform(f"dropout", 0.01, 0.5)
if n_layers == 1:
model.add(LSTM(n_units[0], input_shape=(look_back, n_features), return_sequences=False))
else:
model.add(LSTM(n_units[0], input_shape=(look_back, n_features), return_sequences=True))
for i in range(1, n_layers - 1):
n_units[i] = trial.suggest_int("units_L"+str(i+1), 32, 256)
model.add(LSTM(n_units[i], input_shape=(n_units[i - 1], n_features), return_sequences=True))
model.add(Dropout(dropout))
if n_layers > 1:
n_units[-1] = trial.suggest_int("units_L"+str(n_layers), 32, 256)
model.add(LSTM(n_units[-1], input_shape=(n_units[-2], n_features), return_sequences=False))
model.add(Dropout(dropout))
model.add(Dense(look_forward))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
return model
def create_tcn_model(trial):
tcn_batch_size = None # 512 # 1024
n_layers = trial.suggest_int("n_layers", 1, 5)
n_units = np.zeros(n_layers, dtype=np.int64)
layer_names = []
for index in range(n_layers):
layer_names.append('x'+str(index)+'_')
input_ = Input(batch_shape=(tcn_batch_size, look_back, n_features), name='Input_Layer')
n_units[0] = trial.suggest_int("units_L1", 32, 256)
dropout = trial.suggest_uniform(f"dropout", 0.01, 0.5)
if n_layers == 1:
layer_names[0] = TCN(nb_filters=n_units[0], kernel_size=2, nb_stacks=2, dilations=[1, 2, 4, 8, 16, 32],
padding='causal', use_skip_connections=True, dropout_rate=dropout, return_sequences=False,
activation='relu', kernel_initializer='he_normal', name = 'TCN_Layer_1', use_batch_norm=True)(input_)
else:
layer_names[0] = TCN(nb_filters=n_units[0], kernel_size=2, nb_stacks=2, dilations=[1, 2, 4, 8, 16, 32],
padding='causal', use_skip_connections=True, dropout_rate=dropout, return_sequences=True,
activation='relu', kernel_initializer='he_normal', name = 'TCN_Layer_1', use_batch_norm=True)(input_)
for index in range(1, n_layers - 1):
n_units[index] = trial.suggest_int("units_L"+str(index + 1), 32, 256)
layer_names[index] = TCN(nb_filters=n_units[index], kernel_size=2, nb_stacks=2, dilations=[1, 2, 4, 8, 16, 32],
padding='causal', use_skip_connections=True, dropout_rate=dropout, return_sequences=True,
activation='relu', kernel_initializer='he_normal', name = 'TCN_Layer_' + str(index + 1), use_batch_norm=True)(layer_names[index - 1]) # The TCN layer .
if n_layers > 1:
n_units[-1] = trial.suggest_int("units_L"+str(n_layers), 32, 256)
layer_names[-1] = TCN(nb_filters=n_units[-1], kernel_size=2, nb_stacks=2, dilations=[1, 2, 4, 8, 16, 32],
padding='causal', use_skip_connections=True, dropout_rate=dropout, return_sequences=False,
activation='relu', kernel_initializer='he_normal',
name = 'TCN_Layer_' + str(n_layers), use_batch_norm=True)(layer_names[-2]) # The TCN layer .
output_ = Dense(look_forward, name='Dense_Layer')(layer_names[-1])
model = Model(inputs=[input_], outputs=[output_], name='TCN_Model_trail')
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
return model
def create_transformer_model(trial):
time_embedding = Time2Vector(look_back)
n_heads = trial.suggest_int("n_heads", 1, 16)
d_k = trial.suggest_int("d_k", 8, 215)
d_v = trial.suggest_int("d_v", 8, 512)
ff_dim = trial.suggest_int("ff_dim", 8, 512)
attn_layer1 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer2 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer3 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
in_seq = Input(shape=(look_back, n_features))
x = time_embedding(in_seq)
x = Concatenate(axis=-1)([in_seq, x])
x = attn_layer1((x, x, x))
x = attn_layer2((x, x, x))
x = attn_layer3((x, x, x))
x = GlobalAveragePooling1D(data_format='channels_first')(x)
dropout = trial.suggest_uniform(f"dropout", 0.01, 0.5)
x = Dropout(dropout)(x)
num_hidden = int(trial.suggest_loguniform("hidden", 4, 512))
# active_func = trial.suggest_categorical('active_function', ['relu', 'entropy'])
x = Dense(num_hidden, activation='relu')(x)
x = Dropout(dropout)(x)
out = Dense(look_forward, activation='linear')(x)
model = Model(inputs=in_seq, outputs=out)
model.compile(loss='mse', optimizer='adam', metrics=['mse']) #, 'mape'])
return model
def objective(trial):
keras.backend.clear_session() # Clear clutter from previous session graphs.
if model_name == 'lstm':
model = create_lstm_model(trial) # Generate our trial model.
elif model_name == 'tcn':
model = create_tcn_model(trial)
elif model_name == 'transformer':
model = create_transformer_model(trial)
else:
model = create_lstm_model(trial)
history = model.fit(X_train_seq, y_train_seq, epochs=epochs_each_try, batch_size=512, # None
validation_data=(X_val_seq, y_val_seq),
callbacks=[TFKerasPruningCallback(trial, "val_loss")],
verbose=1)
# score = model.evaluate(X_val_seq, y_val_seq, verbose=0) # Evaluate the model accuracy on the validation set.
score = history.history["val_mse"][0] # Evaluate the model loss.
return score
if enable_optuna:
study = optuna.create_study(direction="minimize", sampler=optuna.samplers.TPESampler(), pruner=optuna.pruners.HyperbandPruner())
study.optimize(objective, n_trials=n_trials)
pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
if len(complete_trials) == 0:
            print('No trials have completed yet, please increase n_trials or epochs_each_try and run again.')
return None
else:
print("Best trial: Value :", study.best_trial.value)
print(" Params: ")
for key, value in study.best_trial.params.items():
print(" {}: {}".format(key, value))
if model_name in ['lstm', 'tcn']:
n_neurons = np.zeros(study.best_trial.params['n_layers'], dtype=np.int64)
for i in range(len(n_neurons)):
column_name = 'units_L'+str(i+1)
n_neurons[i] = study.best_trial.params[column_name]
dropout = study.best_trial.params['dropout']
# plot_optimization_history(study) # plot_intermediate_values(study) # plot_contour(study) # plot_param_importances(study)
if model_name in ['transformer']:
dropout = study.best_trial.params['dropout']
transformer_args = [study.best_trial.params['n_heads'], study.best_trial.params['d_k'], study.best_trial.params['d_v'], study.best_trial.params['ff_dim'], study.best_trial.params['hidden']]
if model_name == 'lstm':
l_Model = model_customize.lstm_model_custmize(look_back=look_back, look_forward=look_forward, n_features=n_features, dropout=dropout, print_summary=print_model_summary, n_neurons = n_neurons)
elif model_name == 'tcn':
l_Model = model_customize.tcn_model(look_back=look_back, look_forward=look_forward, n_features=n_features, dropout=dropout, print_summary=print_model_summary, n_neurons = n_neurons)
elif model_name == 'transformer':
l_Model = model_customize.transformer_model_custmize(look_back, look_forward, n_features=n_features, n_heads=transformer_args[0], d_k =transformer_args[1], d_v=transformer_args[2], ff_dim=transformer_args[3], dropout=dropout, num_hidden=64, print_summary=True)
else:
l_Model = model_customize.lstm_model_custmize(look_back=look_back, look_forward=look_forward, n_features=n_features, dropout=dropout, print_summary=print_model_summary, n_neurons = n_neurons)
if early_stop == False:
patience = epochs
if save_model:
model_train = train_model(l_Model, X_train_seq, y_train_seq, X_val_seq, y_val_seq, epochs=epochs, early_stop = early_stop, patience=patience, save_model = save_model, model_path=model_path, save_weight = save_weight, checkpoint_path=checkpoint_path, show_loss = show_loss)
else:
model_train = train_model(l_Model, X_train_seq, y_train_seq, X_val_seq, y_val_seq, epochs=epochs, early_stop = early_stop, patience=patience, save_model = save_model, show_loss = show_loss)
end_time = time.time()
print('time cost : ', round((end_time - start_time) / 60, 2), 'min')
return l_Model
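# Hedged usage sketch (added, not from the original): assuming `df` is a pandas
# DataFrame indexed by timestamp with a 'load' target column, a call might look
# like
#
#   model = time_model_with_data_split(df, label_column='load',
#                                      train_start='2021-01-01',
#                                      train_end='2021-06-30',
#                                      look_back=48, look_forward=24,
#                                      model_name='lstm',
#                                      enable_optuna=True, n_trials=5)
#
# Column name and date range above are illustrative placeholders only.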
# ----------- Train ----------------
def train_model(model, X_train_seq, y_train_seq, X_val_seq, y_val_seq, epochs=100, early_stop = True, patience=10,
save_model = False, model_path='', save_weight = False, checkpoint_path='', show_loss = True):
if not early_stop:
patience = epochs
early_stopping = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=patience,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False)
if save_model:
cp_callback = tf.keras.callbacks.ModelCheckpoint(model_path,
monitor='val_loss',
save_best_only=True,
# save_weights_only=True,
verbose=1)
history = model.fit(X_train_seq, y_train_seq,
epochs=epochs,
validation_data=(X_val_seq, y_val_seq),
shuffle=True,
batch_size=32,
verbose=1,
callbacks=[early_stopping, cp_callback])
if save_weight:
model.save_weights(checkpoint_path)
save_model_to_path = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', save_best_only=True, verbose=1)
else:
history = model.fit(X_train_seq, y_train_seq,
epochs=epochs,
validation_data=(X_val_seq, y_val_seq),
shuffle=True,
batch_size=32,
verbose=1,
callbacks=[early_stopping])
if show_loss:
label_list = [i for i in range(0, len(history.history['loss']))]
data_utils.show_draft_plot(datas = [history.history['loss'], history.history['val_loss']], x_label = label_list, title = 'Loss of Model', legend=['loss', 'val loss'])
return model
# ------------- Predict ----------------
def predict_result(predict_data_list = [] , model_path=[], model_type=['lstm'], divideby = [1]):
predict_list = []
for index in range(len(model_path)):
if model_type[index] in ['lstm', 'tcn', 'transformer']:
model_file = model_path[index]
prediction = model_file.predict(predict_data_list[index])
pred = np.array(prediction[-1]) * divideby[index]
predict_list.append(pred)
if model_type[index] in ['linear', 'xgb']:
model_file = model_path[index]
prediction = model_file.predict(predict_data_list[index])
pred = np.array(prediction) * divideby[index]
predict_list.append(prediction)
return predict_list
|
# -*- coding: utf-8; -*-
import os
import sys
import shlex
import subprocess
from operator import attrgetter
from optparse import IndentedHelpFormatter
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
class CompactHelpFormatter(IndentedHelpFormatter):
'''A more compact option-help formatter.'''
def __init__(self, *args, **kw):
super(CompactHelpFormatter, self).__init__(*args, **kw)
self.max_help_position = 40
self.indent_increment = 1
def format_option_strings(self, option):
        '''
        Render an option's short and long forms compactly, e.g.:
            -f, --format <arg>
        '''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value():
metavar = option.metavar or 'arg'
opts.append(' <%s>' % metavar)
return ''.join(opts)
def format_heading(self, heading):
return '' if heading == 'Options' else heading + ':\n'
def format_epilog(self, epilog):
return epilog if epilog else ''
def optional_value(option, optstr, value, parser, optional):
'''
An optparse option callback, with an optional value. For example:
Option('-n', '--dryrun', default=False, action='callback',
callback=partial(optional_value, optional='json'))
Allows the following constructs on the command-line:
-n|--dryrun => options.dryrun == True
-n json | --dryrun=json => options.dryrun == 'json'
-n yaml | --dryrun=yaml => options.dryrun == False
'''
value = option.default
for arg in parser.rargs:
if arg == optional:
value = arg
break
else:
value = True
if value == optional:
del parser.rargs[:1]
setattr(parser.values, option.dest, value)
def ordered(it, *order, unknown_first=False, key=None):
'''
Sort collection, while maintaining order of certain elements.
>>> nums = [3, 7, 8, 1, 9, 5, 2, 6, 4]
>>> ordered(nums, 1, 2, 3, 4, 5)
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> ordered(nums, 1, 2, 3, 4, 5, unknown_first=True)
[5, 6, 7, 8, 9, 1, 2, 3, 4]
'''
# @todo: This is specific to Statistic objects.
key = key if key else attrgetter('type_instance')
order = {i: n for n, i in enumerate(order)}
# First sort all elements alpha-numerically.
res = sorted(it, key=key)
idx = -1 if unknown_first else len(order)
def order_key(el):
return order[key(el)] if key(el) in order else idx
res = sorted(res, key=order_key)
return res
def shlex_join(it, sep=' '):
'''
Join a list of string in to a shell-safe string. Opposite of
shlex.split().
'''
return sep.join(shlex.quote(i) for i in it)
def pairwise(it, size=2, fillvalue=None):
'''
Split an iterable into n-sized parts.
>>> pairwise(range(10))
>>> [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
>>> pairwise(range(10), size=3)
>>> [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, None, None)]
'''
it = iter(it)
return list(zip_longest(*([it] * size), fillvalue=fillvalue))
def openfile(path):
'''Open a file or URL in the user's preferred application.'''
if sys.platform in {'linux', 'linux2'}:
cmd = ['xdg-open', path]
    elif sys.platform == 'darwin':
cmd = ['open', path]
elif sys.platform == 'win32':
return os.startfile(path)
return subprocess.check_call(cmd)
|
import crypto_key_gen
import hashlib
sk = crypto_key_gen.generate_key()
pk = crypto_key_gen.get_public_key(sk)
crypto_key_gen.save_key(sk, "./wallet/secret.pem")
crypto_key_gen.save_key(pk, "./wallet/public.pem")
|
# USAGE
# python webstreaming.py --ip 0.0.0.0 --port 8000
# import the necessary packages
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
from tensorflow.keras.models import load_model
import resizer as re
import threading
import argparse
import imutils
import time
import cv2
WIDTH = HEIGHT = 100
# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
outputFrame2 = None
lock = threading.Lock()
# initialize a flask object
app = Flask(__name__)
# loading model
model = load_model("model98keypoints.h5")
# initialize the video stream and allow the camera sensor to
# warmup
vs = VideoStream(src=0).start()
time.sleep(2.0)
@app.route("/")
def index():
# return the rendered template
return render_template("index.html")
obj = re.Resizer(WIDTH, HEIGHT, 1.1)
def get_keypoints():
global vs, lock, frame_original, outputFrame2
while True:
frame_original = vs.read()
frame = imutils.resize(frame_original, width=400)
img, faces = obj.get_resized_withoutdata(frame)
try:
faces = faces[0]
temp = img[0].copy()
temp = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
temp = temp.reshape(1, WIDTH, HEIGHT, 1)
data = model.predict(temp)
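            # model output is a flat vector of alternating (x, y) keypoint
            # coordinates, hence the stride-2 loop below (added note)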
for i in range(0, len(data[0]), 2):
cv2.circle(img[0], center=(data[0][i], data[0][i + 1]), radius=1,
color=(255, 255, 255))
frame[faces[1]:faces[1] + faces[3], faces[0]:faces[0] + faces[2], :] = cv2.resize(img[0],
(faces[2], faces[3]))
except:
pass
with lock:
outputFrame2 = frame.copy()
def generate1():
# grab global references to the output frame and lock variables
global outputFrame2, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame2 is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", frame_original)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
def generate2():
# grab global references to the output frame and lock variables
global outputFrame2, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if outputFrame2 is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", outputFrame2)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route("/original_feed")
def origianl_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(generate1(),
mimetype="multipart/x-mixed-replace; boundary=frame")
@app.route("/keypoints_feed")
def keypoints_feed():
return Response(generate2(),
mimetype="multipart/x-mixed-replace; boundary=frame")
# check to see if this is the main thread of execution
if __name__ == '__main__':
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--ip", type=str, required=True,
help="ip address of the device")
ap.add_argument("-o", "--port", type=int, required=True,
help="ephemeral port number of the server (1024 to 65535)")
ap.add_argument("-f", "--frame-count", type=int, default=32,
help="# of frames used to construct the background model")
args = vars(ap.parse_args())
t = threading.Thread(target=get_keypoints)
t.daemon = True
t.start()
# start the flask app
app.run(host=args["ip"], port=args["port"], debug=True,
threaded=True, use_reloader=False)
# release the video stream pointer
vs.stop()
|
import re
# input_lines = '''\
# swap position 4 with position 0
# swap letter d with letter b
# reverse positions 0 through 4
# rotate left 1 step
# move position 1 to position 4
# move position 3 to position 0
# rotate based on position of letter b
# rotate based on position of letter d
# '''.splitlines()
input_lines = open('input.txt')
# password = 'abcde'
password = 'abcdefgh'
swap_pos_re = re.compile(r'swap position (\d+) with position (\d+)')
swap_char_re = re.compile(r'swap letter (\w) with letter (\w)')
rotate_re = re.compile(r'rotate (left|right) (\d+) steps?')
rotate_pos_re = re.compile(r'rotate based on position of letter (\w)')
reverse_re = re.compile(r'reverse positions (\d+) through (\d+)')
move_re = re.compile(r'move position (\d+) to position (\d+)')
def swap_pos(word, x, y):
chars = list(word)
chars[x], chars[y] = chars[y], chars[x]
return ''.join(chars)
def swap_char(word, a, b):
return swap_pos(word, word.index(a), word.index(b))
def rotate(word, offset):
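    # a positive offset rotates right, a negative offset rotates left (added note)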
return ''.join(word[i % len(word)] for i in range(-offset, len(word)-offset))
def rotate_pos(word, char):
pos = word.index(char)
if pos >= 4:
pos += 1
return rotate(word, pos + 1)
def reverse(word, x, y):
return word[:x] + word[x:y+1][::-1] + word[y+1:]
def move(word, x, y):
chars = list(word)
chars.insert(y, chars.pop(x))
return ''.join(chars)
for line in input_lines:
m = swap_pos_re.match(line)
if m:
x, y = map(int, m.groups())
password = swap_pos(password, x, y)
continue
m = swap_char_re.match(line)
if m:
a, b = m.groups()
password = swap_char(password, a, b)
continue
m = rotate_re.match(line)
if m:
side, steps = m.groups()
offset = int(steps) * (1 if side == 'right' else -1)
password = rotate(password, offset)
continue
m = rotate_pos_re.match(line)
if m:
char, = m.groups()
password = rotate_pos(password, char)
continue
m = reverse_re.match(line)
if m:
x, y = map(int, m.groups())
password = reverse(password, x, y)
continue
m = move_re.match(line)
if m:
x, y = map(int, m.groups())
password = move(password, x, y)
continue
raise Exception("No match: " + repr(line))
print(password)
|
import paddle
import numpy as np
from matplotlib import pyplot as plt
from paddle.fluid.dataloader import batch_sampler
from paddle.fluid.dataloader.batch_sampler import BatchSampler
import paddle.nn.functional as F
from paddle.nn import Linear
from paddle.io import Dataset
import math
# Define hyperparameters
num_samples=1000
# gauss function:
epochs=200
# # polynomial function:
# epochs=200
batchs=400
def f(x, mean=0, sigma=1):
return np.exp(-1*((x-mean)**2)/(2*(sigma**2)))/(math.sqrt(2*np.pi)*sigma)
# def f(x, a=1, b=-2.4, c=4.8, d=0):
# return a*x**3+b*x**2+c*x+d
# Data
x=np.zeros(num_samples)
y=np.zeros(num_samples)
for i in range(num_samples):
x[i]=np.random.uniform(-3.0, 3.0)
y[i]=f(x[i])
x=paddle.to_tensor(x, dtype='float32')
y=paddle.to_tensor(y, dtype='float32')
# Multi-Layer Perceptron
class MLP(paddle.nn.Layer):
def __init__(self):
super(MLP, self).__init__()
self.fc1=Linear(2, 32)
self.fc2=Linear(32, 2)
def forward(self, inputs):
x=self.fc1(inputs)
x=F.relu(x)
x=self.fc2(x)
return x
# Training
def train(model):
# gauss function:
opt=paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())
    # # polynomial function:
# opt=paddle.optimizer.SGD(learning_rate=0.001, parameters=model.parameters())
y_graph=[]
x_graph=[]
for i in range(epochs):
for j in range(batchs):
x_train=x[j*2: 2+j*2]
y_train=y[j*2: 2+j*2]
y_pred=model(x_train)
if i==(epochs-1):
y_graph.append(y_pred)
x_graph.append(x_train)
loss=F.square_error_cost(y_pred, y_train)
avg_loss=paddle.mean(loss)
if i%10==0:
print("epoch: {},batch: {}, loss: {}".format(i, j, avg_loss.numpy()))
avg_loss.backward()
opt.step()
opt.clear_grad()
y_graph=np.array(y_graph)
x_graph=np.array(x_graph)
plt.plot(x_graph, y_graph, 'r.')
x_origin=x[0:800]
x_origin=np.array(x_origin)
y_origin=y[0:800]
y_origin=np.array(y_origin)
plt.plot(x_origin,y_origin, 'b.')
plt.show()
paddle.save(model.state_dict(), 'MLP_test.pdparams')
model=MLP()
train(model)
# Evaluation
def evaluation(model):
print('start evaluation .......')
params_file_path = 'MLP_test.pdparams'
param_dict = paddle.load(params_file_path)
model.load_dict(param_dict)
model.eval()
y_graph=[]
x_graph=[]
for i in range(100):
x_test=x[800+i*2: 800+2+i*2]
y_test=y[800+i*2: 800+2+i*2]
y_pred=model(x_test)
y_graph.append(y_pred)
x_graph.append(x_test)
loss = F.square_error_cost(y_pred, y_test)
avg_loss = paddle.mean(loss)
print('loss={}'.format(avg_loss.numpy()))
y_graph=np.array(y_graph)
x_graph=np.array(x_graph)
plt.plot(x_graph, y_graph, 'r.')
x_origin=x[800:1000]
x_origin=np.array(x_origin)
y_origin=y[800:1000]
y_origin=np.array(y_origin)
plt.plot(x_origin,y_origin, 'b.')
plt.show()
evaluation(model)
|
import warnings
from .._data import conform_dataset, normalize_likelihood
from .._display import session_block
class VarDec(object):
"""
    Variance decomposition through GLMMs.
Example
-------
.. doctest::
>>> from limix.vardec import VarDec
>>> from limix.stats import multivariate_normal as mvn
>>> from numpy import ones, eye, concatenate, zeros, exp
>>> from numpy.random import RandomState
>>>
>>> random = RandomState(0)
>>> nsamples = 20
>>>
>>> M = random.randn(nsamples, 2)
>>> M = (M - M.mean(0)) / M.std(0)
>>> M = concatenate((ones((nsamples, 1)), M), axis=1)
>>>
>>> K0 = random.randn(nsamples, 10)
>>> K0 = K0 @ K0.T
>>> K0 /= K0.diagonal().mean()
>>> K0 += eye(nsamples) * 1e-4
>>>
>>> K1 = random.randn(nsamples, 10)
>>> K1 = K1 @ K1.T
>>> K1 /= K1.diagonal().mean()
>>> K1 += eye(nsamples) * 1e-4
>>>
>>> y = M @ random.randn(3) + mvn(random, zeros(nsamples), K0)
>>> y += mvn(random, zeros(nsamples), K1)
>>>
>>> vardec = VarDec(y, "normal", M)
>>> vardec.append(K0)
>>> vardec.append(K1)
>>> vardec.append_iid()
>>>
>>> vardec.fit(verbose=False)
>>> print(vardec) # doctest: +FLOAT_CMP
Variance decomposition
----------------------
<BLANKLINE>
𝐲 ~ 𝓝(𝙼𝜶, 0.385⋅𝙺 + 1.184⋅𝙺 + 0.000⋅𝙸)
>>> y = exp((y - y.mean()) / y.std())
>>> vardec = VarDec(y, "poisson", M)
>>> vardec.append(K0)
>>> vardec.append(K1)
>>> vardec.append_iid()
>>>
>>> vardec.fit(verbose=False)
>>> print(vardec) # doctest: +FLOAT_CMP
Variance decomposition
----------------------
<BLANKLINE>
𝐳 ~ 𝓝(𝙼𝜶, 0.000⋅𝙺 + 0.350⋅𝙺 + 0.000⋅𝙸) for yᵢ ~ Poisson(λᵢ=g(zᵢ)) and g(x)=eˣ
"""
def __init__(self, y, lik="normal", M=None):
"""
Constructor.
Parameters
----------
y : array_like
Phenotype.
lik : tuple, "normal", "bernoulli", "probit", "binomial", "poisson"
Sample likelihood describing the residual distribution.
Either a tuple or a string specifying the likelihood is required. The
Normal, Bernoulli, Probit, and Poisson likelihoods can be selected by
providing a string. Binomial likelihood on the other hand requires a tuple
because of the number of trials: ``("binomial", array_like)``. Defaults to
``"normal"``.
M : n×c array_like
Covariates matrix.
"""
from numpy import asarray, eye
from glimix_core.mean import LinearMean, KronMean
y = asarray(y, float)
data = conform_dataset(y, M)
y = data["y"]
M = data["M"]
self._y = y
self._M = M
self._lik = normalize_likelihood(lik)
if self._multi_trait():
A = eye(self._y.shape[1])
self._mean = KronMean(A, asarray(M, float))
else:
self._mean = LinearMean(asarray(M, float))
self._covariance = []
self._glmm = None
self._fit = False
self._unnamed = 0
@property
def effsizes(self):
"""
        Covariance effect sizes.
Returns
-------
effsizes : ndarray
Effect sizes.
"""
if not self._fit:
self.fit()
if hasattr(self._mean, "effsizes"):
return self._mean.effsizes
return self._mean.B
@property
def covariance(self):
"""
Get the covariance matrices.
Returns
-------
covariances : list
Covariance matrices.
"""
return self._covariance
def fit(self, verbose=True):
"""
Fit the model.
Parameters
----------
verbose : bool, optional
Set ``False`` to silence it. Defaults to ``True``.
"""
with session_block("Variance decomposition", disable=not verbose):
if self._lik[0] == "normal":
if self._multi_trait():
self._fit_lmm_multi_trait(verbose)
elif self._simple_model():
self._fit_lmm_simple_model(verbose)
else:
self._fit_lmm(verbose)
else:
if self._simple_model():
self._fit_glmm_simple_model(verbose)
else:
self._fit_glmm(verbose)
if verbose:
print(self)
self._fit = True
def lml(self):
"""
Get the log of the marginal likelihood.
Returns
-------
float
Log of the marginal likelihood.
"""
if not self._fit:
self._glmm.fit()
return self._glmm.lml()
def append_iid(self, name="residual"):
from glimix_core.cov import EyeCov
if self._multi_trait():
cov = MTEyeCov(self._y.shape[1])
else:
cov = EyeCov(self._y.shape[0])
cov.name = name
self._covariance.append(cov)
def append(self, K, name=None):
from numpy_sugar import is_all_finite
from numpy import asarray
from glimix_core.cov import GivenCov
data = conform_dataset(self._y, K=K)
K = asarray(data["K"], float)
if not is_all_finite(K):
raise ValueError("Covariance-matrix values must be finite.")
K = K / K.diagonal().mean()
if self._multi_trait():
cov = MTGivenCov(self._y.shape[1], K)
else:
cov = GivenCov(K)
if name is None:
name = "unnamed-{}".format(self._unnamed)
self._unnamed += 1
cov.name = name
self._covariance.append(cov)
def plot(self):
import limix
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
variances = [c.scale for c in self._covariance]
variances = [(v / sum(variances)) * 100 for v in variances]
names = [c.name for c in self._covariance]
ax = sns.barplot(x=names, y=variances)
ax.yaxis.set_major_formatter(FormatStrFormatter("%.0f%%"))
ax.set_xlabel("random effects")
ax.set_ylabel("explained variance")
ax.set_title("Variance decomposition")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
limix.plot.get_pyplot().tight_layout()
limix.plot.show()
def _fit_lmm(self, verbose):
from glimix_core.cov import SumCov
from glimix_core.gp import GP
from numpy import asarray
y = asarray(self._y, float).ravel()
gp = GP(y, self._mean, SumCov(self._covariance))
gp.fit(verbose=verbose)
self._glmm = gp
def _fit_glmm(self, verbose):
from glimix_core.cov import SumCov
from glimix_core.ggp import ExpFamGP
from numpy import asarray
y = asarray(self._y, float).ravel()
gp = ExpFamGP(y, self._lik, self._mean, SumCov(self._covariance))
gp.fit(verbose=verbose)
self._glmm = gp
def _fit_lmm_multi_trait(self, verbose):
from numpy import sqrt, asarray
from glimix_core.lmm import Kron2Sum
from numpy_sugar.linalg import economic_qs, ddot
X = asarray(self._M, float)
QS = economic_qs(self._covariance[0]._K)
G = ddot(QS[0][0], sqrt(QS[1]))
lmm = Kron2Sum(self._y, self._mean.A, X, G, rank=1, restricted=True)
lmm.fit(verbose=verbose)
self._glmm = lmm
self._covariance[0]._set_kron2sum(lmm)
self._covariance[1]._set_kron2sum(lmm)
self._mean.B = lmm.B
def _fit_lmm_simple_model(self, verbose):
from numpy_sugar.linalg import economic_qs
from glimix_core.lmm import LMM
from numpy import asarray
K = self._get_matrix_simple_model()
y = asarray(self._y, float).ravel()
QS = None
if K is not None:
QS = economic_qs(K)
lmm = LMM(y, self._M, QS)
lmm.fit(verbose=verbose)
self._set_simple_model_variances(lmm.v0, lmm.v1)
self._glmm = lmm
def _fit_glmm_simple_model(self, verbose):
from numpy_sugar.linalg import economic_qs
from glimix_core.glmm import GLMMExpFam
from numpy import asarray
K = self._get_matrix_simple_model()
y = asarray(self._y, float).ravel()
QS = None
if K is not None:
QS = economic_qs(K)
glmm = GLMMExpFam(y, self._lik, self._M, QS)
glmm.fit(verbose=verbose)
self._set_simple_model_variances(glmm.v0, glmm.v1)
self._glmm = glmm
def _set_simple_model_variances(self, v0, v1):
from glimix_core.cov import GivenCov, EyeCov
for c in self._covariance:
if isinstance(c, GivenCov):
c.scale = v0
elif isinstance(c, EyeCov):
c.scale = v1
def _get_matrix_simple_model(self):
from glimix_core.cov import GivenCov
K = None
for i in range(len(self._covariance)):
if isinstance(self._covariance[i], GivenCov):
self._covariance[i].scale = 1.0
K = self._covariance[i].value()
break
return K
def _multi_trait(self):
return self._y.ndim == 2 and self._y.shape[1] > 1
def _simple_model(self):
from glimix_core.cov import GivenCov, EyeCov
if len(self._covariance) > 2:
return False
c = self._covariance
if len(c) == 1 and isinstance(c[0], EyeCov):
return True
if isinstance(c[0], GivenCov) and isinstance(c[1], EyeCov):
return True
if isinstance(c[1], GivenCov) and isinstance(c[0], EyeCov):
return True
return False
def __repr__(self):
from glimix_core.cov import GivenCov
from limix.qtl._result._draw import draw_model
from limix._display import draw_title
covariance = ""
for c in self._covariance:
s = c.scale
if isinstance(c, GivenCov):
covariance += f"{s:.3f}⋅𝙺 + "
else:
covariance += f"{s:.3f}⋅𝙸 + "
if len(covariance) > 2:
covariance = covariance[:-3]
msg = draw_title("Variance decomposition")
msg += draw_model(self._lik[0], "𝙼𝜶", covariance)
msg = msg.rstrip()
return msg
class MTGivenCov:
def __init__(self, ntraits, K):
self._ntraits = ntraits
self._K = K
self._kron2sum = None
self._name = "unnamed"
def _set_kron2sum(self, kron2sum):
self._kron2sum = kron2sum
@property
def scale(self):
"""
Scale parameter, s.
"""
from numpy import eye
if self._kron2sum is None:
return eye(self._ntraits)
return self._kron2sum.C0
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
class MTEyeCov:
def __init__(self, ntraits):
self._ntraits = ntraits
self._kron2sum = None
self._name = "unnamed"
def _set_kron2sum(self, kron2sum):
self._kron2sum = kron2sum
@property
def scale(self):
"""
Scale parameter, s.
"""
from numpy import eye
if self._kron2sum is None:
return eye(self._ntraits)
return self._kron2sum.C1
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import scipy.sparse as sp
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from modules import *
# GCN model
class GraphConvolution(Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.in_features)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.mm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
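# Minimal usage sketch (added comment, assumes only torch): the layer expects
# node features of shape (N, in_features) and a dense (N, N) adjacency matrix,
# which callers usually normalize beforehand (the layer itself does not):
#   gc = GraphConvolution(8, 16)
#   x = torch.randn(4, 8)           # 4 nodes, 8 features each
#   adj = torch.eye(4)              # placeholder; real use passes D^-1/2 (A+I) D^-1/2
#   out = gc(x, adj)                # -> shape (4, 16)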
# Deep Set model
class rFF_pool(nn.Module):
def __init__(self, in_features=200, pooling_method='max'):
super(rFF_pool, self).__init__()
self.in_features = in_features
self.pooling_method = pooling_method
self.ll1 = nn.Linear(in_features, 256)
self.ll2 = nn.Linear(256, 128)
self.ll3 = nn.Linear(128, 64)
self.d3 = nn.Dropout(p=0.5)
self.fc = nn.Linear(64, 1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x = [(F.relu(self.ll1(x_))) for x_ in x]
x = [(F.relu(self.ll2(x_))) for x_ in x]
x = [self.d3(F.relu(self.ll3(x_))) for x_ in x]
if self.pooling_method == 'max':
x = [torch.unsqueeze(torch.max(x_, axis=0)[0], 0) for x_ in x]
elif self.pooling_method == 'mean':
x = [torch.unsqueeze(x_.mean(dim=0), 0) for x_ in x]
elif self.pooling_method == 'sum':
x = [torch.unsqueeze(x_.sum(dim=0), 0) for x_ in x]
else:
print('Invalid Pooling method!!!!!!')
exit(0)
x = torch.cat(x, axis=0)
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.fc(x))
return x, embedding
# Deep Set GCN model
class rFF_pool_GCN(nn.Module):
def __init__(self, in_features=200, pooling_method='max'):
super(rFF_pool_GCN, self).__init__()
self.in_features = in_features
self.pooling_method = pooling_method
self.ll1 = nn.Linear(in_features, 256)
self.ll2 = nn.Linear(256, 128)
self.ll3 = nn.Linear(128, 64)
self.d3 = nn.Dropout(p=0.5)
self.gc = GraphConvolution(64, 1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input, adj):
x = input
x = [(F.relu(self.ll1(x_))) for x_ in x]
x = [(F.relu(self.ll2(x_))) for x_ in x]
x = [self.d3(F.relu(self.ll3(x_))) for x_ in x]
if self.pooling_method == 'max':
x = [torch.unsqueeze(torch.max(x_, axis=0)[0], 0) for x_ in x]
elif self.pooling_method == 'mean':
x = [torch.unsqueeze(x_.mean(dim=0), 0) for x_ in x]
elif self.pooling_method == 'sum':
x = [torch.unsqueeze(x_.sum(dim=0), 0) for x_ in x]
else:
print('Invalid Pooling method!!!!!!')
exit(0)
x = torch.cat(x, axis=0)
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.gc(x, adj))
return x, embedding
# Set Transformer model
class SetTransformer(nn.Module):
def __init__(self, in_features=200, num_heads=4, ln=False):
super(SetTransformer, self).__init__()
self.enc = nn.Sequential(
SAB(dim_in=in_features, dim_out=64, num_heads=num_heads, ln=ln),
SAB(dim_in=64, dim_out=64, num_heads=num_heads, ln=ln)
)
self.dec = nn.Sequential(
PMA(dim=64, num_heads=num_heads, num_seeds=1, ln=ln)
)
self.fc = nn.Linear(in_features=64, out_features=1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, x):
x = [self.enc(torch.unsqueeze(x_, 0)) for x_ in x]
x = [self.dec(x_).squeeze() for x_ in x]
x = [torch.unsqueeze(x_, 0) for x_ in x]
x = torch.cat(x, axis=0)
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.fc(x))
return x, embedding
# Set Transformer GCN model
class STGCN(nn.Module):
def __init__(self, in_features=200, num_heads=4, ln=False):
super(STGCN, self).__init__()
self.enc = nn.Sequential(
SAB(dim_in=in_features, dim_out=64, num_heads=num_heads, ln=ln),
SAB(dim_in=64, dim_out=64, num_heads=num_heads, ln=ln)
)
self.dec = nn.Sequential(
PMA(dim=64, num_heads=num_heads, num_seeds=1, ln=ln)
)
self.gc = GraphConvolution(64, 1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, x, adj):
x = [self.enc(torch.unsqueeze(x_, 0)) for x_ in x]
x = [self.dec(x_).squeeze() for x_ in x]
x = [torch.unsqueeze(x_, 0) for x_ in x]
x = torch.cat(x, axis=0)
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.gc(x, adj))
return x, embedding
# Deep Set model (residual variant)
class res_pool(nn.Module):
def __init__(self, in_features=200, pooling_method='max'):
super(res_pool, self).__init__()
self.in_features = in_features
self.pooling_method = pooling_method
self.ll1 = nn.Linear(in_features, 128)
self.ll2 = nn.Linear(128, 128)
self.ll3 = nn.Linear(128, 128)
self.d1 = nn.Dropout(p=0.5)
self.d2 = nn.Dropout(p=0.5)
self.d3 = nn.Dropout(p=0.5)
self.fc = nn.Linear(128, 1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input):
x = input
x1 = [(F.relu(self.ll1(x_))) for x_ in x]
x2 = [(F.relu(self.ll2(x_))) for x_ in x1]
x3 = [(F.relu(self.ll3(x_))) for x_ in x2]
if self.pooling_method == 'max':
x1 = [torch.unsqueeze(torch.max(self.d1(x_), axis=0)[0], 0) for x_ in x1]
x2 = [torch.unsqueeze(torch.max(self.d2(x_), axis=0)[0], 0) for x_ in x2]
x3 = [torch.unsqueeze(torch.max(self.d3(x_), axis=0)[0], 0) for x_ in x3]
elif self.pooling_method == 'mean':
x1 = [torch.unsqueeze(self.d1(x_).mean(dim=0), 0) for x_ in x1]
x2 = [torch.unsqueeze(self.d2(x_).mean(dim=0), 0) for x_ in x2]
x3 = [torch.unsqueeze(self.d3(x_).mean(dim=0), 0) for x_ in x3]
elif self.pooling_method == 'sum':
x1 = [torch.unsqueeze(self.d1(x_).sum(dim=0), 0) for x_ in x1]
x2 = [torch.unsqueeze(self.d2(x_).sum(dim=0), 0) for x_ in x2]
x3 = [torch.unsqueeze(self.d3(x_).sum(dim=0), 0) for x_ in x3]
else:
print('Invalid Pooling method!!!!!!')
exit(0)
x1 = torch.cat(x1, axis=0)
x2 = torch.cat(x2, axis=0)
x3 = torch.cat(x3, axis=0)
x = x1 + x2 + x3
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.fc(x))
return x, embedding
# Deep Set GCN model (residual variant)
class res_pool_GCN(nn.Module):
def __init__(self, in_features=200, pooling_method='max'):
super(res_pool_GCN, self).__init__()
self.in_features = in_features
self.pooling_method = pooling_method
self.ll1 = nn.Linear(in_features, 128)
self.ll2 = nn.Linear(128, 128)
self.ll3 = nn.Linear(128, 128)
self.d1 = nn.Dropout(p=0.5)
self.d2 = nn.Dropout(p=0.5)
self.d3 = nn.Dropout(p=0.5)
self.gc = GraphConvolution(128, 1)
self.reset_parameters()
def reset_parameters(self):
for module in self.children():
reset_op = getattr(module, "reset_parameters", None)
if callable(reset_op):
reset_op()
def forward(self, input, adj):
x = input
x1 = [(F.relu(self.ll1(x_))) for x_ in x]
x2 = [(F.relu(self.ll2(x_))) for x_ in x1]
x3 = [(F.relu(self.ll3(x_))) for x_ in x2]
if self.pooling_method == 'max':
x1 = [torch.unsqueeze(torch.max(self.d1(x_), axis=0)[0], 0) for x_ in x1]
x2 = [torch.unsqueeze(torch.max(self.d2(x_), axis=0)[0], 0) for x_ in x2]
x3 = [torch.unsqueeze(torch.max(self.d3(x_), axis=0)[0], 0) for x_ in x3]
elif self.pooling_method == 'mean':
x1 = [torch.unsqueeze(self.d1(x_).mean(dim=0), 0) for x_ in x1]
x2 = [torch.unsqueeze(self.d2(x_).mean(dim=0), 0) for x_ in x2]
x3 = [torch.unsqueeze(self.d3(x_).mean(dim=0), 0) for x_ in x3]
elif self.pooling_method == 'sum':
x1 = [torch.unsqueeze(self.d1(x_).sum(dim=0), 0) for x_ in x1]
x2 = [torch.unsqueeze(self.d2(x_).sum(dim=0), 0) for x_ in x2]
x3 = [torch.unsqueeze(self.d3(x_).sum(dim=0), 0) for x_ in x3]
else:
print('Invalid Pooling method!!!!!!')
exit(0)
x1 = torch.cat(x1, axis=0)
x2 = torch.cat(x2, axis=0)
x3 = torch.cat(x3, axis=0)
x = x1 + x2 + x3
embedding = x.cpu().detach().numpy()
x = torch.sigmoid(self.gc(x, adj))
return x, embedding
|
import json
from src.services.property_service import PropertyService
class PropertyController:
property_service = PropertyService()
def properties(self, status: str, year: int, city: str) -> object:
properties = self.property_service.get_properties(status, year, city)
return json.dumps(properties).encode()
def get_property_query_params(self, query_params) -> tuple[str, str, str]:
return (
query_params.get("status", [None])[0],
query_params.get("year", [None])[0],
query_params.get("city", [None])[0],
)
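# Usage sketch (added comment; the example URL is made up). The "[None])[0]"
# pattern exists because urllib.parse.parse_qs maps every key to a list:
#   from urllib.parse import urlparse, parse_qs
#   query = parse_qs(urlparse("/properties?status=sold&year=2020").query)
#   # query == {"status": ["sold"], "year": ["2020"]}
#   status, year, city = PropertyController().get_property_query_params(query)
#   # -> ("sold", "2020", None); "city" falls back to None because it is absent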
|
import numpy as np
import pytest
from compimg.similarity import MSE, RMSE, MAE, PSNR, SSIM, GSSIM
from compimg.exceptions import DifferentDTypesError, DifferentShapesError
@pytest.fixture
def reference_image():
return np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8)
@pytest.fixture
def image():
return np.array([[3, 2, 1], [4, 5, 6]], dtype=np.uint8)
@pytest.mark.parametrize("metric", [MSE(), MAE(), PSNR(), SSIM(), GSSIM()])
def test_if_different_shapes_guard_raises(metric):
wrong_shape_x = np.zeros((10, 10, 2))
wrong_shape_y = np.zeros((20, 20, 2))
with pytest.raises(DifferentShapesError):
metric.compare(wrong_shape_x, wrong_shape_y)
@pytest.mark.parametrize("metric", [MSE(), MAE(), PSNR(), SSIM(), GSSIM()])
def test_if_different_dtypes_guard_raises(metric):
wrong_dtype_x = np.zeros((10, 10, 2), dtype=np.float32)
wrong_dtype_y = np.zeros((10, 10, 2), dtype=np.uint8)
with pytest.raises(DifferentDTypesError):
metric.compare(wrong_dtype_x, wrong_dtype_y)
class TestMSE:
def test_compare_returns_correct_result(self, image, reference_image):
value = MSE().compare(image, reference_image)
assert round(value, 2) == 1.33
def test_compare_returns_zero_when_identical_images(self, reference_image):
value = MSE().compare(reference_image, reference_image)
assert value == 0.0
class TestRMSE:
def test_compare_returns_correct_result(self, image, reference_image):
value = RMSE().compare(image, reference_image)
assert round(value, 2) == 1.15
def test_compare_returns_zero_when_identical_images(self, reference_image):
value = RMSE().compare(reference_image, reference_image)
assert value == 0.0
class TestMAE:
def test_compare_returns_correct_result(self, image, reference_image):
value = MAE().compare(image, reference_image)
assert round(value, 2) == 0.67
def test_compare_returns_zero_when_identical_images(self, reference_image):
value = MAE().compare(reference_image, reference_image)
assert value == 0.0
class TestPSNR:
def test_compare_returns_correct_result(self, image, reference_image):
value = PSNR().compare(image, reference_image)
assert round(value, 2) == 46.88
def test_compare_returns_inf_if_images_are_identical(self, reference_image):
value = PSNR().compare(reference_image, reference_image)
assert round(value, 2) == float("inf")
class TestSSIM:
def test_compare_returns_one_when_images_are_identical(self):
reference_image = np.ones((20, 20, 3))
value = SSIM().compare(reference_image, reference_image)
assert value == 1.0
def test_compare_returns_zero_when_images_are_completely_different(self):
image = np.full((20, 20, 3), fill_value=255, dtype=np.uint8)
reference_image = np.zeros((20, 20, 3), dtype=np.uint8)
value = SSIM().compare(image, reference_image)
assert round(value, 2) == 0.00
class TestGSSIM:
def test_compare_returns_one_when_images_are_identical(self):
reference_image = np.ones((20, 20, 3))
value = GSSIM().compare(reference_image, reference_image)
assert value == 1.0
|
from enum import Enum
import sys
class ParseCode(Enum):
good_pair_sub = 1 # 2 partners listed in partner.txt, code exists
format_error_sub = 2 # code
good_alone = 3 # txt and submission for 1 person
none_found_sub = 4
empty = 5
no_dir = 6
format_error_no_sub = 7
good_pair_no_sub = 8 # 2 partners listed in partner.txt, no code
class ResultCode(Enum):
good = 1
miss_txt_sub = 2
mult_sub = 3
no_proj = 4
partial_match = 5
conflict = 6
class Group:
# parsed[0] is a code indicating the status of partner.txt
def __init__(self, name, csid, pcode, partner_info=None):
self.name1 = name
self.csid1 = csid
self.pcode1 = pcode
self.name2 = None
self.csid2 = None
self.pcode2 = None
self.p1HasSub = self.hasSub()
# default
self.rcode = ResultCode.good
# this person claimed to work alone in their partner.txt
if self.pcode1 == ParseCode.good_alone:
self.rcode = ResultCode.good
elif self.pcode1 == ParseCode.good_pair_sub\
or self.pcode1 == ParseCode.good_pair_no_sub:
# until the other partner.txt shows up, this is a partial
            # make this good when the other partner.txt shows up
self.rcode = ResultCode.partial_match
# no partner.txt, but with code
elif self.pcode1 == ParseCode.format_error_sub\
or self.pcode1 == ParseCode.none_found_sub:
self.rcode = ResultCode.miss_txt_sub
elif self.pcode1 == ParseCode.empty\
or self.pcode1 == ParseCode.no_dir\
or self.pcode1 == ParseCode.format_error_no_sub:
self.rcode = ResultCode.no_proj
else:
print("Error. due to pcode:" + str(self.pcode1))
sys.exit()
#handle the partner information
if partner_info:
self.name2 = partner_info[0]
self.csid2 = partner_info[1]
def integrate(self, other, partial=False):
# multiple submissions
if self.hasSub() and other.hasSub():
self.rcode = ResultCode.mult_sub
# 2 people who only turned in partner.txts?
elif not self.hasSub() and not other.hasSub():
self.rcode = ResultCode.no_proj
#so we only have 1 sub between 2 people
else:
if partial:
self.rcode = ResultCode.partial_match
else:
self.rcode = ResultCode.good
self.p1HasSub = self.hasSub() #if p1 has the sub
if not self.name2:
self.name2 = other.getName1()
self.csid2 = other.getcsid1() # reuse the csid that was "real"
self.pcode2 = other.getPcode1()
return
def hasSub(self):
return self.pcode1 in\
(ParseCode.good_pair_sub,
ParseCode.format_error_sub,
ParseCode.good_alone,
ParseCode.none_found_sub)\
or self.pcode2 in\
(ParseCode.good_pair_sub,
ParseCode.format_error_sub,
ParseCode.good_alone,
             ParseCode.none_found_sub)
def getcsid1(self):
return self.csid1
def getName1(self):
return self.name1
def getcsid2(self):
return self.csid2
def getName2(self):
return self.name2
def getPcode1(self):
return self.pcode1
def getPcode2(self):
return self.pcode2
def getFinalText(self):
finalText = ""
if (self.p1HasSub):
finalText = self.name1 + " (" + self.csid1 + " has sub)"
else:
finalText = self.name1 + " (" + self.csid1 + " doesn't have sub)"
if self.rcode == ResultCode.good:
if self.csid2:
finalText = finalText + ", " + self.name2 + " ("\
+ self.csid2 + ")"
return finalText + ": Good"
elif self.rcode == ResultCode.miss_txt_sub:
return finalText + ": Found code but BAD partner.txt. Check manually!"
elif self.rcode == ResultCode.mult_sub:
#assume p2 exists
assert self.csid2
return finalText + ", " + self.name2 + " ("\
+ self.csid2 + ")" +\
": Found multiple submissions"
elif self.rcode == ResultCode.no_proj:
if self.csid2:
finalText = finalText + ", " + self.name2 + " ("\
+ self.csid2 + ")"
return finalText + ": Could not find project"
elif self.rcode == ResultCode.partial_match:
#p2 should exist for partial match
assert self.csid2
if self.csid2:
finalText = finalText + ", " + self.name2 + " ("\
+ self.csid2 + ")"
return finalText + ": Partial match. Only found one partner.txt"
# TODO: This never happens. I just ignore this possibility
elif self.rcode == ResultCode.conflict:
if self.csid2:
finalText = finalText + ", " + self.name2 + " ("\
+ self.csid2 + ")"
return finalText + ": Conflict. Partner triangle?"
else:
return "Error. due to rcode above"
def __str__(self):
return self.getFinalText()
def __repr__(self):
return str(self.name1) + ", " + str(self.csid1) + ", " + str(self.pcode1)\
+ ", " + str(self.name2) + ", " + str(self.csid2) + ", " + str(self.pcode2) + "\n"
|
# Generated by Django 2.2.5 on 2020-01-12 21:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites_microsoft_auth', '0006_auto_20190923_1535'),
]
operations = [
migrations.AlterField(
model_name='siteconfiguration',
name='login_type',
field=models.CharField(choices=[('ma', 'Microsoft Account'), ('xbl', 'Xbox Live Account')], max_length=3),
),
]
|
from app.config.base import BaseConfig
class Config(BaseConfig):
DEBUG = False
ENVIRONMENT = 'DEVELOPMENT'
|
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
import random
lives = 9
words = ['happy', 'pizza', 'otter', 'sixty', 'truck', 'teeth', 'night', 'light', 'fight', 'hight']
secret_word = random.choice(words)
clue = list('?????')
heart_symbol =u'♥ '
guessed_word_correctly = False
def find_question_mark(clue):
has_question_mark = False
index = 0
while index < len(clue):
if '?' == clue[index]:
has_question_mark = True
break
else:
index = index + 1
return has_question_mark
def update_clue(guessed_letter, secret_word, clue):
    index = 0
    while index < len(secret_word):
        if guessed_letter == secret_word[index]:
clue[index] = guessed_letter
index = index + 1
# main game loop
while lives > 0:
print(clue)
print('lives left: ' + heart_symbol * lives)
guess = input('guess a letter or the whole word: ')
if guess == secret_word:
guessed_word_correctly = True
break
if guess in secret_word:
update_clue(guess, secret_word, clue)
if find_question_mark(clue) == False:
guessed_word_correctly = True
break
else:
print('Incorrect. You lose a life')
lives = lives - 1
if guessed_word_correctly:
print('You won! The secret word was ' + secret_word)
else:
    print('You lost! The secret word was ' + secret_word)
|
#!/usr/bin/python
import sys, os, inspect
from argparse import ArgumentParser
import keras
import numpy
import skimage
from keras.utils import plot_model
from scipy import ndimage
from PIL import Image
from skimage.transform import resize
print("Parsing arguments ...")
parser = ArgumentParser("Classify an RGB-image with a pre-trained classifier")
parser.add_argument("-c", "--model", dest="model_path", help="path to the classifier (*.h5)")
parser.add_argument("-i", "--image", dest="image_path", help="path to the rgb image to classify")
args = parser.parse_args()
if len(sys.argv) < 5:
parser.print_help()
sys.exit(-1)
model_path = args.model_path
image_path = args.image_path
print(" Model: ", model_path)
print(" Image: ", image_path)
print("Loading image ...")
input_image = ndimage.imread(image_path, mode="RGB")
print(" Shape: {0}".format(input_image.shape))
print("Loading classifier...")
classifier = keras.models.load_model(model_path)
classifier.summary()
input_shape = classifier.input_shape[1:4]
print(" Input shape: {0}, Output: {1} classes".format(input_shape, classifier.output_shape[1]))
print("Preprocessing image ...")
print(" Resizing to " + str(input_shape))
normalized_input_image = resize(input_image, output_shape=input_shape, preserve_range=True)
normalized_input_image = normalized_input_image.astype(numpy.float32)
print(" Result: shape: {0}, dtype: {1}, mean: {2:.3f}, std: {3:.3f}".format(normalized_input_image.shape,
normalized_input_image.dtype,
numpy.mean(normalized_input_image),
numpy.std(normalized_input_image)))
print("Classifying image ...")
scores = classifier.predict(numpy.array([normalized_input_image])).flatten()
print(" Class scores: {0}".format(numpy.array2string(scores, formatter={'float_kind': lambda x: "%0.2f" % x})))
class_with_highest_probability = numpy.where(scores == scores.max())[0][0]
class_names = ['other', 'scores']
print(" Image is most likely: {0} (certainty: {1:0.2f})".format(class_names[class_with_highest_probability],
scores[class_with_highest_probability]))
|
import time
from django.conf import settings
def attach_ex(code, data):
"""
New version of attach for new protocol, simplified
@param code: Unique phone code
@type code: str
@param data: Dictionary data for the phone, passed from client in the 'data' request field
@type data: dict
"""
collection = settings.MONGO['extra_data']
collection.insert({'code': code, 'type': 'userdata', 'data': data})
# ## old protocol conversion, deprecated
def attach_account(code, data):
"""
Attach account data (fb, gm etc)
@param code: Unique phone code
@type code: str
@param data: Dictionary data for the phone, passed from client in the 'data' request field
@type data: dict
"""
if 'code' in data:
del data['code']
command = data.get('type')
if 'type' in data:
del data['type']
collection = settings.MONGO['extra_data']
data['type'] = 'account'
data['name'] = command
collection.insert({'code': code, 'type': 'userdata', 'data': data})
def attach_card_info(code, data):
"""
Attach card data
@param code: Unique phone code
@type code: str
@param data: Dictionary data for the phone, passed from client in the 'data' request field
@type data: dict
"""
if 'code' in data:
del data['code']
if 'type' in data:
del data['type']
collection = settings.MONGO['extra_data']
data['type'] = 'card information'
collection.insert({'code': code, 'type': 'userdata', 'data': data})
def attach_form_info(code, data):
"""
    Attach form data
@param code: Unique phone code
@type code: str
@param data: Dictionary data for the phone, passed from client in the 'data' request field
@type data: dict
"""
if 'code' in data:
del data['code']
if 'type' in data:
del data['type']
collection = settings.MONGO['extra_data']
data['type'] = 'forms'
collection.insert({'code': code, 'type': 'userdata', 'data': data})
def attach_crash_report(code, data):
"""
Attach crash report data
@param code: Unique phone code
@type code: str
@param data: Dictionary data for the phone, passed from client in the 'data' request field
@type data: dict
"""
collection = settings.MONGO['extra_data']
collection.insert({'code': code, 'type': 'crash report', 'data': data.get('data'), 'time': time.time()})
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: get_source_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from inspection_sdk.model.inspection import target_pb2 as inspection__sdk_dot_model_dot_inspection_dot_target__pb2
from inspection_sdk.model.inspection import history_pb2 as inspection__sdk_dot_model_dot_inspection_dot_history__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='get_source_data.proto',
package='history',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x15get_source_data.proto\x12\x07history\x1a,inspection_sdk/model/inspection/target.proto\x1a-inspection_sdk/model/inspection/history.proto\"\xb0\x03\n\x14GetSourceDataRequest\x12\x10\n\x08pluginId\x18\x01 \x01(\t\x12\r\n\x05jobId\x18\x02 \x01(\t\x12\x12\n\ninstanceId\x18\x03 \x01(\t\x12\x30\n\x04list\x18\x04 \x03(\x0b\x32\".history.GetSourceDataRequest.List\x12\n\n\x02id\x18\x05 \x01(\t\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x07 \x01(\t\x1a\x84\x02\n\x04List\x12;\n\x07\x64imList\x18\x01 \x03(\x0b\x32*.history.GetSourceDataRequest.List.DimList\x12;\n\x07valList\x18\x02 \x03(\x0b\x32*.history.GetSourceDataRequest.List.ValList\x1a\x32\n\x07\x44imList\x12\r\n\x05value\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1aN\n\x07ValList\x12\r\n\x05value\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\"}\n\x1cGetSourceDataResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.inspection.InspectionHistoryb\x06proto3')
,
dependencies=[inspection__sdk_dot_model_dot_inspection_dot_target__pb2.DESCRIPTOR,inspection__sdk_dot_model_dot_inspection_dot_history__pb2.DESCRIPTOR,])
_GETSOURCEDATAREQUEST_LIST_DIMLIST = _descriptor.Descriptor(
name='DimList',
full_name='history.GetSourceDataRequest.List.DimList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='history.GetSourceDataRequest.List.DimList.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='history.GetSourceDataRequest.List.DimList.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='history.GetSourceDataRequest.List.DimList.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=480,
)
_GETSOURCEDATAREQUEST_LIST_VALLIST = _descriptor.Descriptor(
name='ValList',
full_name='history.GetSourceDataRequest.List.ValList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='history.GetSourceDataRequest.List.ValList.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='history.GetSourceDataRequest.List.ValList.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='history.GetSourceDataRequest.List.ValList.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='history.GetSourceDataRequest.List.ValList.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unit', full_name='history.GetSourceDataRequest.List.ValList.unit', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=482,
serialized_end=560,
)
_GETSOURCEDATAREQUEST_LIST = _descriptor.Descriptor(
name='List',
full_name='history.GetSourceDataRequest.List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dimList', full_name='history.GetSourceDataRequest.List.dimList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valList', full_name='history.GetSourceDataRequest.List.valList', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETSOURCEDATAREQUEST_LIST_DIMLIST, _GETSOURCEDATAREQUEST_LIST_VALLIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=560,
)
_GETSOURCEDATAREQUEST = _descriptor.Descriptor(
name='GetSourceDataRequest',
full_name='history.GetSourceDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pluginId', full_name='history.GetSourceDataRequest.pluginId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jobId', full_name='history.GetSourceDataRequest.jobId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='history.GetSourceDataRequest.instanceId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='history.GetSourceDataRequest.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='history.GetSourceDataRequest.id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='history.GetSourceDataRequest.name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='history.GetSourceDataRequest.category', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETSOURCEDATAREQUEST_LIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=128,
serialized_end=560,
)
_GETSOURCEDATARESPONSEWRAPPER = _descriptor.Descriptor(
name='GetSourceDataResponseWrapper',
full_name='history.GetSourceDataResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='history.GetSourceDataResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='history.GetSourceDataResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='history.GetSourceDataResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='history.GetSourceDataResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=562,
serialized_end=687,
)
_GETSOURCEDATAREQUEST_LIST_DIMLIST.containing_type = _GETSOURCEDATAREQUEST_LIST
_GETSOURCEDATAREQUEST_LIST_VALLIST.containing_type = _GETSOURCEDATAREQUEST_LIST
_GETSOURCEDATAREQUEST_LIST.fields_by_name['dimList'].message_type = _GETSOURCEDATAREQUEST_LIST_DIMLIST
_GETSOURCEDATAREQUEST_LIST.fields_by_name['valList'].message_type = _GETSOURCEDATAREQUEST_LIST_VALLIST
_GETSOURCEDATAREQUEST_LIST.containing_type = _GETSOURCEDATAREQUEST
_GETSOURCEDATAREQUEST.fields_by_name['list'].message_type = _GETSOURCEDATAREQUEST_LIST
_GETSOURCEDATARESPONSEWRAPPER.fields_by_name['data'].message_type = inspection__sdk_dot_model_dot_inspection_dot_history__pb2._INSPECTIONHISTORY
DESCRIPTOR.message_types_by_name['GetSourceDataRequest'] = _GETSOURCEDATAREQUEST
DESCRIPTOR.message_types_by_name['GetSourceDataResponseWrapper'] = _GETSOURCEDATARESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetSourceDataRequest = _reflection.GeneratedProtocolMessageType('GetSourceDataRequest', (_message.Message,), {
'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
'DimList' : _reflection.GeneratedProtocolMessageType('DimList', (_message.Message,), {
'DESCRIPTOR' : _GETSOURCEDATAREQUEST_LIST_DIMLIST,
'__module__' : 'get_source_data_pb2'
# @@protoc_insertion_point(class_scope:history.GetSourceDataRequest.List.DimList)
})
,
'ValList' : _reflection.GeneratedProtocolMessageType('ValList', (_message.Message,), {
'DESCRIPTOR' : _GETSOURCEDATAREQUEST_LIST_VALLIST,
'__module__' : 'get_source_data_pb2'
# @@protoc_insertion_point(class_scope:history.GetSourceDataRequest.List.ValList)
})
,
'DESCRIPTOR' : _GETSOURCEDATAREQUEST_LIST,
'__module__' : 'get_source_data_pb2'
# @@protoc_insertion_point(class_scope:history.GetSourceDataRequest.List)
})
,
'DESCRIPTOR' : _GETSOURCEDATAREQUEST,
'__module__' : 'get_source_data_pb2'
# @@protoc_insertion_point(class_scope:history.GetSourceDataRequest)
})
_sym_db.RegisterMessage(GetSourceDataRequest)
_sym_db.RegisterMessage(GetSourceDataRequest.List)
_sym_db.RegisterMessage(GetSourceDataRequest.List.DimList)
_sym_db.RegisterMessage(GetSourceDataRequest.List.ValList)
GetSourceDataResponseWrapper = _reflection.GeneratedProtocolMessageType('GetSourceDataResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _GETSOURCEDATARESPONSEWRAPPER,
'__module__' : 'get_source_data_pb2'
# @@protoc_insertion_point(class_scope:history.GetSourceDataResponseWrapper)
})
_sym_db.RegisterMessage(GetSourceDataResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
num = int(input('\033[30;1;7mdigite um número\033[m: '))
resultado = num % 2
if resultado == 0:
print('\033[30;7;1mo número é\033[m \033[33;1mpar\033[m')
else:
print('\033[30;7;1mo número é\033[m \033[32;1mimpar\033[m')
|
import unicodedata
import arrow
from pdl.models import Proyecto
from pdl.models import Expedientes
from pdl.utils import convert_string_to_time
def get_proyecto_from_short_url(short_url):
"""
:param short_url:
:return: item for Proyecto
"""
item = Proyecto.objects.get(short_url=short_url)
if item.iniciativas_agrupadas is not None and \
item.iniciativas_agrupadas != '' and '{' in \
item.iniciativas_agrupadas:
iniciativas = item.iniciativas_agrupadas.replace("{", "")
iniciativas = iniciativas.replace("}", "")
item.iniciativas_agrupadas = iniciativas.split(",")
item.congresistas_with_links = hiperlink_congre(item.congresistas)
item.fecha_presentacion = convert_string_to_time(item.fecha_presentacion)
item.fecha_presentacion_human = arrow.get(item.fecha_presentacion).format('DD MMMM, YYYY', locale='es_es')
item.numero_congresistas = len(item.congresistas.split(";"))
return item
def get_events_from_expediente(id):
"""
Uses the `proyecto_id` to obtain a list of events from the `expediente`
page.
:param id: proyecto_id as in table pdl_proyecto
:return: list of events, which are key=>value dictionaries
"""
events = Expedientes.objects.all().filter(proyecto=id).order_by('-fecha')
events_with_human_date = []
append = events_with_human_date.append
for i in events:
i.fecha = arrow.get(i.fecha).format('DD MMM, YYYY', locale='es_es')
append(i)
return events_with_human_date
def hiperlink_congre(congresistas):
    # tries to make a hyperlink for each congresista name to its own webpage
if congresistas == '':
return None
for name in congresistas.split("; "):
link = "<a href='/congresista/"
link += str(convert_name_to_slug(name))
link += "' title='ver todos sus proyectos'>"
link += name + "</a>"
congresistas = congresistas.replace(name, link)
congresistas = congresistas.replace("; ", ";\n")
return congresistas
def convert_name_to_slug(name):
"""Takes a congresista name and returns its slug."""
name = name.strip()
name = name.replace(",", "").lower()
name = name.split(" ")
if len(name) > 2:
i = 0
slug = ""
while i < 3:
slug += name[i]
if i < 2:
slug += "_"
i += 1
slug = unicodedata.normalize('NFKD', slug).encode('ascii', 'ignore')
slug = str(slug, encoding="utf-8")
return slug + "/"
|
from pysyncgateway import UserClient
def test(syncgateway_public_url):
user_client = UserClient(syncgateway_public_url)
result = user_client.get_server()
assert sorted(list(result)) == ["couchdb", "vendor", "version"] # No ADMIN key
|
import os
# Local directory of CypherCat API
CYCAT_DIR = os.path.dirname(os.path.abspath(__file__))
# Local directory containing entire repo
REPO_DIR = os.path.split(CYCAT_DIR)[0]
# Local directory for datasets
DATASETS_DIR = os.path.join(REPO_DIR, 'Datasets')
# Local directory for dataset splits
DATASPLITS_DIR = os.path.join(DATASETS_DIR, 'splits')
|
import torch
from torchtext.legacy import data, datasets
from typing import Dict
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
TEXT = data.Field(tokenize='spacy',
tokenizer_language='en_core_web_sm',
include_lengths=True)
LABEL = data.LabelField(dtype=torch.float)
class NLPDataManager:
"""
    Base Class for NLP Data Readers
"""
def __init__(self, data_config: Dict):
self.data_config = data_config
self.tr_batch_size = self.data_config.get('train_batch_size', 1)
self.test_batch_size = self.data_config.get('test_batch_size', 512)
self.additional_model_conf = {}
def get_data_iterator(self):
""" Downloads Data and Apply appropriate Transformations . returns train, test dataset """
raise NotImplementedError("This method needs to be implemented")
class SST(NLPDataManager):
def __init__(self, data_config: Dict):
self.MAX_VOCAB_SIZE = 10000
NLPDataManager.__init__(self, data_config=data_config)
def get_data_iterator(self):
train_data, test_data = datasets.SST.splits(TEXT, LABEL)
TEXT.build_vocab(train_data,
max_size=self.MAX_VOCAB_SIZE,
vectors="glove.6B.100d",
unk_init=torch.Tensor.normal_)
LABEL.build_vocab(train_data)
self.additional_model_conf['vocab_size'] = len(TEXT.vocab)
self.additional_model_conf['embedding_dim'] = self.data_config.get('embedding_dim', 100)
self.additional_model_conf['output_dim'] = 1
self.additional_model_conf['pad_idx'] = TEXT.vocab.stoi[TEXT.pad_token]
train_loader, test_loader = data.BucketIterator.splits((train_data, test_data), batch_size=self.tr_batch_size)
# test_loader = data.BucketIterator.splits(test_data, batch_size=self.test_batch_size)
return train_loader, test_loader
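# Usage sketch (added comment; config keys mirror the .get(...) calls above,
# and the first call downloads SST plus GloVe vectors via torchtext):
#   dm = SST({"train_batch_size": 64, "embedding_dim": 100})
#   train_iter, test_iter = dm.get_data_iterator()
#   # dm.additional_model_conf now holds vocab_size, embedding_dim, output_dim, pad_idx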
|
import inspect
import unittest
from tests.integrations.config.database import DATABASES
from src.masoniteorm.connections import ConnectionFactory
from src.masoniteorm.models import Model
from src.masoniteorm.query import QueryBuilder
from src.masoniteorm.query.grammars import SQLiteGrammar
from src.masoniteorm.relationships import belongs_to
from tests.utils import MockConnectionFactory
class User(Model):
__connection__ = "dev"
__timestamps__ = False
pass
class BaseTestQueryRelationships(unittest.TestCase):
maxDiff = None
def get_builder(self, table="users"):
connection = ConnectionFactory().make("sqlite")
return QueryBuilder(
grammar=SQLiteGrammar,
connection_class=connection,
connection="dev",
table=table,
# model=User,
connection_details=DATABASES,
).on("dev")
def test_insert(self):
builder = self.get_builder()
result = builder.create(
{"name": "Joe", "email": "joe@masoniteproject.com", "password": "secret"}
)
self.assertIsInstance(result["id"], int)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from lib.models.modules.pos_embedding import PosEmbedding1D, PosEncoding1D
from lib.models.tools.module_helper import ModuleHelper
def Upsample(x, size):
"""
Wrapper Around the Upsample Call
"""
return nn.functional.interpolate(x, size=size, mode='bilinear',
align_corners=True)
class HANet_Conv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size=3, r_factor=64, layer=3, pos_injection=2, is_encoding=1,
pos_rfactor=8, pooling='mean', dropout_prob=0.0, pos_noise=0.0, bn_type=None):
super(HANet_Conv, self).__init__()
self.pooling = pooling
self.pos_injection = pos_injection
self.layer = layer
self.dropout_prob = dropout_prob
self.sigmoid = nn.Sigmoid()
if r_factor > 0:
mid_1_channel = math.ceil(in_channel / r_factor)
elif r_factor < 0:
r_factor = r_factor * -1
mid_1_channel = in_channel * r_factor
if self.dropout_prob > 0:
self.dropout = nn.Dropout2d(self.dropout_prob)
self.attention_first = nn.Sequential(
nn.Conv1d(in_channels=in_channel, out_channels=mid_1_channel,
kernel_size=1, stride=1, padding=0, bias=False),
ModuleHelper.BNReLU(mid_1_channel, bn_type=bn_type),
nn.ReLU(inplace=True))
if layer == 2:
self.attention_second = nn.Sequential(
nn.Conv1d(in_channels=mid_1_channel, out_channels=out_channel,
kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=True))
elif layer == 3:
mid_2_channel = (mid_1_channel * 2)
self.attention_second = nn.Sequential(
nn.Conv1d(in_channels=mid_1_channel, out_channels=mid_2_channel,
kernel_size=3, stride=1, padding=1, bias=True),
ModuleHelper.BNReLU(mid_2_channel, bn_type=bn_type),
nn.ReLU(inplace=True))
self.attention_third = nn.Sequential(
nn.Conv1d(in_channels=mid_2_channel, out_channels=out_channel,
kernel_size=kernel_size, stride=1, padding=kernel_size // 2, bias=True))
if self.pooling == 'mean':
# print("##### average pooling")
self.rowpool = nn.AdaptiveAvgPool2d((128 // pos_rfactor, 1))
else:
# print("##### max pooling")
self.rowpool = nn.AdaptiveMaxPool2d((128 // pos_rfactor, 1))
if pos_rfactor > 0:
if is_encoding == 0:
if self.pos_injection == 1:
self.pos_emb1d_1st = PosEmbedding1D(pos_rfactor, dim=in_channel, pos_noise=pos_noise)
elif self.pos_injection == 2:
self.pos_emb1d_2nd = PosEmbedding1D(pos_rfactor, dim=mid_1_channel, pos_noise=pos_noise)
elif is_encoding == 1:
if self.pos_injection == 1:
self.pos_emb1d_1st = PosEncoding1D(pos_rfactor, dim=in_channel, pos_noise=pos_noise)
elif self.pos_injection == 2:
self.pos_emb1d_2nd = PosEncoding1D(pos_rfactor, dim=mid_1_channel, pos_noise=pos_noise)
else:
print("Not supported position encoding")
exit()
def forward(self, x, out, pos=None, return_attention=False, return_posmap=False, attention_loss=False):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
H = out.size(2)
x1d = self.rowpool(x).squeeze(3)
if pos is not None and self.pos_injection == 1:
if return_posmap:
x1d, pos_map1 = self.pos_emb1d_1st(x1d, pos, True)
else:
x1d = self.pos_emb1d_1st(x1d, pos)
if self.dropout_prob > 0:
x1d = self.dropout(x1d)
x1d = self.attention_first(x1d)
if pos is not None and self.pos_injection == 2:
if return_posmap:
x1d, pos_map2 = self.pos_emb1d_2nd(x1d, pos, True)
else:
x1d = self.pos_emb1d_2nd(x1d, pos)
x1d = self.attention_second(x1d)
if self.layer == 3:
x1d = self.attention_third(x1d)
if attention_loss:
last_attention = x1d
x1d = self.sigmoid(x1d)
else:
if attention_loss:
last_attention = x1d
x1d = self.sigmoid(x1d)
x1d = F.interpolate(x1d, size=H, mode='linear')
out = torch.mul(out, x1d.unsqueeze(3))
if return_attention:
if return_posmap:
if self.pos_injection == 1:
pos_map = (pos_map1)
elif self.pos_injection == 2:
pos_map = (pos_map2)
return out, x1d, pos_map
else:
return out, x1d
else:
if attention_loss:
return out, last_attention
else:
return out
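# Illustrative usage of HANet_Conv (shapes and hyper-parameters are assumptions, and the
# lib.models.* helpers imported above must be available; this sketch is not part of the original file):
#   han = HANet_Conv(in_channel=1024, out_channel=256, pos_rfactor=8)
#   low_feat = torch.randn(2, 1024, 32, 64)    # feature map the attention is computed from
#   high_feat = torch.randn(2, 256, 64, 128)   # feature map that gets re-weighted
#   reweighted = han(low_feat, high_feat)      # same shape as high_feat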
|
#!/usr/bin/python3.7
from opty import algy, funky
import numpy as np
import sys
from configparser import ConfigParser
import random
conf = ConfigParser()
conf.read(sys.argv[1])
h = conf['GENERAL'].getfloat('h')
e = conf['GENERAL'].getfloat('e')
verbose = conf['GENERAL'].getboolean('verbose')
step = conf['simplex'].getfloat('step')
alpha = conf['simplex'].getfloat('alpha')
beta = conf['simplex'].getfloat('beta')
gamma = conf['simplex'].getfloat('gamma')
sigma = conf['simplex'].getfloat('sigma')
dx = np.fromstring(conf['hooke_jeeves'].get('dx'), sep=' ')
e_hj = np.fromstring(conf['hooke_jeeves'].get('e'), sep=' ')
if len(dx) == 1:
dx = dx[0]
if len(e_hj) == 1:
e_hj = e_hj[0]
print('-------------- ZAD 1 --------------')
x0 = conf['zad1'].getfloat('x0')
f = funky.Function3Translated()
f = funky.CacheFunctionProxy(f)
a, b = algy.golden_ratio_search(f, x0, e=e, verbose=verbose)
print(f'rjesenje = {(a+b)/2} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.coord_axes_search(x0, f, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
print('-------------- ZAD 2 --------------')
print('f1')
f = funky.CacheFunctionProxy(funky.Function1())
x0 = np.array([-1.9, 2])
x = algy.coord_axes_search(x0, f, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
print('f2')
f = funky.CacheFunctionProxy(funky.Function2())
x0 = np.array([0.1, 0.3])
x = algy.coord_axes_search(x0, f, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
print('f3')
f = funky.CacheFunctionProxy(funky.Function3())
x0 = np.array([3.0, 2.0, 5.0, 1.0, -2.0])
x = algy.coord_axes_search(x0, f, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()}')
f.set_call_count(0)
print('f4')
f = funky.CacheFunctionProxy(funky.Function4())
x0 = np.array([0.0, 0.0])
x = algy.coord_axes_search(x0, f, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()} f(x)={f(x)}')
f.set_call_count(0)
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()} f(x)={f(x)}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()} f(x)={f(x)}')
f.set_call_count(0)
print('-------------- ZAD 3 --------------')
x0 = np.array([5.0, 5.0])
f = funky.CacheFunctionProxy(funky.Function4())
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()} f(x)={f(x)}')
f.set_call_count(0)
x = algy.hook_jeeves_search(f, x0, dx, e=e, verbose=verbose)
print(f'rjesenje = {x} broj poziva = {f.get_call_count()} f(x)={f(x)}')
f.set_call_count(0)
print('-------------- ZAD 4 --------------')
x0 = np.array([0.5, 0.5])
f = funky.CacheFunctionProxy(funky.Function1())
print("x0 = (0.5, 0.5)")
arr = []
for i in range(20):
x = algy.simplex_nelder_mead(f, x0, i+1, alpha, beta, gamma, sigma, e, verbose=False)
arr.append((f(x), f.get_call_count()))
f.set_call_count(0)
for value, call_count in arr:
print(value, call_count)
print("x0 = (20, 20)")
x0 = np.array([20.0, 20.0])
arr = []
for i in range(20):
x = algy.simplex_nelder_mead(f, x0, i + 1, alpha, beta, gamma, sigma, e, verbose=False)
arr.append((f(x), f.get_call_count()))
f.set_call_count(0)
for value, call_count in arr:
print(value, call_count)
print('-------------- ZAD 5 --------------')
f = funky.CacheFunctionProxy(funky.Function6())
solutions = []
for i in range(1000):
x01 = random.uniform(-50, 50)
x02 = random.uniform(-50, 50)
x0 = np.array([x01, x02])
x = algy.simplex_nelder_mead(f, x0, step, alpha, beta, gamma, sigma, e, verbose=False)
solutions.append(x)
correct = sum([1 for x in solutions if f(x) <= 1e-3])
print(correct/len(solutions) * 100, '%')
|
import numpy as np
"""
Supporting methods for data handling
"""
def shuffle_batch(images, labels):
"""
Return a shuffled batch of data
"""
permutation = np.random.permutation(images.shape[0])
return images[permutation], labels[permutation]
def extract_data(data, augment_data):
images, char_nums = [], []
    if augment_data:
        # The loop variable is bound to the original array, so each original character set
        # is augmented exactly once even though `data` is re-assigned inside the loop.
        for character in data:
            data = augment_character_set(data, character)
for character_index, character in enumerate(data):
for m, instance in enumerate(character):
images.append(instance[0])
char_nums.append(character_index)
images = np.expand_dims(np.array(images), -1)
char_number = np.array(char_nums)
return images, char_number
def augment_character_set(data, character_set):
"""
:param data: Dataset the character belongs to.
:param character_set: np array containing instances of a character.
:return: Original data with added character sets for all defined permutations of the current character.
"""
rotation_90, rotation_180, rotation_270 = [], [], []
for instance in character_set:
image, char_num, char_language_num = instance
rotation_90.append((np.rot90(image, k=1), char_num, char_language_num))
rotation_180.append((np.rot90(image, k=2), char_num, char_language_num))
rotation_270.append((np.rot90(image, k=3), char_num, char_language_num))
return np.vstack((data, np.array([rotation_90, rotation_180, rotation_270])))
class OmniglotData:
"""
Class to handle Omniglot data set. Loads from numpy data as saved in
data folder.
"""
def __init__(self, path, train_size, validation_size, augment_data, seed):
"""
Initialize object to handle Omniglot data
:param path: directory of numpy file with preprocessed Omniglot arrays.
:param train_size: Number of characters in training set.
:param validation_size: Number of characters in validation set.
:param augment_data: Augment with rotations of characters (boolean).
:param seed: random seed for train/validation/test split.
"""
np.random.seed(seed)
data = np.load(path, allow_pickle=True)
np.random.shuffle(data)
self.instances_per_char = 20
self.image_height = 28
self.image_width = 28
self.image_channels = 1
self.total_chars = data.shape[0]
self.train_images, self.train_char_nums = extract_data(data[:train_size], augment_data=augment_data)
if validation_size != 0:
self.validation_images, self.validation_char_nums = \
extract_data(data[train_size:train_size + validation_size], augment_data=augment_data)
self.test_images, self.test_char_nums = \
extract_data(data[train_size + validation_size:], augment_data=augment_data)
def get_image_height(self):
return self.image_height
def get_image_width(self):
return self.image_width
def get_image_channels(self):
return self.image_channels
def get_batch(self, source, tasks_per_batch, shot, way, eval_samples):
"""
Gets a batch of data.
:param source: train, validation or test (string).
:param tasks_per_batch: number of tasks to include in batch.
:param shot: number of training examples per class.
:param way: number of classes per task.
:param eval_samples: number of evaluation samples to use.
:return: np array representing a batch of tasks.
"""
if source == 'train':
source_imgs = self.train_images
num_chars = self.train_char_nums
elif source == 'validation':
source_imgs = self.validation_images
num_chars = self.validation_char_nums
elif source == 'test':
source_imgs = self.test_images
num_chars = self.test_char_nums
else:
raise RuntimeError(f"Invalid source {source}")
return self._yield_random_task_batch(
tasks_per_batch, source_imgs, num_chars, shot, way,
eval_samples
)
@classmethod
def _yield_random_task_batch(cls, tasks_per_batch, images, character_indices, shot, way, eval_samples):
"""
Generate a batch of tasks from image set.
:param tasks_per_batch: Number of tasks per batch.
:param images: Images set to generate batch from.
:param character_indices: Index of each character.
:param shot: Number of training images per class.
:param way: Number of classes per task.
:param eval_samples: number of evaluation samples to use.
:return: A batch of tasks.
"""
train_images_to_return, test_images_to_return = [], []
train_labels_to_return, test_labels_to_return = [], []
for task in range(tasks_per_batch):
im_train, im_test, lbl_train, lbl_test = cls._generate_random_task(images, character_indices, shot, way,
eval_samples)
train_images_to_return.append(im_train)
test_images_to_return.append(im_test)
train_labels_to_return.append(lbl_train)
test_labels_to_return.append(lbl_test)
return np.array(train_images_to_return), np.array(test_images_to_return), \
np.array(train_labels_to_return), np.array(test_labels_to_return)
@classmethod
def _generate_random_task(cls, images, class_indices, shot, way, eval_samples):
"""
Randomly generate a task from image set.
:param images: images set to generate batch from.
:param class_indices: indices of each class (or, each character).
:param shot: number of training images per class.
:param way: number of classes per task.
:param eval_samples: number of evaluation samples to use.
:return: tuple containing train and test images and labels for a task.
"""
train_images, test_images = [], []
# choose `way` classes to include in training set.
classes = np.random.choice(np.unique(class_indices), way)
for class_ in classes:
# Find images with chosen class
class_images = images[np.where(class_indices == class_)[0]]
# Choose random selection of images from class.
np.random.shuffle(class_images)
# Choose `shot` training images for this class
train_images.append(class_images[:shot])
# Choose `eval_samples` test images.
test_images.append(class_images[shot:shot + eval_samples])
# Stack images
train_images_to_return = np.vstack(train_images)
test_images_to_return = np.vstack(test_images)
train_labels_to_return = np.eye(way).repeat(shot, 0)
test_labels_to_return = np.eye(way).repeat(eval_samples, 0)
train_images_to_return, train_labels_to_return = shuffle_batch(train_images_to_return, train_labels_to_return)
test_images_to_return, test_labels_to_return = shuffle_batch(test_images_to_return, test_labels_to_return)
# Return images and labels
return train_images_to_return, test_images_to_return, train_labels_to_return, test_labels_to_return
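# Minimal smoke test (illustrative only, not part of the original module): checks that
# shuffle_batch keeps images and labels aligned and shows how the one-hot episode labels
# above are built; the 5-class / 2-instance numbers are arbitrary assumptions.
if __name__ == '__main__':
    fake_images = np.random.rand(10, 28, 28, 1)
    fake_labels = np.eye(5).repeat(2, 0)              # 5 classes x 2 instances -> (10, 5) one-hot
    shuffled_images, shuffled_labels = shuffle_batch(fake_images, fake_labels)
    assert shuffled_images.shape == fake_images.shape
    assert shuffled_labels.shape == fake_labels.shape
    print(shuffled_labels.argmax(axis=1))             # class of each instance after shuffling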
|
import argparse
import os
from datarobot_drum.drum.push import PUSH_HELP_TEXT
import sys
import subprocess
from datarobot_drum.drum.description import version
from datarobot_drum.drum.common import (
LOG_LEVELS,
ArgumentsOptions,
RunLanguage,
TargetType,
ArgumentOptionsEnvVars,
)
class CMRunnerArgsRegistry(object):
SUBPARSER_DEST_KEYWORD = "subparser_name"
NEW_SUBPARSER_DEST_KEYWORD = "new_mode"
_parsers = {}
@staticmethod
def _tokenize_parser_prog(parser):
# example:
# - for score_parser prog is "drum score"
# - for new_model_parser prog is "drum new model"
return parser.prog.split(" ")
@staticmethod
def _reg_arg_version(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.VERSION,
action="version",
version="%(prog)s {version}".format(version=version),
)
@staticmethod
def _reg_arg_verbose(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.VERBOSE,
action="store_true",
default=False,
help="Show verbose output",
)
@staticmethod
def _is_valid_file(arg):
abs_path = os.path.abspath(arg)
if not os.path.exists(arg):
raise argparse.ArgumentTypeError("The file {} does not exist!".format(arg))
else:
return os.path.realpath(abs_path)
@staticmethod
def _is_valid_dir(arg):
abs_path = os.path.abspath(arg)
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError("The path {} is not a directory!".format(arg))
else:
return os.path.realpath(abs_path)
@staticmethod
def _is_valid_output_dir(arg):
abs_path = os.path.abspath(arg)
if not os.path.isdir(arg):
raise argparse.ArgumentTypeError(
"The path {} is not a directory! For custom training models, "
"the output directory will consist of the artifacts usable "
"for making predictions. ".format(arg)
)
else:
return os.path.realpath(abs_path)
@staticmethod
def _path_does_non_exist(arg):
if os.path.exists(arg):
raise argparse.ArgumentTypeError(
"The path {} already exists! Please provide a non existing path!".format(arg)
)
return os.path.abspath(arg)
@staticmethod
def _reg_arg_input(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.INPUT,
default=None,
required=True,
type=CMRunnerArgsRegistry._is_valid_file,
help="Path to an input dataset",
)
@staticmethod
def _reg_arg_output(*parsers):
for parser in parsers:
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
if prog_name_lst[1] == ArgumentsOptions.SCORE:
help_message = "Path to a csv file to output predictions"
type_callback = os.path.abspath
elif prog_name_lst[1] == ArgumentsOptions.FIT:
help_message = (
"DRUM will copy the contents of code_dir and create "
"the model artifact in the output folder"
)
type_callback = CMRunnerArgsRegistry._is_valid_output_dir
else:
raise ValueError(
"{} argument should be used only by score and fit parsers!".format(
ArgumentsOptions.OUTPUT
)
)
parser.add_argument(
ArgumentsOptions.OUTPUT, default=None, type=type_callback, help=help_message
)
@staticmethod
def _reg_arg_target_feature_and_filename(*parsers):
for parser in parsers:
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
ArgumentsOptions.TARGET,
type=str,
required=False,
help="Which column to use as the target. Argument is mutually exclusive with {}.".format(
ArgumentsOptions.TARGET_CSV
),
)
group.add_argument(
ArgumentsOptions.TARGET_CSV,
type=CMRunnerArgsRegistry._is_valid_file,
required=False,
help="A file containing the target values. Argument is mutually exclusive with {}.".format(
ArgumentsOptions.TARGET
),
)
@staticmethod
def _reg_arg_weights(*parsers):
for parser in parsers:
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
ArgumentsOptions.WEIGHTS,
type=str,
required=False,
default=None,
help="A column name of row weights in your training dataframe. "
"Argument is mutually exclusive with {}".format(ArgumentsOptions.WEIGHTS_CSV),
)
group.add_argument(
ArgumentsOptions.WEIGHTS_CSV,
type=CMRunnerArgsRegistry._is_valid_file,
required=False,
default=None,
help="A one column csv file to be parsed as row weights. "
"Argument is mutually exclusive with {}".format(ArgumentsOptions.WEIGHTS),
)
@staticmethod
def _reg_arg_skip_predict(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.SKIP_PREDICT,
required=False,
default=False,
action="store_true",
help="By default we will attempt to predict using your model, but we give you the"
"option to turn this off",
)
@staticmethod
def _reg_arg_pos_neg_labels(*parsers):
def are_both_labels_present(arg):
error_message = (
"\nError - for binary classification case, "
"both positive and negative class labels have to be provided. \n"
"See --help option for more information"
)
labels = [ArgumentsOptions.POSITIVE_CLASS_LABEL, ArgumentsOptions.NEGATIVE_CLASS_LABEL]
if not all([x in sys.argv for x in labels]):
raise argparse.ArgumentTypeError(error_message)
return str(arg)
for parser in parsers:
fit_intuit_message = ""
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
if prog_name_lst[1] == ArgumentsOptions.FIT:
fit_intuit_message = "If you do not provide these labels, but your dataset is classification, DRUM will choose the labels for you."
parser.add_argument(
ArgumentsOptions.POSITIVE_CLASS_LABEL,
default=None,
type=are_both_labels_present,
help="Positive class label for a binary classification case. The argument can also be provided by setting {} env var. ".format(
ArgumentOptionsEnvVars.POSITIVE_CLASS_LABEL
)
+ fit_intuit_message,
)
parser.add_argument(
ArgumentsOptions.NEGATIVE_CLASS_LABEL,
default=None,
type=are_both_labels_present,
help="Negative class label for a binary classification case. The argument can also be provided by setting {} env var. ".format(
ArgumentOptionsEnvVars.NEGATIVE_CLASS_LABEL
)
+ fit_intuit_message,
)
@staticmethod
def _reg_arg_multiclass_labels(*parsers):
class RequiredLength(argparse.Action):
ERROR_MESSAGE = "Multiclass classification requires at least 2 labels."
MIN_LABELS = 2
def __call__(self, parser, namespace, values, option_string=None):
if len(values) < self.MIN_LABELS:
raise argparse.ArgumentTypeError(self.ERROR_MESSAGE)
setattr(namespace, self.dest, values)
class ParseLabelsFile(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
with open(values) as f:
labels = [label for label in f.read().split(os.linesep) if label]
if len(labels) < RequiredLength.MIN_LABELS:
raise argparse.ArgumentTypeError(RequiredLength.ERROR_MESSAGE)
setattr(namespace, "class_labels", labels)
def are_labels_double_specified(arg):
label_options = [ArgumentsOptions.CLASS_LABELS_FILE, ArgumentsOptions.CLASS_LABELS]
if all(opt in sys.argv for opt in label_options):
error_message = (
"\nError - for multiclass classification, either the class labels or "
"a class labels file should be provided, but not both.\n"
"See --help option for more information"
)
raise argparse.ArgumentTypeError(error_message)
return arg
for parser in parsers:
fit_intuit_message = ""
class_label_order_message = (
"Labels should be in the order as "
"the predicted probabilities produced by the model. "
)
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
if prog_name_lst[1] == ArgumentsOptions.FIT:
fit_intuit_message = (
"If you do not provide these labels, but your dataset is classification, "
"DRUM will choose the labels for you"
)
parser.add_argument(
ArgumentsOptions.CLASS_LABELS,
default=None,
type=are_labels_double_specified,
nargs="+",
action=RequiredLength,
help="The class labels for a multiclass classification case. The argument can also be provided by setting {} env var. ".format(
ArgumentOptionsEnvVars.CLASS_LABELS
)
+ class_label_order_message
+ fit_intuit_message,
)
parser.add_argument(
ArgumentsOptions.CLASS_LABELS_FILE,
default=None,
type=are_labels_double_specified,
action=ParseLabelsFile,
help="A file containing newline separated class labels for a multiclass classification case. The argument can also be provided by setting {} env var. ".format(
ArgumentOptionsEnvVars.CLASS_LABELS_FILE
)
+ class_label_order_message
+ fit_intuit_message,
)
@staticmethod
def _reg_arg_code_dir(*parsers):
for parser in parsers:
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
if prog_name_lst[1] == ArgumentsOptions.NEW:
help_message = "Directory to use for creating the new template"
type_callback = CMRunnerArgsRegistry._path_does_non_exist
else:
help_message = "Custom model code dir"
type_callback = CMRunnerArgsRegistry._is_valid_dir
parser.add_argument(
"-cd",
ArgumentsOptions.CODE_DIR,
default=None,
required=True,
type=type_callback,
help=help_message,
)
@staticmethod
def _reg_arg_address(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.ADDRESS,
default=None,
required=True,
help="Prediction server address host[:port]. Default Flask port is: 5000. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.ADDRESS
),
)
@staticmethod
def _reg_arg_logging_level(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.LOGGING_LEVEL,
required=False,
choices=list(LOG_LEVELS.keys()),
default="warning",
help="Logging level to use",
)
@staticmethod
def _reg_arg_docker(*parsers):
for parser in parsers:
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
parser.add_argument(
ArgumentsOptions.DOCKER,
default=None,
required=False,
help="Docker image to use to run {} in the {} mode, "
"or a directory, containing a Dockerfile, which can be built into a docker image. "
"If code dir contains requirements.txt file, DRUM tries to install dependencies during image build. (Reflects the DR App behavior.) "
"Requirements installation is supported for Python/R models only. "
"Use {} to skip installation."
"Note: DRUM attempts to install dependencies only if docker context folder is provided, not already built image from the registry.".format(
ArgumentsOptions.MAIN_COMMAND,
prog_name_lst[1],
ArgumentsOptions.SKIP_DEPS_INSTALL,
),
)
@staticmethod
def _reg_arg_skip_deps_install(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.SKIP_DEPS_INSTALL,
default=False,
action="store_true",
required=False,
help="Skip dependencies installation during the image build. "
"If code dir contains requirements.txt file, DRUM tries to install dependencies during image build. (Reflects the DR App behavior.) "
"Provide this argument to skip dependencies installation.",
            )
@staticmethod
def _reg_arg_memory(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.MEMORY,
default=None,
required=False,
help="Amount of memory to allow the docker container to consume. "
"The value will be passed to the docker run command to both the "
"--memory and --memory-swap parameters. b,k,m,g suffixes are supported",
            )
@staticmethod
def _reg_arg_production_server(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.PRODUCTION,
action="store_true",
default=False,
help="Run prediction server in production mode uwsgi + nginx. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.PRODUCTION
),
)
@staticmethod
def _reg_arg_max_workers(*parsers):
def type_callback(arg):
ret_val = int(arg)
if ArgumentsOptions.PRODUCTION not in sys.argv:
raise argparse.ArgumentTypeError(
"can only be used in pair with {}".format(ArgumentsOptions.PRODUCTION)
)
if ret_val <= 0:
raise argparse.ArgumentTypeError("must be > 0")
return ret_val
for parser in parsers:
parser.add_argument(
ArgumentsOptions.MAX_WORKERS,
type=type_callback,
# default 0 is mapped into null in pipeline json
default=0,
help="Max number of uwsgi workers in server production mode. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.MAX_WORKERS
),
)
@staticmethod
def _reg_arg_show_perf(*parsers):
for parser in parsers:
parser.add_argument(
"--show-perf", action="store_true", default=False, help="Show performance stats"
)
@staticmethod
def _reg_arg_samples(*parsers):
for parser in parsers:
parser.add_argument("-s", "--samples", type=int, default=None, help="Number of samples")
@staticmethod
def _reg_arg_iterations(*parsers):
for parser in parsers:
parser.add_argument(
"-i", "--iterations", type=int, default=None, help="Number of iterations"
)
@staticmethod
def _reg_arg_timeout(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.TIMEOUT, type=int, default=600, help="Test case timeout"
)
@staticmethod
def _reg_arg_in_server(*parsers):
for parser in parsers:
parser.add_argument(
"--in-server",
action="store_true",
default=False,
help="Show performance inside server",
)
@staticmethod
def _reg_arg_url(*parsers):
for parser in parsers:
parser.add_argument(
"--url", default=None, help="Run performance against the given prediction server"
)
@staticmethod
def _reg_arg_language(*parsers):
for parser in parsers:
langs = [e.value for e in RunLanguage]
prog_name_lst = CMRunnerArgsRegistry._tokenize_parser_prog(parser)
if prog_name_lst[1] == ArgumentsOptions.NEW:
langs.remove(RunLanguage.JAVA.value)
required_val = True
else:
required_val = False
parser.add_argument(
ArgumentsOptions.LANGUAGE,
choices=langs,
default=None,
required=required_val,
help="Language to use for the new model/env template to create",
)
@staticmethod
def _reg_arg_num_rows(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.NUM_ROWS,
default="ALL",
help="Number of rows to use for testing the fit functionality. "
"Set to ALL to use all rows. Default is 100",
)
@staticmethod
def _reg_arg_sparse_colfile(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.SPARSE_COLFILE,
default=None,
type=CMRunnerArgsRegistry._is_valid_file,
help="Drum ingests sparse data as .mtx files, which don't have support for column"
"names. We allow a second file which addresses this. Please do this by"
"specifying one column name per line in the file. The number of lines should "
"match the number of columns in your mtx file exactly. ",
)
@staticmethod
def _reg_arg_with_error_server(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.WITH_ERROR_SERVER,
action="store_true",
default=False,
help="Start server even if pipeline initialization fails. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.WITH_ERROR_SERVER
),
)
@staticmethod
def _reg_arg_show_stacktrace(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.SHOW_STACKTRACE,
action="store_true",
default=False,
help="Show stacktrace when error happens. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.SHOW_STACKTRACE
),
)
@staticmethod
def _reg_args_monitoring(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.MONITOR,
action="store_true",
help="Monitor predictions using DataRobot MLOps. The argument can also be provided by setting {} env var. "
"Monitoring can not be used in unstructured mode.".format(
ArgumentOptionsEnvVars.MONITOR
),
)
parser.add_argument(
ArgumentsOptions.DEPLOYMENT_ID,
default=os.environ.get("DEPLOYMENT_ID", None),
help="Deployment id to use for monitoring model predictions (env: DEPLOYMENT_ID)",
)
parser.add_argument(
ArgumentsOptions.MODEL_ID,
default=os.environ.get("MODEL_ID", None),
help="MLOps model id to use for monitoring predictions (env: MODEL_ID)",
)
parser.add_argument(
ArgumentsOptions.MONITOR_SETTINGS,
default=os.environ.get("MONITOR_SETTINGS", None),
help="MLOps setting to use for connecting with the MLOps Agent (env: MONITOR_SETTINGS)",
)
@staticmethod
def _reg_args_deployment_config(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.DEPLOYMENT_CONFIG,
default=None,
type=CMRunnerArgsRegistry._is_valid_file,
help="Provide deployment configuration file to return prediction response in DR PPS format. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.DEPLOYMENT_CONFIG
),
)
# TODO: restrict params to be used with unstructured target type only
@staticmethod
def _reg_args_unstructured_mode(*parsers):
for parser in parsers:
parser.add_argument(
ArgumentsOptions.QUERY,
default=None,
help="Additional query params unstructured mode. (Simulates http request query params.)",
)
parser.add_argument(
ArgumentsOptions.CONTENT_TYPE,
default=None,
help="Additional content type for unstructured mode. "
"(Simulates http request Content-Type header, default: 'text/plain; charset=utf8')",
)
@staticmethod
def _reg_arg_target_type(*parsers):
target_types = [e for e in TargetType.ALL.value]
for parser in parsers:
parser.add_argument(
ArgumentsOptions.TARGET_TYPE,
required=False,
choices=target_types,
default=None,
help="Target type. The argument can also be provided by setting {} env var.".format(
ArgumentOptionsEnvVars.TARGET_TYPE
),
)
@staticmethod
def _register_subcommand_perf_test(subparsers):
desc = """
Test the performance of an inference model. This is done by internally using the server
sub command to serve the model. Then sending multiple requests to the server and
measuring the time it takes to complete each request.
        The test mixes several request sizes. The idea is to get coverage of several
sizes, from the smallest request containing only 1 row of data, up to the largest
request containing up to 50MB of data.
At the end of the test, a summary of the test will be displayed. For each request size,
the following fields will be shown:
size: size of the requests in bytes or Megabytes.
samples: number of samples this request size contained.
iters: number of times this request size was sent
min: minimum time measured for this request size (in seconds)
        avg: average time measured for this request size (in seconds)
max: maximum time measured for this request size (in seconds)
used: amount of memory used by drum at the end of this request size (MB)
container limit: if tests run in docker container, memory limit for it (MB)
total physical: total amount of physical memory avail on the current machine (MB)
"""
parser = subparsers.add_parser(
ArgumentsOptions.PERF_TEST,
description=desc,
help="Run performance tests",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.PERF_TEST] = parser
return parser
@staticmethod
def _register_subcommand_score(subparsers):
desc = """
Score an input file using the given model.
"""
parser = subparsers.add_parser(
ArgumentsOptions.SCORE, help="Run predictions in batch mode", description=desc
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.SCORE] = parser
return parser
@staticmethod
def _register_subcommand_fit(subparsers):
parser = subparsers.add_parser(ArgumentsOptions.FIT, help="Fit your model to your data")
CMRunnerArgsRegistry._parsers[ArgumentsOptions.FIT] = parser
return parser
@staticmethod
def _register_subcommand_validation(subparsers):
desc = """
You can validate the model on a set of various checks.
It is highly recommended to run these checks, as they are performed in DataRobot
before the model can be deployed.
List of checks:
* null values imputation: each feature of the provided dataset is set to missing
and fed to the model.
Example:
> drum validation --code-dir ~/user_code_dir/ --input 10k.csv
--positive-class-label yes --negative-class-label no
"""
parser = subparsers.add_parser(
ArgumentsOptions.VALIDATION,
help="Run validation checks against the model",
description=desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.VALIDATION] = parser
return parser
@staticmethod
def _register_subcommand_server(subparsers):
desc = """
Serve the given model using REST API. A web server will be started and will use
the {address} argument for the host and port to use.
The drum prediction server provides the following routes.
You may provide the environment variable URL_PREFIX.
Note that URLs must end with /.
A GET URL_PREFIX/ route, which checks if the server is alive.
Example: GET http://localhost:6789/
A POST URL_PREFIX/shutdown/ route, which shuts the server down.
Example: POST http://localhost:6789/shutdown/
A POST URL_PREFIX/predict/ route, which returns predictions on data.
Example: POST http://localhost:6789/predict/
For this /predict/ route, provide inference data
(for the model to make predictions) as form data with a key:value pair,
where: key = X and value = filename of the CSV that contains the inference data
Example using curl:
curl -X POST --form "X=@data_file.csv" localhost:6789/predict/
"""
parser = subparsers.add_parser(
ArgumentsOptions.SERVER,
help="serve the model via REST APIs",
description=desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.SERVER] = parser
return parser
@staticmethod
def _register_subcommand_new(subparsers):
parser = subparsers.add_parser(
ArgumentsOptions.NEW,
description="Create new model/env template",
help="Create new model/env template",
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.NEW] = parser
return parser
@staticmethod
def _register_subcommand_new_model(subparsers):
parser = subparsers.add_parser(
ArgumentsOptions.NEW_MODEL, help="Create a new modeling code directory template"
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.NEW_MODEL] = parser
return parser
@staticmethod
def _register_subcommand_push(subparsers):
parser = subparsers.add_parser(
ArgumentsOptions.PUSH,
help="Add your modeling code into DataRobot",
description=PUSH_HELP_TEXT,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
CMRunnerArgsRegistry._parsers[ArgumentsOptions.PUSH] = parser
return parser
@staticmethod
def get_arg_parser():
parser = argparse.ArgumentParser(description="Run user model")
CMRunnerArgsRegistry._parsers[ArgumentsOptions.MAIN_COMMAND] = parser
CMRunnerArgsRegistry._reg_arg_version(parser)
subparsers = parser.add_subparsers(
dest=CMRunnerArgsRegistry.SUBPARSER_DEST_KEYWORD, help="Commands"
)
score_parser = CMRunnerArgsRegistry._register_subcommand_score(subparsers)
fit_parser = CMRunnerArgsRegistry._register_subcommand_fit(subparsers)
perf_test_parser = CMRunnerArgsRegistry._register_subcommand_perf_test(subparsers)
validation_parser = CMRunnerArgsRegistry._register_subcommand_validation(subparsers)
server_parser = CMRunnerArgsRegistry._register_subcommand_server(subparsers)
new_parser = CMRunnerArgsRegistry._register_subcommand_new(subparsers)
new_subparsers = new_parser.add_subparsers(
dest=CMRunnerArgsRegistry.NEW_SUBPARSER_DEST_KEYWORD, help="Commands"
)
new_model_parser = CMRunnerArgsRegistry._register_subcommand_new_model(new_subparsers)
push_parser = CMRunnerArgsRegistry._register_subcommand_push(subparsers)
# Note following args are not supported for perf-test, thus set as default
perf_test_parser.set_defaults(logging_level="warning", verbose=False)
validation_parser.set_defaults(logging_level="warning", verbose=False)
CMRunnerArgsRegistry._reg_arg_code_dir(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
new_model_parser,
validation_parser,
push_parser,
)
CMRunnerArgsRegistry._reg_arg_verbose(
score_parser,
server_parser,
fit_parser,
new_parser,
new_model_parser,
push_parser,
perf_test_parser,
)
CMRunnerArgsRegistry._reg_arg_input(
score_parser, perf_test_parser, fit_parser, validation_parser
)
CMRunnerArgsRegistry._reg_arg_pos_neg_labels(
score_parser, perf_test_parser, server_parser, fit_parser, validation_parser
)
CMRunnerArgsRegistry._reg_arg_multiclass_labels(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
validation_parser,
push_parser,
)
CMRunnerArgsRegistry._reg_arg_logging_level(
score_parser, server_parser, fit_parser, new_parser, new_model_parser, push_parser
)
CMRunnerArgsRegistry._reg_arg_docker(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
validation_parser,
push_parser,
)
CMRunnerArgsRegistry._reg_arg_skip_deps_install(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
validation_parser,
push_parser,
)
CMRunnerArgsRegistry._reg_arg_memory(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
validation_parser,
push_parser,
)
CMRunnerArgsRegistry._reg_arg_output(score_parser, fit_parser)
CMRunnerArgsRegistry._reg_arg_show_perf(score_parser, server_parser)
CMRunnerArgsRegistry._reg_arg_target_feature_and_filename(fit_parser)
CMRunnerArgsRegistry._reg_arg_weights(fit_parser)
CMRunnerArgsRegistry._reg_arg_skip_predict(fit_parser)
CMRunnerArgsRegistry._reg_arg_num_rows(fit_parser)
CMRunnerArgsRegistry._reg_arg_sparse_colfile(fit_parser)
CMRunnerArgsRegistry._reg_arg_samples(perf_test_parser)
CMRunnerArgsRegistry._reg_arg_iterations(perf_test_parser)
CMRunnerArgsRegistry._reg_arg_timeout(perf_test_parser)
CMRunnerArgsRegistry._reg_arg_in_server(perf_test_parser)
CMRunnerArgsRegistry._reg_arg_url(perf_test_parser)
CMRunnerArgsRegistry._reg_arg_address(server_parser)
CMRunnerArgsRegistry._reg_arg_production_server(server_parser, perf_test_parser)
CMRunnerArgsRegistry._reg_arg_max_workers(server_parser, perf_test_parser)
CMRunnerArgsRegistry._reg_arg_with_error_server(server_parser)
CMRunnerArgsRegistry._reg_arg_language(
new_model_parser, server_parser, score_parser, perf_test_parser, validation_parser
)
CMRunnerArgsRegistry._reg_arg_show_stacktrace(
score_parser,
perf_test_parser,
server_parser,
fit_parser,
validation_parser,
new_model_parser,
)
CMRunnerArgsRegistry._reg_args_monitoring(score_parser, server_parser)
CMRunnerArgsRegistry._reg_arg_target_type(
score_parser, perf_test_parser, server_parser, fit_parser, validation_parser
)
CMRunnerArgsRegistry._reg_args_unstructured_mode(
score_parser, perf_test_parser, server_parser, validation_parser
)
CMRunnerArgsRegistry._reg_args_deployment_config(server_parser)
return parser
@staticmethod
def verify_monitoring_options(options, parser_name):
if options.subparser_name in [ArgumentsOptions.SERVER, ArgumentsOptions.SCORE]:
if options.monitor:
if options.target_type == TargetType.UNSTRUCTURED.value:
print("Error: MLOps monitoring can not be used in unstructured mode.")
exit(1)
missing_args = []
if options.model_id is None:
missing_args.append(ArgumentsOptions.MODEL_ID)
if options.deployment_id is None:
missing_args.append(ArgumentsOptions.DEPLOYMENT_ID)
if options.monitor_settings is None:
missing_args.append(ArgumentsOptions.MONITOR_SETTINGS)
if len(missing_args) > 0:
print("\n")
print("Error: MLOps Monitoring requires all monitoring options to be present.")
print("Note: The following MLOps monitoring option(s) is/are missing:")
for arg in missing_args:
print(" {}".format(arg))
print("\n")
print("These options can also be obtained via environment variables")
print("\n")
CMRunnerArgsRegistry._parsers[parser_name].print_help()
exit(1)
# Monitor options are used to fill in pipeline json,
# so define them for the modes different from score and server
else:
options.monitor = False
options.model_id = None
options.deployment_id = None
options.monitor_settings = None
@staticmethod
def verify_options(options):
if not options.subparser_name:
CMRunnerArgsRegistry._parsers[ArgumentsOptions.MAIN_COMMAND].print_help()
exit(1)
elif options.subparser_name == ArgumentsOptions.NEW:
if not options.new_mode:
CMRunnerArgsRegistry._parsers[ArgumentsOptions.NEW].print_help()
exit(1)
elif options.subparser_name in [ArgumentsOptions.SERVER, ArgumentsOptions.PERF_TEST]:
if options.production:
if options.verbose:
print("Checking if uwsgi is installed...")
result = subprocess.run(
[sys.executable, "-m", "pip", "show", "uwsgi"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if result.returncode != 0:
print(
"Looks like 'uwsgi` package is missing. Don't use '{}' option when running drum server or try to install 'uwsgi'.".format(
ArgumentsOptions.PRODUCTION
)
)
print(result.stdout.decode("utf8"))
print(result.stderr.decode("utf8"))
exit(1)
else:
if options.verbose:
print("uwsgi detected")
elif options.subparser_name in [ArgumentsOptions.FIT]:
if options.target_type == TargetType.ANOMALY.value:
if any([options.target, options.target_csv]):
print(
"Arguments '{}' and '{}' are mutually exclusive with '{}' target type.".format(
ArgumentsOptions.TARGET,
ArgumentsOptions.TARGET_CSV,
options.target_type,
)
)
exit(1)
elif options.target_type != TargetType.TRANSFORM.value:
if not any([options.target, options.target_csv]):
print(
"With target type '{}', target feature has to be provided using '{}' or '{}' argument.".format(
options.target_type,
ArgumentsOptions.TARGET,
ArgumentsOptions.TARGET_CSV,
)
)
exit(1)
if getattr(options, "skip_deps_install", False) and options.docker is None:
print(
"Argument '{}' can only be used together with '{}'.".format(
ArgumentsOptions.SKIP_DEPS_INSTALL, ArgumentsOptions.DOCKER,
)
)
exit(1)
CMRunnerArgsRegistry.verify_monitoring_options(options, options.subparser_name)
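# Illustrative wiring only (the real drum entry point may differ); it shows how the
# registry above is typically consumed.
if __name__ == "__main__":
    arg_parser = CMRunnerArgsRegistry.get_arg_parser()
    parsed_options = arg_parser.parse_args()
    CMRunnerArgsRegistry.verify_options(parsed_options)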
|
from setuptools import setup
setup(name='latext',
version='0.0.7',
description='For converting LaTeX to spoken text.',
url='https://github.com/Alex-Tremayne/LaTeXt',
author='Alex Tremayne',
author_email='alexjtremayne@gmail.com',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'],
packages=['latext'],
zip_safe=False)
|
#!/bin/env python3
import io
import logging
import os
import os.path
import re
import tarfile
import zipfile
from collections import namedtuple
import magic
import requests
import yaml
from github import Github
from requests.exceptions import ConnectionError
from semver import VersionInfo
Asset = namedtuple('Asset', ['name', 'browser_download_url']) # NOSONAR
AssetWithPriority = namedtuple('AssetWithPriority', ['asset', 'priority']) # NOSONAR
Release = namedtuple('Release', ['release', 'assets']) # NOSONAR
VALID_EXECUTABLE_MIMES = [
'application/x-executable', 'application/x-sharedlib', 'text/x-java', 'text/x-lisp', 'text/x-lua', 'text/x-perl',
'text/x-python', 'text/x-ruby', 'text/x-shellscript', 'text/x-tcl'
]
MIN_ASSET_PRIORITY = 999
empty_asset_with_lowest_priority = AssetWithPriority(Asset(None, None), MIN_ASSET_PRIORITY)
def install_package_from_repo(repo):
releases = [
Release(release, [asset
for asset in release.get_assets()])
for release in repo.get_releases()
if valid_release(release)
]
sorting_key = lambda item: get_semver(item.release.tag_name)
sorted_releases = sorted(releases, key=sorting_key, reverse=True)
asset_to_download = get_preferred_asset(sorted_releases[0].assets)
logging.info(f" Chosen: {asset_to_download.name}")
logging.info(f" Size: {asset_to_download.size // 1024 / 1024:.2f}MB")
logging.debug(f" URL: {asset_to_download.browser_download_url}")
install_package(asset_to_download.browser_download_url, "/tmp")
def valid_release(release):
return not (release.prerelease or release.draft) and type(get_semver(release.tag_name)) is VersionInfo
def get_semver(version):
search_ver = re.search(r'^v?(?P<ver>\d+(\.\d+)+.*)', version, re.IGNORECASE)
if (search_ver):
try:
ver = VersionInfo.parse(search_ver.group('ver'))
logging.debug(f' valid release: {ver}')
except (ValueError, TypeError, AttributeError):
ver = None
else:
ver = None
return ver
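# Illustrative behaviour of get_semver (example values assumed, not from the original source):
#   get_semver('v1.2.3')  -> VersionInfo(major=1, minor=2, patch=3)
#   get_semver('1.4')     -> None  (regex matches, but VersionInfo.parse rejects a two-part version)
#   get_semver('latest')  -> None  (no leading digits, so the regex does not match)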
def get_preferred_asset(valid_assets, asset_with_priority=empty_asset_with_lowest_priority):
if len(valid_assets) == 0:
return asset_with_priority.asset
head, *tail = valid_assets
if any(exclusion.search(head.name) for exclusion in exclusion_regexes()):
return get_preferred_asset(tail, asset_with_priority)
else:
return get_preferred_asset(tail, get_highest_priority_asset(head, asset_with_priority))
def exclusion_regexes():
# Singleton function, initializes static variable regex_list only in the first call
if getattr(exclusion_regexes, 'regex_list', None) is None:
exclusion_regexes.regex_list = [
re.compile(r'\.(sig|deb|txt|yaml|exe|des|md5|sha[1-8]{1,3})$', re.I),
re.compile(r'^(AUTHOR|README|LICENSE|completions|md5|sha[1-8]{1,3})', re.I),
re.compile(r'(win(dows)?|darwin|mac(os)?|netbsd|android|source|arm)', re.I)
]
return exclusion_regexes.regex_list
def get_highest_priority_asset(asset, asset_with_priority=empty_asset_with_lowest_priority):
valid_asset_with_priority = asset_with_priority
    matches = [
        'priority' if all(expr.search(asset.name) is not None for expr in expr_list) else 'no match'
        for expr_list in inclusion_regexes()
    ]
asset_priority = matches.index('priority') if 'priority' in matches else MIN_ASSET_PRIORITY
logging.debug(f" priority: {asset_priority:3d} name: {asset.name} size: {asset.size}")
if asset_priority < asset_with_priority.priority:
valid_asset_with_priority = AssetWithPriority(asset, asset_priority)
return valid_asset_with_priority
def inclusion_regexes():
# Singleton function, initializes static variable regex_list only in the first call
if getattr(inclusion_regexes, 'regex_list', None) is None:
accepted_architectures = [
re.compile(expression, re.I) for expression in [r'(x86_64|amd64)', r'.*(?!x86_64|amd64).*$']
]
accepted_os = [
re.compile(expression, re.I)
for expression in [r'[_.-]linux-gnu([_.-]|$)', r'[_.-]linux-musl([_.-]|$)', r'[_.-]linux([_.-]|$)']
]
accepted_extensions = [
re.compile(expression, re.I)
for expression in [r'^(?!.*\.(tar\.gz|zip)$).*$', r'\.tar(\.gz)?$', r'\.zip$']
]
inclusion_regexes.regex_list = []
for architecture in accepted_architectures:
for os_name in accepted_os:
for extension in accepted_extensions:
                    # The patterns are already compiled above; just group them per combination.
                    inclusion_regexes.regex_list.append([architecture, os_name, extension])
return inclusion_regexes.regex_list
def install_package(url, dest):
logging.debug(f" Dest: {dest}")
fname = url[url.rfind('/') + 1:]
try:
response = requests.get(url)
logging.info(f" Mime: {mimetype(response.content)}")
extracted_files = extracted_content(fname, response.content)
except ConnectionError as e:
logging.error(e.strerror)
extracted_files = []
return extracted_files
def extracted_content(fname, content):
files = []
compressed_stream = io.BytesIO(content)
mime = mimetype(content)
if mime in ['application/x-compressed-tar', 'application/x-tar'] or (mime == 'application/gzip' and
fname.endswith('tar.gz')):
mode = 'r:' if mime == 'application/x-tar' else 'r:gz'
files = generic_unpack(compressed_stream=compressed_stream,
get_package_handle=lambda stream: tarfile.open(fileobj=stream, mode=mode),
get_files=lambda tar: tar.getmembers(),
is_file=lambda tarinfo: tarinfo.isfile(),
get_fd=lambda tar, file: tar.extractfile(file),
get_file_name=lambda file: file.name)
elif mime == 'application/zip':
files = generic_unpack(compressed_stream=compressed_stream,
                               get_package_handle=lambda stream: zipfile.ZipFile(stream, mode='r'),
get_files=lambda zip: zip.infolist(),
is_file=lambda zipinfo: not zipinfo.is_dir(),
get_fd=lambda zip, file: zip.open(file),
get_file_name=lambda file: file.filename)
elif mime in VALID_EXECUTABLE_MIMES:
files.append({'name': fname, 'mime': mime, 'content': content})
return files
def generic_unpack(compressed_stream, get_package_handle, get_files, is_file, get_fd, get_file_name):
files = []
with get_package_handle(compressed_stream) as package:
for file in [fileinfo for fileinfo in get_files(package) if is_file(fileinfo)]:
logging.info(f'\t name: {get_file_name(file)}')
with get_fd(package, file) as file_descriptor:
if file_descriptor:
file_content = file_descriptor.read()
mime_type = mimetype(file_content)
files.append({'name': get_file_name(file), 'mime': mime_type, 'content': file_content})
logging.debug(f'\t name: {get_file_name(file)}, mime: {mime_type}')
else:
logging.error(f'\t error extracting file {get_file_name(file)}')
return files
def mimetype(it):
    # `it` may be raw bytes (archive members, HTTP payloads) or an open file descriptor.
    return magic.from_descriptor(it, mime=True) if isinstance(it, int) else magic.from_buffer(it, mime=True)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
with open(os.path.expanduser('~/workstation-install/packages.yml')) as file:
packages = yaml.load(file.read(), Loader=yaml.SafeLoader)
github_connection = Github(os.environ['GITHUB_TOKEN'])
for repo in (github_connection.get_repo(repo_name) for repo_name in packages['blindspot_packages']):
logging.info(f'### {repo.name}')
install_package_from_repo(repo)
|
from django.shortcuts import render
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from actualite.models import Actualite
# Create your views here.
def actualite_views(request):
actualite_list = Actualite.objects.all().order_by('-created')[:1]
actualite_list_laterale = Actualite.objects.all().order_by('-created')[:3]
actu = Actualite.objects.all().order_by('-created')
paginator = Paginator(actu, 12)
page = request.GET.get('page')
try:
actu_relative = paginator.page(page)
except PageNotAnInteger:
actu_relative = paginator.page(1)
except EmptyPage:
actu_relative = paginator.page(paginator.num_pages)
context = {
'actualite_list': actualite_list,
'actualite_list_laterale': actualite_list_laterale,
'actu_relative': actu_relative
}
template_name = 'pages/actualite/actualite.html'
return render(request, template_name, context)
def actualite_view_detail(request, id):
actualite_list = Actualite.objects.get(id=id)
actu = Actualite.objects.all().order_by('?')
paginator = Paginator(actu, 8)
page = request.GET.get('page')
try:
actu_relative = paginator.page(page)
except PageNotAnInteger:
actu_relative = paginator.page(1)
except EmptyPage:
actu_relative = paginator.page(paginator.num_pages)
context = {
"actualite_list": actualite_list,
"actu_relative": actu_relative
}
template_name = 'pages/actualite/actualite-view.html'
return render(request, template_name, context)
def actualite_views_province(request):
actu = Actualite.objects.all()
paginator = Paginator(actu, 9)
page = request.GET.get('page')
try:
actu_province = paginator.page(page)
except PageNotAnInteger:
actu_province = paginator.page(1)
except EmptyPage:
actu_province = paginator.page(paginator.num_pages)
context = {
'actu_province': actu_province,
}
template_name = 'pages/actualite/province.html'
return render(request, template_name, context)
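# A hypothetical urls.py wiring for the views above (module path and route names are
# assumptions for illustration only; adapt them to the actual project layout):
#
#   from django.urls import path
#   from actualite import views
#
#   urlpatterns = [
#       path('actualites/', views.actualite_views, name='actualite'),
#       path('actualites/<int:id>/', views.actualite_view_detail, name='actualite-detail'),
#       path('actualites/provinces/', views.actualite_views_province, name='actualite-province'),
#   ]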
|
from django.db import models
class Mappings(models.Model):
    TRUE_NAME = models.CharField(max_length=100, default="NONAME")
    FILE_NAME = models.CharField(max_length=100, default="NONAME")
    FILE_LINK = models.TextField(default="NOFILE")
    GEXF_LINK = models.TextField(default="NOGEXF")
|
from recognizers.program import Program
class Parser:
def __init__(self, lexical_reclassifier):
self.lexical = lexical_reclassifier
def parse(self):
reuse_token = False
stack = []
machine = Program()
while True:
if not reuse_token:
token = self.lexical.get_token()
if token.value == 'EOF':
break
reuse_token = False
print('token: {} | state: ({}, {}) | stack len: {}'
.format(token.value, machine.__class__.__name__,
machine.get_state(), len(stack)))
try:
while True:
sub_machine = machine.process_atom(token)
# print('sub machine = {}'.format(sub_machine.__class__.__name__))
if sub_machine:
stack.append(machine)
machine = sub_machine
else:
break
except ValueError as ex:
if machine.accept():
reuse_token = True
try:
machine = stack.pop()
except IndexError:
print('Syntax error.')
return False
else:
# Unexpected error
print(ex)
return False
stack.append(machine)
for m in stack:
if not m.accept():
return False
return True
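# Illustrative usage (names assumed): the lexical reclassifier must expose get_token()
# returning objects with a `.value` attribute, as relied on above.
#   parser = Parser(lexical_reclassifier)
#   accepted = parser.parse()   # True if the whole token stream is accepted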
|
# diagrams as code vía https://diagrams.mingrammer.com
from diagrams import Cluster, Diagram, Edge, Node
from diagrams.aws.security import IAM, IAMRole
from diagrams.aws.management import Cloudtrail
from diagrams.aws.storage import S3
from diagrams.aws.compute import ECR
with Diagram("Sysdig Secure for Cloud\n(organizational permissions)", filename="diagram-permissions", show=True):
with Cluster("member account (sysdig workload)"):
# bench_role = IAMRole(label="Benchmark role")
member_sysdig_role = IAMRole(label="OrganizationAccountAccessRole")
member_sysdig_ecr = ECR("container registry")
member_sysdig_role >> member_sysdig_ecr
ecs_role = IAMRole(label="ECSTaskRole")
# bench_role - Edge(style="invis") - member_sysdig_ecr
with Cluster("member accounts"):
# IAMRole(label="Benchmark role")
member_role = IAMRole(label="OrganizationAccountAccessRole")
member_ecr = ECR("container registry")
member_role >> member_ecr
with Cluster("management account"):
# IAMRole(label="Benchmark role")
sf4c_role = IAMRole(label="SysdigSecureForCloud")
sf4c_role >> Cloudtrail()
sf4c_role >> S3()
ecs_role >> sf4c_role
sf4c_role >> member_role
sf4c_role >> member_sysdig_role
|