content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Splits the preprocessed data into training, validation, and test set.
Created on Tue Sep 28 16:45:51 2021
@author: lbechberger
"""
import os, argparse, csv
import pandas as pd
from sklearn.model_selection import train_test_split
from code.util import COLUMN_LABEL
# setting up CLI
parser = argparse.ArgumentParser(description="Splitting the data set")
parser.add_argument("input_file", help="path to the input csv file")
parser.add_argument("output_folder", help="path to the output folder")
parser.add_argument(
    "-s", "--seed", type=int, help="seed for the random number generator", default=None
)
parser.add_argument(
    "-t", "--test_size", type=float, help="relative size of the test set", default=0.2
)
parser.add_argument(
    "-v",
    "--validation_size",
    type=float,
    help="relative size of the validation set",
    default=0.2,
)
args = parser.parse_args()

# load the data
df = pd.read_csv(
    args.input_file, quoting=csv.QUOTE_NONNUMERIC, lineterminator="\n", low_memory=False
)

# split into (training & validation) and test set, stratified on the label column
X, X_test = train_test_split(
    df,
    test_size=args.test_size,
    random_state=args.seed,
    shuffle=True,
    stratify=df[COLUMN_LABEL],
)

# split the remainder into training and validation; rescale the requested
# validation fraction because it was specified relative to the full data set
relative_validation_size = args.validation_size / (1 - args.test_size)
X_train, X_val = train_test_split(
    X,
    test_size=relative_validation_size,
    random_state=args.seed,
    shuffle=True,
    stratify=X[COLUMN_LABEL],
)

# store the three data sets separately
# NOTE: DataFrame.to_csv takes `lineterminator` (pandas >= 1.5); the old
# `line_terminator` keyword was removed in pandas 2.0 and was inconsistent
# with the read_csv call above
for subset, filename in (
    (X_train, "training.csv"),
    (X_val, "validation.csv"),
    (X_test, "test.csv"),
):
    subset.to_csv(
        os.path.join(args.output_folder, filename),
        index=False,
        quoting=csv.QUOTE_NONNUMERIC,
        lineterminator="\n",
    )
print(
    "Training: {0} examples, Validation: {1} examples, Test: {2} examples".format(
        len(X_train), len(X_val), len(X_test)
    )
)
|
from .server import Server
from .task import Task
import bisect
class EventQueue:
    """Discrete-event simulation of `num_places` parallel execution slots.

    Pending tasks are started FIFO on free places; `pop` advances simulated
    time to the earliest finish and returns the finished task.
    """

    def __init__(self, num_places):
        self.n = num_places
        self.sleeping_places = list(range(self.n))  # currently free place ids
        self.running_tasks = []    # running tasks, sorted by finish time
        self.finish_at_list = []   # finish times parallel to running_tasks
        self.t = 0                 # current simulated time
        self.tasks = []            # pending tasks, FIFO

    def push_all(self, tasks):
        """Append *tasks* to the pending queue."""
        self.tasks.extend(tasks)

    def pop(self):
        """Start pending tasks on free places, then return the next finished task.

        Returns None once nothing is running and nothing is pending.
        """
        # greedily assign pending tasks to free places at the current time
        while self.sleeping_places and self.tasks:
            free_place = self.sleeping_places.pop(0)
            task = self.tasks.pop(0)
            task.start_at = self.t
            task.finish_at = self.t + task.dt
            task.place_id = free_place
            # keep running_tasks / finish_at_list sorted by finish time
            pos = bisect.bisect_right(self.finish_at_list, task.finish_at)
            self.finish_at_list.insert(pos, task.finish_at)
            self.running_tasks.insert(pos, task)
        if len(self.sleeping_places) == self.n:
            # every place is idle: the simulation is drained
            return None
        # pop the earliest-finishing task, advance time, free its place
        self.finish_at_list.pop(0)
        finished = self.running_tasks.pop(0)
        self.t = finished.finish_at
        self.sleeping_places.append(finished.place_id)
        return finished
# Module-level state shared by the stub overrides installed in start_stub().
_queue = None            # EventQueue driving the simulated execution
_stub_simulator = None   # callable(task) -> (results, dt_seconds)
def start_stub(stub_simulator, num_proc=1, logger=None, dump_path='tasks.bin'):
    """Patch Server so tasks run through an in-process EventQueue simulation.

    stub_simulator: callable(task) -> (results, dt); dt is stored on the
        task in milliseconds.
    num_proc: number of simulated parallel places.
    logger: forwarded to the Server constructor.
    dump_path: target of Task.dump_binary() when the server context exits.
    Returns the patched Server instance (also stored in Server._instance).
    """
    global _queue, _stub_simulator
    _stub_simulator = stub_simulator
    Server._instance = Server(logger)
    _queue = EventQueue(num_proc)
    # override the methods
    def print_tasks_stub(self, tasks):
        # evaluate each task with the stub simulator and enqueue it
        for t in tasks:
            res, dt = _stub_simulator(t)
            t.results = res
            t.dt = int(1000 * dt)  # seconds -> milliseconds
        _queue.push_all(tasks)
    Server._print_tasks = print_tasks_stub
    def receive_result_stub(self):
        # pop the next finished task from the simulated queue
        t = _queue.pop()
        if t is None:
            return None
        t.rc = 0  # simulated tasks always succeed
        return t
    Server._receive_result = receive_result_stub
    # chain the original __exit__ so the task dump happens on shutdown
    Server.org_exit = Server.__exit__
    def _exit(self, exc_type, exc_val, exc_tb):
        self.org_exit(exc_type, exc_val, exc_tb)
        Task.dump_binary(dump_path)
    Server.__exit__ = _exit
    return Server._instance
|
import logging
from prometheus_client import Gauge, CollectorRegistry, push_to_gateway
class MetricsPusher:
    """Pushes per-country reporting flags to a Prometheus pushgateway."""

    def __init__(self, pushgateway, job='covid19mon'):
        self._pushgateway = pushgateway
        self._job = job
        self._registry = CollectorRegistry()
        self._gauges = {
            'reported': Gauge('covid_reported_count', 'New entries for country', ['country'], registry=self._registry)
        }
        self._reported = None

    def report(self, metrics):
        """Record one flag per country in *metrics* and push to the gateway (if configured)."""
        self._reported = {country: 1 for country in metrics}
        gauge = self._gauges['reported']
        for country, flag in self._reported.items():
            gauge.labels(country).set(flag)
        if self._pushgateway:
            push_to_gateway(self._pushgateway, job=self._job, registry=self._registry)
            logging.debug(f'pushed {len(self._reported)} records. records: {self._reported}')

    def reported(self):
        """Return the dict recorded by the last report() call (None before any call)."""
        return self._reported
|
from __future__ import absolute_import, division, print_function
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import EmpiricalMarginal, TracePredictive
from pyro.infer.mcmc import MCMC, NUTS
from tests.common import assert_equal
def model(num_trials):
    """Binomial model with a Uniform(0, 1) prior on the success probability.

    num_trials: 1-D tensor of trial counts; one "phi" is drawn per entry.
    Returns the sampled "obs" site (Binomial draws).
    """
    phi_prior = dist.Uniform(num_trials.new_tensor(0.), num_trials.new_tensor(1.))\
        .expand_by([num_trials.shape[0]])
    success_prob = pyro.sample("phi", phi_prior)
    return pyro.sample("obs", dist.Binomial(num_trials, success_prob))
def test_posterior_predictive():
    """Posterior predictive mean of the Binomial model should recover the truth."""
    trials = torch.ones(5) * 1000
    probs = torch.ones(5) * 0.7
    successes = dist.Binomial(trials, probs).sample()
    conditioned = poutine.condition(model, data={"obs": successes})
    kernel = NUTS(conditioned, adapt_step_size=True)
    run = MCMC(kernel, num_samples=1000, warmup_steps=200).run(trials)
    predictive = TracePredictive(model, run, num_samples=10000).run(trials)
    marginal = EmpiricalMarginal(predictive)
    # true mean is 1000 * 0.7 = 700 per entry
    assert_equal(marginal.mean, torch.ones(5) * 700, prec=30)
def test_nesting():
    """An MCMC run inside a nested call must not leak sample sites into an enclosing trace."""
    def nested():
        trials = torch.ones(5) * 1000
        probs = torch.ones(5) * 0.7
        successes = dist.Binomial(trials, probs).sample()
        conditioned = poutine.condition(model, data={"obs": successes})
        kernel = NUTS(conditioned, adapt_step_size=True)
        return MCMC(kernel, num_samples=10, warmup_steps=2).run(trials)

    with poutine.trace() as tp:
        nested()
        nested()
    # the enclosing trace must remain empty
    assert len(tp.trace.nodes) == 0
|
import json
from django import test
import jwt
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from django.test import TestCase
class TestAPI(TestCase):
    """End-to-end API tests for the user endpoint."""

    def test_signUp(self):
        """POST /user/ creates an account and returns a JWT refresh/access pair."""
        payload = {
            "username": "user_prueba_1",
            "password": "password_prueba_1",
            "name": "user prueba",
            "email": "user_prueba_1@misionTIC.com",
            "account": {
                "lastChangeDate": "2021-09-23T10:25:43.511Z",
                "balance": 20000,
                "isActive": "true"
            }
        }
        client = APIClient()
        response = client.post('/user/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertIn('refresh', response.data.keys())
        self.assertIn('access', response.data.keys())
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os, sys, traceback
from functools import wraps
import maya.cmds as mc
import maya.mel as mel
import maya.api.OpenMaya as om2
import maya.OpenMayaUI as omUI
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
try:
from PySide2.QtWidgets import QMainWindow, QApplication
from PySide2.QtGui import QPainterPath, QRegion, QIcon
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import Qt, QPoint, QRect
from shiboken2 import wrapInstance
except ImportError:
from PySide.QtGui import QMainWindow, QApplication, QPainterPath, QRegion, QIcon
from PySide.QtUiTools import QUiLoader
from PySide.QtCore import Qt, QPoint, QRect
from shiboken import wrapInstance
#----------------------------------------------------------------------------------------------------------------------
# 関数の前後にundoInfoのopenChunkとcloseChunkを実行するデコレーター
def openCloseChunk(func):
    """Decorator: run *func* inside a single Maya undo chunk.

    Opens an undo chunk before the call and always closes it afterwards.
    Exceptions raised by *func* are printed and swallowed (best-effort);
    the wrapper then returns None instead of the function's result.
    """
    @wraps(func)
    def wrapper(*args, **kargs):
        action = None
        try:
            mc.undoInfo(openChunk=True)
            action = func(*args, **kargs)
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; keep the print-and-continue behavior
            print(traceback.format_exc())
        finally:
            mc.undoInfo(closeChunk=True)
        return action
    return wrapper
#----------------------------------------------------------------------------------------------------------------------
class kkDisplayVertexColorSeparatelyWindow(MayaQWidgetBaseMixin, QMainWindow):
    """Frameless tool window that displays one vertex-color channel (R/G/B/A) at a time."""
    # handles to the selected target object
    targetObj = None
    targetObjMesh = None
    targetObjVtxCount = None
    targetObjVtxIdxList = None
    # scriptJob ids for the per-channel attributeChange jobs
    jobNum_attributeChange_R = 0
    jobNum_attributeChange_G = 0
    jobNum_attributeChange_B = 0
    jobNum_attributeChange_A = 0
    jobNum_attributeChange_Base = 0
    # scriptJob ids for the nodeDeleted jobs (intermediate-object case)
    jobNum_nodeDeleted_R = 0
    jobNum_nodeDeleted_G = 0
    jobNum_nodeDeleted_B = 0
    jobNum_nodeDeleted_A = 0
    jobNum_otherSceneOpened = 0
    callbackID_nameChanged = None
    # base colorSet name and its representation ("RGB", "RGBA", "A", ...)
    baseColorSet = ""
    baseColorSerRep = "RGBA"
    baseColorBeforeEdit = None
    # display settings captured at startup so they can be restored on close
    attrDispColor = 0
    pOption_matChl = ""
    pOption_matBld = ""
    isHistoryDeleted = True
    # whether the mesh carries an intermediate object
    hasIntermediateObject = False
    mouseCursorPos = QPoint(0, 0)
    isDragging = False
    # geometry of each channel button in its on/off state
    btn_R_checkOnRect = QRect(18, 72, 164, 36)
    btn_R_checkOffRect = QRect(10, 70, 180, 40)
    btn_G_checkOnRect = QRect(18, 117, 164, 36)
    btn_G_checkOffRect = QRect(10, 115, 180, 40)
    btn_B_checkOnRect = QRect(18, 162, 164, 36)
    btn_B_checkOffRect = QRect(10, 160, 180, 40)
    btn_A_checkOnRect = QRect(18, 207, 164, 36)
    btn_A_checkOffRect = QRect(10, 205, 180, 40)
    uiFIle = None
def __init__(self, parent=None):
    """Build the window for the currently selected mesh and register callbacks/jobs."""
    # close any previously opened instance of this window
    self.deleteInstances()
    selList = om2.MGlobal.getActiveSelectionList()
    mDagPath, _ = selList.getComponent(0)
    self.targetObj = om2.MFnTransform(mDagPath)
    self.targetObjMesh = om2.MFnMesh(mDagPath)
    self.targetObjVtxCount = self.targetObjMesh.numVertices
    # was xrange(): Python-2 only; range() iterates identically and also
    # works in Python-3 Maya (the file already uses range() elsewhere)
    self.targetObjVtxIdxList = range(self.targetObjVtxCount)
    mObj = mDagPath.node()
    # register a callback fired when the target object is renamed
    self.callbackID_nameChanged = om2.MNodeMessage.addNameChangedCallback(mObj, self.targetObjNameChangedCallback)
    super(kkDisplayVertexColorSeparatelyWindow, self).__init__(parent)
    self.setupUI()
    # remember displayColors, then switch color display on for preview
    self.attrDispColor = mc.getAttr("%s.displayColors"%self.targetObjMesh.fullPathName())
    mc.setAttr("%s.displayColors"%self.targetObjMesh.fullPathName(), 1)
    # remember colorMaterialChannel / materialBlend, then override them
    self.pOption_matChl = mc.polyOptions(q=True, colorMaterialChannel=True, gl=False)[0]
    self.pOption_matBld = mc.polyOptions(q=True, materialBlend=True, gl=False)[0]
    mc.polyOptions(colorMaterialChannel="ambientDiffuse", gl=False)
    mc.polyOptions(materialBlend="overwrite", gl=False)
    # check whether the mesh has an intermediate object
    historyList = mc.bakePartialHistory(self.targetObjMesh.fullPathName(), q=True, prePostDeformers=True) or []
    if len(historyList) > 0:
        self.hasIntermediateObject = True
    # remember the colorSet that was active before launching the tool
    curColorSetList = mc.polyColorSet(q=True, currentColorSet=True)
    # create a colorSet if the mesh has none
    if curColorSetList == None:
        curColorSet = mc.polyColorSet(create=True, colorSet="colorSet", clamped=True, representation="RGBA")[0]
    else:
        curColorSet = curColorSetList[0]
    self.baseColorSet = curColorSet
    self.baseColorSerRep = mc.polyColorSet(q=True, currentColorSet=True, representation=True)
    self.baseColorBeforeEdit = self.targetObjMesh.getVertexColors(self.baseColorSet)
    # add the per-channel temp colorSets, based on baseColorSerRep
    self.checkColorSet()
    # copy the current colorSet values into the per-channel temp colorSets;
    # when an intermediate object exists this creates polyColorPerVertex nodes
    self.getBaseVertexColorData()
    # when an intermediate object exists, register jobs that restore the temp
    # nodes if they get removed (e.g. by a history delete)
    if self.hasIntermediateObject == True:
        self.setDeleteNodeJobs()
    # close this window automatically when another scene is opened
    self.otherSceneOpenedJob()
    if self.hasIntermediateObject == True:
        self.jobNum_attributeChange_Base = mc.scriptJob(
            attributeChange=["tmpColorSet_Base_Node.vertexColor", self.vtxColBase],
            allChildren=True,
            parent="kkDisplayVertexColorSeparatelyWindow",
            compressUndo=True,
            runOnce=True)
    else:
        self.jobNum_attributeChange_Base = mc.scriptJob(
            attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColBase],
            allChildren=True,
            parent="kkDisplayVertexColorSeparatelyWindow",
            compressUndo=True,
            runOnce=True)
#==============================================================================================
# .uiファイルを読み込み、ウィンドウの設定
def setupUI(self):
    """Load the .ui file and configure the frameless, rounded window."""
    currentFilePath = os.path.dirname(__file__)
    # load the .ui file
    loader = QUiLoader()
    uiFilePath = os.path.join(currentFilePath, 'kkDisplayVertexColorSeparatelyGUI.ui')
    self.uiFIle = loader.load(uiFilePath)
    self.setCentralWidget(self.uiFIle)
    # the object name is used as the `parent` of the registered scriptJobs
    self.setObjectName("kkDisplayVertexColorSeparatelyWindow")
    # window title
    self.setWindowTitle("kkDisplayVertexColorSeparately")
    # window size
    self.resize(200, 300)
    # connect signals to the UI elements
    self.setSignals()
    # show the selected object name in the SelectedName field
    self.uiFIle.lineEdit_SelObj.setText(self.targetObj.name())
    # use Maya's built-in paintVertexColour tool icon
    self.uiFIle.btn_PaintTool.setIcon(QIcon(':/paintVertexColour.png'))
    # make the window frameless
    self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
    # round the window corners
    path = QPainterPath()
    path.addRoundedRect(self.rect(), 10, 10)
    region = QRegion(path.toFillPolygon().toPolygon())
    self.setMask(region)
#==============================================================================================
# このウィンドウが閉じたときの処理
def closeEvent(self, event):
    """Remove temp colorSets, jobs and restore display settings when the window closes."""
    # other objects may be selected; remember that selection and select
    # the target object while cleaning up
    selList = mc.ls(sl=True)
    mc.select(self.targetObj.fullPathName(), replace=True)
    # deleting the window instance also removes all registered scriptJobs
    self.deleteInstances()
    # remove the name-changed callback
    if self.callbackID_nameChanged:
        om2.MNodeMessage.removeCallback(self.callbackID_nameChanged)
        self.callbackID_nameChanged = None
    # all colorSets currently on the target mesh
    allColorSetList = self.targetObjMesh.getColorSetNames()
    # delete the temp colorSets
    if "tmpColorSet_R" in allColorSetList:
        mc.polyColorSet(delete=True, colorSet="tmpColorSet_R")
    if "tmpColorSet_G" in allColorSetList:
        mc.polyColorSet(delete=True, colorSet="tmpColorSet_G")
    if "tmpColorSet_B" in allColorSetList:
        mc.polyColorSet(delete=True, colorSet="tmpColorSet_B")
    if "tmpColorSet_A" in allColorSetList:
        mc.polyColorSet(delete=True, colorSet="tmpColorSet_A")
    # restore displayColors
    mc.setAttr("%s.displayColors"%self.targetObjMesh.fullPathName(), self.attrDispColor)
    # restore colorMaterialChannel and materialBlend
    mc.polyOptions(colorMaterialChannel=self.pOption_matChl, gl=False)
    mc.polyOptions(materialBlend=self.pOption_matBld, gl=False)
    # finally clean up construction history
    # NOTE(review): historyDelete is defined elsewhere in this module
    historyDelete(self.targetObj.fullPathName(), False)
    # restore the previous selection
    mc.select(selList, replace=True)
#==============================================================================================
# フレームレスのウィンドウを動かすためにmouseEventを使用
def mouseReleaseEvent(self, event):
    # stop dragging the frameless window
    self.isDragging = False
    self.mouseCursorPos = event.pos()
def mousePressEvent(self, event):
    # begin dragging; remember where inside the window the press happened
    self.isDragging = True
    self.mouseCursorPos = event.pos()
def mouseMoveEvent(self, event):
    # move the frameless window while dragging, keeping the grab offset
    if self.isDragging == True:
        self.move(event.globalPos() - self.mouseCursorPos)
#==============================================================================================
# 別のシーンが開かれたときに自動でこのウィンドウを閉じる
def otherSceneOpenedJob(self):
    """Register a scriptJob that closes this window when another scene is opened."""
    self.jobNum_otherSceneOpened = mc.scriptJob(
        event=["SceneOpened", self.close],
        parent="kkDisplayVertexColorSeparatelyWindow")
#==============================================================================================
# ターゲットの名前が変更されたとき、表示名も変更を反映する
def targetObjNameChangedCallback(self, node, previous, *args):
    """MNodeMessage callback: reflect a rename of the target object in the UI."""
    dagNode = om2.MFnDagNode(node)
    self.uiFIle.lineEdit_SelObj.setText(dagNode.name())
    print("Target Name Changed : %s >> %s"%(previous, dagNode.name()))
#==============================================================================================
# シグナルの設定
def setSignals(self):
    """Connect UI signals; disable channel buttons the colorSet cannot display."""
    # disable the RGB buttons unless the representation is RGB or RGBA
    if self.baseColorSerRep == "RGB" or self.baseColorSerRep == "RGBA":
        self.uiFIle.btn_R.toggled.connect(self.vtxR_Toggle)
        self.uiFIle.btn_G.toggled.connect(self.vtxG_Toggle)
        self.uiFIle.btn_B.toggled.connect(self.vtxB_Toggle)
    else:
        self.uiFIle.btn_R.setEnabled(False)
        self.uiFIle.btn_G.setEnabled(False)
        self.uiFIle.btn_B.setEnabled(False)
    # disable the A button unless the representation is RGBA or A
    if self.baseColorSerRep == "RGBA" or self.baseColorSerRep == "A":
        self.uiFIle.btn_A.toggled.connect(self.vtxA_Toggle)
    else:
        self.uiFIle.btn_A.setEnabled(False)
    self.uiFIle.btn_Revert.clicked.connect(self.revert)
    self.uiFIle.btn_PaintTool.clicked.connect(self.selectPaintTool)
    self.uiFIle.btn_Close.clicked.connect(self.close)
#==============================================================================================
# Rのボタンがクリックされたときの処理を設定
def vtxR_Toggle(self, checked):
    """Toggle handler for the R button: display only the red channel."""
    if checked:
        self.uiFIle.btn_R.setGeometry(self.btn_R_checkOnRect)
        # channel buttons are exclusive: uncheck the other three
        self.uiFIle.btn_G.setChecked(False)
        self.uiFIle.btn_G.setGeometry(self.btn_G_checkOffRect)
        self.uiFIle.btn_B.setChecked(False)
        self.uiFIle.btn_B.setGeometry(self.btn_B_checkOffRect)
        self.uiFIle.btn_A.setChecked(False)
        self.uiFIle.btn_A.setGeometry(self.btn_A_checkOffRect)
        if self.hasIntermediateObject == True:
            # if tmpColorSet_R_Node is gone, rebuild it via getBaseVertexColorData
            if len(mc.ls("tmpColorSet_R_Node")) == 0:
                self.getBaseVertexColorData()
            self.jobNum_attributeChange_R = mc.scriptJob(
                attributeChange=["tmpColorSet_R_Node.vertexColor", self.vtxColSep_R],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_R = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_R],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_R")
    else:
        # kill the pending attributeChange job for this channel
        if self.jobNum_attributeChange_R > 0:
            mc.scriptJob(kill=self.jobNum_attributeChange_R, force=True)
            self.jobNum_attributeChange_R = 0
        self.uiFIle.btn_R.setChecked(False)
        self.uiFIle.btn_R.setGeometry(self.btn_R_checkOffRect)
        # when all of RGBA are off, switch back to the base colorSet
        if self.uiFIle.btn_R.isChecked() == False and self.uiFIle.btn_G.isChecked() == False and\
           self.uiFIle.btn_B.isChecked() == False and self.uiFIle.btn_A.isChecked() == False:
            self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
#==============================================================================================
# Gのボタンがクリックされたときの処理を設定
def vtxG_Toggle(self, checked):
    """Toggle handler for the G button: display only the green channel."""
    if checked:
        self.uiFIle.btn_G.setGeometry(self.btn_G_checkOnRect)
        # channel buttons are exclusive: uncheck the other three
        self.uiFIle.btn_R.setChecked(False)
        self.uiFIle.btn_R.setGeometry(self.btn_R_checkOffRect)
        self.uiFIle.btn_B.setChecked(False)
        self.uiFIle.btn_B.setGeometry(self.btn_B_checkOffRect)
        self.uiFIle.btn_A.setChecked(False)
        self.uiFIle.btn_A.setGeometry(self.btn_A_checkOffRect)
        if self.hasIntermediateObject == True:
            # if tmpColorSet_G_Node is gone, rebuild it via getBaseVertexColorData
            if len(mc.ls("tmpColorSet_G_Node")) == 0:
                self.getBaseVertexColorData()
            self.jobNum_attributeChange_G = mc.scriptJob(
                attributeChange=["tmpColorSet_G_Node.vertexColor", self.vtxColSep_G],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_G = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_G],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_G")
    else:
        # kill the pending attributeChange job for this channel
        if self.jobNum_attributeChange_G > 0:
            mc.scriptJob(kill=self.jobNum_attributeChange_G, force=True)
            self.jobNum_attributeChange_G = 0
        self.uiFIle.btn_G.setChecked(False)
        self.uiFIle.btn_G.setGeometry(self.btn_G_checkOffRect)
        # when all of RGBA are off, switch back to the base colorSet
        if self.uiFIle.btn_R.isChecked() == False and self.uiFIle.btn_G.isChecked() == False and\
           self.uiFIle.btn_B.isChecked() == False and self.uiFIle.btn_A.isChecked() == False:
            self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
#==============================================================================================
# Bのボタンがクリックされたときの処理を設定
def vtxB_Toggle(self, checked):
    """Toggle handler for the B button: display only the blue channel."""
    if checked:
        self.uiFIle.btn_B.setGeometry(self.btn_B_checkOnRect)
        # channel buttons are exclusive: uncheck the other three
        self.uiFIle.btn_R.setChecked(False)
        self.uiFIle.btn_R.setGeometry(self.btn_R_checkOffRect)
        self.uiFIle.btn_G.setChecked(False)
        self.uiFIle.btn_G.setGeometry(self.btn_G_checkOffRect)
        self.uiFIle.btn_A.setChecked(False)
        self.uiFIle.btn_A.setGeometry(self.btn_A_checkOffRect)
        if self.hasIntermediateObject == True:
            # if tmpColorSet_B_Node is gone, rebuild it via getBaseVertexColorData
            if len(mc.ls("tmpColorSet_B_Node")) == 0:
                self.getBaseVertexColorData()
            self.jobNum_attributeChange_B = mc.scriptJob(
                attributeChange=["tmpColorSet_B_Node.vertexColor", self.vtxColSep_B],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_B = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_B],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_B")
    else:
        # kill the pending attributeChange job for this channel
        if self.jobNum_attributeChange_B > 0:
            mc.scriptJob(kill=self.jobNum_attributeChange_B, force=True)
            self.jobNum_attributeChange_B = 0
        self.uiFIle.btn_B.setChecked(False)
        self.uiFIle.btn_B.setGeometry(self.btn_B_checkOffRect)
        # when all of RGBA are off, switch back to the base colorSet
        if self.uiFIle.btn_R.isChecked() == False and self.uiFIle.btn_G.isChecked() == False and\
           self.uiFIle.btn_B.isChecked() == False and self.uiFIle.btn_A.isChecked() == False:
            self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
#==============================================================================================
# Aのボタンがクリックされたときの処理を設定
def vtxA_Toggle(self, checked):
    """Toggle handler for the A button: display only the alpha channel."""
    if checked:
        self.uiFIle.btn_A.setGeometry(self.btn_A_checkOnRect)
        # channel buttons are exclusive: uncheck the other three
        self.uiFIle.btn_R.setChecked(False)
        self.uiFIle.btn_R.setGeometry(self.btn_R_checkOffRect)
        self.uiFIle.btn_G.setChecked(False)
        self.uiFIle.btn_G.setGeometry(self.btn_G_checkOffRect)
        self.uiFIle.btn_B.setChecked(False)
        self.uiFIle.btn_B.setGeometry(self.btn_B_checkOffRect)
        if self.hasIntermediateObject == True:
            # if tmpColorSet_A_Node is gone, rebuild it via getBaseVertexColorData
            if len(mc.ls("tmpColorSet_A_Node")) == 0:
                self.getBaseVertexColorData()
            self.jobNum_attributeChange_A = mc.scriptJob(
                attributeChange=["tmpColorSet_A_Node.vertexColor", self.vtxColSep_A],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_A = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_A],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_A")
    else:
        # kill the pending attributeChange job for this channel
        if self.jobNum_attributeChange_A > 0:
            mc.scriptJob(kill=self.jobNum_attributeChange_A, force=True)
            self.jobNum_attributeChange_A = 0
        self.uiFIle.btn_A.setChecked(False)
        # was a hard-coded (10, 205, 180, 40); use the shared off-rect for
        # consistency with the R/G/B toggle handlers (same values)
        self.uiFIle.btn_A.setGeometry(self.btn_A_checkOffRect)
        # when all of RGBA are off, switch back to the base colorSet
        if self.uiFIle.btn_R.isChecked() == False and self.uiFIle.btn_G.isChecked() == False and\
           self.uiFIle.btn_B.isChecked() == False and self.uiFIle.btn_A.isChecked() == False:
            self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
#==============================================================================================
# revertのボタンがクリックされたときの処理を設定
def revert(self):
    """Restore the base colorSet to the colors captured at tool startup."""
    vtxCount = self.targetObjMesh.numVertices
    # refresh the cached index list if the vertex count changed meanwhile
    if not self.targetObjVtxCount == vtxCount:
        # was xrange (Python-2 only); range iterates identically
        self.targetObjVtxIdxList = range(vtxCount)
    self.targetObjMesh.setVertexColors(self.baseColorBeforeEdit, self.targetObjVtxIdxList)
    self.getBaseVertexColorData()
#==============================================================================================
# paintToolのボタンがクリックされたときの処理を設定
def selectPaintTool(self):
    """Activate Maya's built-in Paint Vertex Color tool."""
    mel.eval("PaintVertexColorTool;")
#==============================================================================================
# tmpColorSet_RのvertexColorのattributeChangeによるscriptJobの処理を設定
@openCloseChunk
def vtxColSep_R(self):
    """scriptJob handler: propagate paint edits on tmpColorSet_R into the base colorSet."""
    if self.uiFIle.btn_R.isChecked() == True:
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_R")
        vtxColors_tmpColorSet_R = self.targetObjMesh.getVertexColors("tmpColorSet_R")
        baseVtxColors_Edit_R = self.targetObjMesh.getVertexColors(self.baseColorSet)
        vtxCount = self.targetObjMesh.numVertices
        # refresh the cached index list if the vertex count changed
        if not self.targetObjVtxCount == vtxCount:
            self.targetObjVtxIdxList = range(vtxCount)  # was xrange (Py2-only)
        for x in range(vtxCount):  # was xrange; also dropped a no-op r = r assignment
            # show the red channel as grayscale on the temp set
            vtxColors_tmpColorSet_R[x].g = vtxColors_tmpColorSet_R[x].r
            vtxColors_tmpColorSet_R[x].b = vtxColors_tmpColorSet_R[x].r
            # copy the edited red value back into the base colors
            baseVtxColors_Edit_R[x].r = vtxColors_tmpColorSet_R[x].r
        self.targetObjMesh.setVertexColors(vtxColors_tmpColorSet_R, self.targetObjVtxIdxList)
        # switch to the base colorSet and write the merged colors
        self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
        self.targetObjMesh.setVertexColors(baseVtxColors_Edit_R, self.targetObjVtxIdxList)
        # switch back to the temp colorSet
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_R")
        # re-register this runOnce job so the next attribute change fires again
        if self.hasIntermediateObject == True:
            self.jobNum_attributeChange_R = mc.scriptJob(
                attributeChange=["tmpColorSet_R_Node.vertexColor", self.vtxColSep_R],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_R = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_R],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
#==============================================================================================
# tmpColorSet_GのvertexColorのattributeChangeによるscriptJobの処理を設定
@openCloseChunk
def vtxColSep_G(self):
    """scriptJob handler: propagate paint edits on tmpColorSet_G into the base colorSet."""
    if self.uiFIle.btn_G.isChecked() == True:
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_G")
        vtxColors_tmpColorSet_G = self.targetObjMesh.getVertexColors("tmpColorSet_G")
        baseVtxColors_Edit_G = self.targetObjMesh.getVertexColors(self.baseColorSet)
        vtxCount = self.targetObjMesh.numVertices
        # refresh the cached index list if the vertex count changed
        if not self.targetObjVtxCount == vtxCount:
            self.targetObjVtxIdxList = range(vtxCount)  # was xrange (Py2-only)
        for x in range(vtxCount):  # was xrange; also dropped a no-op r = r assignment
            # show the edited channel (painted into .r) as grayscale on the temp set
            vtxColors_tmpColorSet_G[x].g = vtxColors_tmpColorSet_G[x].r
            vtxColors_tmpColorSet_G[x].b = vtxColors_tmpColorSet_G[x].r
            # copy the edited green value back into the base colors
            baseVtxColors_Edit_G[x].g = vtxColors_tmpColorSet_G[x].r
        self.targetObjMesh.setVertexColors(vtxColors_tmpColorSet_G, self.targetObjVtxIdxList)
        # switch to the base colorSet and write the merged colors
        self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
        self.targetObjMesh.setVertexColors(baseVtxColors_Edit_G, self.targetObjVtxIdxList)
        # switch back to the temp colorSet
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_G")
        # re-register this runOnce job so the next attribute change fires again
        if self.hasIntermediateObject == True:
            self.jobNum_attributeChange_G = mc.scriptJob(
                attributeChange=["tmpColorSet_G_Node.vertexColor", self.vtxColSep_G],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_G = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_G],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
#==============================================================================================
# tmpColorSet_BのvertexColorのattributeChangeによるscriptJobの処理を設定
@openCloseChunk
def vtxColSep_B(self):
    """scriptJob handler: propagate paint edits on tmpColorSet_B into the base colorSet."""
    if self.uiFIle.btn_B.isChecked() == True:
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_B")
        vtxColors_tmpColorSet_B = self.targetObjMesh.getVertexColors("tmpColorSet_B")
        baseVtxColors_Edit_B = self.targetObjMesh.getVertexColors(self.baseColorSet)
        vtxCount = self.targetObjMesh.numVertices
        # refresh the cached index list if the vertex count changed
        if not self.targetObjVtxCount == vtxCount:
            self.targetObjVtxIdxList = range(vtxCount)  # was xrange (Py2-only)
        for x in range(vtxCount):  # was xrange; also dropped a no-op r = r assignment
            # show the edited channel (painted into .r) as grayscale on the temp set
            vtxColors_tmpColorSet_B[x].g = vtxColors_tmpColorSet_B[x].r
            vtxColors_tmpColorSet_B[x].b = vtxColors_tmpColorSet_B[x].r
            # copy the edited blue value back into the base colors
            baseVtxColors_Edit_B[x].b = vtxColors_tmpColorSet_B[x].r
        self.targetObjMesh.setVertexColors(vtxColors_tmpColorSet_B, self.targetObjVtxIdxList)
        # switch to the base colorSet and write the merged colors
        self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
        self.targetObjMesh.setVertexColors(baseVtxColors_Edit_B, self.targetObjVtxIdxList)
        # switch back to the temp colorSet
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_B")
        # re-register this runOnce job so the next attribute change fires again
        if self.hasIntermediateObject == True:
            self.jobNum_attributeChange_B = mc.scriptJob(
                attributeChange=["tmpColorSet_B_Node.vertexColor", self.vtxColSep_B],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_B = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_B],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
#==============================================================================================
# tmpColorSet_AのvertexColorのattributeChangeによるscriptJobの処理を設定
@openCloseChunk
def vtxColSep_A(self):
    """scriptJob handler: propagate paint edits on tmpColorSet_A into the base colorSet."""
    if self.uiFIle.btn_A.isChecked() == True:
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_A")
        vtxColors_tmpColorSet_A = self.targetObjMesh.getVertexColors("tmpColorSet_A")
        baseVtxColors_Edit_A = self.targetObjMesh.getVertexColors(self.baseColorSet)
        vtxCount = self.targetObjMesh.numVertices
        # refresh the cached index list if the vertex count changed
        if not self.targetObjVtxCount == vtxCount:
            self.targetObjVtxIdxList = range(vtxCount)  # was xrange (Py2-only)
        for x in range(vtxCount):  # was xrange; also dropped a no-op r = r assignment
            # show the edited channel (painted into .r) as grayscale on the temp set
            vtxColors_tmpColorSet_A[x].g = vtxColors_tmpColorSet_A[x].r
            vtxColors_tmpColorSet_A[x].b = vtxColors_tmpColorSet_A[x].r
            # copy the edited alpha value back into the base colors
            # (original comment said "B" -- copy-paste leftover; the code writes .a)
            baseVtxColors_Edit_A[x].a = vtxColors_tmpColorSet_A[x].r
        self.targetObjMesh.setVertexColors(vtxColors_tmpColorSet_A, self.targetObjVtxIdxList)
        # switch to the base colorSet and write the merged colors
        self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
        self.targetObjMesh.setVertexColors(baseVtxColors_Edit_A, self.targetObjVtxIdxList)
        # switch back to the temp colorSet
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_A")
        # re-register this runOnce job so the next attribute change fires again
        if self.hasIntermediateObject == True:
            self.jobNum_attributeChange_A = mc.scriptJob(
                attributeChange=["tmpColorSet_A_Node.vertexColor", self.vtxColSep_A],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_A = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColSep_A],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
#==============================================================================================
# ベースのvertexColorのattributeChangeによるscriptJobの処理を設定
@openCloseChunk
def vtxColBase(self):
    """scriptJob handler for edits made while the base colorSet is displayed."""
    # when all of RGBA are off, rebuild the temp colorSets from the base colors
    if self.uiFIle.btn_R.isChecked() == False and self.uiFIle.btn_G.isChecked() == False and\
       self.uiFIle.btn_B.isChecked() == False and self.uiFIle.btn_A.isChecked() == False:
        self.getBaseVertexColorData()
        # re-register this runOnce job so the next attribute change fires again
        if self.hasIntermediateObject == True:
            self.jobNum_attributeChange_Base = mc.scriptJob(
                attributeChange=["tmpColorSet_Base_Node.vertexColor", self.vtxColBase],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
        else:
            self.jobNum_attributeChange_Base = mc.scriptJob(
                attributeChange=["%s.colorSet"%self.targetObjMesh.fullPathName(), self.vtxColBase],
                allChildren=True,
                parent="kkDisplayVertexColorSeparatelyWindow",
                compressUndo=True,
                runOnce=True)
#==============================================================================================
# colorSetの存在をチェックして、なかったら生成する
@openCloseChunk
def checkColorSet(self):
    """Create the per-channel temp colorSets that do not exist yet."""
    allColorSetList = self.targetObjMesh.getColorSetNames()
    # create tmpColorSet_R/G/B only when the base representation carries RGB
    if self.baseColorSerRep == "RGB" or self.baseColorSerRep == "RGBA":
        if not "tmpColorSet_R" in allColorSetList:
            mc.polyColorSet(create=True, colorSet="tmpColorSet_R", clamped=True, representation="RGB")
        if not "tmpColorSet_G" in allColorSetList:
            mc.polyColorSet(create=True, colorSet="tmpColorSet_G", clamped=True, representation="RGB")
        if not "tmpColorSet_B" in allColorSetList:
            mc.polyColorSet(create=True, colorSet="tmpColorSet_B", clamped=True, representation="RGB")
    # alpha is also visualized through an RGB (grayscale) temp set
    if self.baseColorSerRep == "RGBA" or self.baseColorSerRep == "A":
        if not "tmpColorSet_A" in allColorSetList:
            mc.polyColorSet(create=True, colorSet="tmpColorSet_A", clamped=True, representation="RGB")
#==============================================================================================
# Set up scriptJobs that fire when a tmpColorSet node is deleted (e.g. by delete-history)
def setDeleteNodeJobs(self):
    """Register nodeDeleted scriptJobs so the tmp color sets are rebuilt if lost."""
    rep = self.baseColorSerRep
    channels = []
    if rep == "RGB" or rep == "RGBA":
        channels += ["R", "G", "B"]
    if rep == "RGBA" or rep == "A":
        channels += ["A"]
    # one watcher per channel; each callback rebuilds the tmp color sets
    for channel in channels:
        job_id = mc.scriptJob(
            nodeDeleted=["tmpColorSet_%s_Node" % channel,
                         getattr(self, "deletedNode_%s" % channel)],
            parent="kkDisplayVertexColorSeparatelyWindow",
            compressUndo=True)
        setattr(self, "jobNum_nodeDeleted_%s" % channel, job_id)
#==============================================================================================
# Fetch the base color set's vertex colors and build the per-channel tmp color sets
@openCloseChunk
def getBaseVertexColorData(self):
    """Split the base color set into grayscale per-channel tmp color sets."""
    # The selection is a mesh (not vertices), so build the index list from
    # MFnMesh.numVertices instead of component.getElements()
    vtxCount = self.targetObjMesh.numVertices
    if not self.targetObjVtxCount == vtxCount:
        # NOTE(review): xrange is Python-2 only; this function mixes range and
        # xrange -- confirm the target Maya version still runs Python 2
        self.targetObjVtxIdxList = xrange(vtxCount)
    baseVtxColors = self.targetObjMesh.getVertexColors(self.baseColorSet)
    # Only process RGB when the base color set representation is RGB or RGBA
    if self.baseColorSerRep == "RGB" or self.baseColorSerRep == "RGBA":
        # make one copy of the base MColorArray per channel
        # NOTE(review): slicing copies the container; whether the contained
        # MColor elements are also copied depends on the API version -- verify
        # the per-channel writes below do not alias each other
        baseVtxColors_R = baseVtxColors[:]
        baseVtxColors_G = baseVtxColors[:]
        baseVtxColors_B = baseVtxColors[:]
        # write the base set's red channel into tmpColorSet_R as grayscale
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_R")
        for x in range(vtxCount):
            baseVtxColors_R[x].r = baseVtxColors_R[x].r
            baseVtxColors_R[x].g = baseVtxColors_R[x].r
            baseVtxColors_R[x].b = baseVtxColors_R[x].r
        self.targetObjMesh.setVertexColors(baseVtxColors_R, self.targetObjVtxIdxList)
        # write the base set's green channel into tmpColorSet_G as grayscale
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_G")
        for y in xrange(vtxCount):
            baseVtxColors_G[y].r = baseVtxColors_G[y].g
            baseVtxColors_G[y].g = baseVtxColors_G[y].g
            baseVtxColors_G[y].b = baseVtxColors_G[y].g
        self.targetObjMesh.setVertexColors(baseVtxColors_G, self.targetObjVtxIdxList)
        # write the base set's blue channel into tmpColorSet_B as grayscale
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_B")
        for z in range(vtxCount):
            baseVtxColors_B[z].r = baseVtxColors_B[z].b
            baseVtxColors_B[z].g = baseVtxColors_B[z].b
            baseVtxColors_B[z].b = baseVtxColors_B[z].b
        self.targetObjMesh.setVertexColors(baseVtxColors_B, self.targetObjVtxIdxList)
    # Only process alpha when the representation is RGBA or A
    if self.baseColorSerRep == "RGBA" or self.baseColorSerRep == "A":
        # copy of the base MColorArray for the alpha channel
        baseVtxColors_A = baseVtxColors[:]
        # write the base set's alpha channel into tmpColorSet_A as grayscale
        self.targetObjMesh.setCurrentColorSetName("tmpColorSet_A")
        for w in range(vtxCount):
            baseVtxColors_A[w].r = baseVtxColors_A[w].a
            baseVtxColors_A[w].g = baseVtxColors_A[w].a
            baseVtxColors_A[w].b = baseVtxColors_A[w].a
        self.targetObjMesh.setVertexColors(baseVtxColors_A, self.targetObjVtxIdxList)
    # restore the base color set as the current one
    self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
    if self.hasIntermediateObject == True:
        # if tmpColorSet_Base_Node is missing, (re)create a polyColorPerVertex
        # node used to detect changes to the base colors
        if len(mc.ls("tmpColorSet_Base_Node", type="polyColorPerVertex")) == 0:
            self.targetObjMesh.setCurrentColorSetName(self.baseColorSet)
            self.targetObjMesh.setVertexColors(baseVtxColors, self.targetObjVtxIdxList)
            polyColorVertexNodeList = mc.ls(type="polyColorPerVertex")
            # rename the freshly created nodes so they can be tracked by name
            for polyColorVertexNode in polyColorVertexNodeList:
                colorSetName = mc.getAttr("%s.colorSetName"%polyColorVertexNode)
                if "tmpColorSet_R" in colorSetName:
                    mc.rename(polyColorVertexNode, "tmpColorSet_R_Node")
                elif "tmpColorSet_G" in colorSetName:
                    mc.rename(polyColorVertexNode, "tmpColorSet_G_Node")
                elif "tmpColorSet_B" in colorSetName:
                    mc.rename(polyColorVertexNode, "tmpColorSet_B_Node")
                elif "tmpColorSet_A" in colorSetName:
                    mc.rename(polyColorVertexNode, "tmpColorSet_A_Node")
                elif self.baseColorSet in colorSetName:
                    mc.rename(polyColorVertexNode, "tmpColorSet_Base_Node")
#==============================================================================================
# Rebuild the tmp color sets whenever one of their backing nodes is deleted
def deletedNode_R(self):
    # nodeDeleted callback for tmpColorSet_R_Node
    self.getBaseVertexColorData()
def deletedNode_G(self):
    # nodeDeleted callback for tmpColorSet_G_Node
    self.getBaseVertexColorData()
def deletedNode_B(self):
    # nodeDeleted callback for tmpColorSet_B_Node
    self.getBaseVertexColorData()
def deletedNode_A(self):
    # nodeDeleted callback for tmpColorSet_A_Node
    self.getBaseVertexColorData()
#==============================================================================================
# Close any already-open instance of this window
def deleteInstances(self):
    """Delete existing kkDisplayVertexColorSeparatelyWindow instances, if any."""
    window_name = "kkDisplayVertexColorSeparatelyWindow"
    for child in getMayaWindow().children():
        if child.objectName() != window_name:
            continue
        # detach from the Maya main window, then let Qt dispose of it
        child.setParent(None)
        child.deleteLater()
#----------------------------------------------------------------------------------------------------------------------
# When deformers are attached, vertex-color edits made in the component editor
# are not applied properly (and the scriptJobs do not fire) unless history is
# deleted first -- so ask the user and delete non-deformer history up front.
def historyDelete(targetObj, isStart):
    """Confirm with the user and delete non-deformer history on targetObj.

    Args:
        targetObj: name of the transform whose history is cleaned.
        isStart: when truthy, show the confirmation dialog and clean history.

    Returns:
        False when the user cancels; True otherwise.
    """
    if isStart == True:
        dialogMessage = ""
        lang = mc.about(uiLanguage=True)
        # localize the confirmation message (Japanese UI vs. everything else)
        if lang == "ja_JP":
            dialogMessage = "実行前に「デフォーマ以外のヒストリ」削除を行いますがよろしいですか?"
        else:
            dialogMessage = 'Do you delete "Non-Deformer History"\nfor the selected object before execution?'
        # confirmation dialog before touching the object's history
        selDialog = mc.confirmDialog(
            title='kkDisplayVertexColorSeparately_Check',
            message=dialogMessage,
            button=['Yes','No'],
            defaultButton='Yes',
            cancelButton='No',
            dismissString='No')
        if selDialog == "No":
            # fixed: a stray trailing comma here used to build a pointless tuple
            mc.warning("kkDisplayVertexColorSeparately is Canceled.")
            return False
        # delete non-deformer history only
        mc.bakePartialHistory(targetObj, prePostDeformers=True)
    return True
#----------------------------------------------------------------------------------------------------------------------
def getMayaWindow():
    """Return Maya's main window wrapped as a QMainWindow."""
    mainWinPtr = omUI.MQtUtil.mainWindow()
    # fixed: long() exists only on Python 2; int() accepts 64-bit pointer
    # values on both Python 2 and Python 3 (Maya 2022+)
    return wrapInstance(int(mainWinPtr), QMainWindow)
#----------------------------------------------------------------------------------------------------------------------
def main():
    """Entry point: validate the selection, clean history, and show the tool window."""
    selList = mc.ls(sl=True, type="transform")
    if len(selList) == 0:
        mc.warning("No Select...")
        return
    selMeshList = mc.listRelatives(selList[0], shapes=True, type="mesh")
    # fixed: listRelatives returns None (not []) when there are no shapes,
    # so len(selMeshList) could raise TypeError; use a truthiness check
    if not selMeshList:
        mc.warning("Mesh is not selected...")
        return
    # narrow the selection down to a single object
    mc.select(selList[0], replace=True)
    isHistoryDeleted = historyDelete(selList[0], True)
    if isHistoryDeleted == True:
        app = QApplication.instance()
        dispVtxColSepWindow = kkDisplayVertexColorSeparatelyWindow()
        dispVtxColSepWindow.show()
        # fixed: the original called sys.exit() *before* app.exec_(), so the
        # Qt event loop line was unreachable; exit with the loop's return code
        sys.exit(app.exec_())
if __name__ == '__main__':
    # launched directly (e.g. from Maya's script editor / shelf button)
    main()
|
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* that contains
        no repeated characters (classic sliding-window algorithm).

        The previous implementation only detected *adjacent* duplicates and
        never counted the final segment, so e.g. "abc" returned 0 and
        "abab" returned 0 instead of 2.
        """
        last_seen = {}  # character -> index of its most recent occurrence
        start = 0       # left edge of the current duplicate-free window
        longest = 0
        for index, ch in enumerate(s):
            # if ch already occurs inside the window, slide the window start
            # past its previous occurrence
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = index
            longest = max(longest, index - start + 1)
        return longest
if __name__ == "__main__":
    # quick self-test against a known sample input
    s = "abrkaabcdefghijjxxx"
    length = Solution().lengthOfLongestSubstring(s)
    assert length == 10, "%s is not 10" % length
    print("Passed Test")
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmarks for QAOA optimizations.
"""
import pennylane as qml
from pennylane import qaoa
from .default_settings import _qaoa_defaults
def benchmark_qaoa(hyperparams=None):
    """
    Performs QAOA optimizations.
    Args:
        hyperparams (dict): hyperparameters to configure this benchmark
            * 'graph': Graph represented as a NetworkX Graph class
            * 'n_layers': Number of layers in the QAOA circuit
            * 'params': Numpy array of trainable parameters that is fed into the circuit
            * 'device': Device on which the circuit is run
            * 'interface': Name of the interface to use
            * 'diff_method': Name of differentiation method
    """
    # fixed: a mutable default argument ({}) is shared across calls; use None
    # and substitute a fresh dict instead (callers passing nothing are unaffected)
    if hyperparams is None:
        hyperparams = {}
    graph, n_layers, params, device, options_dict = _qaoa_defaults(hyperparams)
    # min-vertex-cover cost/mixer Hamiltonians for the benchmark graph
    H_cost, H_mixer = qaoa.min_vertex_cover(graph, constrained=False)
    n_wires = len(graph.nodes)
    def qaoa_layer(gamma, alpha):
        # one QAOA block: cost evolution followed by mixer evolution
        qaoa.cost_layer(gamma, H_cost)
        qaoa.mixer_layer(alpha, H_mixer)
    @qml.qnode(device)
    def circuit(params):
        # uniform superposition, then n_layers QAOA blocks
        for w in range(n_wires):
            qml.Hadamard(wires=w)
        qml.layer(qaoa_layer, n_layers, params[0], params[1])
        return [qml.sample(qml.PauliZ(i)) for i in range(n_wires)]
    circuit(params)
|
from flask import Blueprint
# blueprint that the view functions in views.py attach their routes to
username = Blueprint('username', __name__)
# imported last on purpose: views imports `username`, so importing it earlier
# would create a circular import
from . import views
|
#!/usr/bin/python3
import sys, os, json
# hook arguments supplied by Proxmox: VM id and lifecycle phase
vmid = sys.argv[1]
phase = sys.argv[2]
hostname = os.uname()[1]
# the config file lives next to this script
conf_file = sys.path[0]+'/'+'change-lan.conf'
if hostname.startswith("pv"):
    # production hosts are only reported on; the early exit is deliberately disabled
    print('State: ' + phase + ". Prod - exit.")
    # sys.exit()
def get_conf(file):
    """Read the snippet config JSON and return its 'conf' list.

    Exits the process with a message when the file is missing or does not
    contain valid JSON.
    """
    # guard clause instead of the original if/else pyramid
    if not os.path.isfile(file):
        sys.exit('Conf file ' + file + ' not found')
    # local handle no longer shadows the module-level `conf_file` variable;
    # the with-block closes the file, so no explicit close() is needed
    with open(file, 'r') as handle:
        try:
            data = json.load(handle)
        except json.decoder.JSONDecodeError:
            sys.exit('Not valid json in file ' + file)
    return data['conf']
def find_conf(configs, vmid, hostname):
    """Return the config entry matching *hostname* and *vmid*.

    The last matching entry wins when several match. Exits the process when
    no entry matches.
    """
    # fixed: the original used [] as the "not found" sentinel and then
    # rebound it to a dict, so an empty matching dict would wrongly exit;
    # use None as an unambiguous sentinel instead
    result = None
    for config in configs:
        if config['Hostname'] == hostname and str(config['VMID']) == vmid:
            result = config
    if result is None:
        sys.exit('Not found config for ' + hostname + ':' + vmid)
    return result
def chek_mac(new_conf, vmid, hostname):
    """Verify that both configured MAC addresses appear in the VM's net config.

    Queries the VM config through pvesh and exits the process when either
    MAC_UP or MAC_DOWN is missing from the netX entries.
    """
    cmd = 'pvesh get /nodes/' + hostname + '/qemu/' + vmid + '/config -output-format json'
    vm_conf = json.load(os.popen(cmd))
    # concatenate every netX setting into one searchable string
    net_settings = ''
    for key, value in vm_conf.items():
        if 'net' in key:
            net_settings += value
    # direct negative test instead of the original pass/else inversion
    if not (new_conf['MAC_UP'] in net_settings and new_conf['MAC_DOWN'] in net_settings):
        sys.exit('Does not match MAC address')
def get_bash_conf(new_conf):
    """Build the two sed commands that toggle link_down on the VM's NICs.

    The first command clears link_down on the interface with MAC_UP; the
    second sets link_down=1 on the interface with MAC_DOWN (unless already set).
    """
    conf_path = '/etc/pve/local/qemu-server/' + str(new_conf['VMID']) + '.conf'
    up_cmd = ('sed -i -r "s/(net.: virtio=)(' + new_conf['MAC_UP']
              + ')(.*)(,link_down=1)/\\1\\2\\3/" ' + conf_path)
    down_cmd = ('sed -i -r "/,link_down=1/! s/(net.: virtio=)(' + new_conf['MAC_DOWN']
                + ')(.*)/\\1\\2\\3,link_down=1/" ' + conf_path)
    return [up_cmd, down_cmd]
if phase == 'pre-start':
    print('Snippet started: pre-start')
    configs = get_conf(conf_file)
    new_conf = find_conf(configs, vmid, hostname)
    chek_mac(new_conf, vmid, hostname)
    cmd_list = get_bash_conf(new_conf)
    for cmd in cmd_list:
        print('Command to execute: ' + cmd)
        status = os.system(cmd)
        if status != 0:
            # fixed: the original concatenated a str with the int `status`,
            # which raised TypeError on the error path itself
            sys.exit('The command completed with an error: ' + str(status))
    print('Snippet work done')
from collections import defaultdict, deque
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter
import sys
import math
import time
import heapq
def freq_dict(l):
    """Return a defaultdict mapping each element of *l* to its occurrence count."""
    counts = defaultdict(int)
    for item in l:
        counts[item] += 1
    return counts
def most_frequent(d):
    """Return the key of *d* with the highest count (first key wins on ties)."""
    return max(d, key=d.get)
def choose_attribute(x, is_bool):
    """Pick the attribute (and split value) that minimizes weighted entropy.

    Parameters
    ----------
    x : 2-D numpy array whose last column is the class label.
    is_bool : list of booleans, one per feature column; boolean features split
        at the fixed value 0.5, continuous ones at their median.

    Returns
    -------
    (j_best, split_value, left, right) where left/right are the row subsets
    with x[:, j_best] <= / > split_value. Returns (-1, None, None, None) when
    no usable split exists (every candidate column is constant or its median
    equals its maximum, which would leave one side empty).
    """
    j_best, split_value, min_entropy = -1, -1, math.inf
    y = x[:, -1]
    mn, mx = np.amin(x, axis=0), np.amax(x, axis=0)
    p = 1 / len(y)  # loop-invariant: weight of one row
    for j in range(len(is_bool)):
        w, med = x[:, j], 0.5
        if not is_bool[j]:
            med = np.median(w)
        # skip constant columns and medians equal to the max (empty right side)
        if mn[j] == mx[j] or med == mx[j]:
            continue
        y_split = [y[w <= med], y[w > med]]
        entropy = 0
        for y_ in y_split:
            prob = 1 / len(y_)
            # class fractions within this side
            counts = np.unique(y_, return_counts=True)[1].astype('float32') * prob
            # weighted (negative) entropy contribution of this side
            entropy -= p * np.sum(counts * np.log(counts)) * len(y_)
        if entropy < min_entropy:
            min_entropy = entropy
            j_best = j
            split_value = med
    if j_best == -1:
        # BUG FIX: the original returned a 6-tuple here while the caller in
        # grow_tree unpacks exactly 4 names, which raised ValueError; return
        # the same arity as the success path
        return -1, None, None, None
    left = x[x[:, j_best] <= split_value]
    right = x[x[:, j_best] > split_value]
    return j_best, split_value, left, right
class Node:
    """
    Node class for the decision tree
    Data:
        self.left, self.right: left and right children
        self.parent: parent of the node, None if this node is root
        self.is_leaf: boolean
        self.attribute_num: attribute number being split on - if self.is_leaf is False
        self.class_freq: class frequencies if self.is_leaf is True
        self.cl: class decision if self.is_leaf is True
        self.x: data associated to this node if self.is_leaf is True (used while growing tree)
        self.split_value: value to split on
        self.correct: correctly classified validation datapoints
        self.correct_ifleaf: correctly classified validation datapoints if it were a leaf
    """
    def __init__(self, x, x_test=None, x_valid=None, par=None):
        self.parent = par
        self.left = None
        self.right = None
        self.attribute_num = -1
        self.is_leaf = True
        # train/test/validation rows that reach this node (cleared after growth)
        self.x = x
        self.x_test = x_test
        self.x_valid = x_valid
        # majority class over the training rows at this node
        self.class_freq = freq_dict(x[:, -1])
        self.cl = most_frequent(self.class_freq)
        self.split_value = None
        # correctly-classified counters, filled in during pruning
        self.correct = 0
        self.correct_ifleaf = 0
        self.correct_test = 0
        self.correct_ifleaf_test = 0
        self.correct_train = 0
        self.correct_ifleaf_train = 0
        self.is_deleted = False
    def __lt__(self, node):
        # intentionally reversed: used as the tie-breaker inside heap tuples
        # so that the node with MORE correct predictions sorts first
        return node.correct < self.correct
class DecisionTree:
    """
    Decision tree class
    Data:
        self.root: root node of the tree
        self.train_accuracies: training accuracies found while training the model
        self.test_accuracies: test accuracies found while training the model
        self.valid_accuracies: validation accuracies found while training the model
    """
    # D is a numpy array, last col is y
    # is_bool: list of booleans: True if data is boolean, False if data is int
    # threshold: threshold for training accuracy
    def __init__(self,
                 D_train=None,
                 D_test=None,
                 D_valid=None,
                 is_bool=None,
                 threshold=1.0,
                 prediction_frequency=1000,
                 pruning=False,
                 max_nodes=math.inf):
        """
        Constructor for a DecisionTree
        Parameters:
        -----------------------------------------------------------------------
        D_train, D_test, D_valid: numpy arrays denoting train, test and val data
        is_bool: indicator for each column whether it is boolean or not
        threshold: accuracy till which the model needs to run
        prediction_frequency: intervals at which accuracies need to be computed
        pruning: boolean indicating whether pruning needs to be done or not
        max_nodes: maximum nodes allowed in the tree
        """
        self.train_accuracies = []
        self.test_accuracies = []
        self.valid_accuracies = []
        # labels are assumed to be 0..max, hence max label + 1 classes
        # NOTE(review): this line dereferences D_train before the None check
        # below, so the documented D_train=None default cannot actually be used
        self.num_classes = int(D_train[:, -1].max()) + 1
        self.valid_accuracies_after_pruning = []
        self.train_accuracies_after_pruning = []
        self.test_accuracies_after_pruning = []
        self.pruned_tree_sizes = []
        if D_train is not None:
            self.grow_tree(
                D_train=D_train,
                D_test=D_test,
                D_valid=D_valid,
                is_bool=is_bool,
                threshold=threshold,
                prediction_frequency=prediction_frequency,
                pruning=pruning,
                max_nodes=max_nodes)
        else:
            self.root = None
    def predict(self, D_test):
        """
        Predict labels of the given data using the model
        """
        predicted = []
        for x in D_test:
            # walk from the root to a leaf following the split decisions
            node = self.root
            while not node.is_leaf:
                if x[node.attribute_num] <= node.split_value:
                    node = node.left
                else:
                    node = node.right
            predicted.append(node.cl)
        return np.array(predicted)
    def grow_tree(self,
                  D_train,
                  D_test,
                  D_valid,
                  is_bool,
                  threshold,
                  prediction_frequency,
                  pruning,
                  max_nodes):
        """
        Create the tree
        Parameters:
        ------------------------------------------------------------------------
        D_train, D_test, D_valid: numpy arrays denoting train, test and val data
        is_bool: indicator for each column whether it is boolean or not
        threshold: accuracy till which the model needs to run
        prediction_frequency: intervals at which accuracies need to be computed - not used
        pruning: boolean indicating whether pruning needs to be done or not
        max_nodes: maximum nodes allowed in the tree
        Raises:
            Exception 'Empty data' if D_train is empty
        """
        # empty data
        if len(D_train) == 0:
            raise Exception('Empty data')
        # BFS growth: keep splitting queued nodes until the accuracy threshold,
        # the node budget, or the queue is exhausted
        self.root = Node(x=D_train, x_test=D_test, x_valid=D_valid)
        q = deque()
        q.appendleft(self.root)
        node_list = []
        node_list.append(self.root)
        total_nodes = 1
        predictions_completed = 0
        train_accuracy, test_accuracy, valid_accuracy = 0, 0, 0
        y_train, y_test, y_valid = D_train[:, -1], D_test[:, -1], D_valid[:, -1]
        total_valid = D_valid.shape[0]
        total_test = D_test.shape[0]
        total_train = D_train.shape[0]
        # count of rows in arr equal to element
        def cnt_help(arr, element):
            return np.count_nonzero(arr == element)
        # correctly classified train/test/valid rows at node n treated as a leaf
        def cnt(n):
            return cnt_help(n.x[:, -1], n.cl)
        def cnt_t(n):
            return cnt_help(n.x_test[:, -1], n.cl)
        def cnt_v(n):
            return cnt_help(n.x_valid[:, -1], n.cl)
        total_correct_train = cnt(self.root)
        total_correct_test = cnt_t(self.root)
        total_correct_valid = cnt_v(self.root)
        while train_accuracy < threshold and q and total_nodes < max_nodes:
            node = q.pop()
            # if node is pure
            if len(node.class_freq) == 1:
                node.x = None
            else:
                j, node.split_value, left_x, right_x = choose_attribute(node.x, is_bool)
                if j == -1:
                    # no usable split exists; keep this node as a leaf
                    node.x = None
                    continue
                # route the test/validation rows the same way as the training rows
                left_x_test = node.x_test[node.x_test[:, j] <= node.split_value]
                left_x_valid = node.x_valid[node.x_valid[:, j] <= node.split_value]
                right_x_test = node.x_test[node.x_test[:, j] > node.split_value]
                right_x_valid = node.x_valid[node.x_valid[:, j] > node.split_value]
                node.attribute_num = j
                node.is_leaf = False
                node.left = Node(x=left_x, x_test=left_x_test, x_valid=left_x_valid, par=node)
                node.right = Node(x=right_x, x_test=right_x_test, x_valid=right_x_valid, par=node)
                q.appendleft(node.left)
                q.appendleft(node.right)
                node_list.append(node.left)
                node_list.append(node.right)
                total_nodes += 2
                # find number of elements correct in left
                # find number of elements correct in right
                # find number of elements correct in current
                # add difference of (left + right) - cur
                train_diff = -cnt(node) + cnt(node.left) + cnt(node.right)
                test_diff = -cnt_t(node) + cnt_t(node.left) + cnt_t(node.right)
                valid_diff = -cnt_v(node) + cnt_v(node.left) + cnt_v(node.right)
                total_correct_train += train_diff
                total_correct_test += test_diff
                total_correct_valid += valid_diff
                train_accuracy = total_correct_train / total_train
                test_accuracy = total_correct_test / total_test
                valid_accuracy = total_correct_valid / total_valid
                # one accuracy sample per split (i.e. per +2 nodes)
                self.train_accuracies.append(100 * train_accuracy)
                self.test_accuracies.append(100 * test_accuracy)
                self.valid_accuracies.append(100 * valid_accuracy)
                # free this node's per-node data now that it is an inner node
                node.x, node.class_freq = None, None
                node.x_test, node.x_valid = None, None
        # finally discard all data in leaf nodes
        for node in node_list:
            node.x = None
            node.x_valid = None
            node.x_test = None
        if not pruning:
            return
        # now pass validation data through the node using dfs, and compute the confusion matrices at each node
        # compute the accuracy change at each non-leaf node
        # sort the nodes according to accuracy changes
        # remove nodes greedily as follows:
        #   pop node from heap
        #   if node is deleted or node's latest value is not the same as the other member of the pair, continue
        #   if found a node that doesn't increase validation accuracy, stop
        #   else remove node and all members of the subtree
        #     also set the left and right children of this node to None
        #     change correct, is_leaf of this node
        #   then propagate to all ancestors of the node
        # then compute total accuracy using the root node
        # computes correctly classified at each node
        # option = 1, 2, 3 correspond to train, test and val respectively
        def compute_correct(n, data, option=3):
            computed_value = cnt_help(data[:, -1], n.cl)
            if option == 3:
                n.correct_ifleaf = computed_value
            elif option == 2:
                n.correct_ifleaf_test = computed_value
            else:
                n.correct_ifleaf_train = computed_value
            if not n.is_leaf:
                data_left = data[data[:, n.attribute_num] <= n.split_value]
                data_right = data[data[:, n.attribute_num] > n.split_value]
                computed_value = compute_correct(n.left, data_left, option) +\
                    compute_correct(n.right, data_right, option)
                if option == 3:
                    n.correct = computed_value
                elif option == 2:
                    n.correct_test = computed_value
                else:
                    n.correct_train = computed_value
            return computed_value
        # recompute the confusion for each ancestor
        def propagate_confusion_upwards(n, heap):
            while n.parent is not None:
                n.parent.correct = n.parent.left.correct + n.parent.right.correct
                n.parent.correct_test = n.parent.left.correct_test + n.parent.right.correct_test
                n.parent.correct_train = n.parent.left.correct_train + n.parent.right.correct_train
                # re-push with the refreshed diff; stale entries are skipped on pop
                heapq.heappush(heap, (n.parent.correct - n.parent.correct_ifleaf, n.parent))
                n = n.parent
        compute_correct(self.root, D_valid, 3)
        compute_correct(self.root, D_test, 2)
        compute_correct(self.root, D_train, 1)
        # now create a heap, and put all nodes in it
        heap = []
        for node in node_list:
            if not node.is_leaf:
                heapq.heappush(heap, (node.correct - node.correct_ifleaf, node))
        # mark a whole subtree deleted and return the number of nodes in it
        # (note: n itself is marked deleted too, so it is never pruned again)
        def set_delete_subtree(n):
            n.is_deleted = True
            if n.is_leaf:
                return 1
            else:
                return 1 + set_delete_subtree(n.left) + set_delete_subtree(n.right)
        total = D_valid.shape[0]
        total_test = D_test.shape[0]
        total_train = D_train.shape[0]
        while heap:
            diff, n = heapq.heappop(heap)
            # skip stale heap entries (already-deleted nodes or outdated diffs)
            if n.is_deleted or (n.correct - n.correct_ifleaf != diff):
                continue
            # stop once pruning would no longer improve validation accuracy
            if diff >= 0:
                break
            total_nodes -= set_delete_subtree(n)
            # collapse n into a leaf using its if-leaf counters
            n.correct = n.correct_ifleaf
            n.correct_test = n.correct_ifleaf_test
            n.correct_train = n.correct_ifleaf_train
            n.is_leaf = True
            n.left = None
            n.right = None
            propagate_confusion_upwards(n, heap)
            # record accuracies and tree size after every pruning step
            self.valid_accuracies_after_pruning.append(100 * self.root.correct / total)
            self.train_accuracies_after_pruning.append(100 * self.root.correct_train / total_train)
            self.test_accuracies_after_pruning.append(100 * self.root.correct_test / total_test)
            self.pruned_tree_sizes.append(total_nodes)
        return
def mainA():
    """Part A: grow a full tree (no pruning) and plot accuracy vs. tree size."""
    # argv: 1=train csv, 2=test csv, 3=validation csv (two header rows each)
    train = np.loadtxt(sys.argv[1], delimiter=',', skiprows=2)
    test = np.loadtxt(sys.argv[2], delimiter=',', skiprows=2)
    valid = np.loadtxt(sys.argv[3], delimiter=',', skiprows=2)
    # first 10 columns are continuous, the remaining 44 are boolean
    is_bool = [(False if i < 10 else True) for i in range(54)]
    prediction_frequency = 1
    decision_tree = DecisionTree(
        D_train=train,
        D_test=test,
        D_valid=valid,
        is_bool=is_bool,
        threshold=1.0,
        prediction_frequency=prediction_frequency,
        pruning=False)
    # each recorded accuracy corresponds to one split, i.e. +2 nodes
    x = list(range(1, 2 * len(decision_tree.train_accuracies) + 1, 2))
    plt.xlabel('Number of nodes')
    plt.ylabel('Accuracy (in %)')
    plt.plot(x, decision_tree.train_accuracies, label='Training accuracy')
    plt.plot(x, decision_tree.test_accuracies, label='Test accuracy')
    plt.plot(x, decision_tree.valid_accuracies, label='Validation accuracy')
    print('final train accuracy:', decision_tree.train_accuracies[-1])
    print('final test accuracy:', decision_tree.test_accuracies[-1])
    print('final validation accuracy:', decision_tree.valid_accuracies[-1])
    plt.legend()
    plt.savefig('decision_tree_accuracies.png')
    plt.close()
def mainB():
    """Part B: grow the tree WITH pruning and plot pre/post-pruning accuracies."""
    train = np.loadtxt(sys.argv[1], delimiter=',', skiprows=2)
    test = np.loadtxt(sys.argv[2], delimiter=',', skiprows=2)
    valid = np.loadtxt(sys.argv[3], delimiter=',', skiprows=2)
    # first 10 columns are continuous, the remaining 44 are boolean
    is_bool = [(False if i < 10 else True) for i in range(54)]
    prediction_frequency = 1
    decision_tree = DecisionTree(
        D_train=train,
        D_test=test,
        D_valid=valid,
        is_bool=is_bool,
        threshold=1.0,
        prediction_frequency=prediction_frequency,
        pruning=True)
    x = list(range(1, 2 * len(decision_tree.train_accuracies) + 1, 2))
    print('initial train accuracy:', decision_tree.train_accuracies[-1])
    print('initial test accuracy:', decision_tree.test_accuracies[-1])
    print('initial validation accuracy:', decision_tree.valid_accuracies[-1])
    print('post pruning train accuracy:', decision_tree.train_accuracies_after_pruning[-1])
    print('post pruning test accuracy:', decision_tree.test_accuracies_after_pruning[-1])
    print('post pruning validation accuracy:', decision_tree.valid_accuracies_after_pruning[-1])
    # first figure: accuracy while growing the tree
    plt.xlabel('Number of nodes')
    plt.ylabel('Accuracy (in %)')
    plt.plot(x, decision_tree.train_accuracies, label='Training accuracy')
    plt.plot(x, decision_tree.test_accuracies, label='Test accuracy')
    plt.plot(x, decision_tree.valid_accuracies, label='Validation accuracy')
    plt.legend()
    plt.savefig('decision_tree_accuracies.png')
    plt.close()
    # second figure: accuracy as nodes are pruned away
    plt.xlabel('Number of nodes')
    plt.ylabel('Accuracy (in %)')
    plt.plot(decision_tree.pruned_tree_sizes, decision_tree.valid_accuracies_after_pruning, label='Validation accuracy')
    plt.plot(decision_tree.pruned_tree_sizes, decision_tree.train_accuracies_after_pruning, label='Training accuracy')
    plt.plot(decision_tree.pruned_tree_sizes, decision_tree.test_accuracies_after_pruning, label='Test accuracy')
    plt.legend()
    # reversed x limits so the shrinking tree reads left-to-right
    plt.xlim(90000, 50000)
    plt.savefig('decision_tree_post_pruning.png')
def mainC():
    """Part C: grid-search a RandomForest over three hyperparameters by OOB score."""
    train = np.loadtxt(sys.argv[1], delimiter=',', skiprows=2)
    test = np.loadtxt(sys.argv[2], delimiter=',', skiprows=2)
    valid = np.loadtxt(sys.argv[3], delimiter=',', skiprows=2)
    from sklearn.ensemble import RandomForestClassifier
    scores = []
    possible_n_estimators = [50, 150, 250, 350, 450] # 50 to 450
    possible_max_features = [0.1, 0.3, 0.5, 0.7, 0.9] # 0.1 to 1.0
    possible_min_samples_split = [2, 4, 6, 8, 10] # 2 to 10
    best_oob_score = -1
    best_n_estimators, best_min_samples_split, best_max_features = -1, -1, -1
    best_model = None
    # exhaustive grid search; model selection is by out-of-bag score
    for n_estimators in possible_n_estimators:
        for max_features in possible_max_features:
            for min_samples_split in possible_min_samples_split:
                t = time.time()
                clf = RandomForestClassifier(n_estimators=n_estimators,
                                             max_features=max_features,
                                             min_samples_split=min_samples_split,
                                             bootstrap=True,
                                             oob_score=True,
                                             n_jobs=4)
                clf.fit(train[:, :-1], train[:, -1])
                oob_score = clf.oob_score_
                print(n_estimators, max_features, min_samples_split, ':', oob_score)
                if oob_score > best_oob_score:
                    best_oob_score = oob_score
                    best_n_estimators = n_estimators
                    best_max_features = max_features
                    best_min_samples_split = min_samples_split
                    best_model = clf
    print(best_n_estimators, best_max_features, best_min_samples_split)
    print('oob score:', best_oob_score)
    # report the winning model's accuracy on all three splits
    y_pred_test = best_model.predict(test[:, :-1])
    y_pred_valid = best_model.predict(valid[:, :-1])
    y_pred_train = best_model.predict(train[:, :-1])
    print('training:', (y_pred_train == train[:, -1]).sum() / len(train[:, -1]))
    print('validation:', (y_pred_valid == valid[:, -1]).sum() / len(valid[:, -1]))
    print('test', (y_pred_test == test[:, -1]).sum() / len(test[:, -1]))
def mainD():
    """Part D: sensitivity of the best forest to each hyperparameter, one at a time."""
    train = np.loadtxt(sys.argv[1], delimiter=',', skiprows=2)
    test = np.loadtxt(sys.argv[2], delimiter=',', skiprows=2)
    valid = np.loadtxt(sys.argv[3], delimiter=',', skiprows=2)
    from sklearn.ensemble import RandomForestClassifier
    scores = []
    possible_n_estimators = [50, 150, 250, 350, 450] # 50 to 450
    possible_max_features = [0.1, 0.3, 0.5, 0.7, 0.9] # 0.1 to 1.0
    possible_min_samples_split = [2, 4, 6, 8, 10] # 2 to 10
    # train one forest and return ((params), (oob, test, validation)) accuracies
    def run_parameters(n_estimators, max_features, min_samples_split):
        clf = RandomForestClassifier(n_estimators=n_estimators,
                                     max_features=max_features,
                                     min_samples_split=min_samples_split,
                                     bootstrap=True,
                                     criterion='gini',
                                     oob_score=True,
                                     n_jobs=4)
        clf.fit(train[:, :-1], train[:, -1])
        oob_score = clf.oob_score_
        y_pred_test = clf.predict(test[:, :-1])
        y_pred_valid = clf.predict(valid[:, :-1])
        test_acc = (y_pred_test == test[:, -1]).sum() / len(test[:, -1])
        valid_acc = (y_pred_valid == valid[:, -1]).sum() / len(valid[:, -1])
        return ((n_estimators, max_features, min_samples_split), (oob_score, test_acc, valid_acc))
    answers = []
    # baseline: the best parameters found in part C
    answers.append(run_parameters(450, 0.7, 2))
    #print('answers:', answers)
    # vary each hyperparameter while holding the other two at the baseline
    for n in [50, 150, 250, 350]:
        answers.append(run_parameters(n, 0.7, 2))
    #print('answers:', answers)
    for f in [0.1, 0.3, 0.5, 0.9]:
        answers.append(run_parameters(450, f, 2))
    #print('answers:', answers)
    for s in [4, 6, 8, 10]:
        answers.append(run_parameters(450, 0.7, s))
    #print('answers:', answers)
    # index the results by parameter triple, converted to percentages
    x = dict()
    for (parameters, scores) in answers:
        x[parameters] = (100 * scores[0], 100 * scores[1], 100 * scores[2])
    n_test, n_val, n_oob = [], [], []
    f_test, f_val, f_oob = [], [], []
    s_test, s_val, s_oob = [], [], []
    for n in possible_n_estimators:
        oob, test_acc, val_acc = x[(n, 0.7, 2)]
        n_oob.append(oob)
        n_test.append(test_acc)
        n_val.append(val_acc)
    for f in possible_max_features:
        oob, test_acc, val_acc = x[(450, f, 2)]
        f_oob.append(oob)
        f_test.append(test_acc)
        f_val.append(val_acc)
    for s in possible_min_samples_split:
        oob, test_acc, val_acc = x[(450, 0.7, s)]
        s_oob.append(oob)
        s_test.append(test_acc)
        s_val.append(val_acc)
    # one sensitivity plot per hyperparameter
    plt.xlabel('Number of estimators')
    plt.ylabel('Accuracy (in %)')
    plt.plot(possible_n_estimators, n_oob, label='Out of bag')
    plt.plot(possible_n_estimators, n_test, label='Test')
    plt.plot(possible_n_estimators, n_val, label='Validation')
    plt.legend()
    plt.savefig('estimator_sensitivity.png')
    plt.close()
    plt.xlabel('Fraction of features used')
    plt.ylabel('Accuracy (in %)')
    plt.plot(possible_max_features, f_oob, label='Out of bag')
    plt.plot(possible_max_features, f_test, label='Test')
    plt.plot(possible_max_features, f_val, label='Validation')
    plt.legend()
    plt.savefig('feature_sensitivity.png')
    plt.close()
    plt.xlabel('Minimum samples needed for split')
    plt.ylabel('Accuracy (in %)')
    plt.plot(possible_min_samples_split, s_oob, label='Out of bag')
    plt.plot(possible_min_samples_split, s_test, label='Test')
    plt.plot(possible_min_samples_split, s_val, label='Validation')
    plt.legend()
    plt.savefig('min_samples_split_sensitivity.png')
    plt.close()
def write_predictions(fname, arr):
    """Write integer predictions to *fname*, one value per line."""
    np.savetxt(fname, arr, fmt="%d", delimiter="\n")
def main():
    """CLI entry: argv = [mode(1|2), train, validation, test, output-file]."""
    # mode '2' enables post-pruning
    pruning = (sys.argv[1] == '2')
    train = np.loadtxt(sys.argv[2], delimiter=',', skiprows=2)
    # NOTE(review): test/valid skip only 1 header row while train skips 2,
    # and argv[4]=test is loaded before argv[3]=valid -- confirm this matches
    # the assignment's file formats and argument order
    test = np.loadtxt(sys.argv[4], delimiter=',', skiprows=1)
    valid = np.loadtxt(sys.argv[3], delimiter=',', skiprows=1)
    # first 10 columns are continuous, the remaining 44 are boolean
    is_bool = [(False if i < 10 else True) for i in range(54)]
    prediction_frequency = 1
    decision_tree = DecisionTree(
        D_train=train,
        D_test=test,
        D_valid=valid,
        is_bool=is_bool,
        threshold=1.0,
        prediction_frequency=prediction_frequency,
        pruning=pruning)
    y_pred = decision_tree.predict(test[:, :-1])
    write_predictions(sys.argv[5], y_pred)
if __name__ == '__main__':
    # command-line entry point (see main() for the expected argv layout)
    main()
|
import logging
import os
import shutil
import subprocess
from openfl.utilities.logs import setup_loggers
# configure openfl's log handlers, then grab a module-level logger
setup_loggers(logging.INFO)
logger = logging.getLogger(__name__)
def prepare_collaborator_workspace(col_dir, arch_path):
    """Recreate *col_dir* from scratch and unpack the workspace archive into it."""
    logger.info(f'Prepare collaborator directory: {col_dir}')
    # always start from a clean directory
    if os.path.exists(col_dir):
        shutil.rmtree(col_dir)
    os.makedirs(col_dir)
    # copy the archive in, then extract it in place
    copied_arch = shutil.copy(arch_path, col_dir)
    shutil.unpack_archive(copied_arch, col_dir)
    logger.info('Collaborator directory prepared')
def run_aggregator(model_interface, fl_experiment):
    """Run the federated-learning aggregator; blocks until the experiment ends."""
    logger.info('run_aggregator')
    fl_experiment.start_experiment(model_interface)
    logger.info('Aggregator stopped')
def run_experiment(col_data_paths, model_interface, arch_path, fl_experiment):
    """Prepare collaborator workspaces, launch them, and run the aggregator.

    Args:
        col_data_paths: iterable of collaborator names (also used as directory names)
        model_interface: model passed through to the aggregator
        arch_path: path to the workspace archive shared with collaborators
        fl_experiment: experiment object driving the aggregation
    """
    logger.info('Starting the experiment!')
    for col_dir in col_data_paths:
        prepare_collaborator_workspace(col_dir, arch_path)
    processes = []
    for col_name in col_data_paths:
        logger.info(f'Starting collaborator: {col_name}')
        # each collaborator runs `fx` from inside its own prepared directory
        p = subprocess.Popen(
            f'fx collaborator start -n {col_name} -p plan/plan.yaml -d data.yaml'.split(' '),
            cwd=os.path.join(os.getcwd(), col_name)
        )
        processes.append(p)
    # blocks until aggregation is complete
    run_aggregator(model_interface, fl_experiment)
    # stop the collaborator processes once the aggregator has finished
    for p in processes:
        p.terminate()
    logger.info('The experiment completed!')
|
from sys import argv, stdout
import sys
from typing import final
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import re
from pySmartDL import *
from tkinter import filedialog
import os
import tkinter
import re
## Scrapes episode pages and download links from animekisa.tv.
## https://anitop.vercel.app/api/v1/top-anime
## use the above link for trending anime display
def start(url):
    """Scrape every episode page linked from *url* and download each episode's
    mp4 via pySmartDL into a user-chosen directory.

    NOTE(review): relies entirely on regex-scraping of rendered HTML; any
    change to the site's layout or download host will break the filters below.
    """
    #### Phase 1: fetch the listing page and collect all anchor hrefs
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()
    soup = BeautifulSoup(webpage, "lxml")
    links = []
    top = tkinter.Tk()
    currdir = os.getcwd()
    top.withdraw()  # hide the Tk root; only the directory-picker dialog shows
    downdir = filedialog.askdirectory(parent=top, initialdir=currdir, title='Choose Download location Bitch')
    for link in soup.findAll('a'):
        links.append(link.get('href'))
    ## keep only links that look like episode pages
    def Filter(datalist):
        return [val for val in datalist
                if re.search(r'episode', val)]
    # keep only candidate download-host ("gogo") links
    def Filter2(datalist2):
        return [val2 for val2 in datalist2
                if re.search(r'gogo', val2)]
    # keep only direct .mp4 links
    def Filter3(datalist3):
        return [val3 for val3 in datalist3
                if re.search('.mp4', val3)]
    # Phase 2: follow each episode page -> download host -> direct mp4 URL
    for episode in Filter(links):
        episodes = (f"https://animekisa.tv/{episode}")
        req2 = Request(episodes, headers={'User-Agent': 'Mozilla/5.0'})
        webpage2 = urlopen(req2).read()
        soup2 = BeautifulSoup(webpage2,"lxml")
        results2 = re.findall("https.*",str(soup2))
        low2 = Filter2(results2)
        # strip list punctuation/quotes left over from str(list)
        s1=re.sub("[[;']","",str(low2))
        s2=re.sub('[]"]','',s1)
        try:
            req3 = Request(s2, headers={'User-Agent': 'Mozilla/5.0'})
            webpage3=urlopen(req3).read()
            soup3= BeautifulSoup(webpage3,"lxml")
            results3 = re.findall("https.*",str(soup3))
            low3=Filter3(results3)
            for x in low3:
                # narrow down to the storage-host mp4 URL via repeated
                # regex passes over stringified lists
                rel = re.findall("^https://storage.*mp4'",x)
                rel2 = Filter3(rel)
                s3=re.sub("[[;']","",str(rel2))
                s4=re.sub('[]"]','',s3)
                f1=re.findall("^http.*label:",s4)
                f2=re.findall("https://storage.*mp4",str(f1))
                f3=re.sub("[]'[]","",str(f2))
                dest = downdir
                # NOTE(review): rebinds the `url` parameter -- presumably
                # intentional since the original value is no longer needed
                url= f3
                obj = SmartDL(url,dest)
                obj.start()
        except ValueError:
            # malformed/empty URL strings from the regex passes are skipped
            pass
# CLI entry point: the first argument is the series page URL to scrape.
page_url = sys.argv[1]
start(url=page_url)
sys.stdout.flush()
|
"""
This is a program for making a song prediction
according to the lyrcis available in
file output.csv
"""
import re
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import RandomOverSampler
warnings.filterwarnings("ignore")
# Shared estimator/vectorizer instances used throughout the script.
TV = TfidfVectorizer()
M = MultinomialNB()
ROS = RandomOverSampler()
# Lyrics corpus: one row per song with singer name and numeric label columns.
LYRICS_DF = pd.read_csv('output.csv')
LYRICS_DF['singer_number'] = LYRICS_DF['singer_number'].astype(int)
LYRICSWORDS = LYRICS_DF['lyrics_words'].to_list()
def print_evaluations(y_train, y_pred, model):
    """Print accuracy/precision/recall/f1 for *model*, draw a confusion-matrix
    heatmap, and return the accuracy score.

    :param y_train: true labels.
    :param y_pred: predicted labels.
    :param model: model name used only for display.
    :return: accuracy score as a float.
    """
    print(f'How does model {model} score:')
    print(f"The accuracy of the model is: {round(accuracy_score(y_train, y_pred), 3)}")
    print(f"The precision of the model is: {round(precision_score(y_train, y_pred, average='weighted'), 3)}")
    print(f"The recall of the model is: {round(recall_score(y_train, y_pred, average='weighted'), 3)}")
    print(f"The f1-score of the model is: {round(f1_score(y_train, y_pred, average='weighted'), 3)}")
    # print confusion matrix
    plt.figure(figsize=(15, 15))
    cm = confusion_matrix(y_train, y_pred)
    print(cm)
    ax = plt.subplot()
    sns.heatmap(cm, annot=True, ax=ax)
    ax.set_xlabel('Predicted labels')
    ax.set_ylabel('True labels')
    ax.set_title('Confusion Matrix %s' % (model))
    # BUG FIX: the original passed `model` as a third positional argument;
    # sklearn's accuracy_score takes only (y_true, y_pred) positionally
    # (`normalize` is keyword-only), so the return line raised a TypeError.
    return accuracy_score(y_train, y_pred)
if __name__ == '__main__':
    print('')
    print("""
    This is a program for making a song prediction
    according to the lyrcis available in
    file output.csv
    """)
    print('')
    print("""
    Please save your lyrics as a txt file
    in this folder and write the name
    of the file here:
    """)
    print('')
    SONG = input()
    SONG = str(SONG)
    # Read the user's lyrics file and flatten it to a single string;
    # the regex removes the literal "\n" sequences produced by str(list).
    with open("%s.txt" % (SONG), "r") as myfile:
        DATA = myfile.readlines()
    DATA = str(DATA)
    DATA = re.sub(r"\\n", ' ', DATA)
    print('')
    print("""
    These are your lyrics:
    """)
    print('')
    print(DATA)
    # Fit TF-IDF on the whole corpus and train naive bayes on singer numbers.
    TV.fit(LYRICSWORDS)
    TV_VECTORS = TV.transform(LYRICSWORDS)
    Y = LYRICS_DF['singer'].to_list()
    YNUMBERS = LYRICS_DF['singer_number'].to_list()
    M.fit(TV_VECTORS, YNUMBERS)
    NEW_SONG = [DATA]
    TV_VEC = TV.transform(NEW_SONG)
    #simple naive bayes
    print('')
    print("""
    This is a simple naive bayes predcition
    without input optimization:
    """)
    print('')
    print("""
    (please check dictionary printed at the end of the
    run to see which artist corresponds
    to which artistnumber)
    """)
    print('')
    print("""
    Your song belongs most probably
    to this artistnumber:
    """)
    print('')
    print(M.predict(TV_VEC))
    print('')
    print("""
    These are the probabilities that your
    song belongs to each artistnumber:
    """)
    print('')
    print(M.predict_proba(TV_VEC))
    print('')
    DF = pd.DataFrame(zip(LYRICSWORDS, Y), columns=['LYRICSWORDS', 'YNUMBERS'])
    Y = DF['YNUMBERS']
    X = DF[['LYRICSWORDS']]
    # NOTE(review): X_RESAMPLE/Y_RESAMPLE are never used afterwards.
    X_RESAMPLE, Y_RESAMPLE = ROS.fit_resample(X, Y)
    # Bag-of-words features for the tree-based models below.
    CV = CountVectorizer(ngram_range=(1, 1))
    CV.fit(LYRICSWORDS)
    WORD_VECTORS = CV.transform(LYRICSWORDS)
    # NOTE(review): return value discarded — this call has no effect.
    CV.get_feature_names()
    DF2 = pd.DataFrame(WORD_VECTORS.todense(), columns=CV.get_feature_names())
    X = DF2
    Y = DF['YNUMBERS']
    print('')
    print("""
    These are the train-test predicitions
    for a baseline model:
    """)
    print('')
    SPLIT = 0.1
    X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = train_test_split(X,
                                                        YNUMBERS,
                                                        random_state=10,
                                                        test_size=SPLIT)
    #Baseline model: always predicts class 0.
    YPRED_BL = [0] * X_TRAIN.shape[0]
    print_evaluations(Y_TRAIN, YPRED_BL, 'Baseline')
    NEW_DF = pd.concat([X, Y], axis=1)
    # NOTE(review): result discarded — this groupby has no effect.
    NEW_DF.groupby('YNUMBERS').size()
    #NEW_DF.groupby('YNUMBERS').size()[1]/NEW_DF.shape[0]*100
    X = NEW_DF.iloc[:, :-1]
    Y = NEW_DF.YNUMBERS
    # simple Random forest model
    print('')
    print("""
    These are the results of the
    random forest evaluation:
    """)
    RF = RandomForestClassifier(n_estimators=20, max_depth=3, random_state=10)
    RF.fit(X_TRAIN, Y_TRAIN)
    YPRED_RF = RF.predict(X_TEST)
    # NOTE(review): RF2 is an alias of RF, not a copy — refitting RF2 below
    # also refits RF.
    RF2 = RF
    print('')
    print("""
    This is the random forest prediction
    for the artist number for your song:
    """)
    print('')
    # NOTE(review): RF was fitted on CountVectorizer features but TV_VEC is a
    # TF-IDF vector — the feature spaces likely differ; verify this works.
    print(RF.predict(TV_VEC))
    print("""
    These are the probabilities that
    your song belongs to each artistnumber:
    """)
    print('')
    print(RF.predict_proba(TV_VEC))
    print('')
    print("""
    These are the random forest evaluations
    for the train-test split:
    """)
    print('')
    print_evaluations(Y_TEST, YPRED_RF, 'RandomForest')
    # Random oversampling model
    ROS = RandomOverSampler(random_state=10)
    X_ROS, Y_ROS = ROS.fit_resample(X_TRAIN, Y_TRAIN)
    # NOTE(review): result discarded — this unique() call has no effect.
    np.unique(Y_ROS, return_counts=True)
    RF2.fit(X_ROS, Y_ROS)
    YPRED_ROS = RF2.predict(X_TEST)
    print('')
    print("""
    This is the random oversampling prediction
    of the artist number with random forest
    evaluation for your song:
    """)
    print('')
    print(RF2.predict(TV_VEC))
    print('')
    print("""
    These are the probabilities that
    your song belongs to each artistnumber:
    """)
    print('')
    print(RF2.predict_proba(TV_VEC))
    print('')
    print("""
    These are the random oversampling
    evaluations with the train-test split:
    """)
    print('')
    print_evaluations(Y_TEST, YPRED_ROS, 'RandomOversampling')
    # Mapping of artist name -> artist number for interpreting the output.
    Y = LYRICS_DF['singer'].to_list()
    YNUMBERS = LYRICS_DF['singer_number'].to_list()
    ARTISTLISTFINAL = dict(zip(Y, YNUMBERS))
    print('')
    print("""
    This is the code for the artists
    and the belonging artistnumbers:
    """)
    print(ARTISTLISTFINAL)
    print('')
    print("""
    These are the heatmaps for the confusion
    matrix of each different evaluation:
    """)
|
import os
import time
import requests
import traceback
from urllib import urlencode
from routes.models import Route, Directions
from django.core.management.base import BaseCommand, CommandError
DIRECTIONS_URL='https://api.mapbox.com/directions/v5/mapbox/cycling/'
MAX_WAYPOINTS = 25
class Command(BaseCommand):
help = 'Retrieves route directions'
def get_directions(self, coords):
# flip flop lat/lng because mapbox expects it that way
coords_joined = ';'.join(Route.coords_lng_lat(coords))
params = {
'steps': 'true',
'continue_straight': 'true',
'access_token': os.getenv('MAPBOX_ACCESS_TOKEN'),
}
url = '%s%s?%s' % (DIRECTIONS_URL, coords_joined, urlencode(params))
return requests.get(url)
def handle(self, *args, **options):
routes = Route.objects.all()
for route in routes:
# get/create directions record for this route
directions = Directions.objects.filter(route=route)
if directions.exists():
self.stdout.write(self.style.WARNING('route %s exists so skipping' % route))
continue
else:
directions = Directions(
route=route,
)
self.stdout.write(self.style.WARNING('fetching route %s which has %s waypoints' % (route, len(route.coords))))
# build a running list of directions while iterating over chunks of waypoints
route_directions = None
has_waypoints = True
waypoint_index = 0
try:
while(has_waypoints):
time.sleep(1)
fetch_waypoints = route.coords[waypoint_index:waypoint_index + MAX_WAYPOINTS]
response = self.get_directions(fetch_waypoints)
if response.ok:
json = response.json()
# first saving of directions for this route
if not route_directions:
# use first route returned
route_directions = json['routes'][0]
else:
# append legs
if json.get('routes') and len(json['routes']):
route_directions['legs'] += json['routes'][0]['legs']
else:
print fetch_waypoints
print re
self.stdout.write(self.style.NOTICE('No route found for waypoints, skipping'))
waypoint_index += MAX_WAYPOINTS
# make sure we have at least two waypoints left
has_waypoints = len(route.coords) - 1 - waypoint_index >= 2;
else:
raise Exception(response.content)
except Exception as e:
self.stdout.write(self.style.NOTICE('Failed getting directions for %s so deleting route directions (%s)' % (route, e)))
# delete this directions record if it's already been saved
if directions.pk:
directions.delete()
print traceback.format_exc()
# just quit entirely - we may be throttled
break
directions.directions = route_directions
directions.save()
self.stdout.write(self.style.SUCCESS('Successfully retrieved directions for %s' % route)) |
"""
Definition for a binary tree node.
"""
import printTree
inputarray = [1, 2, 3, 4, 5, 6, 7, 8, 9]
class TreeNode(object):
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        # A node starts as a leaf; children are attached afterwards.
        self.val, self.left, self.right = x, None, None
def sortedArrayToBST(nums):
    """Build a height-balanced BST from an ascending-sorted list.

    The middle element becomes the root; the halves on either side are
    built recursively into the left and right subtrees.

    :param nums: list of values in ascending order.
    :return: root TreeNode, or None for an empty list.
    """
    # Idiomatic emptiness test instead of `if not len(nums)`.
    if not nums:
        return None
    # Integer floor division instead of int(len(nums)/2).
    mid = len(nums) // 2
    root = TreeNode(nums[mid])
    root.left = sortedArrayToBST(nums[:mid])
    root.right = sortedArrayToBST(nums[mid + 1:])
    return root
# Build a balanced BST from the sample array and pretty-print it.
printTree.printTree( sortedArrayToBST( inputarray ) )
|
# python get_t_names_by_gene.py Homo_sapiens.GRCh38.82.cleared.gtf ENSG00000230021
import sys
# CLI: <gtf file> <gene id>; writes matching transcript ids to <gene>.names
gtf = sys.argv[1]
gene = sys.argv[2]
# `with` guarantees the output file is closed even if parsing raises
# (the original left fout open on any exception).
with open(gene + '.names', 'w') as fout:
    with open(gtf, 'r') as fin:
        for line in fin:
            # Skip GTF comment/header lines and anything without the
            # standard 9 tab-separated columns (the original crashed on them).
            if line.startswith('#'):
                continue
            fields = line.strip().split('\t')
            if len(fields) < 9:
                continue
            feature_type = fields[2]  # renamed: `type` shadowed the builtin
            attributes = fields[8].split('; ')
            # NOTE(review): assumes gene_id is the 1st attribute and
            # transcript_id the 3rd, as in Ensembl GTFs — confirm for other
            # annotation sources.
            g_id = attributes[0].split('"')[1]
            t_id = attributes[2].split('"')[1]
            if feature_type == 'transcript' and g_id == gene:
                fout.write(t_id + '\n')
# Generated by Django 4.0a1 on 2021-12-13 10:18
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ObjectGroup and Object models and link runs to object groups."""

    dependencies = [
        ('lab', '0009_alter_run_beamline'),
    ]

    operations = [
        migrations.CreateModel(
            name='ObjectGroup',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(blank=True, max_length=255, verbose_name='Group label')),
                ('inventory', models.CharField(blank=True, max_length=255, verbose_name='Inventory')),
                ('dating', models.CharField(max_length=255, verbose_name='Dating')),
                ('materials', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=255), default=list, size=None, verbose_name='Materials')),
                ('discovery_place', models.CharField(blank=True, max_length=255, verbose_name='Place of discovery')),
                ('collection', models.CharField(blank=True, max_length=255, verbose_name='Collection')),
            ],
        ),
        migrations.CreateModel(
            name='Object',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=255, verbose_name='Label')),
                ('differentiation_information', models.CharField(blank=True, max_length=255, verbose_name='Differentiation information')),
                # Every Object belongs to one ObjectGroup; deleting the group
                # cascades to its objects.
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab.objectgroup')),
            ],
        ),
        migrations.AddField(
            model_name='run',
            name='run_object_groups',
            field=models.ManyToManyField(related_name='runs', to='lab.ObjectGroup', verbose_name='Object groups'),
        ),
    ]
|
"""Check redundant brackets
For a given expression in the form of a string, find if there exist any redundant brackets or not. It is given that the expression contains only rounded brackets or parenthesis and the input expression will always be balanced.
A pair of the bracket is said to be redundant when a sub-expression is surrounded by unnecessary or needless brackets.
Example:
Expression: (a+b)+c
Since there are no needless brackets, hence, the output must be 'false'.
Expression: ((a+b))
The expression can be reduced to (a+b). Hence the expression has redundant brackets and the output will be 'true'.
Input Format :
The first and the only line of input contains a string expression, without any spaces in between.
Output Format :
The first and the only line of output will print either 'true' or 'false'(without the quotes) denoting whether the input expression contains redundant brackets or not.
Constraints:
0 <= N <= 10^6
Where N is the length of the expression.
Time Limit: 1 second
Sample Input 1:
a+(b)+c
Sample Output 1:
true
Explanation:
The expression can be reduced to a+b+c. Hence, the brackets are redundant.
Sample Input 2:
(a+b)
Sample Output 2:
false"""
def checkRedundantBrackets(expression):
    """Return True if *expression* (a balanced parenthesised expression)
    contains at least one redundant pair of brackets.

    A pair is redundant when no operator occurs between the '(' and its
    matching ')'. Runs in O(n) time with an O(n) auxiliary stack.
    """
    stack = []
    for ch in expression:
        if ch == "(" or ch in "+-*/":
            # Track open brackets and operators; operands are ignored.
            stack.append(ch)
        elif ch == ")":
            if stack[-1] == "(":
                # Nothing but an operand between the pair -> redundant.
                return True
            # Pop the operators enclosed by this pair...
            while stack[-1] != "(":
                stack.pop()
            # ...and the matching open bracket itself.
            stack.pop()
    # No redundant pair was found anywhere in the expression.
    return False
# Read an expression from the user and report bracket redundancy.
expression = input("Enter the expression:\n")
print("output:")
print('true' if checkRedundantBrackets(expression) else 'false')
"""
Time complexity: O(n)
Space complexity: O(n)
""" |
"""
Let's see what we've done so far using sqlite command shell:
________________________________________________________________________________
$ sqlite3 test.db
SQLite version 3.7.17 2013-05-20 00:56:22
Enter ".help" for instructions
Enter SQL statements terminated with a ";"
sqlite> .tables
books
sqlite> SELECT * FROM books;
1|Learning Python|Mark Lutz|$36.19|Jul 6, 2013
2|Two Scoops of Django: Best Practices For Django 1.6|Daniel Greenfeld|$34.68|Feb 1, 2014
3|Python Cookbook|David Beazley|$30.29|May 29, 2013
4|The Quick Python Book|Naomi R. Ceder|$16.39|Jan 15, 2010
5|Python Testing|David Sale|$38.20|Sep 2, 2014
sqlite> .mode column
sqlite> .headers on
sqlite> SELECT * FROM books;
id title author price year
---------- --------------- ---------- ---------- -----------
1 Learning Python Mark Lutz $36.19 Jul 6, 2013
2 Two Scoops of D Daniel Gre $34.68 Feb 1, 2014
3 Python Cookbook David Beaz $30.29 May 29, 201
4 The Quick Pytho Naomi R. C $16.39 Jan 15, 201
5 Python Testing David Sale $38.20 Sep 2, 2014
sqlite>
________________________________________________________________________________
Note that we modified the way the data is displayed in the console.
We used the column mode and turned on the headers.
|
from pathlib import Path
from typing import Union
from ..base import ParametrizedValue
class Logger(ParametrizedValue):
    """Base class for uWSGI logger descriptions.

    Positional arguments are joined with commas when the option
    value is rendered.
    """

    args_joiner = ','

    def __init__(self, alias, *args):
        # An absent alias is normalised to an empty string.
        self.alias = alias or ''
        super().__init__(*args)
class LoggerFile(Logger):
    """Allows logging into files."""

    name = 'file'
    plugin = 'logfile'

    def __init__(self, filepath: Union[str, Path], alias=None):
        """
        :param str filepath: File path. ``Path`` objects are coerced to str.

        :param str alias: Logger alias.
        """
        super().__init__(alias, str(filepath))
class LoggerFileDescriptor(Logger):
    """Allows logging using file descriptor."""

    name = 'fd'
    plugin = 'logfile'

    def __init__(self, fd: int, alias=None):
        """
        :param int fd: File descriptor.

        :param str alias: Logger alias.
        """
        super().__init__(alias, fd)
class LoggerStdIO(Logger):
    """Allows logging stdio."""

    name = 'stdio'
    plugin = 'logfile'

    def __init__(self, alias=None):
        """
        :param str alias: Logger alias.
        """
        # No positional arguments — only the alias is passed on.
        super().__init__(alias)
class LoggerSocket(Logger):
    """Allows logging into UNIX and UDP sockets."""

    name = 'socket'
    plugin = 'logsocket'

    def __init__(self, addr_or_path: Union[str, Path], alias=None):
        """
        :param str addr_or_path: Remote address or filepath.

            Examples:
                * /tmp/uwsgi.logsock
                * 192.168.173.19:5050

        :param str alias: Logger alias.
        """
        # Path objects are coerced to their string form.
        super().__init__(alias, str(addr_or_path))
class LoggerSyslog(Logger):
    """Allows logging into Unix standard syslog."""

    name = 'syslog'
    plugin = 'syslog'

    def __init__(self, app_name=None, facility=None, alias=None):
        """
        :param str app_name:

        :param str facility:

            * https://en.wikipedia.org/wiki/Syslog#Facility

        :param str alias: Logger alias.
        """
        super().__init__(alias, app_name, facility)
class LoggerRsyslog(LoggerSyslog):
    """Allows logging into Unix standard syslog or a remote syslog."""

    name = 'rsyslog'
    plugin = 'rsyslog'

    def __init__(self, app_name=None, host=None, facility=None, split=None, packet_size=None, alias=None):
        """
        :param str app_name:

        :param str host: Address (host and port) or UNIX socket path.

        :param str facility:

            * https://en.wikipedia.org/wiki/Syslog#Facility

        :param bool split: Split big messages into multiple chunks if they are bigger
            than allowed packet size. Default: ``False``.

        :param int packet_size: Set maximum packet size for syslog messages. Default: 1024.

            .. warning:: using packets > 1024 breaks RFC 3164 (#4.1)

        :param str alias: Logger alias.
        """
        super().__init__(app_name, facility, alias=alias)
        # host must come first in the rendered argument list, before
        # app_name and facility set up by the parent constructor.
        self.args.insert(0, host)
        self._set('rsyslog-packet-size', packet_size)
        self._set('rsyslog-split-messages', split, cast=bool)
class LoggerRedis(Logger):
    """Allows logging into Redis.

    .. note:: Consider using ``dedicate_thread`` param.

    """
    name = 'redislog'
    plugin = 'redislog'

    def __init__(self, host=None, command=None, prefix=None, alias=None):
        """
        :param str host: Default: 127.0.0.1:6379

        :param str command: Command to be used. Default: publish uwsgi

            Examples:
                * publish foobar
                * rpush foo

        :param str prefix: Default: <empty>

        :param str alias: Logger alias.
        """
        super().__init__(alias, host, command, prefix)
class LoggerMongo(Logger):
    """Allows logging into Mongo DB.

    .. note:: Consider using ``dedicate_thread`` param.

    """
    name = 'mongodblog'
    plugin = 'mongodblog'

    def __init__(self, host=None, collection=None, node=None, alias=None):
        """
        :param str host: Default: 127.0.0.1:27017

        :param str collection: Collection to be used. Default: uwsgi.logs

        :param str node: An identification string for the instance
            sending logs Default: <server hostname>

        :param str alias: Logger alias.
        """
        super().__init__(alias, host, collection, node)
class LoggerZeroMq(Logger):
    """Allows logging into ZeroMQ sockets."""

    name = 'zeromq'
    plugin = 'logzmq'

    def __init__(self, connection_str, alias=None):
        """
        :param str connection_str:

            Examples:
                * tcp://192.168.173.18:9191

        :param str alias: Logger alias.
        """
        super().__init__(alias, connection_str)
|
# coding: utf-8
"""
Hydrogen Proton API
Financial engineering module of Hydrogen Atom # noqa: E501
OpenAPI spec version: 1.9.2
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from proton_api.api_client import ApiClient
class SimulationsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def backtest(self, backtest_request, **kwargs): # noqa: E501
"""Backtest # noqa: E501
Run a historical analysis for a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.backtest(backtest_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BacktestRequest backtest_request: Request payload for Backtest (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.backtest_with_http_info(backtest_request, **kwargs) # noqa: E501
else:
(data) = self.backtest_with_http_info(backtest_request, **kwargs) # noqa: E501
return data
def backtest_with_http_info(self, backtest_request, **kwargs): # noqa: E501
"""Backtest # noqa: E501
Run a historical analysis for a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.backtest_with_http_info(backtest_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BacktestRequest backtest_request: Request payload for Backtest (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['backtest_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method backtest" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'backtest_request' is set
if self.api_client.client_side_validation and ('backtest_request' not in params or
params['backtest_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `backtest_request` when calling `backtest`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'backtest_request' in params:
body_params = params['backtest_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/backtest', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def event_study(self, event_study_request, **kwargs): # noqa: E501
"""Event Study # noqa: E501
Analyze a group of investments against key historical events # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.event_study(event_study_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EventStudyRequest event_study_request: Request payload for Event Study (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.event_study_with_http_info(event_study_request, **kwargs) # noqa: E501
else:
(data) = self.event_study_with_http_info(event_study_request, **kwargs) # noqa: E501
return data
def event_study_with_http_info(self, event_study_request, **kwargs): # noqa: E501
"""Event Study # noqa: E501
Analyze a group of investments against key historical events # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.event_study_with_http_info(event_study_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param EventStudyRequest event_study_request: Request payload for Event Study (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['event_study_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method event_study" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'event_study_request' is set
if self.api_client.client_side_validation and ('event_study_request' not in params or
params['event_study_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `event_study_request` when calling `event_study`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'event_study_request' in params:
body_params = params['event_study_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/event_study', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def monte_carlo(self, monte_carlo_request, **kwargs): # noqa: E501
"""Monte Carlo # noqa: E501
Simulate the future growth of a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.monte_carlo(monte_carlo_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MonteCarloRequest monte_carlo_request: Request payload for Monte Carlo (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.monte_carlo_with_http_info(monte_carlo_request, **kwargs) # noqa: E501
else:
(data) = self.monte_carlo_with_http_info(monte_carlo_request, **kwargs) # noqa: E501
return data
def monte_carlo_with_http_info(self, monte_carlo_request, **kwargs): # noqa: E501
"""Monte Carlo # noqa: E501
Simulate the future growth of a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.monte_carlo_with_http_info(monte_carlo_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param MonteCarloRequest monte_carlo_request: Request payload for Monte Carlo (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['monte_carlo_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method monte_carlo" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'monte_carlo_request' is set
if self.api_client.client_side_validation and ('monte_carlo_request' not in params or
params['monte_carlo_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `monte_carlo_request` when calling `monte_carlo`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'monte_carlo_request' in params:
body_params = params['monte_carlo_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/monte_carlo', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def portfolio_what_if(self, portfolio_what_if_request, **kwargs): # noqa: E501
"""Porfolio What-If # noqa: E501
Simulate the impact of adding, removing, reducing, or increasing various positions in a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.portfolio_what_if(portfolio_what_if_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PortfolioWhatIfRequest portfolio_what_if_request: Request payload for Portfolio What-If (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.portfolio_what_if_with_http_info(portfolio_what_if_request, **kwargs) # noqa: E501
else:
(data) = self.portfolio_what_if_with_http_info(portfolio_what_if_request, **kwargs) # noqa: E501
return data
def portfolio_what_if_with_http_info(self, portfolio_what_if_request, **kwargs): # noqa: E501
"""Porfolio What-If # noqa: E501
Simulate the impact of adding, removing, reducing, or increasing various positions in a group of investments # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.portfolio_what_if_with_http_info(portfolio_what_if_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PortfolioWhatIfRequest portfolio_what_if_request: Request payload for Portfolio What-If (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['portfolio_what_if_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portfolio_what_if" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'portfolio_what_if_request' is set
if self.api_client.client_side_validation and ('portfolio_what_if_request' not in params or
params['portfolio_what_if_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `portfolio_what_if_request` when calling `portfolio_what_if`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'portfolio_what_if_request' in params:
body_params = params['portfolio_what_if_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/portfolio_what_if', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def savings_calculator(self, savings_calculator_request, **kwargs):  # noqa: E501
    """Savings Calculator  # noqa: E501

    Simulate the future growth of a simple savings account  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.savings_calculator(savings_calculator_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SavingsCalculatorRequest savings_calculator_request: Request payload for Savings Calculator (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Delegate to the *_with_http_info variant: with _return_http_data_only
    # set it yields the response data synchronously, or the request thread
    # when async_req=True — so a single return path covers both cases.
    return self.savings_calculator_with_http_info(savings_calculator_request, **kwargs)  # noqa: E501
def savings_calculator_with_http_info(self, savings_calculator_request, **kwargs):  # noqa: E501
    """Savings Calculator  # noqa: E501

    Simulate the future growth of a simple savings account  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.savings_calculator_with_http_info(savings_calculator_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SavingsCalculatorRequest savings_calculator_request: Request payload for Savings Calculator (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument the endpoint does not understand.
    allowed = {'savings_calculator_request', 'async_req',
               '_return_http_data_only', '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method savings_calculator" % key
            )
    # Client-side check that the required request payload is present.
    if self.api_client.client_side_validation and savings_calculator_request is None:  # noqa: E501
        raise ValueError("Missing the required parameter `savings_calculator_request` when calling `savings_calculator`")  # noqa: E501

    # Both request and response bodies are JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/savings_calculator', 'POST',
        {},   # no path params
        [],   # no query params
        header_params,
        body=savings_calculator_request,
        post_params=[],
        files={},
        response_type='dict(str, object)',  # noqa: E501
        auth_settings=['oauth2'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def scenario_analysis(self, scneario_analysis_request, **kwargs):  # noqa: E501
    """Scenario Analysis  # noqa: E501

    Analyze a group of investments against a series of external economic factors  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.scenario_analysis(scneario_analysis_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param ScenarioAnalysisRequest scneario_analysis_request: Request payload for Scenario Analysis (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): the misspelled parameter name `scneario_analysis_request`
    # is part of the public signature (keyword callers rely on it) and is
    # therefore kept as-is.
    kwargs['_return_http_data_only'] = True
    # Single return path: the *_with_http_info variant returns either the
    # response data (sync) or the request thread (async_req=True).
    return self.scenario_analysis_with_http_info(scneario_analysis_request, **kwargs)  # noqa: E501
def scenario_analysis_with_http_info(self, scneario_analysis_request, **kwargs):  # noqa: E501
    """Scenario Analysis  # noqa: E501

    Analyze a group of investments against a series of external economic factors  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.scenario_analysis_with_http_info(scneario_analysis_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param ScenarioAnalysisRequest scneario_analysis_request: Request payload for Scenario Analysis (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument the endpoint does not understand.
    allowed = {'scneario_analysis_request', 'async_req',
               '_return_http_data_only', '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method scenario_analysis" % key
            )
    # Client-side check that the required request payload is present.
    if self.api_client.client_side_validation and scneario_analysis_request is None:  # noqa: E501
        raise ValueError("Missing the required parameter `scneario_analysis_request` when calling `scenario_analysis`")  # noqa: E501

    # Both request and response bodies are JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/scenario_analysis', 'POST',
        {},   # no path params
        [],   # no query params
        header_params,
        body=scneario_analysis_request,
        post_params=[],
        files={},
        response_type='dict(str, object)',  # noqa: E501
        auth_settings=['oauth2'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def sensitivity_analysis(self, sensitivity_analysis_request, **kwargs):  # noqa: E501
    """Sensitivity Analysis  # noqa: E501

    Analyze a group of investments against an external economic factor  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.sensitivity_analysis(sensitivity_analysis_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SensitivityAnalysisRequest sensitivity_analysis_request: Request payload for Sensitivity Analysis (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Single return path: the *_with_http_info variant returns either the
    # response data (sync) or the request thread (async_req=True).
    return self.sensitivity_analysis_with_http_info(sensitivity_analysis_request, **kwargs)  # noqa: E501
def sensitivity_analysis_with_http_info(self, sensitivity_analysis_request, **kwargs):  # noqa: E501
    """Sensitivity Analysis  # noqa: E501

    Analyze a group of investments against an external economic factor  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.sensitivity_analysis_with_http_info(sensitivity_analysis_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SensitivityAnalysisRequest sensitivity_analysis_request: Request payload for Sensitivity Analysis (required)
    :return: dict(str, object)
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument the endpoint does not understand.
    allowed = {'sensitivity_analysis_request', 'async_req',
               '_return_http_data_only', '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method sensitivity_analysis" % key
            )
    # Client-side check that the required request payload is present.
    if self.api_client.client_side_validation and sensitivity_analysis_request is None:  # noqa: E501
        raise ValueError("Missing the required parameter `sensitivity_analysis_request` when calling `sensitivity_analysis`")  # noqa: E501

    # Both request and response bodies are JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/sensitivity_analysis', 'POST',
        {},   # no path params
        [],   # no query params
        header_params,
        body=sensitivity_analysis_request,
        post_params=[],
        files={},
        response_type='dict(str, object)',  # noqa: E501
        auth_settings=['oauth2'],  # noqa: E501
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
|
import cv2
def bicubic(scale=2, input_name="input/1.png", output_name="output/1-bicubic.png"):
    """Enlarge an image with bicubic interpolation and save the result.

    Generalized from the previous hard-coded constants; the defaults keep
    the original behavior (scale the bundled input/1.png by 2x).

    :param scale: enlargement factor applied to both axes
    :param input_name: path of the image to read
    :param output_name: path where the enlarged image is written
    :raises FileNotFoundError: if the input image cannot be read
    """
    img = cv2.imread(input_name, cv2.IMREAD_COLOR)
    # cv2.imread returns None (instead of raising) on a missing/unreadable
    # file, which previously crashed later in cv2.resize with a cryptic error.
    if img is None:
        raise FileNotFoundError("Could not read image: " + input_name)
    img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(output_name, img)
    print("Bicubic enlargement with factor " + str(scale) + " success!")

bicubic()
|
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Test__Iand__(TestCase):
    """Compare Tensor.__iand__ (in-place bitwise AND) between the CPU backend
    and the NPU backend for bool / int16 / int32 tensors and scalars.

    NOTE(review): __iand__ mutates its left operand, so each npu_op_exec call
    receives a tensor already AND-ed by the preceding cpu_op_exec call. The
    comparison still holds only because AND is idempotent ((a & b) & b == a & b)
    — confirm this ordering is intentional.
    """

    def generate_bool_data(self, shape):
        # Random boolean tensor: uniform floats in [0, 1) thresholded at 0.5.
        input1 = np.random.uniform(0, 1, shape).astype(np.float32)
        input1 = input1 < 0.5
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def generate_data(self, min_d, max_d, shape, dtype):
        # Two random tensors drawn from [min_d, max_d) with the given dtype.
        input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
        input2 = np.random.uniform(min_d, max_d, shape).astype(dtype)
        # modify from numpy.ndarray to torch.tensor
        npu_input1 = torch.from_numpy(input1)
        npu_input2 = torch.from_numpy(input2)
        return npu_input1, npu_input2

    def generate_single_data(self, min_d, max_d, shape, dtype):
        # Single random tensor drawn from [min_d, max_d) with the given dtype.
        input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
        npu_input1 = torch.from_numpy(input1)
        return npu_input1

    def generate_scalar(self, min_d, max_d):
        # Random float scalar in [min_d, max_d).
        scalar = np.random.uniform(min_d, max_d)
        return scalar

    def generate_int_scalar(self, min_d, max_d):
        # Random int scalar in [min_d, max_d).
        scalar = np.random.randint(min_d, max_d)
        return scalar

    def cpu_op_exec(self, input1, input2):
        # Reference result: in-place AND of two tensors on the CPU.
        input1 = input1.to("cpu")
        input2 = input2.to("cpu")
        output = input1.__iand__(input2)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def cpu_op_exec_scalar(self, input1, input2):
        # Reference result: in-place AND with a Python scalar on the CPU.
        input1 = input1.to("cpu")
        output = input1.__iand__(input2)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def npu_op_exec(self, input1, input2):
        # Device-under-test result: in-place AND of two tensors on the NPU.
        input1 = input1.to("npu")
        input2 = input2.to("npu")
        output = input1.__iand__(input2)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def npu_op_exec_scalar(self, input1, input2):
        # Device-under-test result: in-place AND with a Python scalar on the NPU.
        input1 = input1.to("npu")
        output = input1.__iand__(input2)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def test___iand___bool(self, device):
        # Tensor & tensor, bool dtype.
        npu_input1, npu_input2 = self.generate_bool_data((3, 5)), self.generate_bool_data((3, 5))
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
        npu_output = self.npu_op_exec(npu_input1, npu_input2)
        self.assertRtolEqual(cpu_output, npu_output)

    def test___iand___int16(self, device):
        # Tensor & tensor, int16 dtype; outputs widened to int32 for comparison.
        npu_input1, npu_input2= self.generate_data(0, 100, (4, 3), np.int16)
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
        npu_output = self.npu_op_exec(npu_input1, npu_input2)
        cpu_output = cpu_output.astype(np.int32)
        npu_output = npu_output.astype(np.int32)
        self.assertRtolEqual(cpu_output, npu_output)

    def test___iand___int32(self, device):
        # Tensor & tensor, int32 dtype.
        npu_input1, npu_input2= self.generate_data(0, 100, (4, 3), np.int32)
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2)
        npu_output = self.npu_op_exec(npu_input1, npu_input2)
        cpu_output = cpu_output.astype(np.int32)
        npu_output = npu_output.astype(np.int32)
        self.assertRtolEqual(cpu_output, npu_output)

    def test___iand___scalar_bool(self, device):
        # Tensor & scalar, bool dtype.
        npu_input1 = self.generate_bool_data((3, 5))
        cpu_output = self.cpu_op_exec_scalar(npu_input1, True)
        npu_output = self.npu_op_exec_scalar(npu_input1, True)
        self.assertRtolEqual(cpu_output, npu_output)

    def test___iand___scalar_int16(self, device):
        # Tensor & scalar, int16 dtype; outputs widened to int32 for comparison.
        npu_input1 = self.generate_single_data(0, 100, (4, 3), np.int16)
        cpu_output = self.cpu_op_exec_scalar(npu_input1, 1)
        npu_output = self.npu_op_exec_scalar(npu_input1, 1)
        cpu_output = cpu_output.astype(np.int32)
        npu_output = npu_output.astype(np.int32)
        self.assertRtolEqual(cpu_output, npu_output)

    def test___iand___scalar_int32(self, device):
        # Tensor & scalar, int32 dtype.
        npu_input1 = self.generate_single_data(0, 100, (4, 3), np.int32)
        cpu_output = self.cpu_op_exec_scalar(npu_input1, 1)
        npu_output = self.npu_op_exec_scalar(npu_input1, 1)
        cpu_output = cpu_output.astype(np.int32)
        npu_output = npu_output.astype(np.int32)
        self.assertRtolEqual(cpu_output, npu_output)
# Register the test class for every available device type except the CPU
# (this suite targets the NPU backend; the CPU results are the reference).
instantiate_device_type_tests(Test__Iand__, globals(), except_for='cpu')

if __name__ == "__main__":
    run_tests()
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
from imutils.object_detection import non_max_suppression
import numpy as np
import time
"""
"""
model_bin = "../../models/east_net/frozen_east_text_detection.pb"
layer_names = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
padding = 5
def main(image_path="G:\\Project\\opencv-ascs-resources\\meter_pointer_roi\\2020-03-05_22-18-30.jpeg"):
    """Run EAST text detection on an image, draw the detected boxes and crop
    each text line for further processing.

    :param image_path: image to analyse (generalized from the previously
        hard-coded path; the default keeps the old behavior)
    :raises FileNotFoundError: if the image cannot be read
    """
    dnn = cv.dnn.readNet(model_bin)
    dnn.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    dnn.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
    image = cv.imread(image_path)
    if image is None:
        # cv.imread returns None instead of raising on a bad path.
        raise FileNotFoundError("Could not read image: " + image_path)
    cv.imshow("src", image)
    (h, w) = image.shape[:2]
    # EAST expects a 320x320 input; keep the ratios to map boxes back.
    r_h = h / float(320)
    r_w = w / float(320)
    data = cv.dnn.blobFromImage(image, 1.0, (320, 320), (123.68, 116.78, 103.94), True, False)
    start = time.time()
    dnn.setInput(data)
    scores, geometry = dnn.forward(layer_names)
    end = time.time()
    print("[INFO] test detection took {:.6f} seconds".format(end - start))
    num_rows, num_cols = scores.shape[2: 4]
    rects = []
    confidences = []
    for y in range(0, num_rows):
        scores_data = scores[0, 0, y]
        x_data_0 = geometry[0, 0, y]
        x_data_1 = geometry[0, 1, y]
        x_data_2 = geometry[0, 2, y]
        x_data_3 = geometry[0, 3, y]
        angles_data = geometry[0, 4, y]
        for x in range(0, num_cols):
            # Skip low-confidence cells.
            if scores_data[x] < 0.5:
                continue
            # Each output cell covers a 4x4 pixel area of the 320x320 input.
            off_set_x, off_set_y = x * 4.0, y * 4.0
            angle = angles_data[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # Box extents from edge distances (renamed from h/w, which
            # previously shadowed the image dimensions above).
            box_h = x_data_0[x] + x_data_2[x]
            box_w = x_data_1[x] + x_data_3[x]
            end_x = int(off_set_x + (cos * x_data_1[x]) + (sin * x_data_2[x]))
            end_y = int(off_set_y - (sin * x_data_1[x]) + (cos * x_data_2[x]))
            start_x = int(end_x - box_w)
            start_y = int(end_y - box_h)
            rects.append([start_x, start_y, end_x, end_y])
            confidences.append(float(scores_data[x]))
    # Non-maximum suppression to merge overlapping candidate boxes.
    boxes = non_max_suppression(np.array(rects), probs=confidences)
    result = np.zeros(image.shape[:2], dtype=image.dtype)
    for start_x, start_y, end_x, end_y in boxes:
        start_x = int(start_x * r_w)
        start_y = int(start_y * r_h)
        end_x = int(end_x * r_w)
        end_y = int(end_y * r_h)
        cv.rectangle(image, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)
        # Fix: render the box into the mask as well. Previously `result`
        # stayed all-zero (the drawing loop was commented out), so the
        # contour stage below never found any text region.
        cv.rectangle(result, (start_x, start_y), (end_x, end_y), 255, -1)
    # Dilate horizontally so boxes on the same line merge into one blob.
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 1))
    result = cv.morphologyEx(result, cv.MORPH_DILATE, kernel)
    contours, hierachy = cv.findContours(result, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    text_boxes = []
    for contour in contours:
        box = cv.boundingRect(contour)
        # Ignore tiny blobs.
        if box[2] < 10 or box[3] < 10:
            continue
        # Stored as (x1, y1, x2, y2).
        text_boxes.append((box[0], box[1], box[0] + box[2], box[1] + box[3]))
    # Sort boxes top-to-bottom (replaces the hand-rolled bubble sort).
    text_boxes.sort(key=lambda b: b[1])
    for x, y, w, h in text_boxes:
        roi = image[y: h + padding, x: w, :]
        text_area_detect(roi)
    cv.imshow("finder", image)
    cv.waitKey(0)
def text_area_detect(roi):
    """Binarize a cropped text region and display it.

    :param roi: BGR image patch containing a detected text line
    :return: the inverted-binary (text = white) threshold image
    """
    gray = cv.cvtColor(roi, cv.COLOR_BGR2GRAY)
    binary = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 45, 15)
    # Fix: display the computed threshold result. Previously the raw `gray`
    # image was shown and `binary` was computed but never used.
    cv.imshow("text_roi", binary)
    cv.waitKey(0)
    return binary
if "__main__" == __name__:
main()
cv.destroyAllWindows()
|
import matplotlib.pyplot as plt
def temperature_plot(temperature_array, mesh):
    """Plot the nodal temperature distribution against radial position.

    inputs
    -------
    temperature_array: array of temperatures, one per unique mesh node
    mesh: iterable of elements; each element's second item holds its node radii

    outputs
    -------
    A lovely plot of the temperature distribution.
    """
    # Collect every node radius, deduplicate shared nodes, and sort radially.
    unique_radii = {node for element in mesh for node in element[1]}
    ordered_radii = sorted(unique_radii, key=float)
    plt.plot(ordered_radii, temperature_array)
    plt.xlabel('Radial Position')
    plt.ylabel('Temperature')
    return plt.show()
|
from math import *
from fractions import *
# Accumulates every (n, m) grid-size pair found; printed in sorted order.
l = []


def solve():
    """Read a target square count x from stdin and print every grid size
    (n, m) whose total number of axis-aligned squares equals x.

    Ported from Python 2 (raw_input / print statement / implicit floor
    division) to Python 3. Appends each pair to the module-level list ``l``,
    then prints the pair count followed by one pair per line.
    """
    x = int(input())
    n = 1
    while True:
        # t1 = n(n+1)/2: squares gained per extra column of an n-row grid.
        t1 = (n * n) - ((n * (n - 1)) // 2)
        # t2: constant term of the square-count formula for n rows
        # (always an exact integer, so floor division is safe).
        t2 = ((n - 1) * n * (2 * n - 1)) // 6
        t2 -= (n * n * (n - 1)) // 2
        # Once even m == n cannot reach x, larger n cannot either.
        if x - t2 < n * t1:
            break
        m, remainder = divmod(x - t2, t1)
        if remainder == 0:
            l.append((n, m))
            if n != m:
                l.append((m, n))
        n += 1
    l.sort()
    print(len(l))
    for pair in l:
        print(pair[0], pair[1])


# Guarded so importing this module no longer blocks reading stdin.
if __name__ == "__main__":
    solve()
|
'''
Python if statement

if condition:
    statements executed when the condition is true

if condition:
    statements executed when true
else:
    statements executed when false
'''
# Read an integer and print it only when it is positive.
num = int(input('>>>정수 입력:'))

if num > 0 :
    print(f'num = {num}')
print('프로그램 종료')

# Using an else clause together with if.
if num > 0 :
    print('양수')
else:
    print('0 또는 음수')
print('프로그램 종료')

'''
Using several conditions:

if condition1:
    statements executed when condition1 is true
elif condition2:
    statements executed when condition2 is true
....
else:
    statements executed when every condition is false
'''
# if-elif-else
score = 85
if score >= 90:
    print('A')
elif score >= 80:
    print('B')
elif score >= 70:
    print('C')
else:
    print('F')
print('프로그램 종료')

# Another if statement can be nested inside an if, elif or else block.
if num % 2 == 0: # even
    if num % 4 == 0:
        print('4의 배수')
    else:
        print('4의 배수가 아닌 짝수')
else: # odd
    print('홀수')
|
def add_number(start, end):
    """Return the sum of the integers from start (inclusive) to end (exclusive)."""
    return sum(range(start, end))


test1 = add_number(333, 777)
print(test1)
|
import re
import requests
import base64
import os.path
def is_valid_url(string):
    """Return the regex match when *string* contains an HTTP(S) URL, else None."""
    url_pattern = (r'(http(s)?://.)(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b'
                   r'([-a-zA-Z0-9@:%_+.~#?&/=]*)')
    return re.search(url_pattern, string)
def is_hex_color(string):
    """Return the regex match when *string* is a #RGB or #RRGGBB hex color, else None."""
    hex_pattern = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
    return re.search(hex_pattern, string)
class APIError(Exception):
    """Raised when the ID Analyzer API response contains an error message."""
client_library = "python-sdk"
class CoreAPI:
"""
Initialize Core API with an API key and optional region (US, EU)
Core API is used to directly scan and validate global driver license, passport, and ID card.
:param apikey: You API key
:param region: US/EU, defaults to US
:raises ValueError: Invalid input argument
"""
DEFAULT_CONFIG = {
"accuracy": 2,
"authenticate": False,
"authenticate_module": 1,
"ocr_scaledown": 2000,
"outputimage": False,
"outputface": False,
"outputmode": "url",
"dualsidecheck": False,
"verify_expiry": True,
"verify_documentno": "",
"verify_name": "",
"verify_dob": "",
"verify_age": "",
"verify_address": "",
"verify_postcode": "",
"country": "",
"region": "",
"type": "",
"checkblocklist": "",
"vault_save": True,
"vault_saveunrecognized": "",
"vault_noduplicate": "",
"vault_automerge": "",
"vault_customdata1": "",
"vault_customdata2": "",
"vault_customdata3": "",
"vault_customdata4": "",
"vault_customdata5": "",
"barcodemode": False,
"biometric_threshold": 0.4,
"aml_check": False,
"aml_strict_match": False,
"aml_database": "",
"contract_generate": "",
"contract_format": "",
"contract_prefill_data": "",
"client": client_library
}
def __init__(self, apikey, region="US"):
if not apikey:
raise ValueError("Please provide an API key")
if not region:
raise ValueError("Please set an API region (US, EU)")
self.config = self.DEFAULT_CONFIG
self.apikey = apikey
self.throw_error = False
if region.upper() == "EU":
self.apiendpoint = "https://api-eu.idanalyzer.com/"
elif region.upper() == "US":
self.apiendpoint = "https://api.idanalyzer.com/"
else:
self.apiendpoint = region
def throw_api_exception(self, throw_exception=False):
    """
    Control whether an exception is raised when the API response contains
    an error message.

    :param throw_exception: Throw exception upon API error, defaults to False
    """
    # Strict-boolean coercion: only a literal True enables raising.
    self.throw_error = throw_exception is True
def reset_config(self):
    """
    Reset all API configurations except API key and region.
    """
    # Fix: copy the defaults. Assigning self.DEFAULT_CONFIG directly aliased
    # the class-level dict, so subsequent setter calls mutated the shared
    # defaults across every CoreAPI instance.
    self.config = dict(self.DEFAULT_CONFIG)
def set_accuracy(self, accuracy=2):
    """
    Set OCR accuracy.

    :param accuracy: 0 = Fast, 1 = Balanced, 2 = Accurate, defaults to 2
    """
    self.config.update(accuracy=accuracy)
def enable_authentication(self, enabled=False, module=2):
    """
    Validate the document to check whether it is authentic and has not been
    tampered with, and select the authentication module.

    :param enabled: Enable or disable Document Authentication, defaults to False
    :param module: Authentication module version: 1, 2 or 'quick', defaults to 2
    :raises ValueError: Invalid input argument
    """
    self.config['authenticate'] = enabled is True
    # The module value is only validated when authentication is enabled.
    if enabled and module not in (1, 2, 'quick'):
        raise ValueError("Invalid authentication module, 1, 2 or 'quick' accepted.")
    self.config['authenticate_module'] = module
def set_ocr_image_resize(self, max_scale=2000):
    """
    Scale down the uploaded image before sending it to the OCR engine.
    Adjust this value to fine tune recognition accuracy on large
    full-resolution images. Set 0 to disable image resizing.

    :param max_scale: 0 or 500~4000, defaults to 2000
    :raises ValueError: Invalid input argument
    """
    if max_scale != 0 and not (500 <= max_scale <= 4000):
        raise ValueError("Invalid scale value, 0, or 500 to 4000 accepted.")
    self.config['ocr_scaledown'] = max_scale
def set_biometric_threshold(self, threshold=0.4):
    """
    Set the minimum confidence score to consider faces being identical.

    :param threshold: float between 0 and 1; a higher value yields stricter
        verification, defaults to 0.4
    :raises ValueError: Invalid input argument
    """
    if not 0 < threshold <= 1:
        raise ValueError("Invalid threshold value, float between 0 to 1 accepted.")
    self.config['biometric_threshold'] = threshold
def enable_image_output(self, crop_document=False, crop_face=False, output_format="url"):
    """
    Generate cropped images of the document and/or face, and choose the
    output format.

    :param crop_document: Enable or disable document cropping, defaults to False
    :param crop_face: Enable or disable face cropping, defaults to False
    :param output_format: 'url' or 'base64', defaults to 'url'
    :raises ValueError: Invalid input argument
    """
    if output_format not in ('url', 'base64'):
        raise ValueError("Invalid output format, 'url' or 'base64' accepted.")
    self.config['outputimage'] = crop_document is True
    self.config['outputface'] = crop_face is True
    self.config['outputmode'] = output_format
def enable_aml_check(self, enabled=False):
    """
    Check the document holder's name and document number against the
    ID Analyzer AML Database for sanctions, crimes and PEPs.

    :param enabled: Enable or disable AML/PEP check
    """
    self.config["aml_check"] = enabled is True
def set_aml_database(self, databases="au_dfat,ca_dfatd,ch_seco,eu_fsf,fr_tresor_gels_avoir,gb_hmt,ua_sfms,un_sc,us_ofac,eu_cor,eu_meps,global_politicians,interpol_red"):
    """
    Select the source databases used for the AML check; an empty value means
    all source databases are checked. Separate database codes with commas
    (e.g. "un_sc,us_ofac"); see the AML API Overview for the full list.

    :param databases: Database codes separated by comma
    """
    self.config["aml_database"] = databases
def enable_aml_strict_match(self, enabled=False):
    """
    Restrict AML matching to entities with the exact same nationality and
    birthday. By default, an identical name or document number already counts
    as a match even when birthday or nationality are unknown; strict mode
    reduces such false positives.

    :param enabled: Enable or disable AML strict match mode
    """
    self.config["aml_strict_match"] = enabled is True
def enable_dualside_check(self, enabled=False):
    """
    Verify that names, document number and document type agree between the
    front and back images during a dual-side scan; a mismatch produces API
    error 14.

    :param enabled: Enable or disable dual-side information check, defaults to False
    """
    self.config['dualsidecheck'] = enabled is True
def verify_expiry(self, enabled=False):
    """
    Check whether the document is still valid based on its expiry date.

    :param enabled: Enable or disable expiry check, defaults to False
    """
    self.config['verify_expiry'] = enabled is True
def verify_document_number(self, document_number):
    """
    Check if the supplied document or personal number matches the document.
    Pass a falsy value to clear a previously set number.

    :param document_number: Document or personal number requiring validation
    """
    self.config['verify_documentno'] = document_number if document_number else ""
def verify_name(self, full_name):
    """
    Check if the supplied name matches the document.
    Pass a falsy value to clear a previously set name.

    :param full_name: Full name requiring validation
    """
    self.config['verify_name'] = full_name if full_name else ""
def verify_dob(self, dob):
    """
    Check if the supplied date of birth matches the document.
    Pass a falsy value to clear a previously set date.

    :param dob: Date of birth in YYYY/MM/DD
    :raises ValueError: Invalid input argument
    """
    if not dob:
        self.config['verify_dob'] = ""
        return
    if not re.search(r'^(\d{4}/\d{2}/\d{2})$', dob):
        raise ValueError("Invalid birthday format (YYYY/MM/DD)")
    self.config['verify_dob'] = dob
def verify_age(self, age_range):
    """
    Check if the document holder's age falls inside the given range.
    Pass a falsy value to clear a previously set range.

    :param age_range: Age range, example: 18-40
    :raises ValueError: Invalid input argument
    """
    if not age_range:
        self.config['verify_age'] = ""
        return
    if not re.search(r'^(\d+-\d+)$', age_range):
        raise ValueError("Invalid age range format (minAge-maxAge)")
    self.config['verify_age'] = age_range
def verify_address(self, address):
    """
    Check if the supplied address matches the document.
    Pass a falsy value to clear a previously set address.

    :param address: Address requiring validation
    """
    self.config['verify_address'] = address if address else ""
def verify_postcode(self, postcode):
    """
    Check if the supplied postcode matches the document.
    Pass a falsy value to clear a previously set postcode.

    :param postcode: Postcode requiring validation
    """
    self.config['verify_postcode'] = postcode if postcode else ""
def restrict_country(self, country_codes):
    """
    Require the document to be issued by one of the given countries;
    otherwise API error 10 is returned. Separate multiple values with a
    comma, e.g. "US,CA" accepts documents from the United States and Canada.
    Pass a falsy value to clear the restriction.

    :param country_codes: ISO ALPHA-2 country codes separated by comma
    """
    self.config['country'] = country_codes if country_codes else ""
def restrict_state(self, states):
    """
    Require the document to be issued by one of the given states; otherwise
    API error 11 is returned. Separate multiple values with a comma, e.g.
    "CA,TX" accepts documents from California and Texas.
    Pass a falsy value to clear the restriction.

    :param states: State full name or abbreviation separated by comma
    """
    self.config['region'] = states if states else ""
def restrict_type(self, document_type="DIP"):
"""
Check if the document was one of the specified types, if not error code 12 will be thrown.
For example, "PD" would accept both passport and drivers license.
:param document_type: P: Passport, D: Driver's License, I: Identity Card
"""
if not document_type:
self.config['type'] = ""
else:
self.config['type'] = document_type
def enable_barcode_mode(self, enabled=False):
    """
    Disable visual OCR and read data from AAMVA barcodes only.

    :param enabled: Enable or disable Barcode Mode
    """
    self.config['barcodemode'] = enabled is True
def enable_vault(self, enabled=True, save_unrecognized=False, no_duplicate_image=False, auto_merge_document=False):
    """
    Save the document image and parsed information in your secured vault.
    Entries can be listed, searched and updated through the Vault API or
    the web portal.

    :param enabled: Enable or disable Vault
    :param save_unrecognized: Save the document image even when the document
        cannot be recognized
    :param no_duplicate_image: Prevent duplicated images from being saved
    :param auto_merge_document: Merge images with the same document number
        into a single vault entry
    """
    # All four options are strict-boolean flags on the request config.
    flags = {
        'vault_save': enabled,
        'vault_saveunrecognized': save_unrecognized,
        'vault_noduplicate': no_duplicate_image,
        'vault_automerge': auto_merge_document,
    }
    for key, value in flags.items():
        self.config[key] = value is True
def set_vault_data(self, data1="", data2="", data3="", data4="", data5=""):
    """
    Attach up to 5 custom strings to the vault entry; useful for filtering and searching entries.
    :param data1: Custom data field 1
    :param data2: Custom data field 2
    :param data3: Custom data field 3
    :param data4: Custom data field 4
    :param data5: Custom data field 5
    """
    # Map each positional value onto its numbered vault_customdataN key.
    for slot, value in enumerate((data1, data2, data3, data4, data5), start=1):
        self.config['vault_customdata{0}'.format(slot)] = value
def generate_contract(self, template_id, out_format="PDF", prefill_data=None):
    """
    Generate legal document using data from user uploaded ID
    :param template_id: Contract Template ID displayed under web portal
    :param out_format: Output file format: PDF, DOCX or HTML
    :param prefill_data: Dictionary or JSON string, to autofill dynamic fields in contract template.
    :raises ValueError: Invalid input argument
    """
    if not template_id:
        raise ValueError("Invalid template ID")
    self.config['contract_generate'] = template_id
    self.config['contract_format'] = out_format
    # None is normalized to an empty mapping so the payload stays serializable.
    self.config['contract_prefill_data'] = {} if prefill_data is None else prefill_data
def set_parameter(self, parameter_key, parameter_value):
    """
    Set an arbitrary API parameter, bypassing the built-in helper methods.
    :param parameter_key: Parameter key
    :param parameter_value: Parameter value
    """
    self.config.update({parameter_key: parameter_value})
def scan(self, **options):
    r"""
    Perform scan on ID document with Core API,
    optionally specify document back image, face verification image, face verification video and video passcode
    :param \**options:
        See below
    :Keyword Arguments:
        * *document_primary* (``str``) --
          Front of Document (File path, base64 content or URL)
        * *document_secondary* (``str``) --
          Back of Document (File path, base64 content or URL)
        * *biometric_photo* (``str``) --
          Face Photo (File path, base64 content or URL)
        * *biometric_video* (``str``) --
          Face Video (File path, base64 content or URL)
        * *biometric_video_passcode* (``str``) --
          Face Video Passcode (4 Digit Number)
    :return Scan and verification results of ID document
    :rtype: dict
    :raises ValueError: Invalid input argument
    :raises APIError: API returned an error
    """
    # Copy the configuration: mutating self.config directly would leak
    # per-scan fields (apikey, urls, base64 blobs) into subsequent calls.
    payload = dict(self.config)
    payload["apikey"] = self.apikey

    def _attach(content, url_key, b64_key, error_message):
        # Resolve a media argument that may be a URL, a local file path,
        # or already-encoded base64 content (heuristic: length > 100).
        if is_valid_url(content):
            payload[url_key] = content
        elif os.path.isfile(content):
            with open(content, "rb") as media_file:
                payload[b64_key] = base64.b64encode(media_file.read())
        elif len(content) > 100:
            payload[b64_key] = content
        else:
            raise ValueError(error_message)

    if not options.get('document_primary'):
        raise ValueError("Primary document image required.")
    _attach(options['document_primary'], 'url', 'file_base64',
            "Invalid primary document image, file not found or malformed URL.")
    if options.get('document_secondary'):
        _attach(options['document_secondary'], 'url_back', 'file_back_base64',
                "Invalid secondary document image, file not found or malformed URL.")
    if options.get('biometric_photo'):
        _attach(options['biometric_photo'], 'faceurl', 'face_base64',
                "Invalid face image, file not found or malformed URL.")
    if options.get('biometric_video'):
        _attach(options['biometric_video'], 'videourl', 'video_base64',
                "Invalid face video, file not found or malformed URL.")
        # Passcode is only required when a video is supplied; video
        # verification needs the 4-digit code spoken/shown in the clip.
        passcode = options.get('biometric_video_passcode')
        if not passcode or not re.search(r'^([0-9]{4})$', passcode):
            raise ValueError("Please provide a 4 digit passcode for video biometric verification.")
        payload['passcode'] = passcode
    r = requests.post(self.apiendpoint, data=payload)
    r.raise_for_status()
    result = r.json()
    if not self.throw_error:
        return result
    if result.get('error'):
        raise APIError(result['error'])
    else:
        return result
class DocuPass:
    """
    Initialize DocuPass API with an API key, company name and optional region (US, EU)
    DocuPass allows rapid identity verification using a webpage or mobile app
    :param apikey: Your API key
    :param company_name: Your company name to display in DocuPass pages
    :param region: US/EU, defaults to US
    :raises ValueError: Invalid input argument
    """

    # Per-instance configuration template. __init__ and reset_config copy this
    # dict; assigning it directly would make every instance (and the class
    # itself) share one mutable dict, so one instance's settings would leak
    # into all others.
    DEFAULT_CONFIG = {
        "companyname": "",
        "callbackurl": "",
        "biometric": 0,
        "authenticate_minscore": 0,
        "authenticate_module": 2,
        "maxattempt": 1,
        "documenttype": "",
        "documentcountry": "",
        "documentregion": "",
        "dualsidecheck": False,
        "verify_expiry": False,
        "verify_documentno": "",
        "verify_name": "",
        "verify_dob": "",
        "verify_age": "",
        "verify_address": "",
        "verify_postcode": "",
        "successredir": "",
        "failredir": "",
        "customid": "",
        "vault_save": True,
        "return_documentimage": True,
        "return_faceimage": True,
        "return_type": 1,
        "qr_color": "",
        "qr_bgcolor": "",
        "qr_size": "",
        "qr_margin": "",
        "welcomemessage": "",
        "nobranding": "",
        "logo": "",
        "language": "",
        "biometric_threshold": 0.4,
        "reusable": False,
        "aml_check": False,
        "aml_strict_match": False,
        "aml_database": "",
        "phoneverification": False,
        "verify_phone": "",
        "sms_verification_link": "",
        "customhtmlurl": "",
        "contract_generate": "",
        "contract_sign": "",
        "contract_format": "",
        "contract_prefill_data": "",
        "sms_contract_link": "",
        "client": client_library
    }

    def __init__(self, apikey, company_name="My Company Name", region="US"):
        if not apikey:
            raise ValueError("Please provide an API key")
        if not company_name:
            raise ValueError("Please provide your company name")
        if not region:
            raise ValueError("Please set an API region (US, EU)")
        # Copy the template so per-instance changes never mutate
        # DEFAULT_CONFIG or bleed into other DocuPass instances.
        self.config = dict(self.DEFAULT_CONFIG)
        self.apikey = apikey
        self.throw_error = False
        self.config['companyname'] = company_name
        if region.upper() == "EU":
            self.apiendpoint = "https://api-eu.idanalyzer.com/"
        elif region.upper() == 'US':
            self.apiendpoint = "https://api.idanalyzer.com/"
        else:
            # Any other value is treated as a custom endpoint URL.
            self.apiendpoint = region

    def throw_api_exception(self, throw_exception=False):
        """
        Whether an exception should be thrown if API response contains an error message
        :param throw_exception: Throw exception upon API error, defaults to false
        """
        self.throw_error = throw_exception is True

    def reset_config(self):
        """
        Reset all API configurations except API key and region.
        """
        # Copy (not alias) the template — see DEFAULT_CONFIG note above.
        self.config = dict(self.DEFAULT_CONFIG)

    def set_max_attempt(self, max_attempt=1):
        """
        Set max verification attempt per user
        :param max_attempt: 1 to 10
        :raises ValueError: Invalid input argument
        """
        # range(1, 11) so the documented upper bound of 10 is accepted.
        if max_attempt not in range(1, 11):
            raise ValueError("Invalid max attempt, please specify integer between 1 to 10.")
        self.config['maxattempt'] = max_attempt

    def set_custom_id(self, custom_id):
        """
        Set a custom string that will be sent back to your server's callback, and appended to redirection URLs as a query string.
        It is useful for identifying your user within your database. This value will be stored under docupass_customid under Vault.
        :param custom_id: A string used to identify your customer internally
        """
        self.config['customid'] = custom_id

    def set_welcome_message(self, message):
        """
        Display a custom message to the user in the beginning of verification
        :param message: Plain text string
        """
        self.config['welcomemessage'] = message

    def set_logo(self, url="https://docupass.app/asset/logo1.png"):
        """
        Replace footer logo with your own logo
        :param url: Logo URL
        """
        self.config['logo'] = url

    def hide_branding_logo(self, hidden=False):
        """
        Hide all branding logo
        :param hidden: Hide logo, defaults to False
        """
        self.config['nobranding'] = hidden is True

    def set_custom_html_url(self, url):
        """
        Replace DocuPass page content with your own HTML and CSS, you can download the HTML/CSS template from DocuPass API Reference page
        :param url: URL pointing to your own HTML page
        """
        self.config['customhtmlurl'] = url

    def set_language(self, language):
        """
        DocuPass automatically detects user device language and display corresponding language.
        Set this parameter to override automatic language detection.
        :param language: Check DocuPass API reference for language code
        """
        self.config['language'] = language

    def set_callback_url(self, url="https://www.example.com/docupass_callback.php"):
        """
        Set server-side callback/webhook URL to receive verification results
        :param url: Callback URL
        :raises ValueError: Invalid input argument
        """
        if url and not is_valid_url(url):
            raise ValueError("Invalid URL, the host does not appear to be a remote host.")
        self.config['callbackurl'] = url

    def set_redirection_url(self, success_url="https://www.example.com/success.php",
                            fail_url="https://www.example.com/failed.php"):
        """
        Redirect client browser to set URLs after verification.
        DocuPass reference code and customid will be appended to the end of URL
        e.g. https://www.example.com/success.php?reference=XXXXXXXX&customid=XXXXXXXX
        :param success_url: Redirection URL after verification succeeded
        :param fail_url: Redirection URL after verification failed
        :raises ValueError: Invalid input argument
        """
        if success_url and not is_valid_url(success_url):
            raise ValueError("Invalid URL format for success URL")
        if fail_url and not is_valid_url(fail_url):
            raise ValueError("Invalid URL format for fail URL")
        self.config['successredir'] = success_url
        self.config['failredir'] = fail_url

    def enable_authentication(self, enabled=False, module=2, minimum_score=0.3):
        """
        Validate the document to check whether the document is authentic and has not been tampered
        :param enabled: Enable or disable document authentication, defaults to False
        :param module: Authentication Module: "1", "2" or "quick", defaults to "2"
        :param minimum_score: Minimum score to pass verification, defaults to 0.3
        :raises ValueError: Invalid input argument
        """
        if not enabled:
            # A zero minimum score disables authentication server-side.
            self.config['authenticate_minscore'] = 0
        else:
            if not 0 < minimum_score <= 1:
                raise ValueError("Invalid minimum score, please specify float between 0 to 1.")
            if enabled and module != 1 and module != 2 and module != 'quick':
                raise ValueError("Invalid authentication module, 1, 2 or 'quick' accepted.")
            self.config['authenticate_module'] = module
            self.config['authenticate_minscore'] = minimum_score

    def enable_face_verification(self, enabled=False, verification_type=1, threshold=0.4):
        """
        Whether users will be required to submit a selfie photo or record selfie video for facial verification.
        :param enabled: Enable or disable facial biometric verification, defaults to False
        :param verification_type: 1 for photo verification, 2 for video verification, defaults to 1
        :param threshold: Minimum confidence score required to pass verification, value between 0 to 1, defaults to 0.4
        :raises ValueError: Invalid input argument
        """
        if not enabled:
            self.config['biometric'] = 0
        else:
            if verification_type == 1 or verification_type == 2:
                self.config['biometric'] = verification_type
                self.config['biometric_threshold'] = threshold
            else:
                raise ValueError("Invalid verification type, 1 for photo verification, 2 for video verification.")

    def set_reusable(self, reusable=False):
        """
        Enabling this parameter will allow multiple users to verify their identity through the same URL,
        a new DocuPass reference code will be generated for each user automatically.
        :param reusable: Set True to allow unlimited verification for a single DocuPass session, defaults to False
        """
        self.config['reusable'] = reusable is True

    def set_callback_image(self, return_documentimage=True, return_faceimage=True, return_type=1):
        """
        Enable or disable returning user uploaded document and face image in callback, and image data format.
        :param return_documentimage: Return document image in callback data, defaults to True
        :param return_faceimage: Return face image in callback data, defaults to True
        :param return_type: Image type: 0=base64, 1=url, defaults to 1
        """
        self.config['return_documentimage'] = return_documentimage is True
        self.config['return_faceimage'] = return_faceimage is True
        self.config['return_type'] = 0 if return_type == 0 else 1

    def set_qrcode_format(self, foreground_color="000000", background_color="FFFFFF", size=5, margin=1):
        """
        Configure QR code generated for DocuPass Mobile and Live Mobile
        :param foreground_color: Image foreground color HEX code, defaults to 000000
        :param background_color: Image background color HEX code, defaults to FFFFFF
        :param size: Image size: 1 to 50, defaults to 5
        :param margin: Image margin: 0 to 50, defaults to 1
        :raises ValueError: Invalid input argument
        """
        if not is_hex_color(foreground_color):
            raise ValueError("Invalid foreground color HEX code")
        if not is_hex_color(background_color):
            raise ValueError("Invalid background color HEX code")
        # Upper bounds use 51 so the documented maximum of 50 is accepted.
        if size not in range(1, 51):
            raise ValueError("Invalid image size (1-50)")
        if margin not in range(0, 51):
            raise ValueError("Invalid margin (0-50)")
        self.config['qr_color'] = foreground_color
        self.config['qr_bgcolor'] = background_color
        self.config['qr_size'] = size
        self.config['qr_margin'] = margin

    def enable_dualside_check(self, enabled=False):
        """
        Check if the names, document number and document type matches between the front and the back of the document
        when performing dual-side scan. If any information mismatches error 14 will be thrown.
        :param enabled: Enable or disable dual-side information check, defaults to False
        """
        self.config['dualsidecheck'] = enabled is True

    def enable_aml_check(self, enabled=False):
        """
        Check document holder's name and document number against ID Analyzer AML Database for sanctions, crimes and PEPs.
        :param enabled: Enable or disable AML/PEP check
        """
        self.config["aml_check"] = enabled is True

    def set_aml_database(self,
                         databases="au_dfat,ca_dfatd,ch_seco,eu_fsf,fr_tresor_gels_avoir,gb_hmt,ua_sfms,un_sc,us_ofac,eu_cor,eu_meps,global_politicians,interpol_red"):
        """
        Specify the source databases to perform AML check, if left blank, all source databases will be checked.
        Separate each database code with comma, for example: un_sc,us_ofac. For full list of source databases and corresponding code visit AML API Overview.
        :param databases: Database codes separated by comma
        """
        self.config["aml_database"] = databases

    def enable_aml_strict_match(self, enabled=False):
        """
        By default, entities with identical name or document number will be considered a match even though their birthday or nationality may be unknown.
        Enable this parameter to reduce false-positives by only matching entities with exact same nationality and birthday.
        :param enabled: Enable or disable AML strict match mode
        """
        self.config["aml_strict_match"] = enabled is True

    def enable_phone_verification(self, enabled=False):
        """
        Whether to ask user to enter a phone number for verification, DocuPass supports both mobile or landline number verification.
        Verified phone number will be returned in callback JSON.
        :param enabled: Enable or disable user phone verification
        """
        # Normalized with `is True` for consistency with the other boolean setters.
        self.config["phoneverification"] = enabled is True

    def sms_verification_link(self, mobile_number="+1333444555"):
        """
        DocuPass will send SMS to this number containing DocuPass link to perform identity verification, the number provided will be automatically considered as verified if user completes identity verification. If an invalid or unreachable number is provided error 1050 will be thrown.
        You should add your own thresholding mechanism to prevent abuse as you will be charged 1 quota to send the SMS.
        :param mobile_number: Mobile number should be provided in international format such as +1333444555
        """
        self.config["sms_verification_link"] = mobile_number

    def sms_contract_link(self, mobile_number="+1333444555"):
        """
        DocuPass will send SMS to this number containing DocuPass link to review and sign legal document. If an invalid or unreachable number is provided error 1050 will be thrown.
        You should add your own thresholding mechanism to prevent abuse as you will be charged 1 quota to send the SMS.
        :param mobile_number: Mobile number should be provided in international format such as +1333444555
        """
        self.config["sms_contract_link"] = mobile_number

    def verify_phone(self, phone_number="+1333444555"):
        """
        DocuPass will attempt to verify this phone number as part of the identity verification process,
        both mobile or landline are supported, users will not be able to enter their own numbers or change the provided number.
        :param phone_number: Mobile or landline number should be provided in international format such as +1333444555
        """
        self.config["verify_phone"] = phone_number

    def verify_expiry(self, enabled=False):
        """
        Check if the document is still valid based on its expiry date.
        :param enabled: Enable or disable expiry check
        """
        self.config['verify_expiry'] = enabled is True

    def verify_document_number(self, document_number):
        """
        Check if supplied document or personal number matches with document.
        :param document_number: Document or personal number requiring validation
        """
        if not document_number:
            self.config['verify_documentno'] = ""
        else:
            self.config['verify_documentno'] = document_number

    def verify_name(self, full_name):
        """
        Check if supplied name matches with document.
        :param full_name: Full name requiring validation
        """
        if not full_name:
            self.config['verify_name'] = ""
        else:
            self.config['verify_name'] = full_name

    def verify_dob(self, dob):
        """
        Check if supplied date of birth matches with document.
        :param dob: Date of birth in YYYY/MM/DD
        :raises ValueError: Invalid input argument
        """
        if not dob:
            self.config['verify_dob'] = ""
        else:
            if not re.search(r'^(\d{4}/\d{2}/\d{2})$', dob):
                raise ValueError("Invalid birthday format (YYYY/MM/DD)")
            self.config['verify_dob'] = dob

    def verify_age(self, age_range="18-99"):
        """
        Check if the document holder is aged between the given range.
        :param age_range: Age range, example: 18-40
        :raises ValueError: Invalid input argument
        """
        if not age_range:
            self.config['verify_age'] = ""
        else:
            if not re.search(r'^(\d+-\d+)$', age_range):
                raise ValueError("Invalid age range format (minAge-maxAge)")
            self.config['verify_age'] = age_range

    def verify_address(self, address):
        """
        Check if supplied address matches with document.
        :param address: Address requiring validation
        """
        if not address:
            self.config['verify_address'] = ""
        else:
            self.config['verify_address'] = address

    def verify_postcode(self, postcode):
        """
        Check if supplied postcode matches with document.
        :param postcode: Postcode requiring validation
        """
        if not postcode:
            self.config['verify_postcode'] = ""
        else:
            self.config['verify_postcode'] = postcode

    def restrict_country(self, country_codes):
        """
        Only accept document issued by specified countries. Separate multiple values with comma.
        For example "US,CA" would accept documents from United States and Canada.
        :param country_codes: ISO ALPHA-2 Country Code separated by comma
        """
        if not country_codes:
            self.config['documentcountry'] = ""
        else:
            self.config['documentcountry'] = country_codes

    def restrict_state(self, states):
        """
        Only accept document issued by specified state. Separate multiple values with comma.
        For example "CA,TX" would accept documents from California and Texas.
        :param states: State full name or abbreviation separated by comma
        """
        if not states:
            self.config['documentregion'] = ""
        else:
            self.config['documentregion'] = states

    def restrict_type(self, document_type="DIP"):
        """
        Only accept document of specified types. For example, "PD" would accept both passport and drivers license.
        :param document_type: P: Passport, D: Driver's License, I: Identity Card, defaults to DIP
        """
        if not document_type:
            self.config['documenttype'] = ""
        else:
            self.config['documenttype'] = document_type

    def enable_vault(self, enabled=True):
        """
        Save document image and parsed information in your secured vault.
        You can list, search and update document entries in your vault through Vault API or web portal.
        :param enabled: Enable or disable Vault, defaults to True
        """
        self.config['vault_save'] = enabled is True

    def set_parameter(self, parameter_key, parameter_value):
        """
        Set an API parameter and its value, this function allows you to set any API parameter without using the built-in functions
        :param parameter_key: Parameter key
        :param parameter_value: Parameter value
        """
        self.config[parameter_key] = parameter_value

    def generate_contract(self, template_id, out_format="PDF", prefill_data=None):
        """
        Generate legal document using data from user uploaded ID
        :param template_id: Contract Template ID displayed under web portal
        :param out_format: Output file format: PDF, DOCX or HTML
        :param prefill_data: Dictionary or JSON string, to autofill dynamic fields in contract template.
        :raises ValueError: Invalid input argument
        """
        if prefill_data is None:
            prefill_data = {}
        if not template_id:
            raise ValueError("Invalid template ID")
        # Generation and signing are mutually exclusive: clear the other mode.
        self.config['contract_sign'] = ""
        self.config['contract_generate'] = template_id
        self.config['contract_format'] = out_format
        self.config['contract_prefill_data'] = prefill_data

    def sign_contract(self, template_id, out_format="PDF", prefill_data=None):
        """
        Have user review and sign autofilled legal document after successful identity verification
        :param template_id: Contract Template ID displayed under web portal
        :param out_format: Output file format: PDF, DOCX or HTML
        :param prefill_data: Dictionary or JSON string, to autofill dynamic fields in contract template.
        :raises ValueError: Invalid input argument
        """
        if prefill_data is None:
            prefill_data = {}
        if not template_id:
            raise ValueError("Invalid template ID")
        # Generation and signing are mutually exclusive: clear the other mode.
        self.config['contract_generate'] = ""
        self.config['contract_sign'] = template_id
        self.config['contract_format'] = out_format
        self.config['contract_prefill_data'] = prefill_data

    def create_signature(self, template_id, out_format="PDF", prefill_data=None):
        """
        Create a DocuPass signature session for user to review and sign legal document without identity verification
        :param template_id: Contract Template ID displayed under web portal
        :param out_format: Output file format: PDF, DOCX or HTML
        :param prefill_data: Dictionary or JSON string, to autofill dynamic fields in contract template.
        :return DocuPass signature request response
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API error exception
        """
        if prefill_data is None:
            prefill_data = {}
        if not template_id:
            raise ValueError("Invalid template ID")
        # Copy the configuration: writing request-only fields (apikey,
        # template_id) into self.config would leak into later requests.
        payload = dict(self.config)
        payload["apikey"] = self.apikey
        payload["template_id"] = template_id
        payload['contract_format'] = out_format
        payload['contract_prefill_data'] = prefill_data
        r = requests.post(self.apiendpoint + "docupass/sign", data=payload)
        r.raise_for_status()
        result = r.json()
        if not self.throw_error:
            return result
        if result.get('error'):
            raise APIError(result['error'])
        else:
            return result

    def create_iframe(self):
        """
        Create a DocuPass session for embedding in web page as iframe
        :return DocuPass verification request response
        :rtype dict
        :raises APIError: API error exception
        """
        return self.__create(0)

    def create_mobile(self):
        """
        Create a DocuPass session for users to open on mobile phone, or embedding in mobile app
        :return DocuPass verification request response
        :rtype dict
        :raises APIError: API error exception
        """
        return self.__create(1)

    def create_redirection(self):
        """
        Create a DocuPass session for users to open in any browser
        :return DocuPass verification request response
        :rtype dict
        :raises APIError: API error exception
        """
        return self.__create(2)

    def create_live_mobile(self):
        """
        Create a DocuPass Live Mobile verification session for users to open on mobile phone
        :return DocuPass verification request response
        :rtype dict
        :raises APIError: API error exception
        """
        return self.__create(3)

    def __create(self, docupass_module):
        # Shared implementation of the create_* methods; posts the assembled
        # configuration with the requested DocuPass module type (0-3).
        # Copy the configuration so apikey/type are not written back into it.
        payload = dict(self.config)
        payload["apikey"] = self.apikey
        payload["type"] = docupass_module
        r = requests.post(self.apiendpoint + "docupass/create", data=payload)
        r.raise_for_status()
        result = r.json()
        if not self.throw_error:
            return result
        if result.get('error'):
            raise APIError(result['error'])
        else:
            return result

    def validate(self, reference, hash):
        """
        Validate data received through DocuPass Callback against DocuPass Server to prevent request spoofing
        :param reference: DocuPass Reference
        :param hash: DocuPass callback hash (parameter name shadows the builtin but is kept for keyword-call compatibility)
        :return Whether validation succeeded
        :rtype bool
        """
        payload = {
            "apikey": self.apikey,
            "reference": reference,
            "hash": hash,
            "client": client_library
        }
        r = requests.post(self.apiendpoint + "docupass/validate", data=payload)
        r.raise_for_status()
        result = r.json()
        return result.get('success')
class Vault:
    """
    Initialize Vault API with an API key, and optional region (US, EU)
    Vault API allows cloud storage of user ID and information retrieved from Core API and DocuPass
    :param apikey: Your API key
    :param region: API Region US/EU, defaults to US
    :raises ValueError: Invalid input argument
    """

    def __init__(self, apikey, region="US"):
        if not apikey:
            raise ValueError("Please provide an API key")
        if not region:
            raise ValueError("Please set an API region (US, EU)")
        self.apikey = apikey
        self.throw_error = False
        if region.upper() == 'EU':
            self.apiendpoint = "https://api-eu.idanalyzer.com/"
        elif region.upper() == "US":
            self.apiendpoint = "https://api.idanalyzer.com/"
        else:
            # Any other value is treated as a custom endpoint URL.
            self.apiendpoint = region

    def throw_api_exception(self, throw_exception=False):
        """
        Whether an exception should be thrown if API response contains an error message
        :param throw_exception: Throw exception upon API error, defaults to false
        """
        self.throw_error = throw_exception is True

    def get(self, vault_id):
        """
        Get a single vault entry
        :param str vault_id: Vault entry ID
        :return Vault entry data
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if not vault_id:
            raise ValueError("Vault entry ID required.")
        return self.__api("get", {"id": vault_id})

    def list(self, **options):
        r"""
        List multiple vault entries with optional filter, sorting and paging arguments
        Refer to https://developer.idanalyzer.com/vaultapi.html for filter statements and field names
        :param \**options:
            See below
        :Keyword Arguments:
            * *filter* (``list[str]``) --
              List of filter statements
            * *orderby* (``str``) --
              Field name used to order the results
            * *sort* (``str``) --
              Sort results by ASC = Ascending, DESC = DESCENDING
            * *limit* (``int``) --
              Number of results to return
            * *offset* (``int``) --
              Offset the first result using specified index
        :return A list of vault items
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        payload = {}
        if options.get('filter'):
            if not isinstance(options['filter'], list) or len(options['filter']) > 5:
                raise ValueError("Filter must be an array and must not exceed maximum 5 filter strings.")
            payload['filter'] = options['filter']
        # Fall back to newest-first paging defaults when unspecified.
        payload['orderby'] = options.get('orderby') or "createtime"
        payload['sort'] = options.get('sort') or "DESC"
        payload['limit'] = options.get('limit') or 10
        payload['offset'] = options.get('offset') or 0
        return self.__api("list", payload)

    def update(self, vault_id, data=None):
        """
        Update vault entry with new data
        :param vault_id: Vault entry ID
        :param data: dictionary of the field key and its value
        :return Whether updates succeeded
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if not vault_id:
            raise ValueError("Vault entry ID required.")
        if not isinstance(data, dict):
            raise ValueError("Data needs to be a dictionary.")
        if len(data) < 1:
            raise ValueError("Minimum one set of data required.")
        # Merge into a new dict so the caller's data dict is not mutated.
        return self.__api("update", {**data, "id": vault_id})

    def delete(self, vault_id):
        """
        Delete a single or multiple vault entries
        :param vault_id: Vault entry ID or array of IDs
        :return Whether delete succeeded
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if not vault_id:
            raise ValueError("Vault entry ID required.")
        return self.__api("delete", {"id": vault_id})

    def add_image(self, id, image, type=0):
        """
        Add a document or face image into an existing vault entry
        :param id: Vault entry ID (parameter name shadows the builtin but is kept for keyword-call compatibility)
        :param image: Image file path, base64 content or URL
        :param type: Type of image: 0 = document, 1 = person
        :return New image object
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if not id:
            raise ValueError("Vault entry ID required.")
        if type != 0 and type != 1:
            raise ValueError("Invalid image type, 0 or 1 accepted.")
        payload = {"id": id, "type": type}
        if is_valid_url(image):
            payload['imageurl'] = image
        elif os.path.isfile(image):
            with open(image, "rb") as image_file:
                payload['image'] = base64.b64encode(image_file.read())
        elif len(image) > 100:
            # Heuristic: long non-path strings are assumed to be base64 content.
            payload['image'] = image
        else:
            raise ValueError("Invalid image, file not found or malformed URL.")
        return self.__api("addimage", payload)

    def delete_image(self, vault_id, image_id):
        """
        Delete an image from vault
        :param vault_id: Vault entry ID
        :param image_id: Image ID
        :return Whether delete succeeded
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if not vault_id:
            raise ValueError("Vault entry ID required.")
        if not image_id:
            raise ValueError("Image ID required.")
        return self.__api("deleteimage", {"id": vault_id, "imageid": image_id})

    def search_face(self, image, max_entry=10, threshold=0.5):
        """
        Search vault using a person's face image
        :param image: Face image file path, base64 content or URL
        :param max_entry: Number of entries to return, 1 to 10.
        :param threshold: Minimum confidence score required for face matching
        :return List of vault entries
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        payload = {"maxentry": max_entry, "threshold": threshold}
        if is_valid_url(image):
            payload['imageurl'] = image
        elif os.path.isfile(image):
            with open(image, "rb") as image_file:
                payload['image'] = base64.b64encode(image_file.read())
        elif len(image) > 100:
            # Heuristic: long non-path strings are assumed to be base64 content.
            payload['image'] = image
        else:
            raise ValueError("Invalid image, file not found or malformed URL.")
        return self.__api("searchface", payload)

    def train_face(self):
        """
        Train vault for face search
        :return Face training result
        :rtype dict
        :raises APIError: API Error
        """
        return self.__api("train")

    def training_status(self):
        """
        Get vault training status
        :return Training status
        :rtype dict
        :raises APIError: API Error
        """
        return self.__api("trainstatus")

    def __api(self, action, payload=None):
        # Shared POST helper for every vault/* endpoint. Works on a copy so
        # credentials are never written back into a caller-supplied dict.
        request_body = dict(payload) if payload else {}
        request_body['apikey'] = self.apikey
        request_body['client'] = client_library
        r = requests.post(self.apiendpoint + "vault/" + action, data=request_body)
        r.raise_for_status()
        result = r.json()
        if not self.throw_error:
            return result
        if result.get('error'):
            raise APIError(result['error'])
        else:
            return result
class AMLAPI:
    """
    Initialize AML API with an API key, and optional region (US, EU)
    AML API allows you to monitor politically exposed persons (PEPs), and discover person or organization on under sanctions from worldwide governments.
    ID Analyzer AML solutions allows you to check for comprehensive customer due diligence and Anti Money Laundering (AML) and Know Your Customer (KYC) program.

    :param apikey: Your API key
    :param region: API Region US/EU, defaults to US
    :raises ValueError: Invalid input argument
    """
    def __init__(self, apikey, region="US"):
        if not apikey:
            raise ValueError("Please provide an API key")
        if not region:
            raise ValueError("Please set an API region (US, EU)")
        self.apikey = apikey
        # When False (default), API-reported errors are returned in the
        # result dict; when True they are raised as APIError (see __api).
        self.throw_error = False
        # Optional search filters configured via set_aml_database() and
        # set_entity_type(); empty string means "no filter".
        self.AMLDatabases = ""
        self.AMLEntityType = ""
        region_code = region.upper()  # hoisted: compare once, case-insensitively
        if region_code == 'EU':
            self.apiendpoint = "https://api-eu.idanalyzer.com/aml"
        elif region_code == "US":
            self.apiendpoint = "https://api.idanalyzer.com/aml"
        else:
            # Any other value is treated as a full custom endpoint URL.
            self.apiendpoint = region

    def throw_api_exception(self, throw_exception=False):
        """
        Whether an exception should be thrown if API response contains an error message

        :param throw_exception: Throw exception upon API error, defaults to false
        """
        self.throw_error = throw_exception is True

    def set_aml_database(self, databases="au_dfat,ca_dfatd,ch_seco,eu_fsf,fr_tresor_gels_avoir,gb_hmt,ua_sfms,un_sc,us_ofac,eu_cor,eu_meps,global_politicians,interpol_red"):
        """
        Specify the source databases to perform AML search, if left blank, all source databases will be checked.
        Separate each database code with comma, for example: un_sc,us_ofac. For full list of source databases and corresponding code visit AML API Overview.

        :param databases: Database codes separated by comma
        """
        self.AMLDatabases = databases

    def set_entity_type(self, entity_type=""):
        """
        Return only entities with specified entity type, leave blank to return both person and legal entity.

        :param entity_type: 'person' or 'legalentity'
        :raises ValueError: Invalid input argument
        """
        # Membership test replaces the original chained != comparisons.
        if entity_type not in ("person", "legalentity", ""):
            raise ValueError("Entity Type should be either empty, 'person' or 'legalentity'")
        self.AMLEntityType = entity_type

    def search_by_name(self, name="", country="", dob=""):
        """
        Search AML Database using a person or company's name or alias

        :param name: Name or alias to search AML Database
        :param country: ISO 2 Country Code
        :param dob: Date of birth in YYYY-MM-DD or YYYY-MM or YYYY format
        :return AML match results
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if len(name) < 3:
            raise ValueError("Name should contain at least 3 characters.")
        return self.__api({"name": name, "country": country, "dob": dob})

    def search_by_id_number(self, document_number="", country="", dob=""):
        """
        Search AML Database using a document number (Passport, ID Card or any identification documents)

        :param document_number: Document ID Number to perform search
        :param country: ISO 2 Country Code
        :param dob: Date of birth in YYYY-MM-DD or YYYY-MM or YYYY format
        :return AML match results
        :rtype dict
        :raises ValueError: Invalid input argument
        :raises APIError: API Error
        """
        if len(document_number) < 5:
            raise ValueError("Document number should contain at least 5 characters.")
        return self.__api({"documentnumber": document_number, "country": country, "dob": dob})

    def __api(self, payload=None):
        """POST *payload* (plus configured filters/credentials) to the AML
        endpoint and return the parsed JSON. API-reported errors raise
        APIError only when enabled via throw_api_exception()."""
        if not payload:
            payload = {}
        payload['database'] = self.AMLDatabases
        payload['entity'] = self.AMLEntityType
        payload['apikey'] = self.apikey
        payload['client'] = client_library
        r = requests.post(self.apiendpoint, data=payload)
        r.raise_for_status()
        result = r.json()
        if self.throw_error and result.get('error'):
            raise APIError(result['error'])
        return result
|
import os
def get_version(request):
    """Resolve the running docker image version and cache it in $VERSION.

    On the first call (VERSION environment variable unset) the version is
    read from version.txt; a missing or empty file yields 'DEV'. Subsequent
    calls reuse the cached environment variable.

    :param request: unused; kept for the context-processor call signature
    :return: dict with a single 'version' key for template rendering
    """
    if os.environ.get('VERSION') is None:
        try:
            # Context manager closes the handle even if read() raises
            # (the original leaked the handle on a read error).
            with open('version.txt', 'r') as file:
                version = file.read()
        except FileNotFoundError:
            version = 'DEV'
        # NOTE(review): a trailing newline in version.txt is preserved as-is;
        # strip() here if that ever shows up in rendered output.
        if version == '':
            os.environ['VERSION'] = 'DEV'
        else:
            os.environ['VERSION'] = version
    return {'version': os.environ.get('VERSION')}
|
"""
REST API Resource Routing
http://flask-restplus.readthedocs.io
"""
import pandas as pd
import numpy as np
from flask import request, Response, json
from flask_restplus import Resource
from ripser import ripser
from .security import require_auth
from app.api import api_rest
class SecureResource(Resource):
    """ Calls require_auth decorator on all requests """
    # flask_restplus applies method_decorators to every HTTP verb handler,
    # so any endpoint subclassing SecureResource requires authentication.
    method_decorators = [require_auth]
def ndarray_to_object(data_array: np.ndarray, maxdim: int, prime: int, cocycles: bool):
    """Run ripser persistent homology on a point cloud and JSON-encode results.

    :param data_array: point cloud, one point per row
    :param maxdim: maximum homology dimension to compute
    :param prime: characteristic of the coefficient field passed to ripser
    :param cocycles: whether ripser should compute representative cocycles
    :return: dict of JSON strings under 'diagrams', 'cocycles',
        'distance_matrix'
    """
    result = ripser(X=data_array, maxdim=maxdim, coeff=prime, do_cocycles=cocycles)
    diagrams = []
    # Renamed from `cocycles`: the original shadowed the boolean parameter.
    encoded_cocycles = []
    for diagram in result['dgms']:
        # Drop the essential class (infinite death time) so the diagram
        # serializes cleanly. np.inf replaces np.Inf (removed in NumPy 2.0).
        if len(diagram) > 0 and diagram[-1][1] == np.inf:
            diagram = diagram[:-1]
        diagrams.append(json.dumps(diagram.tolist()))
    for cocycles_in_dim in result['cocycles']:
        cc_in_dim = [cocycle.tolist() for cocycle in cocycles_in_dim]
        encoded_cocycles.append(json.dumps(cc_in_dim))
    return {
        'diagrams': json.dumps(diagrams),
        'cocycles': json.dumps(encoded_cocycles),
        'distance_matrix': json.dumps(result['dperm2all'].tolist()),
    }
@api_rest.route('/upload_data')
class UploadData(Resource):
    def post(self):
        """Compute persistence data (maxdim fixed at 1) for the posted
        point cloud and return it as a JSON response."""
        payload = request.json
        points = np.array(payload['points']).astype(np.float32)
        obj = ndarray_to_object(points, 1,
                                int(payload['prime']),
                                bool(payload['do_cocycles']))
        body = json.dumps(obj)
        return Response(body, status=200, headers={'Content-Type': 'application/json'})
|
x = [1, 2]
# Concatenate the decimal digits in x and print the resulting integer.
combined = int("".join(str(item) for item in x))
print(combined)
"""Tensorflow image detection wrapper."""
import logging
import time
import numpy as np
# from importlib import import_module
from ambianic.pipeline.ai.tf_detect import TFDetectionModel
# Module-level logger named after this module for hierarchical filtering.
log = logging.getLogger(__name__)
class TFBoundingBoxDetection(TFDetectionModel):
    """Applies Tensorflow image detection."""

    def __init__(self,
                 model=None,
                 **kwargs
                 ):
        """Initialize detector with config parameters.

        :Parameters:
        ----------
        model: ai_models/mobilenet_ssd_v2_face.tflite
            Detection model reference; forwarded together with **kwargs to
            the TFDetectionModel base class.
        """
        super().__init__(model, **kwargs)

    def detect(self, image=None):
        """Detect objects in image.

        :Parameters:
        ----------
        image : PIL.Image
            Input image in raw RGB format
            with the exact size of the input tensor.

        :Returns:
        -------
        tuple (thumbnail, new_im, inference_result)
            thumbnail and new_im are the resized images produced by
            resize_to_input_tensor; inference_result is a list of the
            top_k detections above confidence_threshold, each a tuple of:
            (label, confidence, (x0, y0, x1, y1))
        """
        assert image
        start_time = time.monotonic()
        log.debug("Calling TF engine for inference")
        tfe = self._tfengine

        # Input tensor layout is NxHxWxC: H at index 1, W at index 2.
        height = tfe.input_details[0]['shape'][1]
        width = tfe.input_details[0]['shape'][2]

        desired_size = (width, height)

        # new_im is padded to the exact tensor size; thumbnail is the
        # aspect-preserving resize of the original image inside it.
        new_im, thumbnail = self.resize_to_input_tensor(image=image,
                                                        desired_size=desired_size)

        # calculate what fraction of the new image is the thumbnail size
        # we will use these factors to adjust detection box coordinates
        w_factor = thumbnail.size[0] / new_im.size[0]
        h_factor = thumbnail.size[1] / new_im.size[1]

        # add N dim
        input_data = np.expand_dims(new_im, axis=0)
        # log.warning('input_data.shape: %r', input_data.shape)
        # log.warning('input_data.dtype: %r', input_data.dtype)
        # input_data = input_data.astype(np.uint8)
        # log.warning('input_data.dtype: %r', input_data.dtype)
        # input_data = np.asarray(input_data).flatten()

        # Note: Floating models are not tested thoroughly yet.
        # Its not clear yet whether floating models will be a good fit
        # for Ambianic use cases. Optimized quantized models seem to do
        # a good job in terms of accuracy and speed.
        if not tfe.is_quantized:  # pragma: no cover
            # normalize floating point values into [-1, 1]
            input_mean = 127.5
            input_std = 127.5
            input_data = \
                (np.float32(input_data) - input_mean) / input_std

        tfe.set_tensor(tfe.input_details[0]['index'], input_data)

        # invoke inference on the new input data
        # with the configured model
        tfe.infer()
        self.log_stats(start_time=start_time)
        # log.debug('output_details: %r', tfe.output_details)
        # od = tfe.output_details[0]['index']
        # log.debug('output_data[0]: %r',
        #           tfe.get_tensor(od))
        # log.debug('output_data[0]: %r',
        #           tfe._tf_interpreter.get_tensor(od))

        # get output tensors; this index order (boxes, labels, scores, num)
        # matches the model's declared output_details ordering.
        boxes = tfe.get_tensor(tfe.output_details[0]['index'])
        label_codes = tfe.get_tensor(
            tfe.output_details[1]['index'])
        scores = tfe.get_tensor(tfe.output_details[2]['index'])
        num = tfe.get_tensor(tfe.output_details[3]['index'])
        # log.warning('Detections:\n num: %r\n label_codes: %r\n scores: %r\n',
        #             num, label_codes, scores)
        # log.warning('Required confidence: %r',
        #             tfe.confidence_threshold)

        detections_count = int(num[0])
        inference_result = []

        # get a list of indices for the top_k results
        # ordered from highest to lowest confidence.
        # We are only interested in scores within detections_count range
        indices_of_sorted_scores = np.argsort(scores[0, :detections_count])
        # log.warning('Indices of sorted scores: %r:',
        #             indices_of_sorted_scores)
        # argsort is ascending; take the tail and reverse it for descending.
        top_k_indices = indices_of_sorted_scores[-1*tfe.top_k:][::-1]
        # log.warning('Indices of top_k scores: %r:', top_k_indices)

        # from the top_k results, only take the ones that score
        # above the confidence threshold criteria.
        for i in top_k_indices:
            confidence = scores[0, i]
            if confidence >= tfe.confidence_threshold:
                # log.warning('Sample confidence: %r, required confidence %r',
                #             confidence, tfe.confidence_threshold)
                li = int(label_codes[0, i])
                # protect against models that return arbitrary labels
                # when the confidence is low
                if (li < len(self._labels)):
                    label = self._labels[li]
                    # If a label filter is specified, apply it.
                    if (not self._label_filter or label in self._label_filter):
                        # model box order is (y0, x0, y1, x1)
                        box = boxes[0, i, :]
                        # refit detections into original image size
                        # without overflowing outside image borders
                        # NOTE(review): x0/y0 are not clamped to [0, 1] the
                        # way x1/y1 are — confirm boxes can't start outside
                        # the thumbnail region.
                        x0 = box[1] / w_factor
                        y0 = box[0] / h_factor
                        x1 = min(box[3] / w_factor, 1)
                        y1 = min(box[2] / h_factor, 1)
                        log.debug('thumbnail image size: %r , '
                                  'tensor image size: %r',
                                  thumbnail.size,
                                  new_im.size)
                        log.debug('resizing detection box (x0, y0, x1, y1) '
                                  'from: %r to %r',
                                  (box[1], box[0], box[3], box[2]),
                                  (x0, y0, x1, y1))
                        inference_result.append((
                            label,
                            confidence,
                            (x0, y0, x1, y1)))
        return thumbnail, new_im, inference_result
|
# -*- coding: utf-8 -*-
''' Extracts lists of words from a given input to be used for later vocabulary
generation or for creating tokenized datasets.
Supports functionality for handling different file types and
filtering/processing of this input.
'''
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
import numpy as np
from text_unidecode import unidecode
from torchmoji.tokenizer import RE_MENTION, tokenize
from torchmoji.filter_utils import (convert_linebreaks,
convert_nonbreaking_space,
correct_length,
extract_emojis,
mostly_english,
non_english_user,
process_word,
punct_word,
remove_control_chars,
remove_variation_selectors,
separate_emojis_and_text)
# Compatibility shim: on Python 3 there is no builtin `unicode`, alias it
# to `str` so isinstance checks below work on both major versions.
try:
    unicode  # Python 2
except NameError:
    unicode = str  # Python 3

# Only catch retweets in the beginning of the tweet as those are the
# automatically added ones.
# We do not want to remove tweets like "Omg.. please RT this!!"
RETWEETS_RE = re.compile(r'^[rR][tT]')

# Use fast and less precise regex for removing tweets with URLs
# It doesn't matter too much if a few tweets with URL's make it through
URLS_RE = re.compile(r'https?://|www\.')

# @-mention pattern reused from the tokenizer module.
MENTION_RE = re.compile(RE_MENTION)

# Unicode punctuation that may be unidecoded into these ASCII characters
# even when other Unicode text is not allowed.
ALLOWED_CONVERTED_UNICODE_PUNCTUATION = """!"#$'()+,-.:;<=>?@`~"""
class WordGenerator():
    ''' Cleanses input and converts into words. Needs all sentences to be in
        Unicode format. Has subclasses that read sentences differently based on
        file type.

    Takes a generator as input. This can be from e.g. a file.
    unicode_handling in ['ignore_sentence', 'convert_punctuation', 'allow']
    unicode_handling in ['ignore_emoji', 'ignore_sentence', 'allow']
    '''
    def __init__(self, stream, allow_unicode_text=False, ignore_emojis=True,
                 remove_variation_selectors=True, break_replacement=True):
        # Stream of raw sentences (e.g. an open file); iterated lazily.
        self.stream = stream
        self.allow_unicode_text = allow_unicode_text
        # NOTE: this attribute shadows the imported helper function of the
        # same name; get_words() still resolves the module-level function.
        self.remove_variation_selectors = remove_variation_selectors
        self.ignore_emojis = ignore_emojis
        self.break_replacement = break_replacement
        self.reset_stats()

    def get_words(self, sentence):
        """ Tokenizes a sentence into individual words.
            Converts Unicode punctuation into ASCII if that option is set.
            Ignores sentences with Unicode if that option is set.
            Returns an empty list of words if the sentence has Unicode and
            that is not allowed.
        """

        if not isinstance(sentence, unicode):
            raise ValueError("All sentences should be Unicode-encoded!")
        sentence = sentence.strip().lower()

        if self.break_replacement:
            sentence = convert_linebreaks(sentence)

        if self.remove_variation_selectors:
            sentence = remove_variation_selectors(sentence)

        # Split into words using simple whitespace splitting and convert
        # Unicode. This is done to prevent word splitting issues with
        # twokenize and Unicode
        words = sentence.split()
        converted_words = []
        for w in words:
            accept_sentence, c_w = self.convert_unicode_word(w)
            # Unicode word detected and not allowed
            if not accept_sentence:
                return []
            else:
                converted_words.append(c_w)
        sentence = ' '.join(converted_words)

        words = tokenize(sentence)
        words = [process_word(w) for w in words]
        return words

    def check_ascii(self, word):
        """ Returns whether a word is ASCII """
        # BUG FIX: on Python 3, str has no .decode(), so the original code's
        # AttributeError fallback classified *every* str as non-ASCII, which
        # (with the default allow_unicode_text=False) silently rejected all
        # sentences. Encode str / decode bytes instead, covering both
        # Python 2 and Python 3.
        try:
            if isinstance(word, bytes):
                word.decode('ascii')
            else:
                word.encode('ascii')
            return True
        except (UnicodeDecodeError, UnicodeEncodeError):
            return False

    def convert_unicode_punctuation(self, word):
        """ Converts Unicode punctuation characters in *word* into their
            ASCII equivalents where the unidecoded form consists purely of
            allowed punctuation; other characters pass through unchanged.
        """
        word_converted_punct = []
        for c in word:
            decoded_c = unidecode(c).lower()
            if len(decoded_c) == 0:
                # Cannot decode to anything reasonable
                word_converted_punct.append(c)
            else:
                # Check if all punctuation and therefore fine
                # to include unidecoded version
                allowed_punct = punct_word(
                    decoded_c,
                    punctuation=ALLOWED_CONVERTED_UNICODE_PUNCTUATION)

                if allowed_punct:
                    word_converted_punct.append(decoded_c)
                else:
                    word_converted_punct.append(c)
        return ''.join(word_converted_punct)

    def convert_unicode_word(self, word):
        """ Converts Unicode words to ASCII using unidecode. If Unicode is not
            allowed (set as a variable during initialization), then only
            punctuation that can be converted to ASCII will be allowed.

            Returns (accept_sentence, converted_word).
        """
        if self.check_ascii(word):
            return True, word

        # First we ensure that the Unicode is normalized so it's
        # always a single character.
        word = unicodedata.normalize("NFKC", word)

        # Convert Unicode punctuation to ASCII equivalent. We want
        # e.g. "\u203c" (double exclamation mark) to be treated the same
        # as "!!" no matter if we allow other Unicode characters or not.
        word = self.convert_unicode_punctuation(word)

        if self.ignore_emojis:
            _, word = separate_emojis_and_text(word)

        # If conversion of punctuation and removal of emojis took care
        # of all the Unicode or if we allow Unicode then everything is fine
        if self.check_ascii(word) or self.allow_unicode_text:
            return True, word
        else:
            # Sometimes we might want to simply ignore Unicode sentences
            # (e.g. for vocabulary creation). This is another way to prevent
            # "polution" of strange Unicode tokens from low quality datasets
            return False, ''

    def data_preprocess_filtering(self, line, iter_i):
        """ To be overridden with specific preprocessing/filtering behavior
            if desired.

            Returns a boolean of whether the line should be accepted and the
            preprocessed text.

            Runs prior to tokenization.
        """
        return True, line, {}

    def data_postprocess_filtering(self, words, iter_i):
        """ To be overridden with specific postprocessing/filtering behavior
            if desired.

            Returns a boolean of whether the line should be accepted and the
            postprocessed text.

            Runs after tokenization.
        """
        return True, words, {}

    def extract_valid_sentence_words(self, line):
        """ Line may either a string of a list of strings depending on how
            the stream is being parsed.
            Domain-specific processing and filtering can be done both prior to
            and after tokenization.
            Custom information about the line can be extracted during the
            processing phases and returned as a dict.
        """

        info = {}

        pre_valid, pre_line, pre_info = \
            self.data_preprocess_filtering(line, self.stats['total'])
        info.update(pre_info)
        if not pre_valid:
            self.stats['pretokenization_filtered'] += 1
            return False, [], info

        words = self.get_words(pre_line)
        if len(words) == 0:
            self.stats['unicode_filtered'] += 1
            return False, [], info

        post_valid, post_words, post_info = \
            self.data_postprocess_filtering(words, self.stats['total'])
        info.update(post_info)
        if not post_valid:
            self.stats['posttokenization_filtered'] += 1
        return post_valid, post_words, info

    def generate_array_from_input(self):
        """ Materialize the stream into a list of (words, info) tuples. """
        sentences = []
        for words in self:
            sentences.append(words)
        return sentences

    def reset_stats(self):
        """ Zero all filtering/acceptance counters. """
        self.stats = {'pretokenization_filtered': 0,
                      'unicode_filtered': 0,
                      'posttokenization_filtered': 0,
                      'total': 0,
                      'valid': 0}

    def __iter__(self):
        """ Yield (words, info) for every valid line in the stream, updating
            self.stats as lines are processed.
        """
        if self.stream is None:
            raise ValueError("Stream should be set before iterating over it!")

        for line in self.stream:
            valid, words, info = self.extract_valid_sentence_words(line)

            # Words may be filtered away due to unidecode etc.
            # In that case the words should not be passed on.
            if valid and len(words):
                self.stats['valid'] += 1
                yield words, info

            self.stats['total'] += 1
class TweetWordGenerator(WordGenerator):
    ''' Returns np array or generator of ASCII sentences for given tweet input.
        Any file opening/closing should be handled outside of this class.
    '''
    def __init__(self, stream, wanted_emojis=None, english_words=None,
                 non_english_user_set=None, allow_unicode_text=False,
                 ignore_retweets=True, ignore_url_tweets=True,
                 ignore_mention_tweets=False):

        self.wanted_emojis = wanted_emojis
        self.english_words = english_words
        self.non_english_user_set = non_english_user_set
        self.ignore_retweets = ignore_retweets
        self.ignore_url_tweets = ignore_url_tweets
        self.ignore_mention_tweets = ignore_mention_tweets
        WordGenerator.__init__(self, stream,
                               allow_unicode_text=allow_unicode_text)

    def validated_tweet(self, data):
        ''' A bunch of checks to determine whether the tweet is valid.
            Also returns emojis contained by the tweet.
        '''

        # Ordering of validations is important for speed
        # If it passes all checks, then the tweet is validated for usage

        # Skips incomplete tweets (the tweet text lives in column 9)
        if len(data) <= 9:
            return False, []

        text = data[9]

        # Drop retweets / URL tweets / mention tweets when so configured.
        for enabled, pattern in ((self.ignore_retweets, RETWEETS_RE),
                                 (self.ignore_url_tweets, URLS_RE),
                                 (self.ignore_mention_tweets, MENTION_RE)):
            if enabled and pattern.search(text):
                return False, []

        uniq_emojis = []
        if self.wanted_emojis is not None:
            uniq_emojis = np.unique(extract_emojis(text, self.wanted_emojis))
            if len(uniq_emojis) == 0:
                return False, []

        if self.non_english_user_set is not None and \
           non_english_user(data[1], self.non_english_user_set):
            return False, []
        return True, uniq_emojis

    def data_preprocess_filtering(self, line, iter_i):
        """ Split the raw TSV line, validate it and clean the tweet text. """
        fields = line.strip().split("\t")
        valid, emojis = self.validated_tweet(fields)
        if valid:
            text = fields[9].replace('\\n', '')
            text = text.replace('\\r', '')
            # NOTE(review): this replace is a no-op as written; presumably an
            # HTML-entity unescape was intended — confirm upstream.
            text = text.replace('&', '&')
        else:
            text = ''
        return valid, text, {'emojis': emojis}

    def data_postprocess_filtering(self, words, iter_i):
        """ Keep only tweets that are non-empty and mostly English. """
        valid_length = correct_length(words, 1, None)
        valid_english, n_words, n_english = mostly_english(words,
                                                           self.english_words)
        info = {'length': len(words),
                'n_normal_words': n_words,
                'n_english': n_english}
        if valid_length and valid_english:
            return True, words, info
        return False, [], info
|
"""
Models a train loop for ALI: Adversarially Learned Inference (https://arxiv.org/abs/1606.00704)
Additionally, this train loop can also perform the MorGAN algorithm by setting the MorGAN alpha
R1 regularization (https://arxiv.org/pdf/1801.04406.pdf) (or at least something like it)
can be enabled using the r1_reg_gamma parameter.
It will "push" the gradients for real samples to 0. This is done for z ~ p(z) and x ~ p(x).
"""
import torch
import torch.nn.functional as F
from trainloops.train_loop import TrainLoop
def get_log_odds(raw_marginals, use_sigmoid):
    """Compute per-feature log-odds of the batch-mean marginals.

    Used to initialize Gx's output bias so its initial output matches the
    dataset's mean statistics.

    :param raw_marginals: batch tensor; values in [0, 1] when use_sigmoid is
        True, otherwise assumed normalized to [-1, 1].
    :param use_sigmoid: whether the data already lies in [0, 1].
    :return: tensor of log-odds, with marginals clamped away from 0 and 1
        for numerical stability.
    """
    if not use_sigmoid:
        # Correct for normalization between -1 and 1: map back onto [0, 1].
        raw_marginals = (raw_marginals + 1) / 2
    marginals = raw_marginals.mean(dim=0).clamp(1e-7, 1 - 1e-7)
    return torch.log(marginals / (1 - marginals))
class ALITrainLoop(TrainLoop):
    """ALI/MorGAN adversarial training loop.

    Trains encoder Gz, generator Gx and joint discriminator D. A non-zero
    morgan_alpha adds a reconstruction loss term (MorGAN); a non-zero
    r1_reg_gamma adds an R1-style gradient penalty computed on the
    discriminator outputs w.r.t. its real inputs x and sampled z.
    """
    def __init__(self, listeners: list, Gz, Gx, D, optim_G, optim_D, dataloader, cuda=False, epochs=1,
                 morgan_alpha=0.0, d_img_noise_std=0.0, d_real_label=1.0, decrease_noise=True, use_sigmoid=True,
                 reconstruction_loss_mode="pixelwise", frs_model=None, r1_reg_gamma=0.0, non_saturating_G_loss=False,
                 disable_D_limiting=False):
        super().__init__(listeners, epochs)
        self.use_sigmoid = use_sigmoid
        self.batch_size = dataloader.batch_size
        self.Gz = Gz
        self.Gx = Gx
        # self.G = torch.nn.ModuleList([self.Gx, self.Gz])
        self.D = D
        self.optim_G = optim_G
        self.optim_D = optim_D
        self.dataloader = dataloader
        self.cuda = cuda
        self.morgan_alpha = morgan_alpha
        # MorGAN reconstruction term is active only for a non-zero alpha.
        self.morgan = morgan_alpha != 0
        # Std of instance noise added to D's image inputs (0 disables it).
        self.d_img_noise_std = d_img_noise_std
        # Target label for real samples (allows one-sided label smoothing).
        self.d_real_label = d_real_label
        self.decrease_noise = decrease_noise
        if reconstruction_loss_mode not in ["pixelwise", "dis_l", "frs"]:
            raise ValueError("Reconstruction loss mode must be one of \"pixelwise\" \"dis_l\", or \"frs\"")
        self.reconstruction_loss_mode = reconstruction_loss_mode
        self.frs_model = frs_model
        self.r1_reg_gamma = r1_reg_gamma
        self.non_saturating = non_saturating_G_loss
        self.disable_D_limiting = disable_D_limiting

    def epoch(self):
        """Run one training epoch; returns losses of the last batch plus
        network/optimizer references for the registered listeners."""
        self.Gx.train()
        self.Gz.train()
        self.D.train()

        for i, (x, _) in enumerate(self.dataloader):
            # Skip ragged final batches so all tensors keep a fixed size.
            if x.size()[0] != self.batch_size:
                continue

            # Train D
            # Draw M (= batch_size) samples from dataset and prior. x samples are already loaded by dataloader
            if self.cuda:
                x = x.cuda()

            # One-time init of Gx's output bias from the first batch's
            # mean statistics (see get_log_odds above).
            if self.current_epoch == 0 and i == 0:
                if hasattr(self.Gx, 'output_bias'):
                    self.Gx.output_bias.data = get_log_odds(x, self.use_sigmoid)
                else:
                    print("WARNING! Gx does not have an \"output_bias\". "
                          "Using untied biases as the last layer of Gx is advised!")

            # ========== Computations for Dis(x, z_hat) ==========
            # Keep a handle on the un-noised x: used for reconstruction
            # losses and as the grad target of the R1 penalty.
            x_no_noise = x
            if self.r1_reg_gamma != 0.0:
                x_no_noise.requires_grad = True

            # Add noise to the inputs if the standard deviation isn't defined to be 0
            if self.d_img_noise_std != 0.0:
                x = self.add_instance_noise(x)

            # Sample from conditionals (sampling is implemented by models)
            z_hat = self.Gz.encode(x)

            dis_q = self.D((x, z_hat))

            # ========== Computations for Dis(x_tilde, z) ==========
            z = self.generate_z_batch(self.batch_size)
            if self.r1_reg_gamma != 0.0:
                z.requires_grad = True
            x_tilde = self.Gx(z)

            # Add noise to the inputs of D if the standard deviation isn't defined to be 0
            if self.d_img_noise_std != 0.0:
                x_tilde = self.add_instance_noise(x_tilde)

            dis_p = self.D((x_tilde, z))

            # ========== Loss computations ==========
            # Discriminator: real pairs toward d_real_label, fake toward 0.
            L_d_fake = F.binary_cross_entropy_with_logits(dis_p, torch.zeros_like(dis_q))
            d_real_labels = torch.ones_like(dis_q) * self.d_real_label
            L_d_real = F.binary_cross_entropy_with_logits(dis_q, d_real_labels)
            L_d = L_d_real + L_d_fake

            if self.non_saturating:
                # Non-saturating variant: negated discriminator-style losses.
                L_g_fake = -F.binary_cross_entropy_with_logits(dis_p, torch.zeros_like(dis_q))
                L_g_real = -F.binary_cross_entropy_with_logits(dis_q, torch.ones_like(dis_q))
            else:
                # Saturating variant: swap the targets instead of negating.
                L_g_fake = F.binary_cross_entropy_with_logits(dis_p, torch.ones_like(dis_q))
                L_g_real = F.binary_cross_entropy_with_logits(dis_q, torch.zeros_like(dis_q))
            L_g = L_g_real + L_g_fake

            L_syn = L_g

            if self.morgan:
                # MorGAN: add a reconstruction penalty on Gx(Gz(x)).
                x_recon = self.Gx(z_hat)
                if self.reconstruction_loss_mode == "pixelwise":
                    L_pixel = self.morgan_pixel_loss(x_recon, x_no_noise)
                elif self.reconstruction_loss_mode == "dis_l":
                    L_pixel = self.dis_l_loss(x_recon, x_no_noise)
                else:
                    L_pixel = self.frs_loss(x_recon, x_no_noise)
                L_syn = L_g + self.morgan_alpha * L_pixel

            if self.r1_reg_gamma != 0:
                # Computes an R1-like loss
                grad_outputs = torch.ones_like(dis_p)
                x_grads = torch.autograd.grad(
                    dis_q,
                    x_no_noise,
                    create_graph=True,
                    only_inputs=True,
                    grad_outputs=grad_outputs
                )[0]
                z_grads = torch.autograd.grad(
                    dis_p,
                    z,
                    create_graph=True,
                    only_inputs=True,
                    grad_outputs=grad_outputs
                )[0]
                # Mean squared gradient norm over all non-batch dims of x
                # plus over the z dimension.
                r1_loss = 0.5*((x_grads.norm(2, dim=list(range(1, len(x_grads.size()))) ) ** 2).mean() + (z_grads.norm(2, dim=1) ** 2).mean())
                L_d += (self.r1_reg_gamma/2.0) * r1_loss

            # ========== Back propagation and updates ==========
            # Gradient update on Discriminator network
            # D is only updated while G's loss is below 3.5 (unless limiting
            # is disabled) to keep D from overpowering G.
            # retain_graph=True: the same graph is reused for L_syn.backward().
            if L_g.detach().item() < 3.5 or self.disable_D_limiting:
                self.optim_D.zero_grad()
                L_d.backward(retain_graph=True)
                self.optim_D.step()

            # Gradient update on the Generator networks
            self.optim_G.zero_grad()
            L_syn.backward()
            self.optim_G.step()

        # Report the losses of the last processed batch.
        losses = {
            "D_loss": L_d.detach().item(),
            "G_loss": L_g.detach().item(),
        }
        if self.morgan:
            losses["L_pixel"] = L_pixel.detach().item()
            losses["L_pixel_scaled"] = L_pixel.detach().item() * self.morgan_alpha
            losses["L_syn"] = L_syn.detach().item()
        if self.r1_reg_gamma != 0.0:
            losses["R1_loss"] = r1_loss.detach().item()
        return {
            "epoch": self.current_epoch,
            "losses": losses,
            "networks": {
                "Gx": self.Gx,
                "Gz": self.Gz,
                "D": self.D,
            },
            "optimizers": {
                "G_optimizer": self.optim_G,
                "D_optimizer": self.optim_D
            }
        }

    def generate_z_batch(self, batch_size):
        """Sample a batch of latent vectors z ~ N(0, I)."""
        z = torch.normal(torch.zeros((batch_size, self.Gx.latent_size)), 1)
        if self.cuda:
            z = z.cuda()
        return z

    def generate_batch(self, batch_size):
        """Generate a batch of images from freshly sampled latents."""
        # Generate random latent vectors
        z = self.generate_z_batch(batch_size)

        # Return outputs
        return self.Gx(z)

    @staticmethod
    def morgan_pixel_loss(x_recon, target):
        """Mean absolute (L1) reconstruction error."""
        absolute_errors = torch.abs(x_recon - target)
        # WxH = float(int(absolute_errors.size()[2]) * int(absolute_errors.size()[3]))
        # loss = absolute_errors.sum()/WxH
        loss = absolute_errors.mean()
        return loss

    def dis_l_loss(self, prediction, target):
        """MSE between discriminator feature activations of the two images."""
        _, dis_l_prediction = self.D.compute_dx(prediction)
        _, dis_l_target = self.D.compute_dx(target)
        return torch.nn.functional.mse_loss(dis_l_prediction, dis_l_target)

    def add_instance_noise(self, x):
        """Add Gaussian instance noise; linearly decayed over epochs when
        decrease_noise is set."""
        noise_factor = self.d_img_noise_std * \
                       (1 if not self.decrease_noise else 1 - (self.current_epoch / self.epochs))
        return x + torch.randn_like(x) * noise_factor

    def frs_loss(self, prediction, target):
        """Mean Euclidean distance between face-recognition embeddings."""
        z_pred = self.frs_model(prediction)
        z_target = self.frs_model(target)
        distances = torch.sqrt(torch.sum(torch.pow(z_pred - z_target, 2), dim=1))
        return distances.mean()
|
from bs4 import BeautifulSoup
import datetime
import re
from utils import unprocessed_archive_urls, process_crawled_archive_response_chunk
import logging
# Publisher key used to tag crawled URLs and articles in the shared pipeline.
PUBLISHER = "TheTimes"
@unprocessed_archive_urls(PUBLISHER)
def archive_urls():
    """Yield weekly HTML-sitemap URLs for The Times archive, 2015-2020.

    URLs have the form .../html-sitemap/YYYY-MM-W with weeks 1 through 4.
    """
    for year in range(2015, 2021):
        for month in range(1, 13):
            for week in range(1, 5):
                yield f"https://www.thetimes.co.uk/html-sitemap/{year}-{month:0>2}-{week}"
@process_crawled_archive_response_chunk(PUBLISHER, write_to_db=True)
def scrape_articles(resp):
    """Yield article rows scraped from one weekly sitemap page.

    Each yielded tuple is (url, publisher, title, published, None, None).
    The publication date is approximated as the first day of the sitemap's
    week (week N -> day N*7-6 of the month).

    :param resp: crawled HTTP response for a sitemap URL
    """
    url_re_result = re.search(r"/([0-9]{4})-([0-9]{2})-([0-9])", resp.url)
    published = datetime.datetime(int(url_re_result.group(1)),
                                  int(url_re_result.group(2)),
                                  int(url_re_result.group(3))*7-6)
    soup = BeautifulSoup(resp.text, "lxml")
    try:
        for el in soup.find_all(class_="Sitemap-link"):
            yield "https://www.thetimes.co.uk" + el.a.attrs.get("href"), PUBLISHER, el.a.string, published, None, None
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; parse failures are logged and the page skipped.
        logging.exception(f"Failed to parse archive page: {resp.url}")
|
r"""
Quantum state learning
======================
This demonstration works through the process used to produce the state
preparation results presented in `"Machine learning method for state
preparation and gate synthesis on photonic quantum
computers" <https://arxiv.org/abs/1807.10781>`__.
This tutorial uses the TensorFlow backend of Strawberry Fields, giving us access
to a number of
additional functionalities including: GPU integration, automatic gradient
computation, built-in optimization algorithms, and other machine
learning tools.
Variational quantum circuits
----------------------------
A key element of machine learning is optimization. We can use
TensorFlow's automatic differentiation tools to optimize the parameters
of variational quantum circuits constructed using Strawberry Fields. In
this approach, we fix a circuit architecture where the states, gates,
and/or measurements may have learnable parameters :math:`\vec{\theta}`
associated with them. We then define a loss function based on the output
state of this circuit. In this case, we define a loss function such that
the fidelity of the output state of the variational circuit is maximized
with respect to some target state.
.. note::
For more details on the TensorFlow backend in Strawberry Fields, please see
:ref:`machine_learning_tutorial`.
For arbitrary state preparation using optimization, we need to make use
of a quantum circuit with a layer structure that is **universal** - that
is, by 'stacking' the layers, we can guarantee that we can produce *any*
CV state with at-most polynomial overhead. Therefore, the architecture
we choose must consist of layers with each layer containing
parameterized Gaussian *and* non-Gaussian gates. **The non-Gaussian
gates provide both the nonlinearity and the universality of the model.**
To this end, we employ the CV quantum neural network architecture as described in
`Killoran et al. <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.1.033063>`__:
.. figure:: https://i.imgur.com/NEsaVIX.png
:alt: layer
Here,
- :math:`\mathcal{U}_i(\theta_i,\phi_i)` is an N-mode linear optical
interferometer composed of two-mode beamsplitters
:math:`BS(\theta,\phi)` and single-mode rotation gates
:math:`R(\phi)=e^{i\phi\hat{n}}`,
- :math:`\mathcal{D}(\alpha_i)` are single mode displacements in the
phase space by complex value :math:`\alpha_i`,
- :math:`\mathcal{S}(r_i, \phi_i)` are single mode squeezing operations
of magnitude :math:`r_i` and phase :math:`\phi_i`, and
- :math:`\Phi(\lambda_i)` is a single mode non-Gaussian operation, in
this case chosen to be the Kerr interaction
:math:`\mathcal{K}(\kappa_i)=e^{i\kappa_i\hat{n}^2}` of strength
:math:`\kappa_i`.
Hyperparameters
---------------
First, we must define the **hyperparameters** of our layer structure:
- ``cutoff``: the simulation Fock space truncation we will use in the
optimization. The TensorFlow backend will perform numerical
operations in this truncated Fock space when performing the
optimization.
- ``depth``: The number of layers in our variational quantum
circuit. As a general rule, increasing the number of layers (and
thus, the number of parameters we are optimizing over) increases the
optimizer's chance of finding a reasonable local minimum in the
optimization landscape.
- ``reps``: the number of steps in the optimization routine performing
gradient descent
Some other optional hyperparameters include:
- The standard deviation of initial parameters. Note that we make a
distinction between the standard deviation of *passive* parameters
(those that preserve photon number when changed, such as phase
parameters), and *active* parameters (those that introduce or remove
energy from the system when changed).
"""
import numpy as np
import strawberryfields as sf
from strawberryfields.ops import *
from strawberryfields.utils import operation
# Cutoff dimension: Fock-space truncation used by the TF backend simulation
cutoff = 9

# Number of layers in the variational circuit
depth = 15

# Number of steps in optimization routine performing gradient descent
reps = 200

# Learning rate
lr = 0.05

# Standard deviation of initial parameters.
# Passive parameters (phases) can start larger; active parameters
# (those that add/remove energy) start near zero so the initial state
# stays within the truncated Fock space.
passive_sd = 0.1
active_sd = 0.001
######################################################################
# The layer parameters :math:`\vec{\theta}`
# -----------------------------------------
#
# We use TensorFlow to create the variables corresponding to the gate
# parameters. Note that we focus on a single mode circuit where
# each variable has shape ``(depth,)``, with each
# individual element representing the gate parameter in layer :math:`i`.
import tensorflow as tf
# set the random seed (makes the randomly-initialised gate parameters reproducible)
tf.random.set_seed(42)
# squeeze gate: amplitude (active) and phase (passive) parameters, one per layer
sq_r = tf.random.normal(shape=[depth], stddev=active_sd)
sq_phi = tf.random.normal(shape=[depth], stddev=passive_sd)
# displacement gate: amplitude (active) and phase (passive) parameters, one per layer
d_r = tf.random.normal(shape=[depth], stddev=active_sd)
d_phi = tf.random.normal(shape=[depth], stddev=passive_sd)
# rotation gates (both passive), one per layer
r1 = tf.random.normal(shape=[depth], stddev=passive_sd)
r2 = tf.random.normal(shape=[depth], stddev=passive_sd)
# kerr gate (active), one per layer
kappa = tf.random.normal(shape=[depth], stddev=active_sd)
######################################################################
# For convenience, we store the TensorFlow variables representing the
# weights as a tensor:
# stack the parameter vectors into shape (7, depth), then transpose to (depth, 7)
weights = tf.convert_to_tensor([r1, sq_r, sq_phi, r2, d_r, d_phi, kappa])
weights = tf.Variable(tf.transpose(weights))
######################################################################
# Since we have a depth of 15 (so 15 layers), and each layer takes
# 7 different types of parameters, the final shape of our weights
# array should be :math:`\text{depth}\times 7` or ``(15, 7)``:
print(weights.shape)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# (15, 7)
######################################################################
# Constructing the circuit
# ------------------------
#
# We can now construct the corresponding
# single-mode Strawberry Fields program:
# Single-mode Strawberry Fields program
prog = sf.Program(1)
# Create the 7 Strawberry Fields free parameters for each layer
sf_params = []
names = ["r1", "sq_r", "sq_phi", "r2", "d_r", "d_phi", "kappa"]
for i in range(depth):
    # For the ith layer, generate parameter names "r1_i", "sq_r_i", etc.
    sf_params_names = ["{}_{}".format(n, i) for n in names]
    # Create the parameters, and append them to our list ``sf_params``.
    sf_params.append(prog.params(*sf_params_names))
######################################################################
# ``sf_params`` is now a nested list of shape ``(depth, 7)``, matching
# the shape of ``weights``.
# convert to a numpy array so a layer's parameter row can be indexed directly
sf_params = np.array(sf_params)
print(sf_params.shape)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# (15, 7)
######################################################################
# Now, we can create a function to define the :math:`i`\ th layer, acting
# on qumode ``q``. We add the :class:`~.utils.operation` decorator so that the layer can be used
# as a single operation when constructing our circuit within the usual
# Strawberry Fields Program context
# layer architecture
@operation(1)
def layer(i, q):
    """Apply the i-th variational layer (rotation, squeeze, rotation,
    displacement, Kerr) to qumode ``q``, using row ``i`` of ``sf_params``."""
    rot1, sqz_r, sqz_phi, rot2, disp_r, disp_phi, kerr_k = sf_params[i]
    Rgate(rot1) | q
    Sgate(sqz_r, sqz_phi) | q
    Rgate(rot2) | q
    Dgate(disp_r, disp_phi) | q
    Kgate(kerr_k) | q
    return q
######################################################################
#
# Now that we have defined our gate parameters and our layer structure, we
# can construct our variational quantum circuit.
# Apply circuit of layers with corresponding depth
with prog.context as q:
    # the same ``layer`` operation is applied ``depth`` times to qumode q[0]
    for k in range(depth):
        layer(k) | q[0]
######################################################################
# Performing the optimization
# ---------------------------
#
# :math:`\newcommand{ket}[1]{\left|#1\right\rangle}` With the Strawberry
# Fields TensorFlow backend calculating the resulting state of the circuit
# symbolically, we can use TensorFlow to optimize the gate parameters to
# minimize the cost function we specify. With state learning, the measure
# of distance between two quantum states is given by the fidelity of the
# output state :math:`\ket{\psi}` with some target state
# :math:`\ket{\psi_t}`. This is defined as the overlap between the two
# states:
#
# .. math:: F = \left|\left\langle{\psi}\mid{\psi_t}\right\rangle\right|^2
#
# where the output state can be written
# :math:`\ket{\psi}=U(\vec{\theta})\ket{\psi_0}`, with
# :math:`U(\vec{\theta})` the unitary operation applied by the variational
# quantum circuit, and :math:`\ket{\psi_0}=\ket{0}` the initial state.
#
# Let's first instantiate the TensorFlow backend, making sure to pass
# the Fock basis truncation cutoff.
# TF backend engine, simulating in the truncated Fock space of size ``cutoff``
eng = sf.Engine("tf", backend_options={"cutoff_dim": cutoff})
######################################################################
# Now let's define the target state as the single photon state
# :math:`\ket{\psi_t}=\ket{1}`:
import numpy as np
# target: Fock-basis vector with all amplitude in the n=1 component
target_state = np.zeros([cutoff])
target_state[1] = 1
print(target_state)
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# [0. 1. 0. 0. 0. 0. 0. 0. 0.]
######################################################################
# Using this target state, we calculate the fidelity with the state
# exiting the variational circuit. We must use TensorFlow functions to
# manipulate this data, as well as a ``GradientTape`` to keep track of the
# corresponding gradients!
#
# We choose the following cost function:
#
# .. math:: C(\vec{\theta}) = \left| \langle \psi_t \mid U(\vec{\theta})\mid 0\rangle - 1\right|
#
# By minimizing this cost function, the variational quantum circuit will
# prepare a state with high fidelity to the target state.
def cost(weights):
    """Run the variational circuit with ``weights`` and score the output.

    Returns a tuple ``(cost, fidelity, ket)`` where ``ket`` is the output
    statevector, ``fidelity`` is the squared magnitude of its overlap with
    ``target_state``, and ``cost`` is ``|overlap - 1|`` (the quantity
    minimized by the optimizer).
    """
    # Create a dictionary mapping from the names of the Strawberry Fields
    # free parameters to the TensorFlow weight values.
    mapping = {p.name: w for p, w in zip(sf_params.flatten(), tf.reshape(weights, [-1]))}
    # Run engine
    state = eng.run(prog, args=mapping).state
    # Extract the statevector
    ket = state.ket()
    # Overlap between the output statevector and the target state; the
    # original computed this reduction twice -- hoist it and reuse it for
    # both the fidelity and the objective.
    overlap = tf.reduce_sum(tf.math.conj(ket) * target_state)
    # Fidelity with the target state
    fidelity = tf.abs(overlap) ** 2
    # Objective function to minimize
    cost = tf.abs(overlap - 1)
    return cost, fidelity, ket
#######################################################################
# Now that the cost function is defined, we can define and run the
# optimization. Below, we choose the Adam
# optimizer that is built into TensorFlow:
# Adam optimizer with the learning rate chosen above
opt = tf.keras.optimizers.Adam(learning_rate=lr)
######################################################################
# We then loop over all repetitions, storing the best predicted fidelity
# value.
fid_progress = []
best_fid = 0
for i in range(reps):
    # reset the engine if it has already been executed
    if eng.run_progs:
        eng.reset()
    # record operations on the tape so gradients w.r.t. ``weights`` exist
    with tf.GradientTape() as tape:
        loss, fid, ket = cost(weights)
    # Stores fidelity at each step
    fid_progress.append(fid.numpy())
    if fid > best_fid:
        # store the new best fidelity and best state
        best_fid = fid.numpy()
        learnt_state = ket.numpy()
    # one repetition of the optimization
    gradients = tape.gradient(loss, weights)
    opt.apply_gradients(zip([gradients], [weights]))
    # Prints progress at every rep (``i % 1 == 0`` is always true; raise
    # the modulus here to print less frequently)
    if i % 1 == 0:
        print("Rep: {} Cost: {:.4f} Fidelity: {:.4f}".format(i, loss, fid))
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Rep: 0 Cost: 0.9973 Fidelity: 0.0000
# Rep: 1 Cost: 0.3459 Fidelity: 0.4297
# Rep: 2 Cost: 0.5866 Fidelity: 0.2695
# Rep: 3 Cost: 0.4118 Fidelity: 0.4013
# Rep: 4 Cost: 0.5630 Fidelity: 0.1953
# Rep: 5 Cost: 0.4099 Fidelity: 0.4548
# Rep: 6 Cost: 0.2258 Fidelity: 0.6989
# Rep: 7 Cost: 0.3994 Fidelity: 0.5251
# Rep: 8 Cost: 0.1787 Fidelity: 0.7421
# Rep: 9 Cost: 0.3777 Fidelity: 0.5672
# Rep: 10 Cost: 0.2201 Fidelity: 0.6140
# Rep: 11 Cost: 0.3580 Fidelity: 0.6169
# Rep: 12 Cost: 0.3944 Fidelity: 0.5549
# Rep: 13 Cost: 0.3197 Fidelity: 0.5456
# Rep: 14 Cost: 0.1766 Fidelity: 0.6878
# Rep: 15 Cost: 0.1305 Fidelity: 0.7586
# Rep: 16 Cost: 0.1304 Fidelity: 0.7598
# Rep: 17 Cost: 0.1256 Fidelity: 0.7899
# Rep: 18 Cost: 0.2366 Fidelity: 0.8744
# Rep: 19 Cost: 0.1744 Fidelity: 0.7789
# Rep: 20 Cost: 0.1093 Fidelity: 0.7965
# Rep: 21 Cost: 0.1846 Fidelity: 0.8335
# Rep: 22 Cost: 0.0876 Fidelity: 0.8396
# Rep: 23 Cost: 0.0985 Fidelity: 0.8630
# Rep: 24 Cost: 0.1787 Fidelity: 0.9070
# Rep: 25 Cost: 0.0620 Fidelity: 0.9116
# Rep: 26 Cost: 0.2743 Fidelity: 0.8738
# Rep: 27 Cost: 0.2477 Fidelity: 0.8895
# Rep: 28 Cost: 0.0815 Fidelity: 0.8494
# Rep: 29 Cost: 0.1855 Fidelity: 0.8072
# Rep: 30 Cost: 0.1315 Fidelity: 0.8200
# Rep: 31 Cost: 0.1403 Fidelity: 0.8799
# Rep: 32 Cost: 0.1530 Fidelity: 0.8853
# Rep: 33 Cost: 0.0718 Fidelity: 0.8679
# Rep: 34 Cost: 0.1112 Fidelity: 0.8838
# Rep: 35 Cost: 0.0394 Fidelity: 0.9237
# Rep: 36 Cost: 0.0781 Fidelity: 0.9487
# Rep: 37 Cost: 0.0619 Fidelity: 0.9613
# Rep: 38 Cost: 0.0291 Fidelity: 0.9607
# Rep: 39 Cost: 0.0669 Fidelity: 0.9595
# Rep: 40 Cost: 0.0685 Fidelity: 0.9458
# Rep: 41 Cost: 0.0317 Fidelity: 0.9466
# Rep: 42 Cost: 0.0308 Fidelity: 0.9484
# Rep: 43 Cost: 0.0729 Fidelity: 0.9612
# Rep: 44 Cost: 0.0581 Fidelity: 0.9658
# Rep: 45 Cost: 0.0272 Fidelity: 0.9766
# Rep: 46 Cost: 0.0818 Fidelity: 0.9760
# Rep: 47 Cost: 0.0123 Fidelity: 0.9828
# Rep: 48 Cost: 0.0431 Fidelity: 0.9826
# Rep: 49 Cost: 0.0866 Fidelity: 0.9775
# Rep: 50 Cost: 0.0245 Fidelity: 0.9779
# Rep: 51 Cost: 0.1784 Fidelity: 0.9657
# Rep: 52 Cost: 0.2022 Fidelity: 0.9552
# Rep: 53 Cost: 0.0907 Fidelity: 0.9511
# Rep: 54 Cost: 0.1477 Fidelity: 0.9100
# Rep: 55 Cost: 0.2128 Fidelity: 0.8746
# Rep: 56 Cost: 0.1493 Fidelity: 0.8677
# Rep: 57 Cost: 0.0704 Fidelity: 0.8736
# Rep: 58 Cost: 0.1368 Fidelity: 0.8962
# Rep: 59 Cost: 0.1268 Fidelity: 0.9239
# Rep: 60 Cost: 0.0222 Fidelity: 0.9566
# Rep: 61 Cost: 0.1432 Fidelity: 0.9641
# Rep: 62 Cost: 0.1233 Fidelity: 0.9619
# Rep: 63 Cost: 0.0487 Fidelity: 0.9633
# Rep: 64 Cost: 0.0689 Fidelity: 0.9604
# Rep: 65 Cost: 0.0488 Fidelity: 0.9584
# Rep: 66 Cost: 0.0248 Fidelity: 0.9618
# Rep: 67 Cost: 0.0967 Fidelity: 0.9660
# Rep: 68 Cost: 0.0678 Fidelity: 0.9731
# Rep: 69 Cost: 0.0859 Fidelity: 0.9768
# Rep: 70 Cost: 0.0904 Fidelity: 0.9787
# Rep: 71 Cost: 0.0312 Fidelity: 0.9789
# Rep: 72 Cost: 0.0258 Fidelity: 0.9757
# Rep: 73 Cost: 0.0826 Fidelity: 0.9704
# Rep: 74 Cost: 0.0661 Fidelity: 0.9667
# Rep: 75 Cost: 0.0554 Fidelity: 0.9651
# Rep: 76 Cost: 0.0626 Fidelity: 0.9602
# Rep: 77 Cost: 0.0358 Fidelity: 0.9513
# Rep: 78 Cost: 0.0366 Fidelity: 0.9570
# Rep: 79 Cost: 0.0524 Fidelity: 0.9734
# Rep: 80 Cost: 0.0279 Fidelity: 0.9798
# Rep: 81 Cost: 0.0962 Fidelity: 0.9768
# Rep: 82 Cost: 0.0980 Fidelity: 0.9802
# Rep: 83 Cost: 0.0127 Fidelity: 0.9884
# Rep: 84 Cost: 0.0134 Fidelity: 0.9893
# Rep: 85 Cost: 0.0874 Fidelity: 0.9864
# Rep: 86 Cost: 0.0666 Fidelity: 0.9883
# Rep: 87 Cost: 0.0601 Fidelity: 0.9885
# Rep: 88 Cost: 0.0661 Fidelity: 0.9859
# Rep: 89 Cost: 0.0317 Fidelity: 0.9830
# Rep: 90 Cost: 0.0222 Fidelity: 0.9796
# Rep: 91 Cost: 0.0763 Fidelity: 0.9769
# Rep: 92 Cost: 0.0665 Fidelity: 0.9742
# Rep: 93 Cost: 0.0377 Fidelity: 0.9702
# Rep: 94 Cost: 0.0428 Fidelity: 0.9685
# Rep: 95 Cost: 0.0415 Fidelity: 0.9703
# Rep: 96 Cost: 0.0291 Fidelity: 0.9729
# Rep: 97 Cost: 0.0673 Fidelity: 0.9749
# Rep: 98 Cost: 0.0606 Fidelity: 0.9775
# Rep: 99 Cost: 0.0385 Fidelity: 0.9815
# Rep: 100 Cost: 0.0360 Fidelity: 0.9827
# Rep: 101 Cost: 0.0580 Fidelity: 0.9801
# Rep: 102 Cost: 0.0494 Fidelity: 0.9804
# Rep: 103 Cost: 0.0504 Fidelity: 0.9832
# Rep: 104 Cost: 0.0482 Fidelity: 0.9822
# Rep: 105 Cost: 0.0444 Fidelity: 0.9772
# Rep: 106 Cost: 0.0391 Fidelity: 0.9761
# Rep: 107 Cost: 0.0526 Fidelity: 0.9784
# Rep: 108 Cost: 0.0471 Fidelity: 0.9771
# Rep: 109 Cost: 0.0444 Fidelity: 0.9726
# Rep: 110 Cost: 0.0421 Fidelity: 0.9725
# Rep: 111 Cost: 0.0441 Fidelity: 0.9755
# Rep: 112 Cost: 0.0373 Fidelity: 0.9763
# Rep: 113 Cost: 0.0525 Fidelity: 0.9757
# Rep: 114 Cost: 0.0477 Fidelity: 0.9771
# Rep: 115 Cost: 0.0422 Fidelity: 0.9794
# Rep: 116 Cost: 0.0381 Fidelity: 0.9802
# Rep: 117 Cost: 0.0503 Fidelity: 0.9797
# Rep: 118 Cost: 0.0440 Fidelity: 0.9801
# Rep: 119 Cost: 0.0470 Fidelity: 0.9811
# Rep: 120 Cost: 0.0438 Fidelity: 0.9809
# Rep: 121 Cost: 0.0436 Fidelity: 0.9789
# Rep: 122 Cost: 0.0386 Fidelity: 0.9785
# Rep: 123 Cost: 0.0489 Fidelity: 0.9797
# Rep: 124 Cost: 0.0441 Fidelity: 0.9793
# Rep: 125 Cost: 0.0430 Fidelity: 0.9768
# Rep: 126 Cost: 0.0396 Fidelity: 0.9767
# Rep: 127 Cost: 0.0449 Fidelity: 0.9789
# Rep: 128 Cost: 0.0391 Fidelity: 0.9793
# Rep: 129 Cost: 0.0474 Fidelity: 0.9774
# Rep: 130 Cost: 0.0434 Fidelity: 0.9778
# Rep: 131 Cost: 0.0418 Fidelity: 0.9802
# Rep: 132 Cost: 0.0374 Fidelity: 0.9804
# Rep: 133 Cost: 0.0475 Fidelity: 0.9785
# Rep: 134 Cost: 0.0423 Fidelity: 0.9789
# Rep: 135 Cost: 0.0435 Fidelity: 0.9808
# Rep: 136 Cost: 0.0399 Fidelity: 0.9806
# Rep: 137 Cost: 0.0438 Fidelity: 0.9784
# Rep: 138 Cost: 0.0390 Fidelity: 0.9784
# Rep: 139 Cost: 0.0452 Fidelity: 0.9802
# Rep: 140 Cost: 0.0408 Fidelity: 0.9800
# Rep: 141 Cost: 0.0428 Fidelity: 0.9780
# Rep: 142 Cost: 0.0389 Fidelity: 0.9781
# Rep: 143 Cost: 0.0436 Fidelity: 0.9800
# Rep: 144 Cost: 0.0386 Fidelity: 0.9802
# Rep: 145 Cost: 0.0448 Fidelity: 0.9785
# Rep: 146 Cost: 0.0408 Fidelity: 0.9788
# Rep: 147 Cost: 0.0417 Fidelity: 0.9807
# Rep: 148 Cost: 0.0373 Fidelity: 0.9808
# Rep: 149 Cost: 0.0452 Fidelity: 0.9791
# Rep: 150 Cost: 0.0406 Fidelity: 0.9793
# Rep: 151 Cost: 0.0421 Fidelity: 0.9810
# Rep: 152 Cost: 0.0381 Fidelity: 0.9810
# Rep: 153 Cost: 0.0436 Fidelity: 0.9791
# Rep: 154 Cost: 0.0391 Fidelity: 0.9793
# Rep: 155 Cost: 0.0429 Fidelity: 0.9810
# Rep: 156 Cost: 0.0386 Fidelity: 0.9809
# Rep: 157 Cost: 0.0429 Fidelity: 0.9792
# Rep: 158 Cost: 0.0387 Fidelity: 0.9794
# Rep: 159 Cost: 0.0423 Fidelity: 0.9810
# Rep: 160 Cost: 0.0378 Fidelity: 0.9811
# Rep: 161 Cost: 0.0435 Fidelity: 0.9795
# Rep: 162 Cost: 0.0394 Fidelity: 0.9797
# Rep: 163 Cost: 0.0413 Fidelity: 0.9813
# Rep: 164 Cost: 0.0370 Fidelity: 0.9814
# Rep: 165 Cost: 0.0438 Fidelity: 0.9798
# Rep: 166 Cost: 0.0394 Fidelity: 0.9800
# Rep: 167 Cost: 0.0412 Fidelity: 0.9815
# Rep: 168 Cost: 0.0371 Fidelity: 0.9814
# Rep: 169 Cost: 0.0430 Fidelity: 0.9799
# Rep: 170 Cost: 0.0386 Fidelity: 0.9801
# Rep: 171 Cost: 0.0417 Fidelity: 0.9815
# Rep: 172 Cost: 0.0376 Fidelity: 0.9815
# Rep: 173 Cost: 0.0422 Fidelity: 0.9801
# Rep: 174 Cost: 0.0380 Fidelity: 0.9803
# Rep: 175 Cost: 0.0417 Fidelity: 0.9816
# Rep: 176 Cost: 0.0375 Fidelity: 0.9816
# Rep: 177 Cost: 0.0421 Fidelity: 0.9804
# Rep: 178 Cost: 0.0380 Fidelity: 0.9806
# Rep: 179 Cost: 0.0414 Fidelity: 0.9817
# Rep: 180 Cost: 0.0371 Fidelity: 0.9818
# Rep: 181 Cost: 0.0421 Fidelity: 0.9807
# Rep: 182 Cost: 0.0379 Fidelity: 0.9809
# Rep: 183 Cost: 0.0412 Fidelity: 0.9818
# Rep: 184 Cost: 0.0371 Fidelity: 0.9818
# Rep: 185 Cost: 0.0417 Fidelity: 0.9808
# Rep: 186 Cost: 0.0375 Fidelity: 0.9810
# Rep: 187 Cost: 0.0413 Fidelity: 0.9819
# Rep: 188 Cost: 0.0372 Fidelity: 0.9819
# Rep: 189 Cost: 0.0413 Fidelity: 0.9810
# Rep: 190 Cost: 0.0371 Fidelity: 0.9812
# Rep: 191 Cost: 0.0414 Fidelity: 0.9820
# Rep: 192 Cost: 0.0373 Fidelity: 0.9820
# Rep: 193 Cost: 0.0410 Fidelity: 0.9813
# Rep: 194 Cost: 0.0368 Fidelity: 0.9815
# Rep: 195 Cost: 0.0413 Fidelity: 0.9821
# Rep: 196 Cost: 0.0372 Fidelity: 0.9821
# Rep: 197 Cost: 0.0408 Fidelity: 0.9815
# Rep: 198 Cost: 0.0367 Fidelity: 0.9817
# Rep: 199 Cost: 0.0412 Fidelity: 0.9821
######################################################################
# Results and visualisation
# -------------------------
#
# Plotting the fidelity vs. optimization step:
from matplotlib import pyplot as plt
# serif fonts for publication-style figures
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.sans-serif"] = ["Computer Modern Roman"]
plt.style.use("default")
# fidelity recorded at every optimization step
plt.plot(fid_progress)
plt.ylabel("Fidelity")
plt.xlabel("Step")
######################################################################
# .. image:: /_static/images/sphx_glr_run_state_learner_001.png
# :class: sphx-glr-single-img
######################################################################
# We can use the following function to plot the Wigner function of our
# target and learnt state:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def wigner(rho):
    """Evaluate the Wigner function of density matrix ``rho`` on a fixed
    100x100 phase-space grid over ``[-5, 5] x [-5, 5]``.

    Returns the quadrature meshgrids ``Q``, ``P`` and the Wigner values ``W``.

    This code is a modified version of the 'iterative' method
    of the wigner function provided in QuTiP, which is released
    under the BSD license, with the following copyright notice:

    Copyright (C) 2011 and later, P.D. Nation, J.R. Johansson,
    A.J.G. Pitchford, C. Granade, and A.L. Grimsmo.
    All rights reserved."""
    # Half-width of the plotted phase-space domain
    extent = 5.0
    dim = rho.shape[0]
    # 2D grid of quadrature coordinates
    q_axis = np.linspace(-extent, extent, 100)
    p_axis = np.linspace(-extent, extent, 100)
    Q, P = np.meshgrid(q_axis, p_axis)
    # complex phase-space variable (same scaling as the original: /2)
    A = 0.5 * (Q + 1j * P)
    # Wlist[n] iteratively holds the Wigner function of the operator |m><n|
    Wlist = np.zeros((dim,) + A.shape, dtype=complex)
    # Wigner function of the vacuum projector |0><0|
    Wlist[0] = np.exp(-2.0 * np.abs(A) ** 2) / np.pi
    # start with the rho(0,0) contribution
    W = np.real(rho[0, 0]) * np.real(Wlist[0])
    # first row of rho: |0><n| contributions
    for n in range(1, dim):
        Wlist[n] = (2.0 * A * Wlist[n - 1]) / np.sqrt(n)
        W += 2 * np.real(rho[0, n] * Wlist[n])
    # remaining rows: diagonal |m><m| plus off-diagonal |m><n| contributions
    for m in range(1, dim):
        prev = np.copy(Wlist[m])
        Wlist[m] = (2 * np.conj(A) * prev - np.sqrt(m) * Wlist[m - 1]) / np.sqrt(m)
        W += np.real(rho[m, m] * Wlist[m])
        for n in range(m + 1, dim):
            updated = (2 * A * Wlist[n - 1] - np.sqrt(m) * prev) / np.sqrt(n)
            prev = np.copy(Wlist[n])
            Wlist[n] = updated
            W += 2 * np.real(rho[m, n] * Wlist[n])
    return Q, P, W / 2
######################################################################
# Computing the density matrices
# :math:`\rho = \left|\psi\right\rangle \left\langle\psi\right|` of the
# target and learnt state,
# density matrices rho = |psi><psi| of the target and learnt (pure) states
rho_target = np.outer(target_state, target_state.conj())
rho_learnt = np.outer(learnt_state, learnt_state.conj())
######################################################################
# Plotting the Wigner function of the target state:
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
X, P, W = wigner(rho_target)
ax.plot_surface(X, P, W, cmap="RdYlGn", lw=0.5, rstride=1, cstride=1)
# contour projection drawn below the surface (negative z offset)
ax.contour(X, P, W, 10, cmap="RdYlGn", linestyles="solid", offset=-0.17)
ax.set_axis_off()
fig.show()
######################################################################
# .. image:: /_static/images/sphx_glr_run_state_learner_002.png
#     :class: sphx-glr-single-img
######################################################################
# Plotting the Wigner function of the learnt state:
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
X, P, W = wigner(rho_learnt)
ax.plot_surface(X, P, W, cmap="RdYlGn", lw=0.5, rstride=1, cstride=1)
ax.contour(X, P, W, 10, cmap="RdYlGn", linestyles="solid", offset=-0.17)
ax.set_axis_off()
fig.show()
######################################################################
# .. image:: /_static/images/sphx_glr_run_state_learner_003.png
# :class: sphx-glr-single-img
######################################################################
# References
# ----------
#
# 1. Juan Miguel Arrazola, Thomas R. Bromley, Josh Izaac, Casey R. Myers,
# Kamil Brádler, and Nathan Killoran. Machine learning method for state
# preparation and gate synthesis on photonic quantum computers. `Quantum
# Science and Technology, 4
# 024004 <https://iopscience.iop.org/article/10.1088/2058-9565/aaf59e>`__,
# (2019).
#
# 2. Nathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld,
# Nicolas Quesada, and Seth Lloyd. Continuous-variable quantum neural networks.
# `Physical Review Research, 1(3), 033063.
# <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.1.033063>`__,
# (2019).
|
#! /usr/bin/env python
# train classifier that takes as input embeddings and predict POS
from __future__ import print_function
import sys, subprocess, os, itertools, pca, tsne, argparse
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import LeaveOneOut
from scipy import spatial
from utils import read_pos_tags, read_mapping, make_emb_dict, split_per_pos_tag
from matplotlib import pyplot
class emb_data(object):
    """Holds embeddings and POS labels, split 80/10/10 into train/valid/test.

    The split is positional (no shuffling): the first 80% of entries become
    the training set, the next 10% the validation set, and the remainder the
    test set.  ``emb_dict`` values and ``pos_tags`` are assumed to be in
    corresponding order -- TODO(review) confirm against the caller.
    """
    def __init__(self, emb_dict, pos_tags):
        # batch bookkeeping, presumably advanced by an external training loop
        self.counter_batches = 0
        # list(...) makes this correct on Python 3 too: without it,
        # np.array(dict.values()) produces a useless 0-d object array
        self.x_array = np.array(list(emb_dict.values()))
        self.y_array = np.array(pos_tags)
        # split in train, validation and test data (80% / 10% / remainder)
        self.size_train = int((float(self.x_array.shape[0]) / 100.0) * 80.0)
        self.size_valid = int((float(self.x_array.shape[0]) / 100.0) * 10.0)
        self.size_test = self.x_array.shape[0] - self.size_train - self.size_valid
        self.x_train = self.x_array[:self.size_train, :]
        self.y_train = self.y_array[:self.size_train]
        self.x_valid = self.x_array[self.size_train:self.size_train + self.size_valid, :]
        self.y_valid = self.y_array[self.size_train:self.size_train + self.size_valid]
        self.x_test = self.x_array[self.size_train + self.size_valid:, :]
        self.y_test = self.y_array[self.size_train + self.size_valid:]
class linear_class(object):
    """Grid-searched logistic-regression classifier over word embeddings,
    with helpers for evaluation, visualisation and coefficient inspection.

    NOTE(review): ``plot_coef`` and ``retrieve_coef`` reference a global
    ``id_to_pos`` mapping that is not defined in this class or its visible
    module scope -- presumably set up by the calling script; confirm before
    relying on those methods.
    """
    def __init__(self, multinomial=False, token_based=False):
        # Build the hyperparameter grid; in the multinomial case, only
        # solvers that support the multinomial loss are included.
        if multinomial and not token_based:
            logisticregressionparams = {"random_state": [2017, 1337],
                                        "penalty": ['l2'],
                                        "class_weight": ['balanced', None],
                                        "C": [0.1, 0.4, 0.6, 0.8, 1.0, 1.2, 2.],
                                        # ovr: binary problem is fit for each label
                                        # multinomial: multinomial loss
                                        "multi_class": ['ovr', 'multinomial'],
                                        # these are the possible solvers for multinomial loss:
                                        # - lbfgs: limited-memory Broyden-Fletcher-Goldfarb-Shannon algorithm
                                        # - sag: stochastic average gradient descent
                                        # - newton-cg
                                        "solver": ['lbfgs', 'sag', 'newton-cg']}
        else:
            logisticregressionparams = {"random_state": [2017, 1337],
                                        "penalty": ['l2'],
                                        "class_weight": ['balanced', None],
                                        "C": [0.6, 0.8, 1.0, 1.2, 2.],
                                        # this includes an extra solver: liblinear = coordinate descent algorithm
                                        "solver": ['liblinear', 'lbfgs', 'sag', 'newton-cg']}
        if token_based:
            # only 1 example for each class, so use leave-one-out cross-validation
            self.pos_logisticregression = GridSearchCV(LogisticRegression(),
                                                       logisticregressionparams,
                                                       cv=LeaveOneOut())
        else:
            self.pos_logisticregression = GridSearchCV(LogisticRegression(),
                                                       logisticregressionparams)
    def train(self, name, x_train, y_train):
        """Fit the grid search and persist the best estimator, its parameters,
        the CV results, and the best CV score under the prefix ``name``."""
        print('Train classifier...')
        self.pos_logisticregression.fit(x_train, y_train)
        joblib.dump(self.pos_logisticregression.best_estimator_, "{0}.estimator".format(name))
        joblib.dump(self.pos_logisticregression.best_params_, "{0}.params".format(name))
        joblib.dump(self.pos_logisticregression.cv_results_, "{0}.results".format(name))
        with open("{0}.score".format(name), "w") as w:
            w.write(str(self.pos_logisticregression.best_score_))
    def test(self, name, x_test, y_test):
        """Load the persisted estimator and print its accuracy on the test set."""
        print('Test classifier...')
        pos_classifier = joblib.load('{0}.estimator'.format(name))
        test_score = pos_classifier.score(x_test, y_test)
        print('Test score: {0}'.format(test_score))
    def results_gridsearch(self, name):
        """Print the stored grid-search CV results, one parameter per line."""
        results_gridsearch = joblib.load('{0}.results'.format(name))
        # .items() instead of Python-2-only .iteritems() (works on 2 and 3)
        for param, results in results_gridsearch.items():
            print(param, end=' ')
            for el in results:
                print(el, end=' ')
            print()
    def show_confusion_matrix(self, name, x_test, y_test, id_to_pos):
        """Plot and save a row-normalised confusion matrix for the persisted
        estimator evaluated on ``x_test``/``y_test``."""
        pos_classifier = joblib.load('{0}.estimator'.format(name))
        label_ids = pos_classifier.classes_
        labels = [id_to_pos[label_id] for label_id in label_ids]
        y_pred = pos_classifier.predict(x_test)
        for l_id in label_ids:
            if l_id not in y_pred:
                print('label {0} is never predicted'.format(id_to_pos[l_id]))
        cm = confusion_matrix(y_test, y_pred)
        # normalise each row by the true-label count
        # (rows with zero examples produce nan cells, handled below)
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        pyplot.figure(figsize=(15, 15))
        # show an image (binary colourmap)
        pyplot.imshow(cm, interpolation='nearest', cmap='binary')
        pyplot.title("Confusion matrix")
        pyplot.colorbar()
        # bug fix: use len(labels) ticks -- the original created one extra
        # tick, which mismatches the label list
        tick_marks = np.arange(len(labels))
        # set labels of axes
        pyplot.xticks(tick_marks, labels, rotation=90)
        pyplot.yticks(tick_marks, labels)
        thresh = 0.5
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            # compact cell text: keep "1.0", strip the leading zero from
            # fractions, and blank out zeros and nans
            if str(round(cm[i, j], 2))[0] in ["1"]:
                text = str(round(cm[i, j], 2))
            elif str(round(cm[i, j], 2))[0] in ["n"]:
                # "nan" cell (no true examples for this row's label)
                text = ".0"
            else:
                text = str(round(cm[i, j], 2))[1:]
            if text == ".0":
                text = ""
            pyplot.text(j, i, text,
                        horizontalalignment="center",
                        color="white" if cm[i, j] > thresh else "black")
        pyplot.tight_layout()
        pyplot.ylabel('True label')
        pyplot.xlabel('Predicted label')
        # bug fix: save before show -- with interactive backends, saving
        # after show() can write out an empty figure
        pyplot.savefig('{0}_confusion_matrix_norm_all.png'.format(name))
        pyplot.show()
    def plot_pca(self, name, x, y, id_to_pos):
        '''
        Makes a PCA plot of the data and saves it as <name>_pca.png.
        '''
        labels = []
        for l in np.nditer(y):
            labels.append(id_to_pos[int(l)])
        pca.pca_main(x, labels, '{0}_pca.png'.format(name))
    def plot_tsne(self, name, x, y, id_to_pos):
        '''
        Makes a T-SNE plot of the data and saves it as <name>_tsne.png.
        '''
        labels = []
        for l in np.nditer(y):
            labels.append(id_to_pos[int(l)])
        # guard against a single class (the original divided by max(range(1)) == 0)
        denom = max(len(set(labels)) - 1, 1)
        colors = [pyplot.cm.jet(float(i) / denom) for i in range(len(set(labels)))]
        # renamed from ``y`` to avoid shadowing the label parameter
        emb_2d = tsne.tsne(x)
        # sorted() gives a deterministic class order (sets do not)
        for i, label in enumerate(sorted(set(labels))):
            indices = [idx for idx, lab in enumerate(labels) if lab == label]
            pyplot.scatter(np.take(emb_2d, indices, axis=0)[:, 0],
                           np.take(emb_2d, indices, axis=0)[:, 1],
                           s=10, c=colors[i], label=label)
        pyplot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                      ncol=2, fontsize='x-small', mode="expand", borderaxespad=0.)
        pyplot.savefig('{0}_tsne.png'.format(name))
        pyplot.show()
    def plot_lda(self, name, x, y, id_to_pos):
        """Scatter plot of the first two LDA components, coloured per class.

        Bug fixes relative to the original: it requested ``n_components=1``
        but indexed a second component (guaranteed IndexError), and it masked
        ``y`` with enumeration indices instead of the actual class ids.
        Requires at least 3 classes so LDA can yield two components.
        """
        lda = LinearDiscriminantAnalysis(n_components=2)
        result_lda = lda.fit(x, y).transform(x)
        # deterministic class order (sets have no stable iteration order)
        class_ids = sorted(set(int(l) for l in np.nditer(y)))
        denom = max(len(class_ids) - 1, 1)
        colors = [pyplot.cm.jet(float(i) / denom) for i in range(len(class_ids))]
        for color, class_id in zip(colors, class_ids):
            mask = y == class_id
            pyplot.scatter(result_lda[mask, 0], result_lda[mask, 1], alpha=.8,
                           color=color, label=id_to_pos[class_id])
    def plot_coef(self, name):
        '''
        Plots the coefficients of a trained model and saves the figure.

        NOTE(review): relies on a global ``id_to_pos`` -- confirm it exists
        in the calling scope before using this method.
        '''
        estimator = joblib.load('{0}.estimator'.format(name))
        coef = estimator.coef_
        label_ids = estimator.classes_
        labels = [id_to_pos[l] for l in label_ids]
        # range() instead of Python-2-only xrange()
        for class_i in range(coef.shape[0]):
            pyplot.plot(coef[class_i], label=labels[class_i])
        pyplot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                      ncol=2, mode="expand", borderaxespad=0.)
        pyplot.savefig('{0}_coef.png'.format(name))
        #pyplot.show()
    def retrieve_coef(self, name):
        '''
        Saves the coefficients of a trained model as numpy files.

        NOTE(review): relies on a global ``id_to_pos`` -- confirm it exists
        in the calling scope before using this method.
        '''
        estimator = joblib.load('{0}.estimator'.format(name))
        coef = estimator.coef_
        label_ids = estimator.classes_
        labels = [id_to_pos[l] for l in label_ids]
        for i, l in enumerate(labels):
            np.save('{0}_coef_{1}.npy'.format(name, l), coef[i])
    def compare_coef(self, name, x, y, id_to_pos):
        '''
        Compares the saved coefficients (with retrieve_coef) for a certain label
        with all vectors that correspond to that label
        and prints the cosine distances.
        '''
        # bug fix: the original passed the garbled name ``naliblinearme``
        # here, which raised a NameError at runtime
        estimator = joblib.load('{0}.estimator'.format(name))
        label_ids = estimator.classes_
        labels = [id_to_pos[l] for l in label_ids]
        all_coef = {}
        for i, l in enumerate(labels):
            print('label {0}'.format(l))
            curr_coef = np.load('{0}_coef_{1}.npy'.format(name, l))
            all_coef[l] = curr_coef
            for id_in_y in range(y.shape[0]):
                if id_to_pos[y[id_in_y]] == l:
                    emb = x[id_in_y]
                    cos_dist = spatial.distance.cosine(curr_coef, emb)
                    print('cosine distance {0}'.format(cos_dist))
        for l, coef in all_coef.items():
            for l2, coef2 in all_coef.items():
                tmp_sum = 0.0
                num_occ = 0.0
                for id_in_y in range(y.shape[0]):
                    if id_to_pos[y[id_in_y]] == l2:
                        emb = x[id_in_y]
                        cos_dist = spatial.distance.cosine(coef, emb)
                        tmp_sum += cos_dist
                        num_occ += 1
                # guard against labels with no occurrences in y
                # (the original divided by zero in that case)
                if num_occ > 0:
                    avg_dist = tmp_sum / num_occ
                    print('Average cosine distance between label {0} and occurrences of {1}: {2}'.format(l, l2, avg_dist))
    def avg_emb(self, name, x, y, id_to_pos):
        """Compute and save the average embedding for every POS class as
        <name>_avg_<label>.npy, also collecting them in a dict."""
        avg_dict = {}
        # iterate keys directly (works on Python 2 and 3, unlike iterkeys())
        for pos_id in id_to_pos:
            tmp_sum = 0.0
            tmp_denom = 0
            for i in range(y.shape[0]):
                if y[i] == pos_id:
                    tmp_sum += x[i]
                    tmp_denom += 1
            # skip classes with no examples to avoid division by zero
            if tmp_denom == 0:
                continue
            avg_dict[id_to_pos[pos_id]] = tmp_sum / tmp_denom
            np.save('{0}_avg_{1}.npy'.format(name, id_to_pos[pos_id]), (tmp_sum / tmp_denom))
if __name__ == '__main__':
## input arguments ##
parser = argparse.ArgumentParser()
parser.add_argument('emb_f', type=str, help='numpy file containing all embeddings')
parser.add_argument('dict', type=str, help='dict file containing mapping of words to indices used in the emb_f')
parser.add_argument('pos_f', type=str, help='file containing list of all words and their possible POS tags')
parser.add_argument('name', type=str, help='name for the model')
parser.add_argument('--pos_classes', type=str, help='file containing the POS classes for which we want to train a classifier')
parser.add_argument('--type_data', type=str,
help="'collapsed' (=default) if the training set contains 1 (average) embedding for each word, otherwise 'running_text'",
choices=['collapsed', 'running_text'], default='collapsed')
parser.add_argument('--freq_cutoff', type=int, help='remove POS tags with frequency < cutoff')
parser.add_argument('--no_train', help='do not train the classifier (default = train)', action='store_true', default=False)
parser.add_argument('--no_valid', help='do not calculate accuracy on validation set (default = validate)', action='store_true', default=False)
parser.add_argument('--no_test', help='do not calculate accuracy on test set (default = test)', action='store_true', default=False)
parser.add_argument('--grid_search', help='print results of grid search over hyperparameters', action='store_true')
parser.add_argument('--confusion_matrix', help='plot confusion matrix', action='store_true')
parser.add_argument('--results_dataset', help="for confusion matrix/pca/tsne/lda: use full dataset ('full') or test set ('test', = default)",
choices=['full', 'test'])
parser.add_argument('--pca', help='PCA visualization of embeddings', action='store_true')
parser.add_argument('--tsne', help='T-SNE visualization of embeddings', action='store_true')
parser.add_argument('--lda', help='LDA visualization of embeddings', action='store_true')
parser.add_argument('--plot_coef', help='plot the coefficients/weights of a trained model', action='store_true')
parser.add_argument('--avg_emb', help='make an average embedding for every class', action='store_true')
args = parser.parse_args()
# if a pos_classes argument is given,
# we only train a classifier for the POS tags in this file
if args.pos_classes != None:
tmp = open(args.pos_classes).readlines()
# mapping based on tokens: pos_classes file should start with 'token'
if tmp[0] == 'token\n':
pos_classes = {}
for el in tmp[1:]:
split_class = el.strip().split('\t')
# pos_classes contains class name + set of all words belonging to the class
pos_classes[split_class[0]] = set(split_class[1].split(' '))
token_based = True
else:
# mapping based on POS classes
# if the pos_classes file contains class names +
# list of POS that belong to the class
if len(tmp[0].strip().split()) > 1:
pos_classes = {}
for el in tmp:
split_class = el.strip().split('\t')
pos_classes[split_class[0]] = set(split_class[1].split(' '))
# simple list of POS tags
else:
pos_classes = [pos.strip() for pos in tmp]
token_based = False
with_classes = True
else:
with_classes = False
token_based = False
multinomial = True
# read mapping of words to indices
mapping = read_mapping(args.dict)
# read embeddings
emb_dict, size_emb = make_emb_dict(args.emb_f, mapping)
# read POS tags
pos_tags, vocab_pos_tags = read_pos_tags(args.pos_f)
# throw away embeddings for which we do not have POS tags
# (NOTE: iterkeys/iteritems below mean this script is Python 2)
for w in list(emb_dict.iterkeys()):
    if w not in pos_tags:
        #print('no POS tag for {0}'.format(w))
        del emb_dict[w]
# throw away POS tags for which we do not have embeddings
for w in list(pos_tags.iterkeys()):
    if w not in emb_dict:
        #print('no embedding for {0}'.format(w))
        del pos_tags[w]
# if we have only 1 embedding for each word and we want to classify based on POS,
# it is possible that a word has multiple embeddings
# so we change the training set, such that multiple training instances are created
# for every word + POS combination
if args.type_data == 'collapsed' and not token_based:
    emb_dict, pos_tags = split_per_pos_tag(emb_dict, pos_tags)
# remove infrequent tags if needed
if isinstance(args.freq_cutoff, int):
    # first count frequency per tag
    freq_tags = {}
    for tag in pos_tags.values():
        if tag in freq_tags:
            freq_tags[tag] += 1
        else:
            freq_tags[tag] = 1
    # then remove tags with frequency < threshold
    for tag, tag_freq in freq_tags.iteritems():
        print('{0}\t{1}'.format(tag, tag_freq))
        if tag_freq < args.freq_cutoff:
            # remove from pos_tags
            # (keys() returns a list copy in Python 2, so deleting while looping is safe)
            words_to_delete = []
            for word in pos_tags.keys():
                if tag == pos_tags[word]:
                    del pos_tags[word]
                    words_to_delete.append(word)
            # remove from emb_dict
            for w in words_to_delete:
                del emb_dict[w]
# remove all training instances that do not
# belong to the classes that we want to classify
if with_classes:
    if isinstance(pos_classes, list):
        vocab_pos_tags = pos_classes
        # remove all examples with tags not belonging to pos_classes
        for word in pos_tags.keys():
            if pos_tags[word] not in pos_classes:
                del pos_tags[word]
                del emb_dict[word]
    elif isinstance(pos_classes, dict):
        # first map pos tags in vocab_pos_tags to the right class
        vocab_pos_tags = pos_classes.keys()
        if token_based:
            # word-list based classes: class membership decided per token
            for word in pos_tags.keys():
                in_training_set = False
                for c, tokens in pos_classes.iteritems():
                    if word in tokens:
                        # if word in list, map to correct class
                        pos_tags[word] = c
                        in_training_set = True
                if not in_training_set:
                    # otherwise, delete the training example
                    del pos_tags[word]
                    del emb_dict[word]
        else:
            # map all pos tags in training data to right class
            for word in pos_tags.keys():
                in_training_set = False
                for pos_class in pos_classes.keys():
                    if pos_tags[word] in pos_classes[pos_class]:
                        # map to correct class
                        pos_tags[word] = pos_class
                        in_training_set = True
                if not in_training_set:
                    # otherwise, delete the training example
                    del pos_tags[word]
                    del emb_dict[word]
# map POS tags to POS tag ids (and back, for readable reports)
pos_to_id = dict(zip(vocab_pos_tags, range(len(vocab_pos_tags))))
id_to_pos = dict(zip(range(len(vocab_pos_tags)), vocab_pos_tags))
pos_ids = [pos_to_id[pos] for pos in pos_tags.values()]
# create data object
data = emb_data(emb_dict, pos_ids)
# create classifier
model = linear_class(multinomial, token_based)
# train classifier
if not args.no_train:
    model.train(args.name, data.x_train, data.y_train)
if args.grid_search:
    model.results_gridsearch(args.name)
# evaluate on validation and/or test split unless disabled
if not args.no_valid:
    model.test(args.name, data.x_valid, data.y_valid)
if not args.no_test:
    model.test(args.name, data.x_test, data.y_test)
if args.confusion_matrix or args.pca or args.tsne or args.lda:
    # plot for whole dataset
    if args.results_dataset == 'full':
        data_x = data.x_array
        data_y = data.y_array
    # plot for test set only
    else:
        data_x = data.x_test
        data_y = data.y_test
    if args.confusion_matrix:
        pyplot.figure()
        model.show_confusion_matrix(args.name, data_x, data_y, id_to_pos)
    if args.pca:
        model.plot_pca(args.name, data_x, data_y, id_to_pos)
    if args.tsne:
        model.plot_tsne(args.name, data_x, data_y, id_to_pos)
    if args.lda:
        model.plot_lda(args.name, data_x, data_y, id_to_pos)
if args.plot_coef:
    model.plot_coef(args.name)
    #model.retrieve_coef(name)
    #model.compare_coef(name, data.x_array, data.y_array, id_to_pos)
if args.avg_emb:
    model.avg_emb(args.name, data.x_array, data.y_array, id_to_pos)
|
import os
import xml.sax
import unicodedata
# Unicode dash variants that may appear in citation ranges (e.g. "3–5")
dashes = ['֊', '-', '‐', '‑', '‒', '–', '—', '﹘', '﹣', '-']
# publisher abbreviations whose trailing period belongs to the name
# NOTE(review): not referenced in the visible code — confirm it is used elsewhere
correction_regex = r'publisher">[^<]+(Co|Inc|Corp|LP|Crop|corp|Ltd|s\.r\.l|B\.V)</rs>\.'
# TEI tag names that delimit articles, metadata headers, bodies and titles
article_entry = ['TEI']
header_entry = ['teiHeader']
body_entry = ['text']
title_entry = ['title']
def fix_relations(article):
    """Rewrite relation targets from softcite ids to BRAT entity numbers.

    Args:
        article (dictionary): article information coded in a dictionary;
            must hold 'relations' and the 'softcite_id_mapping' lookup
    """
    id_map = article['softcite_id_mapping']
    for relation in article['relations']:
        relation['Arg2'] = id_map[relation['Arg2']]
def repair_citation(text, citation):
    """Adjust article citations to match the "usual" pattern ... [3].

    Args:
        text (string): article text (ends with the raw citation)
        citation (string): citation text to wrap in brackets

    Returns:
        string: adjusted text
    """
    # drop the raw citation (and anything after its last occurrence)
    text = text.rsplit(citation, 1)[0]
    had_trailing_space = text.endswith(' ')
    if had_trailing_space:
        text = text.rstrip()
    # a sentence-final period counts only when it is not part of "et al." / "ref."
    plain_period = (
        text.endswith('.')
        and not text.endswith(' al.')
        and not text.endswith(' ref.')
    )
    if plain_period or text.endswith(',') or text.endswith(';'):
        # move the punctuation mark behind the bracketed citation
        punctuation = text[-1]
        text = text[:-1] + ' ' + '[' + citation + ']' + punctuation
        if had_trailing_space:
            text += ' '
    else:
        if had_trailing_space:
            text += ' '
        text += '[' + citation + ']'
    return text
def is_multi_ref(ref):
    """Test if a citation candidate consists of multiple citations.

    A multi-citation contains only digits, commas and dashes, with at least
    two digits and at least one separator (e.g. "1,2" or "3-5").

    Args:
        ref (string): citation string

    Returns:
        bool: test result
    """
    n_commas = 0
    n_dashes = 0
    n_digits = 0
    for ch in ref:
        if ch.isdigit():
            n_digits += 1
        elif unicodedata.category(ch) == 'Pd':  # any Unicode dash character
            n_dashes += 1
        elif ch == ',':
            n_commas += 1
        else:
            # any other character disqualifies the candidate immediately
            return False
    return n_digits > 1 and (n_commas > 0 or n_dashes > 0)
class TEI_Parser(xml.sax.handler.ContentHandler):
    """Parser for TEI XML software annotation.

    SAX handler that accumulates one dict per <TEI> article containing the
    plain text, entity spans (<rs>), relations and a softcite-id mapping.
    """
    def __init__(self):
        # self.entity_type_list = set()
        self.article_count = 0
        self.running_id = 0  # next entity (T) id within the current article
        self.running_rel_id = 0  # next relation (R) id within the current article
        # state flags tracking where in the document tree we currently are
        self.in_article = False
        self.in_header = False
        self.in_body = False
        self.in_title = False
        self.in_id = False
        self.in_origin = False
        self.read_text = False  # inside a <p> of the body
        self.ref = False  # inside a bibliographic <ref>
        self.rs = False  # inside an <rs> entity
        self.current_ref = ''  # text accumulated for the current <ref>
        self.articles = []
    def add_article(self):
        # start a fresh, empty article record
        self.articles.append({
            'text': '',
            'entities': [],
            'relations': [],
            'softcite_id_mapping': {}
        })
    def startElement(self, name, attrs):
        # Recognize when we are dealing with articles or meta-data
        if name in article_entry:
            if attrs['type'] != 'article':
                raise(RuntimeError("Found type {} -- different from articles".format(attrs['type'])))
            self.add_article()
            self.running_id = 0
            self.running_rel_id = 0
            self.article_count += 1
            self.in_article = True
            self.articles[-1]['subtype'] = attrs['subtype']
        # Recognize when we are dealing with the meta-data of a single article
        if self.in_article and name in header_entry:
            self.in_header = True
        if self.in_header:
            if name in title_entry:
                self.in_title = True
        if self.in_header:
            if name == 'idno' and attrs['type'] == 'PMC':
                self.in_id = True
        if self.in_header:
            if name == 'idno' and attrs['type'] == 'origin':
                self.in_origin = True
        # Recognize when we are inside the text of one specific article
        if self.in_article and not self.in_header and name in body_entry:
            self.in_body = True
            if attrs['xml:lang'] != 'en':
                raise(RuntimeError("Non English article in the set."))
        if self.in_body:
            if name not in ['p', 'ref', 'rs', 'text', 'body']:
                raise(RuntimeError("Found unhandled tag: {}".format(name)))
            if name == 'p':
                self.read_text = True
            if name == 'ref' and 'type' in attrs.keys() and attrs['type'] == 'bibr':
                self.ref = True
            if name == 'rs':
                # make sure the entity starts after a separator character
                if self.articles[-1]['text'] and not self.articles[-1]['text'].endswith((' ', '(', '[', '{')):
                    self.articles[-1]['text'] += ' '
                self.articles[-1]['entities'].append({
                    'id': self.running_id,
                    'type': attrs['type'],
                    'beg': len(self.articles[-1]['text']),
                    'end': -1,  # filled in by endElement once the span closes
                    'string': '',
                    'softcite_id': attrs['xml:id'] if 'xml:id' in attrs.keys() else ''
                })
                if 'xml:id' in attrs.keys():
                    self.articles[-1]['softcite_id_mapping'][attrs['xml:id']] = self.running_id
                if 'corresp' in attrs.keys():
                    # Arg2 is still a softcite xml:id here; it is resolved to an
                    # entity number later by fix_relations()
                    self.articles[-1]['relations'].append({
                        'id': self.running_rel_id,
                        'type': '{}_of'.format(attrs['type']),
                        'Arg1': self.running_id,
                        'Arg2': attrs['corresp'].lstrip('#')
                    })
                    self.running_rel_id += 1
                # self.entity_type_list.update([attrs['type']])
                self.rs = True
                self.running_id += 1
    def endElement(self, name):
        if name in article_entry:
            self.in_article = False
            if self.articles[-1]['text']:
                if self.articles[-1]['relations']:
                    fix_relations(self.articles[-1])
        if name in header_entry:
            self.in_header = False
        if name in body_entry:
            self.in_body = False
        if name in title_entry:
            self.in_title = False
        if name == 'idno':
            self.in_id = False
            self.in_origin = False
        if name == 'p' and self.read_text:
            # paragraph boundary in the accumulated text
            self.articles[-1]['text'] += '\n\n'
            self.read_text = False
        if name == 'ref':
            # rewrite short numeric or multi-number citations into "[n]" form
            if self.current_ref and ((self.current_ref.isdigit() and len(self.current_ref) < 4 ) or is_multi_ref(self.current_ref) ):
                self.articles[-1]['text'] = repair_citation(self.articles[-1]['text'], self.current_ref)
            self.current_ref = ''
            self.ref = False
        if name == 'rs':
            self.articles[-1]['entities'][-1]['end'] = len(self.articles[-1]['text'])
            self.rs = False
    def characters(self, content):
        if self.in_title:
            self.articles[-1]['title'] = content
        if self.in_id:
            self.articles[-1]['PMC'] = content
        if self.in_origin:
            self.articles[-1]['origin'] = content
        if self.read_text:
            self.articles[-1]['text'] += content
        if self.rs:
            # NOTE(review): assignment, not append — if SAX delivers the rs text
            # in several character events only the last chunk is kept; confirm
            # this is intended
            self.articles[-1]['entities'][-1]['string'] = content
        if self.ref:
            self.current_ref += content
def parse(in_file, out_path, write_empty=True):
    """Parse a TEI XML to extract annotated articles and annotation from it.

    Writes one <PMC-or-origin-id>.txt (plain text) and one .ann (BRAT
    standoff annotations) file into out_path for every parsed article.

    Args:
        in_file (PosixPath): file name
        out_path (PosixPath): output path
        write_empty (bool, optional): whether to write empty outputs. Defaults to True.
    """
    parser = xml.sax.make_parser()
    tei_parser = TEI_Parser()
    parser.setContentHandler(tei_parser)
    with in_file.open() as xml_in:
        parser.parse(xml_in)
    print("Parsed {} articles".format(tei_parser.article_count))
    skipped_articles = 0
    for article in tei_parser.articles:
        # prefer the PMC id for the output basename, fall back to origin
        article_name = article['PMC'] if 'PMC' in article.keys() else article['origin']
        if not write_empty and not article['text'].strip():
            skipped_articles += 1
        else:
            out_text = out_path / '{}.txt'.format(article_name)
            out_annotation = out_path / '{}.ann'.format(article_name)
            with out_text.open(mode='w') as out_art, out_annotation.open(mode='w') as out_anno:
                out_art.write(article['text'])
                # BRAT standoff format: T<n> lines for entities, R<n> for relations
                for e in article['entities']:
                    out_anno.write('T{}\t{} {} {}\t{}\n'.format(e['id'], e['type'], e['beg'], e['end'], e['string']))
                for r in article['relations']:
                    out_anno.write('R{}\t{} Arg1:T{} Arg2:T{}\t\n'.format(r['id'], r['type'], r['Arg1'], r['Arg2']))
    print("Skipped {} empty articles.".format(skipped_articles))
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.parallel._utils import _reset_op_id as reset_op_id
from mindspore import context, Tensor, Parameter
from mindspore.parallel import set_algo_parameters
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
    """Wraps a network with a virtual loss so compilation sees a scalar output."""

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        # attribute creation order kept: loss first, then the wrapped network
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x):
        # forward through the wrapped network, then apply the loss
        out = self.network(x)
        return self.loss(out)
class GradWarp(nn.Cell):
    """Wraps a network so that construct() returns gradients w.r.t. all inputs."""

    def __init__(self, network):
        super(GradWarp, self).__init__()
        self.network = network

    def construct(self, x):
        grad_fn = grad_all(self.network)
        return grad_fn(x)
class Net(nn.Cell):
    """Star-topology network: mul1's output fans out into two branches that
    are added back together, exercising strategy consistency checks."""

    def __init__(self, strategy_dict=None):
        super(Net, self).__init__()
        self.mul1 = P.Mul()
        self.mul2 = P.Mul()
        self.mul3 = P.Mul()
        self.mul4 = P.Mul()
        self.relu1 = P.ReLU()
        self.relu2 = P.ReLU()
        self.ba1 = P.BiasAdd()
        self.add = P.Add()
        self.weight = Parameter(Tensor(np.ones([128, 1000]), dtype=ms.float32), name="weight")
        self.bias = Parameter(Tensor(np.ones([1000]), dtype=ms.float32), name="bias")
        if strategy_dict is not None:
            # mul3 and mul4 get no explicit strategy here; the auto-parallel
            # search must choose consistent ones for them
            self.mul1.shard(strategy_dict["mul1"])
            self.mul2.shard(strategy_dict["mul2"])
            self.relu1.shard(strategy_dict["relu1"])
            self.relu2.shard(strategy_dict["relu2"])
            self.ba1.shard(strategy_dict["bias_add"])
            self.add.shard(strategy_dict["add"])

    def construct(self, inputs):
        # branch 1: mul1 -> relu1 -> mul2
        x = self.mul1(inputs, self.weight)
        y = self.relu1(x)
        y = self.mul2(y, self.weight)
        # branch 2: mul1 output -> mul3 -> bias_add
        z = self.mul3(x, self.weight)
        z = self.ba1(z, self.bias)
        # join the branches, then mul4 -> relu2
        x = self.add(y, z)
        x = self.mul4(x, self.weight)
        x = self.relu2(x)
        return x
def test_star_strategy_consistency1():
    """Auto-parallel compiles with this mix of manual and searched strategies."""
    device_num = 8
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    set_algo_parameters(fully_use_devices=False)
    inputs = Tensor(np.ones([128, 1000]), dtype=ms.float32)
    strategies = {"mul1": ((2, 4), (2, 4)), "mul2": None, "relu1": ((4, 1),), "bias_add": ((8, 1), (1,)),
                  "relu2": ((2, 2),), "add": ((1, 8), (1, 8))}
    net = NetWithLoss(Net(strategies))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    reset_op_id()
    net.set_train()
    _cell_graph_executor.compile(net, inputs, phase='train')
def test_star_strategy_consistency2():
    """Auto-parallel compiles with this mix of manual and searched strategies."""
    device_num = 8
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    set_algo_parameters(fully_use_devices=False)
    inputs = Tensor(np.ones([128, 1000]), dtype=ms.float32)
    strategies = {"mul1": None, "mul2": ((1, 4), (1, 4)), "relu1": ((2, 1),), "bias_add": ((4, 2), (2,)),
                  "relu2": ((2, 2),), "add": ((8, 1), (8, 1))}
    net = NetWithLoss(Net(strategies))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    reset_op_id()
    net.set_train()
    _cell_graph_executor.compile(net, inputs, phase='train')
def test_star_strategy_consistency3():
    """Auto-parallel compiles with this mix of manual and searched strategies."""
    device_num = 8
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    set_algo_parameters(fully_use_devices=False)
    inputs = Tensor(np.ones([128, 1000]), dtype=ms.float32)
    strategies = {"mul1": None, "mul2": None, "relu1": ((8, 1),), "bias_add": ((1, 4), (4,)),
                  "relu2": ((4, 1),), "add": ((2, 2), (2, 2))}
    net = NetWithLoss(Net(strategies))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    reset_op_id()
    net.set_train()
    _cell_graph_executor.compile(net, inputs, phase='train')
def test_star_strategy_consistency4():
    """Compilation with this strategy combination is expected to raise RuntimeError."""
    device_num = 8
    context.set_auto_parallel_context(device_num=device_num, global_rank=0)
    set_algo_parameters(fully_use_devices=False)
    inputs = Tensor(np.ones([128, 1000]), dtype=ms.float32)
    strategies = {"mul1": ((1, 8), (1, 8)), "mul2": ((1, 4), (1, 4)), "relu1": None, "bias_add": None,
                  "relu2": None, "add": None}
    net = NetWithLoss(Net(strategies))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    reset_op_id()
    with pytest.raises(RuntimeError):
        net.set_train()
        _cell_graph_executor.compile(net, inputs, phase='train')
|
# draw a purple box with scene defaults; `box` and `color` are presumably
# provided by a star-import of vpython/visual — TODO confirm
box(color=color.purple)
from . import scripts
|
# Smoke test: verify that the TensorRT Python bindings import and report a version.
print('testing TensorRT...')
import tensorrt
version_line = 'TensorRT version: ' + str(tensorrt.__version__)
print(version_line)
print('TensorRT OK\n')
|
load(":forwarding.bzl", "transition_and_forward_providers_factory")
load(":utils.bzl", "attr_from_value", "is_dict", "is_list", "is_select", "is_struct", "REPLACE_ONLY_LIST_COMMAND_LINE_OPTIONS")
def _wrap_with_transition(
        original_rule,
        settings,
        executable = False,
        test = False,
        extra_providers = []):
    """Creates a new rule that behaves like an existing rule but also modifies build settings.

    Args:
        original_rule: The existing rule to wrap (e.g., native.cc_binary).
        settings: A dictionary of settings changes to apply.
        executable: Whether the new rule should be executable (default: False).
        test: Whether the new rule should be a test rule (default: False).
        extra_providers: Additional providers that the wrapping rule should forward from the original rule.

    Returns:
        A new rule that behaves like the original rule after applying the provided changes to the build settings.
    """
    is_native_rule = str(original_rule).startswith("<built-in rule ")
    native_rule_name = None  # NOTE(review): computed but unused in the visible body — confirm
    if is_native_rule:
        native_rule_name = str(original_rule)[len("<built-in rule "):-1]

    # Partition the requested settings into plain values and select()-able dict
    # values (which must travel through generated rule attributes), remembering
    # the append/replace mode for each setting.
    raw_value_settings = {}
    attr_settings = {}
    attr_counter = 0
    settings_mode = {}
    for setting, value in settings.items():
        full_setting = _maybe_add_command_line_option_prefix(setting)
        if is_struct(value):
            if not hasattr(value, "mode") or not hasattr(value, "value"):
                fail("Value for setting '%s' cannot be a struct" % setting)
            settings_mode[full_setting] = value.mode
            value = value.value
        else:
            settings_mode[full_setting] = _autodetect_mode(full_setting)
        if is_dict(value):
            # dict values become select()s carried by a generated attribute
            attr_settings[full_setting] = struct(
                name = "attr_%d" % attr_counter,
                type = attr_from_value(value),
                value = select(value),
            )
            attr_counter += 1
        elif is_select(value):
            fail("Instead of select({...}), use {...} as the value of setting '%s'." % setting)
        else:
            raw_value_settings[full_setting] = value
    all_settings = raw_value_settings.keys() + attr_settings.keys()

    def _transition_impl(input_settings, attrs):
        updated_settings = {}
        for setting in all_settings:
            if setting in raw_value_settings:
                new_value = raw_value_settings[setting]
            else:
                new_value = getattr(attrs, attr_settings[setting].name)

            # Some setting types do not allow reading from Starlark, so we have to wrap them in a lambda to defer
            # evaluation until we know it's safe. Otherwise, we get Bazel server crashes such as:
            # java.lang.IllegalArgumentException: cannot expose internal type to Starlark: class com.google.devtools.build.lib.rules.cpp.CppConfiguration$DynamicMode
            updated_settings[setting] = _get_updated_value(settings_mode[setting], lambda: input_settings[setting], new_value)
        return updated_settings

    _transition = transition(
        implementation = _transition_impl,
        inputs = all_settings,
        outputs = all_settings,
    )
    _apply_transition_rule = transition_and_forward_providers_factory(
        _transition,
        attrs = {
            attr.name: attr.type
            for attr in attr_settings.values()
        },
        executable = executable,
        test = test,
        extra_providers = extra_providers,
    )

    def _wrapper_macro(name, visibility = None, tags = None, testonly = None, **kwargs):
        # Use a subdirectory to preserve the basename but still prevent a name
        # collision with the transition rule.
        orig_name = "{name}/{name}".format(name = name)
        internal_rule_tags = list(tags or [])
        if "manual" not in internal_rule_tags:
            internal_rule_tags.append("manual")

        # Native test rules offer an env attribute that has to be moved to the wrapper.
        wrapper_env = kwargs.pop("env", default = None) if is_native_rule else None
        wrapper_env_inherit = kwargs.pop("env_inherit", default = None) if is_native_rule else None

        # All executable rules offer an args attribute that has to be moved to the wrapper.
        wrapper_args = kwargs.pop("args", default = None) if (executable or test) else None
        original_rule(
            name = orig_name,
            tags = internal_rule_tags,
            testonly = testonly,
            visibility = ["//visibility:private"],
            **kwargs
        )
        _apply_transition_rule(
            name = name,
            args = wrapper_args,
            env = wrapper_env,
            env_inherit = wrapper_env_inherit,
            exports = ":" + orig_name,
            tags = tags,
            testonly = testonly,
            visibility = visibility,
            **{
                attr.name: attr.value
                for attr in attr_settings.values()
            }
        )

    return _wrapper_macro
def _append(value):
    """Marks *value* to be appended to the setting's current value."""
    return struct(
        mode = _MODE_APPEND,
        value = value,
    )
def _replace_with(value):
    """Marks *value* to fully replace the setting's current value."""
    return struct(
        mode = _MODE_REPLACE,
        value = value,
    )
# Public API of this module.
meta = struct(
    append = _append,
    replace_with = _replace_with,
    wrap_with_transition = _wrap_with_transition,
)

# Internal sentinel strings distinguishing append vs. replace semantics.
_MODE_APPEND = "rules_meta_append"
_MODE_REPLACE = "rules_meta_replace"
def _get_updated_value(mode, current_value, new_value):
    """Returns the new setting value, honoring append vs. replace semantics.

    current_value is a zero-arg callable so reading the input setting can be
    deferred until we know the append path actually needs it.
    """
    if is_list(new_value) and mode != _MODE_REPLACE:
        return current_value() + new_value
    return new_value
_COMMAND_LINE_OPTION_PREFIX = "//command_line_option:"

def _maybe_add_command_line_option_prefix(setting):
    """Prefixes bare option names; labels (starting with non-alpha) pass through."""
    if setting and setting[0].isalpha():
        return _COMMAND_LINE_OPTION_PREFIX + setting
    return setting
def _autodetect_mode(setting):
    """Picks an update mode for a setting that did not specify one explicitly."""
    # Starlark build settings default to append semantics.
    if not setting.startswith(_COMMAND_LINE_OPTION_PREFIX):
        return _MODE_APPEND
    option = setting[len(_COMMAND_LINE_OPTION_PREFIX):]
    if option in REPLACE_ONLY_LIST_COMMAND_LINE_OPTIONS:
        fail("""In most cases, the value of the command-line option '--%s' should be fully replaced, not appended to as is the default for meta.wrap_with_transition.
You probably want to wrap the value with meta.replace_with(...). If you really want the default behavior, wrap the value with meta.append(...).""" % option)
    # NOTE(review): falls through returning None for other command-line options;
    # _get_updated_value only compares against _MODE_REPLACE, so None behaves
    # like append — an explicit `return _MODE_APPEND` would be clearer.
|
"""
"polymorphic" associations, ala ActiveRecord.
In this example, we are specifically targeting this ActiveRecord
functionality:
http://wiki.rubyonrails.org/rails/pages/UnderstandingPolymorphicAssociations
The term "polymorphic" here means "object X can be referenced by objects A, B,
and C, along a common line of association".
In this example we illustrate the relationship in both directions. A little
bit of property magic is used to smooth the edges.
AR creates this relationship in such a way that disallows any foreign key
constraint from existing on the association. For a different way of doing
this, see poly_assoc_fks.py. The interface is the same, the efficiency is more
or less the same, but foreign key constraints may be used. That example also
better separates the associated target object from those which associate with
it.
"""
from sqlalchemy import MetaData, Table, Column, Integer, String, and_
from sqlalchemy.orm import mapper, relationship, sessionmaker, \
class_mapper, backref
# in-memory SQLite engine bound to the metadata (legacy bound-metadata style)
metadata = MetaData('sqlite://')
#######
# addresses table, class, 'addressable interface'.
addresses = Table("addresses", metadata,
    Column('id', Integer, primary_key=True),
    # polymorphic "foreign key": owner row id plus the owner's table name
    Column('addressable_id', Integer),
    Column('addressable_type', String(50)),
    Column('street', String(100)),
    Column('city', String(50)),
    Column('country', String(50))
)
class Address(object):
    """An address row that knows which kind of owner it belongs to."""

    def __init__(self, type):
        # name of the owning table, e.g. 'users' or 'orders'
        self.addressable_type = type

    @property
    def member(self):
        """Return the owning object via the type-specific backref attribute."""
        backref_attr = '_backref_' + self.addressable_type
        return getattr(self, backref_attr)
def addressable(cls, name, uselist=True):
    """addressable 'interface'.

    Adds an address relationship named *name* to mapped class *cls*, plus a
    create_address() helper. if you really wanted to make a "generic" version
    of this function, it's straightforward.
    """
    # create_address function, imitates the rails example.
    # we could probably use property tricks as well to set
    # the Address object's "addressabletype" attribute.
    def create_address(self):
        a = Address(table.name)
        if uselist:
            getattr(self, name).append(a)
        else:
            setattr(self, name, a)
        return a
    # NOTE: local name shadows the imported mapper() function inside this scope
    mapper = class_mapper(cls)
    table = mapper.local_table
    cls.create_address = create_address
    # no constraints. therefore define constraints in an ad-hoc fashion.
    primaryjoin = and_(
        list(table.primary_key)[0] == addresses.c.addressable_id,
        addresses.c.addressable_type == table.name
    )
    foreign_keys = [addresses.c.addressable_id]
    mapper.add_property(name, relationship(
        Address,
        primaryjoin=primaryjoin,
        uselist=uselist,
        foreign_keys=foreign_keys,
        backref=backref('_backref_%s' % table.name,
            primaryjoin=list(table.primary_key)[0] ==\
                addresses.c.addressable_id,
            foreign_keys=foreign_keys)
        )
    )
mapper(Address, addresses)
######
# sample # 1, users
users = Table("users", metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(50), nullable=False)
)
class User(object):
    pass
mapper(User, users)
# users get a list of addresses
addressable(User, 'addresses', uselist=True)
######
# sample # 2, orders
orders = Table("orders", metadata,
    Column('id', Integer, primary_key=True),
    Column('description', String(50), nullable=False))
class Order(object):
    pass
mapper(Order, orders)
# orders get a single (scalar) address rather than a list
addressable(Order, 'address', uselist=False)
######
# use it !
metadata.create_all()
u1 = User()
u1.name = 'bob'
o1 = Order()
o1.description = 'order 1'
# create addresses via the injected create_address() helper
a1 = u1.create_address()
a1.street = '123 anywhere street'
a2 = u1.create_address()
a2.street = '345 orchard ave'
a3 = o1.create_address()
a3.street = '444 park ave.'
sess = sessionmaker()()
sess.add(u1)
sess.add(o1)
sess.commit()
# query objects, get their addresses
bob = sess.query(User).filter_by(name='bob').one()
assert [s.street for s in bob.addresses] == ['123 anywhere street', '345 orchard ave']
order = sess.query(Order).filter_by(description='order 1').one()
assert order.address.street == '444 park ave.'
# query from Address to members
# (Python 2 print statement — this example predates Python 3)
for address in sess.query(Address).all():
    print "Street", address.street, "Member", address.member
|
#2 layer neural network
import numpy as np
import time
#variables
n_hidden = 10 # number of hidden neurons, array of 10 input values and compare to 10 other values and compute XOR
n_in = 10 #outputs
n_out = 10
n_samples = 300
#hyperparameters
learning_rate = 0.01 #defines how fast we want to netowrk to learn
momentum = 0.9
np.random.seed(0) #seed ensures that we will generate the same "random" values every time we run the code
#activation function -
#sigmoid function - turns numbers into probabilities
#input data which is numbers when come through neural, each of weight is a set of probabilities
#this probabilities are updated when we train out network
#every time input data hits one of neurons it is going to turn number into probability
#we will use 2 activation functions
def sigmoid(x):
    """Logistic activation: maps any real input into (0, 1)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
def tanh_prime(x):
    """Derivative of tanh: 1 - tanh(x)^2, used during backpropagation."""
    t = np.tanh(x)
    return 1 - t * t
#train: one forward/backward pass over a single sample
#x - input vector, t - target vector
#V, W - weight matrices of the two layers
#bv, bw - per-layer bias vectors
def train(x, t, V, W, bv, bw):
    """Run one forward/backward pass; returns the loss and all gradients."""
    # forward pass: linear -> tanh -> linear -> sigmoid
    hidden_in = np.dot(x, V) + bv
    hidden_out = np.tanh(hidden_in)
    output_in = np.dot(hidden_out, W) + bw
    prediction = sigmoid(output_in)
    # backward pass: output error, then hidden error through tanh'
    err_out = prediction - t
    err_hidden = tanh_prime(hidden_in) * np.dot(W, err_out)
    # weight gradients as outer products of activations and errors
    grad_W = np.outer(hidden_out, err_out)
    grad_V = np.outer(x, err_hidden)
    # binary cross-entropy loss (classification objective)
    loss = -np.mean(t * np.log(prediction) + (1 - t) * np.log(1 - prediction))
    return loss, (grad_V, grad_W, err_hidden, err_out)
def predict(x, V, W, bv, bw):
    """Forward pass thresholded at 0.5 to produce binary outputs."""
    hidden = np.tanh(np.dot(x, V) + bv)
    probs = sigmoid(np.dot(hidden, W) + bw)
    return (probs > 0.5).astype(int)
#create layers: small random weights, zero biases
V = np.random.normal(scale=0.1, size=(n_in, n_hidden))
W = np.random.normal(scale=0.1, size=(n_hidden, n_out))
bv = np.zeros(n_hidden)
bw = np.zeros(n_out)
params = [V, W, bv, bw]
#generate data: random bit vectors; targets are the bitwise complement
#of the inputs (XOR with 1)
X = np.random.binomial(1, 0.5, (n_samples, n_in))
T = X ^ 1
#Training time: plain SGD with momentum over 100 epochs
for epoch in range(100):
    err = []
    upd = [0] * len(params)
    # time.clock() was removed in Python 3.8; perf_counter() is its replacement
    t0 = time.perf_counter()
    #for each data point we want to update the weights of our network
    for i in range(X.shape[0]):
        loss, grad = train(X[i], T[i], *params)
        # apply the previous momentum-smoothed update, then refresh it
        for j in range(len(params)):
            params[j] -= upd[j]
        for j in range(len(params)):
            # BUGFIX: momentum must SCALE the previous update
            # (momentum * upd[j]), not be added to it as a constant —
            # the old "momentum + upd[j]" added 0.9 to every update
            upd[j] = learning_rate * grad[j] + momentum * upd[j]
        err.append(loss)
    print("Epoch: %d, Loss: %.8f, Time: %.4fs" % (
        epoch, np.mean(err), time.perf_counter() - t0))
#try to predict on a fresh random input
x = np.random.binomial(1, 0.5, n_in)
print ('XOR Predict')
print (x)
print(predict(x, *params))
|
from .gat import GAT
from .gcn import GCN
from .compgcn_conv import *
from .compgcn_conv_basis import *
from .rgcn_conv import *
from .message_passing import *
from .models import *
from .helper import construct_adj
# Explicit public API of this subpackage; the star-imports above may bind
# additional names (e.g. from .models) that are deliberately not re-exported.
__all__ = [
    "GAT",
    "GCN",
    "CompGCNConv",
    "CompGCNConvBasis",
    "RGCNConv",
    "construct_adj",
]
|
def upload_args() -> None:
    """Print the reference documentation for the supported upload_options keys."""
    raw = """
    The upload_options dictionary contains the following possible keys:
    truncate_table:
    Default: False
    Tells the program to run "truncate <table>" before copying the data
    drop_table:
    Default: False
    Tells the program to run "drop table <table>; create table <table>" before copying data
    cleanup_s3:
    Default: True
    Tells the program to try to delete the file in S3 after copying to Redshift
    grant_access:
    Default: []
    A list of individuals/groups to grant select access to table
    diststyle:
    Default: "even"
    The diststyle for a table. See https://docs.aws.amazon.com/redshift/latest/dg/c_choosing_dist_sort.html for more details on options
    distkey:
    Default: None
    The column to distribute the table based on. Only allowed when diststyle = "key"
    sortkey:
    Default: None
    The column to sort the table on
    load_in_parallel:
    Default: None
    The number of s3 files to seperate the file into. If None, defaults to sqrt of the num_rows. See more for why we do this here: https://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
    default_logging:
    Default: True
    Sets up a basic logger on STDOUT
    skip_checks:
    Default: False
    Skips integrity checks on the type, etc of the file being uploaded
    skip_views:
    Default: False
    Does not attempt to save/reinstantiate view
    allow_alter_table
    Default: False
    If true and there are new columns in the local data, adds them to the Redshift table
    """.strip()
    # strip per-line indentation so the printed text is flush left
    cleaned = [line.lstrip() for line in raw.split("\n")]
    print("\n".join(cleaned))
|
# Copyright 2021 OpenRCA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import time
from orca.common import utils
class Collector(abc.ABC):
    """Base class for garbage collectors."""

    def __init__(self, graph):
        # graph backend that the collector scans for removable nodes
        self._graph = graph

    @abc.abstractmethod
    def collect(self):
        """Collects graph nodes for removal."""
class StaleNodeCollector(Collector):
    """Collects graph nodes based on staleness period."""

    def __init__(self, graph, node_spec, staleness_period=300):
        super().__init__(graph)
        self._node_spec = node_spec
        # seconds without an update after which a node is considered stale
        self._staleness_period = staleness_period

    def collect(self):
        """Return nodes of the configured origin/kind not updated recently enough."""
        candidates = self._graph.get_nodes(
            properties={'origin': self._node_spec.origin, 'kind': self._node_spec.kind})
        # get_utc() is re-read per node, mirroring the original per-item check
        return [node for node in candidates
                if utils.get_utc() - node.updated_at > self._staleness_period]
|
"""Utility functions."""
from typing import Any
def is_empty(data: Any) -> bool:
    """Checks if argument is empty.

    Empty means None, the empty string, or the literal string 'null'.

    Args:
        data (Any): To check if empty

    Returns:
        bool: Returns bool indicating if empty
    """
    return data is None or data in ('', 'null')
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class REdger(RPackage):
    """Differential expression analysis of RNA-seq expression profiles with
    biological replication. Implements a range of statistical methodology
    based on the negative binomial distributions, including empirical Bayes
    estimation, exact tests, generalized linear models and quasi-likelihood
    tests. As well as RNA-seq, it can be applied to differential signal
    analysis of other types of genomic data that produce counts, including
    ChIP-seq, SAGE and CAGE."""

    homepage = "https://bioconductor.org/packages/edgeR/"
    url = "https://git.bioconductor.org/packages/edgeR"
    list_url = homepage

    # Bioconductor releases are pinned to an exact git commit for reproducibility.
    version('3.18.1', git='https://git.bioconductor.org/packages/edgeR', commit='101106f3fdd9e2c45d4a670c88f64c12e97a0495')

    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-locfit', type=('build', 'run'))
    # edgeR 3.18.x is only compatible with the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@3.18.1')
|
# -*- coding: utf-8 -*-
# Package identity used when registering the extension with Jupyter.
name = "jupyterexcel"
__version__ = '0.0.8'
# Jupyter Extension points
def _jupyter_nbextension_paths():
return [dict(
section="notebook",
src="",
dest="jupyterexcel")]
def _jupyter_server_extension_paths():
return [{"module":"jupyterexcel.server_extension"}]
|
import os
import os.path
import unittest
from programy.parser.pattern.factory import PatternNodeFactory
class PatternNodesStoreAsserts(unittest.TestCase):
    """Reusable assertion helpers for pattern-node store implementations.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual``; the repeated fixture-path expression is
    factored into one helper.
    """

    @staticmethod
    def _nodes_file():
        # Path to the bundled pattern-node definition fixture.
        return os.path.join(os.path.dirname(__file__), "data", "nodes", "pattern_nodes.conf")

    def assert_load(self, store):
        """Loading the fixture yields 12 node classes, including zeroormore."""
        store.empty()
        store.upload_from_file(self._nodes_file())
        collection = PatternNodeFactory()
        store.load(collection)
        self.assertEqual(12, len(collection.nodes))
        self.assertTrue(collection.exists("zeroormore"))

    def assert_load_exception(self, store):
        """A failing store loads nothing from the fixture."""
        store.empty()
        store.upload_from_file(self._nodes_file())
        collection = PatternNodeFactory()
        store.load(collection)
        self.assertEqual(0, len(collection.nodes))
        self.assertFalse(collection.exists("zeroormore"))

    def assert_upload_from_file(self, store, verbose=False):
        """Uploading the fixture reports 17 lines seen and 12 accepted."""
        store.empty()
        count, success = store.upload_from_file(self._nodes_file(), verbose=verbose)
        self.assertEqual(17, count)
        self.assertEqual(12, success)

    def assert_upload_from_file_exception(self, store):
        """A failing store reports zero counts for the upload."""
        store.empty()
        count, success = store.upload_from_file(self._nodes_file())
        self.assertEqual(0, count)
        self.assertEqual(0, success)
|
import datetime
from peewee import *
# Single module-level SQLite database shared by all models.
DATABASE = SqliteDatabase('spaces.sqlite')

class Space(Model):
    """A saved 'space': serialized field values plus a creation timestamp."""
    # Serialized form-field values for the space.
    field_values = CharField()
    # Callable default: evaluated per-row at insert time, not at import time.
    created_at = DateTimeField(default= datetime.datetime.now)

    class Meta:
        database = DATABASE
def initialize():
    """Create the database tables (if missing) and release the connection."""
    DATABASE.connect()
    # safe=True: skip tables that already exist instead of raising.
    DATABASE.create_tables([Space], safe=True)
    DATABASE.close()
|
# Third party
from pkg_resources import DistributionNotFound, get_distribution
try:
    # Resolve the installed package version from setuptools metadata.
    __version__ = get_distribution("edapy").version
except DistributionNotFound:
    # Running from a source checkout without installation.
    __version__ = "Not installed"
|
import pytest
from ioccheck.exceptions import InvalidHashException
from ioccheck.ioc_types import MD5, SHA256
from ioccheck.iocs import Hash
from ioccheck.services import MalwareBazaar, VirusTotal
class TestHashCreation:
    """Tests for instantiating Hash() objects."""
class TestHashGuesses:
    """Hash-type detection with and without an explicit hash_type hint.

    NOTE(review): hash_1, hash_2 and config_file are pytest fixtures defined
    elsewhere; from the assertions, hash_1 appears to be a SHA256 digest and
    hash_2 an MD5 digest — confirm against conftest.
    """
    def test_sha256_guess(self, hash_1, config_file):
        # Type is inferred from the digest when no hint is given.
        assert Hash(hash_1, config_path=config_file).hash_type == SHA256
    def test_sha256_guess_2(self, hash_1, config_file):
        # An explicit, matching hint is accepted.
        assert (
            Hash(hash_1, hash_type=SHA256, config_path=config_file).hash_type
            == SHA256
        )
    def test_sha256_guess_3(self, hash_2, config_file):
        # A mismatched hint (MD5 digest claimed as SHA256) must raise.
        with pytest.raises(InvalidHashException):
            assert Hash(hash_2, hash_type=SHA256, config_path=config_file)
    def test_md5_guess(self, hash_2, config_file):
        assert Hash(hash_2, config_path=config_file).hash_type == MD5
    def test_md5_guess_2(self, hash_2, config_file):
        assert Hash(hash_2, hash_type=MD5, config_path=config_file).hash_type == MD5
    def test_md5_guess_3(self, hash_1, config_file):
        with pytest.raises(InvalidHashException):
            assert Hash(hash_1, hash_type=MD5, config_path=config_file)
class TestInvalidHashExceptions:
    """Invalid inputs (wrong length, wrong type, empty, None) must raise."""
    @pytest.mark.parametrize(
        "file_hash,hash_type",
        [
            ("12345", MD5),
            ("12345", SHA256),
            ("", MD5),
            ("", SHA256),
            (1, SHA256),
            (1, MD5),
            (1, None),
            (None, SHA256),
            (None, MD5),
            (SHA256, None),
            (SHA256, ""),
            (MD5, None),
            ([], SHA256),
            ([], MD5),
            ([], None),
            ({}, None),
            ("abc", None),
            ("abc", MD5),
            ("abc", SHA256),
        ],
    )
    def test_invalid_hash_exception(self, file_hash, hash_type, config_file):
        with pytest.raises(InvalidHashException):
            Hash(file_hash, hash_type, config_path=config_file)
|
# Read numbers from the user until 'done', then report total, count, average.
#
# Fixes: the bare 'except' wrapped input() as well, so Ctrl-C was swallowed
# and reported as 'Bad data'; and total / count crashed with
# ZeroDivisionError when no valid numbers were entered.
count = 0
total = 0
while True:
    entry = input('Enter a number:\n ')  # Getting the user's input
    if entry == 'done':
        break
    try:
        value = int(entry)
    except ValueError:
        # Non-numeric input is reported and skipped rather than aborting.
        print('Bad data')
        continue
    total = total + value  # Calculating the total of the input
    count = count + 1      # Counting how many numbers the user has input
print('Total: ', total)
print('Count: ', count)
# Guard against division by zero when no valid numbers were entered.
if count > 0:
    print('Average: ', total / count)
else:
    print('Average: no numbers entered')
|
from django.conf import settings
#Site Settings
# Each value may be overridden in the project's Django settings module;
# the second getattr argument is the fallback default.
SITE_NAME = getattr(settings, 'SITE_NAME', 'Replica')
SITE_DESC = getattr(settings, 'SITE_DESC', 'Just another blog.')
SITE_URL = getattr(settings, 'SITE_URL', 'http://localhost')
SITE_AUTHOR = getattr(settings, 'SITE_AUTHOR', 'Tyler')
DECK_ENTS = getattr(settings, 'REPLICA_DECK_ENTS', False)
# Page sizes for entry and topic listings.
PAGINATE = getattr(settings, 'REPLICA_PAGINATE', 25)
PAGINATE_TOPICS = getattr(settings, 'REPLICA_PAGINATE_TOPICS', 25)
#Enable plugins
ENABLE_BLIP = getattr(settings, 'REPLICA_ENABLE_BLIP', False)
ENABLE_WHISPER = getattr(settings, 'REPLICA_ENABLE_WHISPER', False)
ENABLE_MZINE = getattr(settings, 'REPLICA_ENABLE_MZINE', False)
|
import logging
import urllib
import requests
from django.core.cache import cache
from django.conf import settings
class BadStatusCodeError(Exception):
    """Raised when the Mozillians API answers with a non-200 status code."""
def _fetch_users(email=None, group=None, is_username=False, **options):
    """Query the Mozillians v2 users API.

    Returns the decoded JSON payload, or False when MOZILLIANS_API_KEY is not
    configured. Extra keyword arguments are passed through as query
    parameters. Raises BadStatusCodeError on any non-200 response.
    """
    if not getattr(settings, 'MOZILLIANS_API_KEY', None):  # pragma no cover
        logging.warning("'MOZILLIANS_API_KEY' not set up.")
        return False
    url = settings.MOZILLIANS_API_BASE + '/api/v2/users/'
    options['api-key'] = settings.MOZILLIANS_API_KEY
    if email:
        # The same parameter doubles as a username lookup when requested.
        if is_username:
            options['username'] = email
        else:
            options['email'] = email
    if group:
        if isinstance(group, (list, tuple)):  # pragma: no cover
            raise NotImplementedError(
                'You can not find users by MULTIPLE groups'
            )
        options['group'] = group
    # NOTE(review): urllib.urlencode is Python-2 only; Python 3 would need
    # urllib.parse.urlencode — confirm the target interpreter.
    url += '?' + urllib.urlencode(options)
    resp = requests.get(url)
    if resp.status_code != 200:
        # Scrub the API key before it can leak into the exception text.
        url = url.replace(settings.MOZILLIANS_API_KEY, 'xxxscrubbedxxx')
        raise BadStatusCodeError('%s: on: %s' % (resp.status_code, url))
    return resp.json()
def _fetch_user(url):
    """GET a single user's detail *url* (with the API key appended) and
    return the decoded JSON. Raises BadStatusCodeError on non-200."""
    options = {}
    # Guard against accidentally appending the key twice.
    assert 'api-key=' not in url, url
    options['api-key'] = settings.MOZILLIANS_API_KEY
    url += '?' + urllib.urlencode(options)
    resp = requests.get(url)
    if resp.status_code != 200:
        # Scrub the API key before it can leak into the exception text.
        url = url.replace(settings.MOZILLIANS_API_KEY, 'xxxscrubbedxxx')
        raise BadStatusCodeError('%s: on: %s' % (resp.status_code, url))
    return resp.json()
def is_vouched(email):
    """Return the vouched flag of the first user matching *email*, else False."""
    found = _fetch_users(email)
    if not found:
        return False
    for record in found['results']:
        return record['is_vouched']
    return False
def fetch_user(email, is_username=False):
    """Fetch the full profile of the first user matching *email* (treated as a
    username when *is_username* is True); None when nothing matches."""
    found = _fetch_users(email, is_username=is_username)
    if not found:
        return None
    for record in found['results']:
        return _fetch_user(record['_url'])
    return None
def fetch_user_name(email, is_username=False):
    """Return the user's full name, but only when it is marked Public."""
    profile = fetch_user(email, is_username=is_username)
    if not profile:
        return None
    name_field = profile.get('full_name')
    if name_field and name_field['privacy'] == 'Public':
        return name_field['value']
    return None
def in_group(email, group):
    """Return True if the user matching *email* belongs to *group*.

    Fix: _fetch_users returns False when MOZILLIANS_API_KEY is unset, which
    previously raised TypeError on ``content['results']``; also replaces the
    ``not not`` idiom with ``bool``.
    """
    if isinstance(group, list):  # pragma: no cover
        raise NotImplementedError('supply a single group name')
    content = _fetch_users(email, group=group)
    if not content:
        # API not configured (or empty payload): treat as "not in group".
        return False
    return bool(content['results'])
def _fetch_groups(order_by='name', url=None, name=None, name_search=None):
    """Query the Mozillians v2 groups API (one page).

    *url* lets callers follow pagination links verbatim; otherwise the base
    groups endpoint is used. Returns the decoded JSON, or False when the API
    key is not configured. Raises BadStatusCodeError on non-200.

    NOTE(review): *order_by* is accepted but never sent — confirm intent.
    """
    if not getattr(settings, 'MOZILLIANS_API_KEY', None):  # pragma no cover
        logging.warning("'MOZILLIANS_API_KEY' not set up.")
        return False
    if not url:
        url = settings.MOZILLIANS_API_BASE + '/api/v2/groups/'
    data = {
        'api-key': settings.MOZILLIANS_API_KEY,
    }
    if name:
        data['name'] = name
    if name_search:
        data['name__icontains'] = name_search
    url += '?' + urllib.urlencode(data)
    resp = requests.get(url)
    if resp.status_code != 200:
        # Scrub the API key before it can leak into the exception text.
        url = url.replace(settings.MOZILLIANS_API_KEY, 'xxxscrubbedxxx')
        raise BadStatusCodeError('%s: on: %s' % (resp.status_code, url))
    return resp.json()
def get_all_groups(name=None, name_search=None):
    """Page through the groups endpoint and return every matching group."""
    groups = []
    page_url = None
    while True:
        page = _fetch_groups(
            name=name,
            name_search=name_search,
            url=page_url,
        )
        groups.extend(page['results'])
        # The API reports the total count; stop once we have collected it all.
        if len(groups) >= page['count']:
            return groups
        page_url = page['next']
def get_all_groups_cached(name_search=None, lasting=60 * 60):
    """Return all groups, memoised in the Django cache for *lasting* seconds.

    A short-lived lock key prevents a refresh stampede: while another process
    is filling the cache, callers get [] instead of hitting the API too.

    NOTE(review): *name_search* is accepted but never forwarded to
    get_all_groups(), and the cache key ignores it — looks like a bug; confirm.
    """
    cache_key = 'all_mozillian_groups'
    cache_key_lock = cache_key + 'lock'
    all_groups = cache.get(cache_key)
    if all_groups is None:
        if cache.get(cache_key_lock):
            # Another worker is refreshing; return empty rather than block.
            return []
        # Hold the lock for at most 60 s in case the refresh dies midway.
        cache.set(cache_key_lock, True, 60)
        all_groups = get_all_groups()
        cache.set(cache_key, all_groups, lasting)
        cache.delete(cache_key_lock)
    return all_groups
def get_contributors():
    """Return a list of all users who are in the
    https://mozillians.org/en-US/group/air-mozilla-contributors/ group
    and whose usernames are in the settings.CONTRIBUTORS list.
    Return them in the order of settings.CONTRIBUTORS.
    """
    _users = _fetch_users(group='air mozilla contributors', is_vouched=True)
    # turn that into a dict of username -> url
    urls = dict(
        (x['username'], x['_url'])
        for x in _users['results']
        if x['username'] in settings.CONTRIBUTORS
    )
    users = []
    # Iterate CONTRIBUTORS (not the API result) to preserve the settings order.
    for username in settings.CONTRIBUTORS:
        if username not in urls:
            continue
        user = _fetch_user(urls[username])
        if not user.get('photo') or user['photo']['privacy'] != 'Public':
            # skip users who don't have a public photo
            continue
        # NOTE(review): assert is stripped under -O; consider a real check.
        assert user['is_public']
        users.append(user)
    return users
|
from flask import Blueprint, request, jsonify, session, flash
from app.models import User, Post, Comment, Vote
from app.db import get_db
# Show error messages.
import sys
# Import decorator function to protect routes.
from app.utils.auth import login_required
bp = Blueprint('api', __name__, url_prefix='/api')
# Create a new user.
@bp.route('/users', methods=['POST'])
def signup():
    """Create a new user from the posted JSON and start a session for it.

    Fix: the bare 'except:' (which also traps SystemExit/KeyboardInterrupt)
    is narrowed to 'except Exception'.
    """
    # Capture the request data sent from client, and get session for DB communication.
    data = request.get_json()
    db = get_db()
    try:
        # Attempt to create new user from the posted dictionary.
        newUser = User(
            username = data['username'],
            email = data['email'],
            password = data['password']
        )
        print(newUser)
        # Save to database.
        db.add(newUser)
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed insert so the db session stays usable.
        db.rollback()
        # Send error message back along with server error code.
        flash('Something went wrong. Refresh and try again.', 'danger')
        return jsonify('Something went wrong. Refresh and try again.'), 500
    # Reset any existing session and mark the new user as logged in.
    session.clear()
    session['user_id'] = newUser.id
    session['loggedIn'] = True
    flash('Successfully created new user.', 'info')
    return jsonify('Successfully created new user.')
# Log an existing user in.
@bp.route('/users/login', methods=['POST'])
def login():
    """Authenticate a user by email and password and start a session.

    Fix: bare 'except:' narrowed to 'except Exception'; '== False'
    comparison replaced with 'not'.
    """
    data = request.get_json()
    db = get_db()
    # Look the user up by email; "not found" is reported exactly like a bad
    # password so the response does not reveal which emails exist.
    try:
        user = db.query(User).filter(User.email == data['email']).one()
    except Exception:
        print(sys.exc_info()[0])
        flash('Incorrect credentials. Try again.', 'danger')
        return jsonify('Incorrect Credentials'), 400
    # Check the supplied password against this user's stored password.
    if not user.verify_password(data['password']):
        flash('Incorrect credentials. Try again.', 'danger')
        return jsonify('Incorrect Credentials'), 400
    # If successful, clear the current session and mark this user as logged in.
    session.clear()
    session['user_id'] = user.id
    session['loggedIn'] = True
    flash('Successfully signed in!', 'info')
    return jsonify('Successfully signed in')
# Log a user out.
@bp.route('/users/logout', methods=['POST'])
def logout():
    """End the current session and respond with 204 No Content."""
    # Drop all session state, then flash a notice for the next page render.
    session.clear()
    flash('You have been logged out.', 'warning')
    return '', 204
# Post a comment.
@bp.route('/comments', methods=['POST'])
@login_required
def comment():
    """Create a comment on a post for the logged-in user.

    Fix: bare 'except:' narrowed to 'except Exception'.
    """
    data = request.get_json()
    db = get_db()
    # Build the comment from the posted data and the session's user id.
    try:
        newComment = Comment(
            comment_text = data['comment_text'],
            post_id = data['post_id'],
            user_id = session.get('user_id')
        )
        db.add(newComment)
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed insert so the db session stays usable.
        db.rollback()
        flash('Failed to post new comment. Try again.', 'danger')
        return jsonify('Failed to post comment. Try again.'), 500
    flash('New comment posted!', 'info')
    return jsonify('New comment posted.')
# Upvote a post.
@bp.route('/posts/upvote', methods=['PUT'])
@login_required
def upvote():
    """Record an upvote on a post by the logged-in user.

    Fix: bare 'except:' narrowed to 'except Exception'.
    """
    data = request.get_json()
    db = get_db()
    try:
        # Create new vote object using the posted post id and the stored user id.
        newVote = Vote(
            post_id = data['post_id'],
            user_id = session.get('user_id')
        )
        db.add(newVote)
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed insert so the db session stays usable.
        db.rollback()
        return '', 500
    return '', 204
# Create a new post.
@bp.route('/posts', methods=['POST'])
@login_required
def create():
    """Create a post from the request JSON for the logged-in user.

    Fix: bare 'except:' narrowed to 'except Exception'.
    """
    data = request.get_json()
    db = get_db()
    try:
        newPost = Post(
            title = data['title'],
            post_url = data['post_url'],
            user_id = session.get('user_id')
        )
        db.add(newPost)
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed insert so the db session stays usable.
        db.rollback()
        flash('Failed to create new post. Try again.', 'danger')
        return jsonify('Failed to create new post. Try again.'), 500
    flash('New post created!', 'info')
    return jsonify('New post created!')
# Update an existing post.
@bp.route('/posts/<id>', methods=['PUT'])
@login_required
def update(id):
    """Update the title of the post identified by *id*.

    Fix: bare 'except:' narrowed to 'except Exception'.
    """
    data = request.get_json()
    db = get_db()
    try:
        # Find the matching post, then update its title.
        post = db.query(Post).filter(Post.id == id).one()
        post.title = data['title']
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed edit so the db session stays usable.
        db.rollback()
        flash('Failed to update post. Refresh and try again.', 'danger')
        return jsonify('Failed to update a post.'), 404
    flash('Post updated!', 'info')
    # NOTE(review): a 204 response discards the jsonify body; confirm whether
    # clients expect 200 with a body instead.
    return jsonify('Post updated!'), 204
# Delete an existing post.
@bp.route('/posts/<id>', methods=['DELETE'])
@login_required
def delete(id):
    """Delete the post identified by *id*.

    Fix: bare 'except:' narrowed to 'except Exception'.
    """
    db = get_db()
    try:
        # Retrieve the post by id and delete it.
        db.delete(db.query(Post).filter(Post.id == id).one())
        db.commit()
    except Exception:
        print(sys.exc_info()[0])
        # Roll back the failed delete so the db session stays usable.
        db.rollback()
        flash('Failed to delete post. Refresh and try again.', 'danger')
        return jsonify('Failed to delete a post.'), 404
    flash('Post deleted!', 'info')
    # NOTE(review): a 204 response discards the jsonify body; confirm clients.
    return jsonify('Post deleted!'), 204
from src.List import List
class Matrix2d(object):
    """A 2-D matrix of m rows by n columns backed by nested List objects.

    Fixes: a list comprehension abused for its side effects is replaced by a
    plain loop, and the row/column arithmetic in set_data uses divmod instead
    of int(i / n) / int(i % n).
    """

    def __init__(self, m, n):
        """Create an m x n matrix initialised with zeros."""
        self.data = self.__create(m, n)
        self.m = m
        self.n = n

    @staticmethod
    def __create(m, n):
        """Build the backing store: a List of m independent rows of n zeros."""
        response = List()
        row = List(0 for _ in range(n))
        # Append a fresh copy per row so rows do not share storage.
        for _ in range(m):
            response.append(row.copy())
        return response

    def set_data(self, dataValue):
        """Fill the matrix row-major from the flat sequence *dataValue*."""
        for i, value in enumerate(dataValue):
            row, col = divmod(i, self.n)
            self.data[row][col] = value

    def __str__(self):
        return type(self).__name__ + "\n"+str(self.m) + "rows " + str(self.n)+"cols " + "\n" + str(self.data.__str__())
|
import urllib.request
import urllib.parse
import re
# Search a YouTube channel and print watch URLs for every video id found.
# NOTE(review): this initial value is overwritten below from user input.
search = "animated card"
youtube_url = "https://www.youtube.com/watch?v="
youtube_search = "https://www.youtube.com/kepowob/search?"
args = input("what ya want?")
# URL-encode the query string for the channel search page.
params = urllib.parse.urlencode({'query': args})
search = f'{youtube_search}{params}'
html = urllib.request.urlopen(search)
content = html.read().decode()
# Video ids are the 11-character token following 'watch?v=' in the HTML.
video_ids = re.findall(r"watch\?v=(\S{11})", content)
print(video_ids)
for x in video_ids:
    print(f'{youtube_url}{x}')
|
import pika
import uuid
class FibonacciRPCClient(object):
    """RPC client: publishes a number to 'rpc_queue' and awaits the reply.

    Uses an exclusive callback queue plus a correlation id to pair replies
    with requests.
    """
    def __init__(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host = "localhost"))
        self.channel = self.connection.channel()
        # Exclusive, broker-named queue that receives the RPC replies.
        result = self.channel.queue_declare(exclusive = True)
        self.callback_queue = result.method.queue
        self.channel.basic_consume(self.onResponse, no_ack = True, queue = self.callback_queue)
    def onResponse(self, channel, method, props, body):
        # Accept only the reply matching our outstanding correlation id.
        if self.corr_id == props.correlation_id:
            self.response = body
    def call(self, n):
        """Send *n* to the server and block until the integer reply arrives."""
        self.response = None
        self.corr_id = str(uuid.uuid4())
        properties = pika.BasicProperties(reply_to = self.callback_queue, correlation_id = self.corr_id)
        body = str(n)
        self.channel.basic_publish(exchange = "", routing_key = "rpc_queue", properties = properties, body = body)
        while self.response is None:
            # Pump I/O on the connection until onResponse fills self.response.
            self.connection.process_data_events()
        return int(self.response)
# Demo entry point: request fibonacci(30) over RabbitMQ.
# Fix: Python-2 print statements replaced with parenthesised print calls,
# which behave the same on Python 2 and are valid Python 3.
fibonacciRPC = FibonacciRPCClient()
print(" [x] Requesting fibonacci(30).")
response = fibonacciRPC.call(30)
print(" [.] Got %r." % response)
from dataclasses import dataclass
from datetime import datetime
from teamtrak_api.data_transfer_objects.base_dto import BaseDTO
"""
Data Transfer Object representing a single comment.
Comments are found under tasks. Any user can make a comment on any task.
Attributes:
id : unique identifier
user : id representing the user who made the comment
content : content of comment, string.
creation_date : datetime object representing time of comment creation
"""
@dataclass
class CommentDTO(BaseDTO):
    """DTO for a single comment a user made on a task.

    Attributes:
        user: id of the user who made the comment.
        content: text content of the comment.

    NOTE(review): ``id`` and ``creation_date`` are passed to the constructor
    in build() but not declared here — presumably fields on BaseDTO; confirm.
    """
    user: str
    content: str

    def __post_init__(self):
        # Delegate shared post-initialisation to BaseDTO.
        super(CommentDTO, self).__post_init__()

    # Build a CommentDTO from a raw record dict and return it.
    @staticmethod
    def build(record: dict):
        return CommentDTO(
            id=record.get('id'),
            user=record.get('user'),
            content=record.get('content'),
            creation_date=record.get('creation_date')
        )
|
import configparser
import logging
import numpy as np
import os
# from envs.real_net_env import RealNetEnv
ILD_POS = 50
def write_file(path, content):
    """Write *content* to *path* as text, replacing any existing file."""
    with open(path, 'w') as out:
        out.write(content)
def output_flows(flow_rate, seed=None):
    """Return the SUMO <routes> XML describing time-varying traffic demand.

    Four flow groups are defined; groups 0/1 follow the vols_a time profile
    and groups 2/3 follow vols_b. For each 300 s interval the first
    vols_x[i] routes of each group are emitted at *flow_rate* vehicles/hour.
    Each route tuple is (from-edge, to-edge, via-edges).
    """
    if seed is not None:
        np.random.seed(seed)
    FLOW_NUM = 6
    flows = []
    flows1 = []
    # flows1.append(('-10114#1', '-10079', '10115#2 -10109 10089#3 -10116'))
    # flows1.append(('-10114#1', '-10079', '-10114#0 10108#0 10108#5 -10090#1 gneE18'))
    # flows1.append(('-10114#1', '-10079', '-10114#0 10108#0 10108#5 gneE5 gneE18'))
    # flows1.append(('-10114#1', '10076', '-10114#0 10108#0 -10067#1 gneE9 gneE18'))
    # flows1.append(('-10114#1', '10076', '-10114#0 10107 10080#0 gneE12 10102'))
    # flows1.append(('-10114#1', '10180#1', '-10114#0 10108#0 -10104 10115#5 -10090#1'))
    flows1.append(('-10114#1', '-10079', '10115#2 -10109'))
    flows1.append(('-10114#1', '-10079', '-10114#0 10108#0 gneE5'))
    flows1.append(('-10114#1', '-10079', '-10114#0 10108#0 10102'))
    flows1.append(('-10114#1', '10076', '-10114#0 10107 10102'))
    flows.append(flows1)
    flows1 = []
    # flows1.append(('10096#1', '10063', '10089#3 10091 gneE12 -10065#2'))
    # flows1.append(('10096#1', '10063', '10089#3 gneE4 -10090#1 gneE10'))
    # flows1.append(('-10095', '-10071#3', '10109 10106#3 10115#5 -10080#0'))
    # flows1.append(('-10185#1', '-10071#3', 'gneE20 gneE13 -10046#0 -10090#1'))
    # flows1.append(('-10185#1', '-10061#5', 'gneE19 -10046#5 10089#4 gneE12'))
    # flows1.append(('10197#1', '-10061#5', '10089#3 -10049 10043 10053#0'))
    flows1.append(('10096#1', '10063', '10089#3'))
    flows1.append(('-10185#1', '-10071#3', 'gneE20'))
    flows1.append(('10096#1', '10063', '10109'))
    flows1.append(('-10185#1', '-10061#5', 'gneE19'))
    flows.append(flows1)
    flows1 = []
    # flows1.append(('10052#1', '10104', '10181#1 10116 -10089#3 10109'))
    # flows1.append(('10052#1', '10104', '10181#1 -10089#4 gneE4 gneE7'))
    # flows1.append(('-10051#2', '10043', '10179 10181#1 10116 -10089#3 10109'))
    # flows1.append(('-10051#2', '10043', '10179 10181#1 -10089#4 gneE4 gneE7'))
    # flows1.append(('-10051#2', '-10110', '-10051#0 10181#1 -10089#4 gneE4 -10115#5'))
    # flows1.append(('-10051#2', '-10110', '-10051#0 10181#1 -10089#3 -10049'))
    flows1.append(('10052#1', '10104', '10181#1 -10089#3'))
    flows1.append(('-10064#9', '10104', '-10068 10102'))
    flows1.append(('-10051#2', '10043', '10181#1 gneE4'))
    flows1.append(('-10064#9', '-10110', '-10064#4 -10064#3'))
    flows.append(flows1)
    flows1 = []
    # flows1.append(('-10064#9', '-10085', '-10068 -10064#3 gneE5 10046#0'))
    # flows1.append(('-10064#9', '10085', '-10064#4 -10064#3 gneE5 10046#0'))
    # flows1.append(('-10064#9', '-10086', '-10064#4 10102 10031#1 10046#0'))
    # flows1.append(('10061#4', '-10085', '10065#2 10102 10031#1 10046#0'))
    # flows1.append(('10069#0', '10085', '10065#2 -10064#3 gneE5 10046#0'))
    # flows1.append(('-10058#0', '-10086', '10071#5 10108#5 gneE5 10046#0'))
    flows1.append(('10061#4', '-10085', '10065#2 10102'))
    flows1.append(('10071#3', '10085', '10065#2 -10064#3'))
    flows1.append(('-10070#1', '-10086', 'gneE9'))
    flows1.append(('-10063', '10085', 'gneE8'))
    flows.append(flows1)
    # Per-interval flow counts for the two demand profiles (one entry per
    # 300 s slot; profile b ramps up as profile a ramps down).
    # vols_a = [2, 3, 4, 6, 4, 2, 1, 0, 0, 0, 0]
    # vols_b = [0, 0, 0, 1, 2, 3, 5, 4, 3, 2, 1]
    vols_a = [1, 2, 4, 4, 4, 4, 2, 1, 0, 0, 0]
    vols_b = [0, 0, 0, 1, 2, 4, 4, 4, 4, 2, 1]
    times = np.arange(0, 3301, 300)
    flow_str = '  <flow id="f_%s" departPos="random_free" from="%s" to="%s" via="%s" begin="%d" end="%d" vehsPerHour="%d" type="car"/>\n'
    output = '<routes>\n'
    output += '  <vType id="car" length="5" accel="5" decel="10" speedDev="0.1"/>\n'
    for i in range(len(times) - 1):
        name = str(i)
        t_begin, t_end = times[i], times[i + 1]
        k = 0
        # Groups 0 and 1 use the vols_a profile.
        for j in [0, 1]:
            vol = vols_a[i]
            if vol > 0:
                # inds = np.random.choice(FLOW_NUM, vol, replace=False)
                inds = np.arange(vol)
                for ind in inds:
                    cur_name = name + '_' + str(k)
                    src, sink, via = flows[j][ind]
                    output += flow_str % (cur_name, src, sink, via, t_begin, t_end, flow_rate)
                    k += 1
        # Groups 2 and 3 use the vols_b profile.
        for j in [2, 3]:
            vol = vols_b[i]
            if vol > 0:
                # inds = np.random.choice(FLOW_NUM, vol, replace=False)
                inds = np.arange(vol)
                for ind in inds:
                    cur_name = name + '_' + str(k)
                    src, sink, via = flows[j][ind]
                    output += flow_str % (cur_name, src, sink, via, t_begin, t_end, flow_rate)
                    k += 1
    output += '</routes>\n'
    return output
def output_config(thread=None):
    """Return the .sumocfg XML referencing the route file for *thread*
    (the unsuffixed file name when *thread* is None)."""
    if thread is None:
        out_file = 'most.rou.xml'
    else:
        out_file = 'most_%d.rou.xml' % int(thread)
    pieces = [
        '<configuration>\n <input>\n',
        ' <net-file value="in/most.net.xml"/>\n',
        ' <route-files value="in/%s"/>\n' % out_file,
        ' <additional-files value="in/most.add.xml"/>\n',
        ' </input>\n <time>\n',
        ' <begin value="0"/>\n <end value="3600"/>\n',
        ' </time>\n</configuration>\n',
    ]
    return ''.join(pieces)
def gen_rou_file(path, flow_rate, seed=None, thread=None):
    """Write the route file and its .sumocfg and return the .sumocfg path.

    Fix: when *thread* was None the config name was still built with
    'most_%d.sumocfg' % thread, raising TypeError; it now mirrors the route
    file naming.
    """
    if thread is None:
        flow_file = 'most.rou.xml'
        cfg_name = 'most.sumocfg'
    else:
        flow_file = 'most_%d.rou.xml' % int(thread)
        cfg_name = 'most_%d.sumocfg' % int(thread)
    write_file(path + 'in/' + flow_file, output_flows(flow_rate, seed=seed))
    sumocfg_file = path + cfg_name
    write_file(sumocfg_file, output_config(thread=thread))
    return sumocfg_file
def output_ild(env, ild):
    """Return the additional-file XML declaring one lane-area detector per
    incoming ILD of every node.

    *ild* is a printf-style template with (id, lane, pos, endPos) slots.
    """
    str_adds = '<additional>\n'
    for node_name in env.node_names:
        node = env.nodes[node_name]
        for ild_name in node.ilds_in:
            # ild_name = ild:lane_name
            lane_name = ild_name[4:]
            l_len = env.sim.lane.getLength(lane_name)
            # Detector start, clamped so it fits on short lanes.
            i_pos = min(ILD_POS, l_len - 1)
            # A few specific lanes get hand-tuned detector windows; negative
            # positions are presumably measured from the lane end (SUMO
            # convention) — confirm against the network file.
            if lane_name in ['gneE4_0', 'gneE5_0']:
                str_adds += ild % (ild_name, lane_name, -63, -13)
            elif lane_name == 'gneE18_0':
                str_adds += ild % (ild_name, lane_name, -116, -66)
            elif lane_name == 'gneE19_0':
                str_adds += ild % (ild_name, lane_name, 1, 50)
            else:
                str_adds += ild % (ild_name, lane_name, -i_pos, -1)
    str_adds += '</additional>\n'
    return str_adds
if __name__ == '__main__':
    # Generate the lane-area-detector additional file for the real network.
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        level=logging.INFO)
    config = configparser.ConfigParser()
    config.read('./config/config_test_real.ini')
    base_dir = './output_result/'
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)
    # NOTE(review): the 'from envs.real_net_env import RealNetEnv' import is
    # commented out at the top of this file, so this line raises NameError
    # as-is — re-enable the import before running.
    env = RealNetEnv(config['ENV_CONFIG'], 2, base_dir, is_record=True, record_stat=True)
    # add.xml file
    ild = ' <laneAreaDetector file="ild.out" freq="1" id="%s" lane="%s" pos="%d" endPos="%d"/>\n'
    write_file('./real_net/data/in/most.add.xml', output_ild(env, ild))
    env.terminate()
|
import logging
from abc import ABCMeta, abstractmethod
class IBMError(Exception):
    """Base error type for IBM appliance operations."""

    def __init__(self, *args, **kwargs):
        # Equivalent to the explicit Exception.__init__ call it replaces.
        super(IBMError, self).__init__(*args, **kwargs)
class IBMAppliance:
    """Abstract base for IBM appliance clients: REST verbs plus fact gathering.

    Fix: create_return_object used mutable default arguments (data={},
    warnings=[]), so callers mutating a returned default would corrupt every
    later call; defaults are now None with fresh containers per call.
    """
    # NOTE(review): '__metaclass__' is Python-2 syntax; under Python 3 it is
    # inert and @abstractmethod is NOT enforced. Confirm the target
    # interpreter before switching to 'class IBMAppliance(ABC)'.
    __metaclass__ = ABCMeta

    def __init__(self, hostname, user):
        """Record connection identity and eagerly populate self.facts."""
        self.logger = logging.getLogger(__name__)
        self.logger.debug('Creating an IBMAppliance')
        self.hostname = hostname
        self.user = user
        self.facts = {}
        self.get_facts()

    @abstractmethod
    def invoke_post_files(self, description, uri, fileinfo, data, ignore_error=False):
        """
        Send multipart/form-data upload file request to the appliance.
        """
        pass

    @abstractmethod
    def invoke_get_file(self, description, uri, filename, ignore_error=False):
        """
        Invoke a GET request and download the response data to a file
        """

    @abstractmethod
    def invoke_put(self, description, uri, data, ignore_error=False):
        """
        Send a PUT request to the LMI.
        """
        pass

    @abstractmethod
    def invoke_post(self, description, uri, data, ignore_error=False):
        """
        Send a POST request to the LMI.
        """
        pass

    @abstractmethod
    def invoke_get(self, description, uri, ignore_error=False):
        """
        Send a GET request to the LMI.
        """
        pass

    @abstractmethod
    def invoke_delete(self, description, uri, ignore_error=False):
        """
        Send a DELETE request to the LMI.
        """
        pass

    @abstractmethod
    def get_facts(self):
        """
        Extracts standard facts from the appliance
        Store it in JSON variable called "facts"
        """
        pass

    def create_return_object(self, rc=0, data=None, warnings=None, changed=False):
        """Build the standard result dict; fresh containers are created per
        call so callers can safely mutate the result."""
        return {
            'rc': rc,
            'data': {} if data is None else data,
            'changed': changed,
            'warnings': [] if warnings is None else warnings,
        }
|
from django.urls import path
from . import views
# Route table for the COVID-data views; 'name' values are used for reverse().
urlpatterns = [
    path('', views.index, name='pagina_inicial'),
    # Data-import endpoints.
    path('importar/importar_dados', views.importar_dados, name='importar_dados'),
    path('importar/importar_municipios_rs', views.importar_municipios_rs, name='importar_municipios_rs'),
    # Country-level tables and charts (slug identifies the country).
    path('dados/pais/<slug:pais>/tabelas', views.tabelas_pais, name='pais_tabelas'),
    path('dados/pais/<slug:pais>', views.graficos_pais, name='pais_graficos'),
    # State- and municipality-level views.
    path('dados/estados', views.estados, name='estados'),
    path('dados/estado/<slug:estado>', views.estado, name='estado'),
    path('dados/municipios', views.municipios, name='municipios'),
    path('dados/municipio/<slug:municipio>', views.municipio, name='municipio'),
    #path('dados/estado/<slug:estado>/tabelas', views.tabelas_estado, name='estado_tabelas'),
    path('dados/', views.dados, name='dados'),
    # path('pais/<slug:pais>/estado/<slug:estado>', views.estado, name='estado'),
    # path('pais/<slug:pais>/estado/<slug:estado>/municipio/<int:municipio>', views.municipío, name='municipio'),
    #path('<int:year>/<int:month>/<int:day>/<slug:post>/', views.post_detail, name='post_detail'),
]
from django.core.management.base import BaseCommand
from ... import utils
class Command(BaseCommand):
    """Management command that synchronizes SAML2 identity providers."""
    help = 'Synchronize SAML2 identity providers.'

    def handle(self, *args, **options):
        # Delegate the work, then report success on the command's stdout.
        utils.sync_providers()
        self.stdout.write('SAML2 providers have been successfully synchronized.')
|
from sub.sipnner import spinner
from sub.mcFont import McFont
from pathlib import Path
import sys
def convert(fontJsonPath:str,genTTf:bool=True,genWOFF:bool=False,name:str='BitmapMc'):
    """Generate TTF and/or WOFF font files from a Minecraft bitmap-font JSON.

    Args:
        fontJsonPath: path like assets/<namespace>/font/<fontname>.json.
        genTTf: generate a .ttf file when True.
        genWOFF: generate a .woff file when True.
        name: family name for the generated font.
    """
    if not (genTTf or genWOFF): return
    mcFont = McFont(name)
    jsonPath = Path(fontJsonPath)
    # The assets root is three levels above .../assets/<ns>/font/<file>.json.
    assetsPath = jsonPath.parent.parent.parent
    mcFont.generate(jsonPath,assetsPath)
    if genTTf:
        # "Generating ttf" (user-facing message kept in Japanese).
        print('ttfを生成しています')
        spinner(mcFont.exportTTF)()
    if genWOFF:
        # "Generating woff" (user-facing message kept in Japanese).
        print('woffを生成しています')
        spinner(mcFont.exportWoff)()
if __name__ == '__main__':
    args = sys.argv
    if len(args) > 1:
        # Drag-and-drop / CLI usage: the first argument is the font JSON path.
        jsonPath = args[1]
        print(jsonPath)
        try:
            convert(jsonPath)
        except Exception as e:
            print(e)
            print('変換時にエラーが発生しました')
            print('閉じるにはaを入力してください')
            # Keep the console window open until the user types 'a'.
            while input() != 'a':
                pass
            raise e
        print('変換が正常に終了しました')
        print('閉じるにはaを入力してください')
        while input() != 'a':
            pass
    ## When running directly from a script (no CLI argument given):
    # <path> looks like: C:.../assets/<namespace>/font/<fontname>.json
    convert(r'.\assets\minecraft\font\default.json',genTTf=True,genWOFF=False,name='BitmapMc')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import keystoneclient.auth.identity.v3
import keystoneclient.session
import cinderclient.client
import local_settings
# Authenticate against Keystone v3 using credentials from local_settings.
auth = keystoneclient.auth.identity.v3.Password(auth_url=local_settings.auth_url_v3,
                                                username=local_settings.username,
                                                password=local_settings.password,
                                                user_domain_name='Default',
                                                project_domain_name='Default',
                                                project_name=local_settings.tenant_name)
session = keystoneclient.session.Session(auth=auth)
# Cinder v2 client bound to the authenticated session.
cinder = cinderclient.client.Client('2', session=session)
# Start a backup of the given volume into the 'volumebackups' container.
q = cinder.backups.create('e25fa8ac-4db7-4a08-805f-d10a6abf7b20',
                          container='volumebackups',
                          name='vol-backup-1',
                          description='fdsfsdfsdfs')
# NOTE: Python-2 print statement — this script targets Python 2.
print type(q), dir(q)
|
# static analysis: ignore
from __future__ import absolute_import, division, print_function, unicode_literals
from qcore.asserts import assert_eq, assert_in, assert_not_in, assert_is
from .error_code import ErrorCode
from .stacked_scopes import ScopeType, StackedScopes, _uniq_chain
from .test_name_check_visitor import TestNameCheckVisitorBase
from .test_node_visitor import assert_fails, assert_passes, skip_before
from .value import (
DictIncompleteValue,
KnownValue,
MultiValuedValue,
ReferencingValue,
TypedValue,
UNINITIALIZED_VALUE,
UNRESOLVED_VALUE,
)
# just used for its __dict__
class Module(object):
    """Stand-in module namespace; only its __dict__ is consumed by the
    StackedScopes tests below."""
    foo = 1
    bar = None
class TestStackedScopes(object):
    def setup(self):
        # Fresh scope stack seeded from Module's namespace for every test.
        self.scope = StackedScopes(Module)
    def test_scope_type(self):
        """Entering a scope changes scope_type(); leaving restores it."""
        assert_eq(ScopeType.module_scope, self.scope.scope_type())
        with self.scope.add_scope(ScopeType.function_scope, scope_node=None):
            assert_eq(ScopeType.function_scope, self.scope.scope_type())
        assert_eq(ScopeType.module_scope, self.scope.scope_type())
    def test_current_and_module_scope(self):
        """module_scope() stays pinned while current_scope() follows pushes."""
        assert_in("foo", self.scope.current_scope())
        assert_in("foo", self.scope.module_scope())
        with self.scope.add_scope(ScopeType.function_scope, scope_node=None):
            # Inside a function scope, module names are not in current_scope.
            assert_not_in("foo", self.scope.current_scope())
            assert_in("foo", self.scope.module_scope())
        assert_in("foo", self.scope.current_scope())
        assert_in("foo", self.scope.module_scope())
    def test_get(self):
        """get() resolves through nested scopes; class scopes do not nest."""
        assert_eq(KnownValue(1), self.scope.get("foo", None, None))
        with self.scope.add_scope(ScopeType.module_scope, scope_node=None):
            # An inner scope shadows the outer binding...
            self.scope.set("foo", KnownValue(2), None, None)
            assert_eq(KnownValue(2), self.scope.get("foo", None, None))
        # ...and the outer binding reappears once the scope is popped.
        assert_eq(KnownValue(1), self.scope.get("foo", None, None))
        assert_is(UNINITIALIZED_VALUE, self.scope.get("doesnt_exist", None, None))
        # outer class scopes aren't used
        with self.scope.add_scope(ScopeType.class_scope, scope_node=None):
            self.scope.set("cls1", KnownValue(1), None, None)
            assert_eq(KnownValue(1), self.scope.get("cls1", None, None))
            with self.scope.add_scope(ScopeType.class_scope, scope_node=None):
                self.scope.set("cls2", KnownValue(1), None, None)
                assert_eq(KnownValue(1), self.scope.get("cls2", None, None))
                # The enclosing class scope's name is invisible here.
                assert_is(UNINITIALIZED_VALUE, self.scope.get("cls1", None, None))
            assert_eq(KnownValue(1), self.scope.get("cls1", None, None))
def test_set(self):
with self.scope.add_scope(ScopeType.module_scope, scope_node=None):
self.scope.set("multivalue", KnownValue(1), None, None)
assert_eq(KnownValue(1), self.scope.get("multivalue", None, None))
self.scope.set("multivalue", KnownValue(2), None, None)
assert_eq(
MultiValuedValue([KnownValue(1), KnownValue(2)]),
self.scope.get("multivalue", None, None),
)
self.scope.set("multivalue", KnownValue(3), None, None)
assert_eq(
MultiValuedValue([KnownValue(1), KnownValue(2), KnownValue(3)]),
self.scope.get("multivalue", None, None),
)
# if the values set are the same, don't make a MultiValuedValue
self.scope.set("same", KnownValue(1), None, None)
assert_eq(KnownValue(1), self.scope.get("same", None, None))
self.scope.set("same", KnownValue(1), None, None)
assert_eq(KnownValue(1), self.scope.get("same", None, None))
# even if they are UNRESOLVED_VALUE
self.scope.set("unresolved", UNRESOLVED_VALUE, None, None)
assert_is(UNRESOLVED_VALUE, self.scope.get("unresolved", None, None))
self.scope.set("unresolved", UNRESOLVED_VALUE, None, None)
assert_is(UNRESOLVED_VALUE, self.scope.get("unresolved", None, None))
def test_referencing_value(self):
with self.scope.add_scope(ScopeType.module_scope, scope_node=None):
outer = self.scope.current_scope()
self.scope.set("reference", KnownValue(1), None, None)
multivalue = MultiValuedValue([KnownValue(1), KnownValue(2)])
with self.scope.add_scope(ScopeType.module_scope, scope_node=None):
val = ReferencingValue(outer, "reference")
self.scope.set("reference", val, None, None)
assert_eq(KnownValue(1), self.scope.get("reference", None, None))
self.scope.set("reference", KnownValue(2), None, None)
assert_eq(multivalue, self.scope.get("reference", None, None))
assert_eq(multivalue, self.scope.get("reference", None, None))
self.scope.set(
"nonexistent",
ReferencingValue(self.scope.module_scope(), "nonexistent"),
None,
None,
)
assert_is(UNINITIALIZED_VALUE, self.scope.get("nonexistent", None, None))
self.scope.set("is_none", KnownValue(None), None, None)
with self.scope.add_scope(ScopeType.function_scope, scope_node=None):
self.scope.set(
"is_none", ReferencingValue(outer, "is_none"), None, None
)
assert_is(UNRESOLVED_VALUE, self.scope.get("is_none", None, None))
def test_typed_value_set(self):
self.scope.set("value", TypedValue(dict), None, None)
assert_eq(TypedValue(dict), self.scope.get("value", None, None))
self.scope.set(
"value", DictIncompleteValue([]), None, None
) # subclass of TypedValue
assert_eq(DictIncompleteValue([]), self.scope.get("value", None, None))
class TestScoping(TestNameCheckVisitorBase):
    """Tests for basic name resolution within function bodies."""

    @assert_passes()
    def test_multiple_assignment(self):
        def capybara():
            # the inferred value tracks the most recent assignment
            x = 3
            assert_is_value(x, KnownValue(3))
            x = 4
            assert_is_value(x, KnownValue(4))

    @assert_fails(ErrorCode.undefined_name)
    def test_undefined_name(self):
        def capybara():
            return x

    @assert_fails(ErrorCode.undefined_name)
    def test_read_before_write(self):
        def capybara():
            print(x)
            x = 3

    @assert_passes()
    def test_function_argument(self):
        def capybara(x):
            # unannotated parameters start out unresolved
            assert_is_value(x, UNRESOLVED_VALUE)
            x = 3
            assert_is_value(x, KnownValue(3))

    @assert_passes()
    def test_default_arg(self):
        def capybara(x=3):
            # the default value is unioned with the unknown caller-provided value
            assert_is_value(x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(3)]))

    @assert_passes()
    def test_args_kwargs(self):
        def capybara(*args, **kwargs):
            assert_is_value(args, TypedValue(tuple))
            assert_is_value(kwargs, TypedValue(dict))

    @assert_passes()
    def test_internal_imports(self):
        # nested import froms are tricky because there is no separate AST node for each name, so we
        # need to use a special trick to represent the distinct definition nodes for each name
        import collections

        def capybara():
            from collections import Counter, defaultdict

            assert_is_value(Counter, KnownValue(collections.Counter))
            assert_is_value(defaultdict, KnownValue(collections.defaultdict))

    def test_nested_star_import(self):
        try:
            self.assert_passes(
                """
                import collections

                def capybara():
                    from collections import *
                    assert_is_value(Counter, KnownValue(collections.Counter))
                    assert_is_value(defaultdict, KnownValue(collections.defaultdict))
                """
            )
        except SyntaxError:
            pass  # ignore if we're in a Python version where this raises an error

    @skip_before((3, 0))
    def test_return_annotation(self):
        # the annotation must resolve `socket` to the module, not the method
        self.assert_passes(
            """
            import socket

            class Capybara:
                def socket(self) -> socket.error:
                    return socket.error()
            """
        )
class TestIf(TestNameCheckVisitorBase):
    """Value inference across if/else branches."""

    @assert_passes()
    def test_basic(self):
        def capybara(cond):
            if cond:
                x = 3
                assert_is_value(x, KnownValue(3))
            else:
                x = 4
                assert_is_value(x, KnownValue(4))
            # after the branches, the value is the union of both assignments
            assert_is_value(x, MultiValuedValue([KnownValue(3), KnownValue(4)]))

    @assert_passes()
    def test_nesting(self):
        def capybara(cond1, cond2):
            if cond1:
                x = 3
                assert_is_value(x, KnownValue(3))
            else:
                if cond2:
                    x = 4
                    assert_is_value(x, KnownValue(4))
                else:
                    x = 5
                    assert_is_value(x, KnownValue(5))
                # union of the inner branches only
                assert_is_value(x, MultiValuedValue([KnownValue(4), KnownValue(5)]))
            # union across all three paths
            assert_is_value(
                x, MultiValuedValue([KnownValue(3), KnownValue(4), KnownValue(5)])
            )
class TestTry(TestNameCheckVisitorBase):
    """Value inference across try/except/else/finally constructs."""

    @assert_passes(settings={ErrorCode.possibly_undefined_name: False})
    def test_except(self):
        def capybara():
            try:
                x = 3
                assert_is_value(x, KnownValue(3))
            except NameError as e:
                # the bound exception gets the handler's type
                assert_is_value(e, TypedValue(NameError))
                x = 4
                assert_is_value(x, KnownValue(4))
            except (RuntimeError, ValueError) as e:
                # a tuple handler yields the union of the listed types
                assert_is_value(
                    e,
                    MultiValuedValue(
                        [TypedValue(RuntimeError), TypedValue(ValueError)]
                    ),
                )
            # x may be unset on the second handler's path, hence UNRESOLVED_VALUE
            assert_is_value(
                x, MultiValuedValue([KnownValue(3), KnownValue(4), UNRESOLVED_VALUE])
            )

    @assert_passes()
    def test_set_before_try(self):
        def capybara():
            x = 1
            try:
                x = 2
                assert_is_value(x, KnownValue(2))
            except NameError:
                # inside a handler either the pre-try or the in-try value may hold
                assert_is_value(x, MultiValuedValue([KnownValue(1), KnownValue(2)]))
                x = 3
                assert_is_value(x, KnownValue(3))
            except RuntimeError:
                assert_is_value(x, MultiValuedValue([KnownValue(1), KnownValue(2)]))
                x = 4
                assert_is_value(x, KnownValue(4))
            assert_is_value(
                x, MultiValuedValue([KnownValue(2), KnownValue(3), KnownValue(4)])
            )

    @assert_passes()
    def test_multiple_except(self):
        def capybara():
            try:
                x = 3
                assert_is_value(x, KnownValue(3))
            except NameError:
                x = 4
                assert_is_value(x, KnownValue(4))
            except IOError:
                x = 5
                assert_is_value(x, KnownValue(5))
            assert_is_value(
                x, MultiValuedValue([KnownValue(3), KnownValue(4), KnownValue(5)])
            )

    @assert_passes()
    def test_else(self):
        def capybara():
            try:
                x = 3
                assert_is_value(x, KnownValue(3))
            except NameError:
                x = 4
                assert_is_value(x, KnownValue(4))
            else:
                # the else branch runs only when no exception was raised
                x = 5
                assert_is_value(x, KnownValue(5))
            assert_is_value(x, MultiValuedValue([KnownValue(5), KnownValue(4)]))

    @assert_passes()
    def test_finally(self):
        def capybara():
            try:
                x = 3
                assert_is_value(x, KnownValue(3))
            finally:
                # finally always runs, so its assignment wins unconditionally
                x = 4
                assert_is_value(x, KnownValue(4))
            assert_is_value(x, KnownValue(4))

    @assert_passes(settings={ErrorCode.use_fstrings: False})
    def test_finally_plus_if(self):
        # here an approach that simply ignores the assignments in the try block while examining the
        # finally block would fail
        def capybara():
            x = 0
            assert_is_value(x, KnownValue(0))
            try:
                x = 1
                assert_is_value(x, KnownValue(1))
            finally:
                print("%d" % x)  # x is a number

    @assert_fails(ErrorCode.bad_except_handler)
    def test_bad_except_handler(self):
        def capybara():
            try:
                x = 1
            except 42 as fortytwo:
                print(fortytwo)
class TestLoops(TestNameCheckVisitorBase):
    """Value inference across for/while loops, including loop-carried values."""

    @assert_passes(settings={ErrorCode.possibly_undefined_name: False})
    def test_conditional_in_loop(self):
        def capybara():
            for i in range(2):
                if i == 1:
                    # on a later iteration x may carry the previous iteration's value
                    print(x)
                    assert_is_value(
                        x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(3)])
                    )
                else:
                    x = 3
                    assert_is_value(x, KnownValue(3))
            assert_is_value(x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(3)]))

    @assert_passes()
    def test_second_assignment_in_loop(self):
        def capybara():
            hide_until = None
            for _ in range(3):
                # the loop-carried value unions the initial None with the in-loop value
                assert_is_value(
                    hide_until, MultiValuedValue([KnownValue(None), KnownValue((1, 2))])
                )
                if hide_until:
                    print(hide_until[1])
                hide_until = (1, 2)

    @assert_passes()
    def test_for_else(self):
        def capybara():
            for _ in range(2):
                x = 3
                assert_is_value(x, KnownValue(3))
            else:
                x = 4
                assert_is_value(x, KnownValue(4))
            assert_is_value(x, MultiValuedValue([KnownValue(3), KnownValue(4)]))

    @assert_passes()
    def test_for_always_entered(self):
        def capybara():
            x = 3
            assert_is_value(x, KnownValue(3))
            # a loop over a non-empty literal is known to run at least once
            for _ in [0, 1]:
                x = 4
                assert_is_value(x, KnownValue(4))
            assert_is_value(x, KnownValue(4))

    @assert_passes()
    def test_range_always_entered(self):
        from six.moves import range

        def capybara():
            for i in range(2):
                assert_is_value(i, TypedValue(int))
            assert_is_value(i, TypedValue(int))

    @assert_passes(settings={ErrorCode.possibly_undefined_name: False})
    def test_use_after_for(self):
        def capybara(x):
            # range(x) may be empty, so y may be unset after the loop
            for _ in range(x):
                y = 4
                break
            assert_is_value(y, MultiValuedValue([KnownValue(4), UNRESOLVED_VALUE]))

    @assert_passes(settings={ErrorCode.possibly_undefined_name: False})
    def test_use_after_for_conditional(self):
        def capybara(x):
            for _ in range(2):
                if x > 2:
                    y = 4
                    break
            assert_is_value(y, MultiValuedValue([KnownValue(4), UNRESOLVED_VALUE]))

    @assert_passes(settings={ErrorCode.possibly_undefined_name: False})
    def test_while(self):
        def capybara():
            while bool():
                x = 3
                assert_is_value(x, KnownValue(3))
            assert_is_value(x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(3)]))

    @assert_passes()
    def test_while_always_entered(self):
        def capybara():
            # `while True` is known to enter the body
            while True:
                x = 3
                assert_is_value(x, KnownValue(3))
                break
            assert_is_value(x, KnownValue(3))

    @assert_passes()
    def test_while_else(self):
        def capybara():
            while bool():
                x = 3
                assert_is_value(x, KnownValue(3))
            else:
                x = 4
                assert_is_value(x, KnownValue(4))
            assert_is_value(x, MultiValuedValue([KnownValue(3), KnownValue(4)]))

    @assert_passes()
    def test_recursive_func_in_loop(self):
        # a function defined inside a loop may recurse without confusing the analysis
        def capybara(xs):
            for x in xs:
                def do_something(y):
                    if x:
                        do_something(y)

                do_something(x)
class TestUnusedVariable(TestNameCheckVisitorBase):
    """Tests for detection (and autofix) of assigned-but-never-read locals."""

    @assert_passes()
    def test_used(self):
        def capybara(condition):
            y = 3
            print(y)
            z = 3

            # a read from a nested function counts as a use
            def nested():
                print(z)

            x = 4
            if condition:
                print(x)

    @assert_fails(ErrorCode.unused_variable)
    def test_unused(self):
        def capybara():
            y = 3

    def test_replacement(self):
        # the autofix deletes the dead assignment
        self.assert_is_changed(
            """
            def capybara():
                y = 3
                return 3
            """,
            """
            def capybara():
                return 3
            """,
        )

    @assert_fails(ErrorCode.unused_variable)
    def test_unused_then_used(self):
        def capybara():
            # the first assignment is dead; only the second is read
            y = 3
            y = 4
            return y

    @assert_fails(ErrorCode.unused_variable)
    def test_unused_in_if(self):
        def capybara(condition):
            if condition:
                x = 3
            x = 4
            return x

    @assert_passes()
    def test_while_loop(self):
        def capybara(condition):
            # loop-carried reassignments are not unused
            rlist = condition()
            while rlist:
                rlist = condition()

            num_items = 0
            while num_items < 10:
                if condition:
                    num_items += 1

    @assert_passes(settings={ErrorCode.use_fstrings: False})
    def test_try_finally(self):
        def func():
            return 1

        def capybara():
            # the initial assignment is read in the finally block on the error path
            x = 0
            try:
                x = func()
            finally:
                print("%d" % x)  # x is a number

    @assert_passes()
    def test_for_may_not_run(self):
        def capybara(iterable):
            # this is not unused, because iterable may be empty
            x = 0
            for x in iterable:
                print(x)
                break
            print(x)
class TestUnusedVariableComprehension(TestNameCheckVisitorBase):
    """Unused-variable detection for comprehension target names."""

    @assert_fails(ErrorCode.unused_variable)
    def test_single_unused_name(self):
        def capybara():
            return [None for i in range(10)]

    def test_replacement(self):
        # the autofix renames the unused target to "_"
        self.assert_is_changed(
            """
            def capybara():
                return [None for i in range(10)]
            """,
            """
            def capybara():
                return [None for _ in range(10)]
            """,
        )

    @assert_passes()
    def test_used_in_listcomp(self):
        def capybara():
            return [i for i in range(10)]

    @assert_fails(ErrorCode.unused_variable)
    def test_both_unused(self):
        def capybara(pairs):
            return [None for a, b in pairs]

    @assert_passes()
    def test_one_used(self):
        def capybara(pairs):
            # this is OK; in real code the name of "b" might serve as useful documentation about
            # what is in "pairs"
            return [a for a, b in pairs]
class TestUnusedVariableUnpacking(TestNameCheckVisitorBase):
    """Unused-variable detection for tuple-unpacking assignments."""

    @assert_fails(ErrorCode.unused_variable)
    def test_unused_in_yield(self):
        from asynq import asynq, result

        @asynq()
        def kerodon(i):
            return i

        @asynq()
        def capybara():
            # b is never read after the unpacking yield
            a, b = yield kerodon.asynq(1), kerodon.asynq(2)
            result(a)

    @assert_passes()
    def test_async_returns_pair(self):
        from asynq import asynq, result

        @asynq()
        def returns_pair():
            return 1, 2

        @asynq()
        def capybara():
            a, b = yield returns_pair.asynq()
            result(a)

    @assert_fails(ErrorCode.unused_variable)
    def test_all_unused(self):
        def capybara(pair):
            a, b = pair

    @assert_passes()
    def test_some_used(self):
        def capybara(pair):
            a, b = pair
            return a

    @assert_fails(ErrorCode.unused_variable)
    def test_multiple_assignment(self):
        def capybara(pair):
            # a and b are unused even though c (same value) is returned
            c = a, b = pair
            return c

    @assert_passes()
    def test_used_in_multiple_assignment(self):
        def capybara(pair):
            a, b = c, d = pair
            return a + d

    @assert_passes()
    def test_nested_unpack(self):
        def capybara(obj):
            (a, b), c = obj
            return c

    @skip_before((3, 6))
    def test_used_in_annassign(self):
        self.assert_passes(
            """
            def capybara(condition):
                x: int
                if condition:
                    x = 1
                else:
                    x = 2
                return x
            """
        )
class TestLeavesScope(TestNameCheckVisitorBase):
    """Tests for branches that always leave the scope (return/raise/assert False)."""

    @assert_passes()
    def test_leaves_scope(self):
        def capybara(cond):
            if cond:
                # this branch cannot fall through, so x is always set below
                return
            else:
                x = 3
            print(x)

    @assert_passes()
    def test_try_always_leaves_scope(self):
        def capybara(cond):
            try:
                x = 3
            except ValueError:
                # every path through this handler raises or returns
                if cond:
                    raise
                else:
                    return None
            print(x)

    @assert_fails(ErrorCode.possibly_undefined_name)
    def test_try_may_leave_scope(self):
        def capybara(cond):
            try:
                x = 3
            except ValueError:
                # the `pass` branch falls through with x unset
                if cond:
                    pass
                else:
                    return None
            print(x)

    @assert_passes()
    def test_assert_false(self):
        def capybara(cond):
            if cond:
                assert False
            else:
                x = 3
            print(x)

    @assert_passes()
    def test_after_assert_false(self):
        def capybara(cond):
            assert False
            if cond:
                x = True
            else:
                # For some reason in Python 2.7, False gets inferred as UNRESOLVED_VALUE
                # after the assert False, but True and None still work.
                x = None
            y = None
            assert_is_value(y, KnownValue(None))
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(None)]))

    @assert_passes()
    def test_elif_assert_false(self):
        def capybara(cond):
            if cond == 1:
                x = 3
            elif cond == 2:
                x = 4
            else:
                assert 0
            print(x)

    @skip_before((3, 5))
    def test_visit_assert_message(self):
        # the assert message expression must also be visited (and narrow types)
        self.assert_passes(
            """
            from typing import Union

            def needs_int(x: int) -> None:
                pass

            def capybara(x: Union[int, str]) -> None:
                assert_is_value(x, MultiValuedValue([TypedValue(int), TypedValue(str)]))
                assert isinstance(x, str), needs_int(x)
                assert_is_value(x, TypedValue(str))
            """
        )

    @assert_passes()
    def test_no_cross_function_propagation(self):
        def capybara(cond):
            if cond == 1:
                x = 3
            else:
                pass
            return x  # static analysis: ignore[possibly_undefined_name]

        def kerodon():
            # make sure we don't propagate the UNINITIALIZED_VALUE from
            # inside capybara() to here
            y = capybara(2)
            print(y)
class TestConstraints(TestNameCheckVisitorBase):
    """Tests for type/value narrowing from asserts, conditions, and isinstance."""

    @assert_passes()
    def test_assert_truthy(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            # asserting truthiness removes the falsy member from the union
            assert y
            assert_is_value(y, KnownValue(True))

    @assert_passes()
    def test_assert_falsy(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            assert not y
            assert_is_value(y, KnownValue(False))

    @assert_passes()
    def test_no_constraints_from_branches(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            if x:
                assert_is_value(
                    y, MultiValuedValue([KnownValue(True), KnownValue(False)])
                )
                assert y
                assert_is_value(y, KnownValue(True))
            # Constraints do not survive past the if block.
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))

    @assert_passes()
    def test_if(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            if y:
                assert_is_value(y, KnownValue(True))
            else:
                assert_is_value(y, KnownValue(False))
            # narrowing also applies inside a conditional expression
            assert_is_value(y, KnownValue(True)) if y else assert_is_value(
                y, KnownValue(False)
            )

    @assert_passes()
    def test_isinstance(self):
        class A(object):
            pass

        class B(A):
            pass

        class C(A):
            pass

        def capybara(x):
            assert_is_value(x, UNRESOLVED_VALUE)
            if isinstance(x, int):
                assert_is_value(x, TypedValue(int))
            else:
                assert_is_value(x, UNRESOLVED_VALUE)
            if isinstance(x, A):
                assert_is_value(x, TypedValue(A))
                if isinstance(x, B):
                    assert_is_value(x, TypedValue(B))
                    if isinstance(x, C):
                        # Incompatible constraints result in UNRESOLVED_VALUE.
                        assert_is_value(x, UNRESOLVED_VALUE)
            if isinstance(x, B):
                assert_is_value(x, TypedValue(B))
                if isinstance(x, A):
                    # Less precise constraints are ignored.
                    assert_is_value(x, TypedValue(B))
            x = B()
            assert_is_value(x, TypedValue(B))
            if isinstance(x, A):
                # Don't widen the type to A.
                assert_is_value(x, TypedValue(B))

        def kerodon(cond1, cond2, val):
            if cond1:
                x = int(val)
            elif cond2:
                x = str(val)
            else:
                x = list(val)
            assert_is_value(
                x,
                MultiValuedValue([TypedValue(int), TypedValue(str), TypedValue(list)]),
            )
            # a tuple of types narrows to the union of the matching members
            if isinstance(x, (int, str)):
                assert_is_value(x, MultiValuedValue([TypedValue(int), TypedValue(str)]))
            else:
                assert_is_value(x, TypedValue(list))
            assert_is_value(
                x,
                MultiValuedValue([TypedValue(int), TypedValue(str), TypedValue(list)]),
            )
            # an `or` of isinstance checks behaves like the tuple form
            if isinstance(x, int) or isinstance(x, str):
                assert_is_value(x, MultiValuedValue([TypedValue(int), TypedValue(str)]))
            else:
                assert_is_value(x, TypedValue(list))

        def paca(cond1, cond2):
            if cond1:
                x = True
            elif cond2:
                x = False
            else:
                x = None
            if (x is not True and x is not False) or (x is True):
                assert_is_value(
                    x, MultiValuedValue([KnownValue(None), KnownValue(True)])
                )
            else:
                assert_is_value(x, KnownValue(False))

    @assert_passes()
    def test_qcore_asserts(self):
        from qcore.asserts import assert_is, assert_is_not

        def capybara(cond):
            if cond:
                x = True
            else:
                x = False
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            # qcore's assert_is narrows like `assert x is True`
            assert_is(x, True)
            assert_is_value(x, KnownValue(True))

        def capybara(cond):
            if cond:
                x = True
            else:
                x = False
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            assert_is_not(x, True)
            assert_is_value(x, KnownValue(False))

    @assert_passes()
    def test_is_or_is_not(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            if y is True:
                assert_is_value(y, KnownValue(True))
            else:
                assert_is_value(y, KnownValue(False))
            if y is not True:
                assert_is_value(y, KnownValue(False))
            else:
                assert_is_value(y, KnownValue(True))

    @assert_passes()
    def test_and_or(self):
        true_or_false = MultiValuedValue([KnownValue(True), KnownValue(False)])

        def capybara(x, y):
            if x is True and y is True:
                assert_is_value(x, KnownValue(True))
                assert_is_value(y, KnownValue(True))
            else:
                # no constraints from the inverse of an AND constraint
                assert_is_value(x, UNRESOLVED_VALUE)
                assert_is_value(y, UNRESOLVED_VALUE)

        def kerodon(x):
            # the right operand of `and` sees the left operand's constraint
            if x is True and assert_is_value(x, KnownValue(True)):
                pass
            # After the if it's either True (if the if branch was taken)
            # or UNRESOLVED_VALUE (if it wasn't). This is not especially
            # useful in this case, but hopefully harmless.
            assert_is_value(x, MultiValuedValue([KnownValue(True), UNRESOLVED_VALUE]))

        def paca(x):
            if x:
                y = True
                z = True
            else:
                y = False
                z = False
            if y is True or z is True:
                # an OR match does not tell us which operand held
                assert_is_value(y, true_or_false)
                assert_is_value(z, true_or_false)
            else:
                assert_is_value(y, KnownValue(False))
                assert_is_value(z, KnownValue(False))

        def pacarana(x):
            # OR constraints within the conditional
            if x:
                z = True
            else:
                z = False
            if z is True or assert_is_value(z, KnownValue(False)):
                pass

        def hutia(x):
            if x:
                y = True
            else:
                y = False
            if x and y:
                assert_is_value(y, KnownValue(True))
            else:
                assert_is_value(y, true_or_false)

        def mara(x):
            if x:
                y = True
                z = True
            else:
                y = False
                z = False
            # negating an AND gives no information about the individual operands
            if not (y is True and z is True):
                assert_is_value(y, true_or_false)
                assert_is_value(z, true_or_false)
            else:
                assert_is_value(y, KnownValue(True))
                assert_is_value(z, KnownValue(True))

        def phoberomys(cond):
            if cond:
                x = True
                y = True
                z = True
            else:
                x = False
                y = False
                z = False
            # negating an OR narrows every operand (De Morgan)
            if not ((x is False or y is False) or z is True):
                assert_is_value(x, KnownValue(True))
                assert_is_value(y, KnownValue(True))
                assert_is_value(z, KnownValue(False))
            else:
                assert_is_value(x, true_or_false)
                assert_is_value(y, true_or_false)
                assert_is_value(z, true_or_false)

        def llitun(cond):
            if cond:
                x = True
                y = True
                z = True
            else:
                x = False
                y = False
                z = False
            if x and y and z:
                assert_is_value(x, KnownValue(True))
                assert_is_value(y, KnownValue(True))
                assert_is_value(z, KnownValue(True))
            else:
                assert_is_value(x, true_or_false)
                assert_is_value(y, true_or_false)
                assert_is_value(z, true_or_false)

        def coypu(cond):
            if cond:
                x = True
                y = True
                z = True
            else:
                x = False
                y = False
                z = False
            if x or y or z:
                assert_is_value(x, true_or_false)
                assert_is_value(y, true_or_false)
                assert_is_value(z, true_or_false)
            else:
                assert_is_value(x, KnownValue(False))
                assert_is_value(y, KnownValue(False))
                assert_is_value(z, KnownValue(False))

    @assert_passes()
    def test_set_in_condition(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            if not y:
                assert_is_value(y, KnownValue(False))
                # reassignment inside the branch overrides the narrowing
                y = True
            assert_is_value(y, KnownValue(True))

    @skip_before((3, 5))
    def test_optional_becomes_non_optional(self):
        self.assert_passes(
            """
            from typing import Optional

            def capybara(x: Optional[int]) -> None:
                assert_is_value(x, MultiValuedValue([TypedValue(int), KnownValue(None)]))
                if not x:
                    x = int(0)
                assert_is_value(x, TypedValue(int))
            """
        )

    @assert_passes()
    def test_reset_on_assignment(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            if y is True:
                assert_is_value(y, KnownValue(True))
                # a new assignment discards the narrowed value
                y = bool(x)
                assert_is_value(y, TypedValue(bool))

    @skip_before((3, 5))
    def test_constraint_on_arg_type(self):
        self.assert_passes(
            """
            from typing import Optional

            def kerodon() -> Optional[int]:
                return 3

            def capybara() -> None:
                x = kerodon()
                assert_is_value(x, MultiValuedValue([TypedValue(int), KnownValue(None)]))
                if x:
                    assert_is_value(x, TypedValue(int))
                else:
                    assert_is_value(x, MultiValuedValue([TypedValue(int), KnownValue(None)]))
                if x is not None:
                    assert_is_value(x, TypedValue(int))
                else:
                    assert_is_value(x, KnownValue(None))
            """
        )

    @skip_before((3, 5))
    def test_constraint_in_nested_scope(self):
        self.assert_passes(
            """
            from typing import Optional

            def capybara(x: Optional[int], z):
                if x is None:
                    return
                assert_is_value(x, TypedValue(int))

                def nested():
                    assert_is_value(x, TypedValue(int))

                return [assert_is_value(x, TypedValue(int)) for _ in z]
            """
        )

    @assert_passes()
    def test_repeated_constraints(self):
        def capybara(cond):
            if cond:
                x = True
            else:
                x = False
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            # Tests that this completes in a reasonable time.
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            if x:
                pass
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(False)]))

    @assert_passes()
    def test_nonlocal_unresolved(self):
        def capybara(x):
            def nested():
                while True:
                    assert_is_value(x, UNRESOLVED_VALUE)
                    if x:
                        pass

            return nested()

    @assert_passes()
    def test_nonlocal_unresolved_if(self):
        def capybara(x):
            def nested():
                assert_is_value(x, UNRESOLVED_VALUE)
                if x:
                    assert_is_value(x, UNRESOLVED_VALUE)

            return nested()

    @assert_passes()
    def test_nonlocal_known(self):
        def capybara(y):
            if y:
                x = True
            else:
                x = False

            def nested():
                # the closed-over union narrows inside the nested function too
                assert_is_value(
                    x, MultiValuedValue([KnownValue(True), KnownValue(False)])
                )
                if x:
                    assert_is_value(x, KnownValue(True))
                else:
                    assert_is_value(x, KnownValue(False))

    @skip_before((3, 0))
    def test_nonlocal_known_with_write(self):
        self.assert_passes(
            """
            def capybara(y):
                if y:
                    x = True
                else:
                    x = False

                def nested():
                    nonlocal x
                    assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(False)]))
                    if x:
                        assert_is_value(x, KnownValue(True))
                    else:
                        assert_is_value(x, KnownValue(False))
                    x = True
                    assert_is_value(x, KnownValue(True))
            """
        )

    @assert_passes()
    def test_nonlocal_in_loop(self):
        def capybara(x):
            def nested(y):
                for _ in y:
                    if x:
                        pass

    @assert_passes()
    def test_nonlocal_not_unused(self):
        def _get_call_point(x, y):
            frame = x
            while y(frame):
                frame = frame.f_back
            return {"filename": frame.f_code.co_filename, "line_no": frame.f_lineno}

    @assert_passes()
    def test_conditional_assignment_to_global(self):
        _disk_size_with_low_usage = 0

        def _report_boxes_with_low_disk_usage(tier):
            global _disk_size_with_low_usage
            x = 0
            if tier.startswith("lego"):
                _disk_size_with_low_usage = 3
                x += _disk_size_with_low_usage
                _disk_size_with_low_usage = 0
            return x

    @assert_passes()
    def test_comprehension(self):
        def maybe_int(x):
            if x:
                return int(x)
            else:
                return None

        def capybara(x, y):
            assert_is_value(
                maybe_int(x), MultiValuedValue([TypedValue(int), KnownValue(None)])
            )
            lst = [maybe_int(elt) for elt in y]
            assert_is_value(
                lst,
                GenericValue(
                    list, [MultiValuedValue([TypedValue(int), KnownValue(None)])]
                ),
            )
            # the `if elt` filter narrows away the None members
            lst2 = [elt for elt in lst if elt]
            assert_is_value(lst2, GenericValue(list, [TypedValue(int)]))

    @assert_passes()
    def test_while(self):
        def capybara(x):
            if x:
                y = True
            else:
                y = False
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            while y:
                assert_is_value(y, KnownValue(True))
            assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))

    @assert_passes()
    def test_unconstrained_composite(self):
        class Foo(object):
            def has_images(self):
                pass

        class InlineEditor:
            def init(self, input, valuee, is_qtext=False):
                if is_qtext:
                    value = input
                else:
                    value = ""
                self.value = value

            def tree(self):
                assert_is_value(
                    self.value, MultiValuedValue([UNRESOLVED_VALUE, KnownValue("")])
                )
                # the isinstance constraint on self.value does not survive the
                # method call on the right of the `and`
                if isinstance(self.value, Foo) and self.value.has_images():
                    assert_is_value(self.value, TypedValue(Foo))
                else:
                    assert_is_value(
                        self.value, MultiValuedValue([UNRESOLVED_VALUE, KnownValue("")])
                    )
                assert_is_value(
                    self.value,
                    MultiValuedValue(
                        [TypedValue(Foo), UNRESOLVED_VALUE, KnownValue("")]
                    ),
                )
class TestComposite(TestNameCheckVisitorBase):
    """Tests for narrowing composite names such as self.x."""

    @assert_passes()
    def test_assignment(self):
        class Capybara(object):
            def __init__(self, x):
                self.x = x

            def eat(self):
                assert_is_value(
                    self.x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(1)])
                )
                self.x = 1
                assert_is_value(self.x, KnownValue(1))

                # rebinding `self` invalidates knowledge about self.x
                self = Capybara(2)
                assert_is_value(
                    self.x, MultiValuedValue([UNRESOLVED_VALUE, KnownValue(1)])
                )

    @assert_passes()
    def test_conditional_attribute_assign(self):
        class Capybara(object):
            def __init__(self, x):
                self.x = int(x)

            def eat(self, cond, val):
                if cond:
                    self.x = int(val)
                x = self.x
                assert_is_value(x, TypedValue(int))

    @assert_passes()
    def test_constraint(self):
        class Capybara(object):
            def __init__(self, x):
                self.x = x

            def eat(self, val):
                self.x = val
                if isinstance(self.x, int):
                    assert_is_value(self.x, TypedValue(int))

            def eat_no_assign(self):
                if isinstance(self.x, int):
                    assert_is_value(self.x, TypedValue(int))
def test_uniq_chain():
    """_uniq_chain flattens an iterable of iterables, deduplicating while preserving order."""
    assert_eq([], _uniq_chain([]))
    assert_eq(list(range(3)), _uniq_chain(range(3) for _ in range(3)))
    assert_eq([1], _uniq_chain([1, 1, 1] for _ in range(3)))
|
from unittest import TestCase
from unittest.mock import Mock, patch
from src.aws_scanner_main import AwsScannerMain
from src.data.aws_scanner_exceptions import ClientFactoryException
from tests.test_types_generator import aws_scanner_arguments, aws_task, task_report
# Shared module-level mocks: AwsScannerMain's collaborators are patched to
# return these, and the tests assert on how they were wired together.
mock_factory = Mock()
# two fake tasks produced by the (mocked) task builder
tasks = [aws_task(description="task_1"), aws_task(description="task_2")]
mock_task_builder = Mock(build_tasks=Mock(return_value=tasks))
# two fake reports produced by the (mocked) task runner
reports = [task_report(description="report_1"), task_report(description="report_2")]
mock_task_runner = Mock(run=Mock(return_value=reports))
mock_output = Mock()
class TestMain(TestCase):
    """Wiring tests for AwsScannerMain: factory -> builder -> runner -> output."""

    @patch("src.aws_scanner_main.AwsClientFactory", return_value=mock_factory)
    @patch("src.aws_scanner_main.AwsTaskBuilder", return_value=mock_task_builder)
    @patch("src.aws_scanner_main.AwsParallelTaskRunner", return_value=mock_task_runner)
    @patch("src.aws_scanner_main.AwsScannerOutput", return_value=mock_output)
    def test_main(self, output: Mock, task_runner: Mock, task_builder: Mock, factory: Mock) -> None:
        # note: @patch decorators apply bottom-up, hence the reversed parameter order
        args = aws_scanner_arguments(task="service_usage", services=["ssm"], year=2020, month=10, region="us")
        AwsScannerMain(args)
        # mfa/username presumably come from aws_scanner_arguments defaults — confirm in test_types_generator
        factory.assert_called_once_with(mfa="123456", username="bob")
        task_builder.assert_called_once_with(mock_factory, args)
        mock_task_builder.build_tasks.assert_called_once()
        task_runner.assert_called_once_with(mock_factory)
        mock_task_runner.run.assert_called_once_with(tasks)
        output.assert_called_once_with(mock_factory)
        mock_output.write.assert_called_once_with("service_usage", reports)

    @patch("src.aws_scanner_main.AwsClientFactory", side_effect=ClientFactoryException)
    def test_main_failure(self, _: Mock) -> None:
        # a factory failure should be logged and converted into exit code 1
        with self.assertRaises(SystemExit) as se:
            with self.assertLogs("AwsScannerMain", level="ERROR") as error_log:
                AwsScannerMain(aws_scanner_arguments(task="drop"))
        self.assertEqual(1, se.exception.code, f"exit code should be 1 but got {se.exception.code}")
        self.assertIn("ClientFactoryException", error_log.output[0])
|
"""
Contains methods for resolving variables stored in the SeleniumYAML engine
through a string value
The methods support reading of nested variables in dictionaries,
as well as first-level values
Basic Usage:
# TODO
"""
import re
import collections
import ast
def len_function(resolved_value=None):
    """Return len() of the resolved value; shared by the str and list tables."""
    return len(resolved_value)


# Registry of pipe functions usable in the ``${var|func(arg, kw=...)}``
# syntax, keyed by the type name of the value being transformed.  Every
# callable receives the value under the ``resolved_value`` keyword argument;
# all other parameters come from the parsed function call.
FUNCTIONS = {
    "str": {
        "split": lambda delim, maxsplit=-1, resolved_value=None: (
            resolved_value.split(delim, maxsplit)),
        "upper": lambda resolved_value=None: resolved_value.upper(),
        "lower": lambda resolved_value=None: resolved_value.lower(),
        "capitalize": lambda resolved_value=None: resolved_value.capitalize(),
        "zfill": lambda width, resolved_value=None: resolved_value.zfill(width),
        "strip": lambda resolved_value=None: resolved_value.strip(),
        "len": len_function,
        "startswith": lambda prefix, resolved_value=None: (
            resolved_value.startswith(prefix)),
        "endswith": lambda suffix, resolved_value=None: (
            resolved_value.endswith(suffix)),
    },
    "dict": {
        # Bug fix: CPython's dict.get() accepts no keyword arguments, so the
        # previous ``resolved_value.get(key, default=default)`` always raised
        # ``TypeError: get() takes no keyword arguments``.
        "get": lambda key, default=None, resolved_value=None: (
            resolved_value.get(key, default)),
        "keys": lambda resolved_value=None: resolved_value.keys(),
        "items": lambda resolved_value=None: resolved_value.items()
    },
    "list": {
        "len": len_function,
        "index": lambda key, resolved_value=None: resolved_value.index(key),
        "reverse": lambda resolved_value=None: list(reversed(resolved_value)),
        "sort": lambda resolved_value=None: sorted(resolved_value),
        "join": lambda delim, resolved_value=None: delim.join(resolved_value)
    }
}
class VariableResolver:
""" Resolver class that receives a value containing variables in the form
of ``${name__sub_var|func(param1, param2=...)...}`` and resolves all of
those variables through a provided ``context`` dictionary
"""
def __init__(self, value):
""" Creates a new instance of ``VariableResolver`` as sets the value
as a class attribute
Parameters
----------
``value`` : String/List/Dict containing variables that needs
to be resolved
"""
self.value = value
@staticmethod
def find_variables(value):
""" Returns a list of all variables in ``${...}`` in the given
value
"""
if isinstance(value, str):
r = re.compile(r"\$\{(.*?)\}")
return r.findall(value)
@staticmethod
def parse_functions(value):
    """ Receives a string as input and parses out all of the functions
    in the format of `string|func1(...)|func2(...)` into an ordered
    dict in the format of `{func1: {param: ...}, func2: {}}`
    """
    # Split only on unescaped pipes; "\|" inside the value is preserved.
    functions = re.split(r'(?<!\\)\|', value)
    # The first segment is the variable name itself, not a function call.
    value = functions.pop(0)
    parsed_functions = collections.OrderedDict()
    for function in functions:
        # Parse each segment as a Python expression, e.g. "split(',', 1)".
        call = ast.parse(function).body[0].value
        # Segments that are not call expressions are silently skipped.
        if isinstance(call, ast.Call):
            fname = call.func.id
            parsed_functions[fname] = {}
            # literal_eval keeps argument evaluation safe (literals only).
            parsed_functions[fname]["args"] = [
                ast.literal_eval(arg) for arg in call.args]
            parsed_functions[fname]["kwargs"] = {
                arg.arg: ast.literal_eval(arg.value) for arg in call.keywords}
    return value, parsed_functions
@classmethod
def resolve_functions(cls, value, functions):
    """Apply each parsed pipe function to ``value`` in order.

    ``functions`` maps a function name to ``{"args": [...], "kwargs": {...}}``
    as produced by :meth:`parse_functions`.  The function table is chosen
    once from the type of the *initial* value.
    """
    type_name = type(value).__name__
    assert type_name in FUNCTIONS, (
        "Value type must be str, list or dict!")
    table = FUNCTIONS[type_name]
    for name, params in functions.items():
        value = table[name](
            *params["args"], resolved_value=value, **params["kwargs"])
    return value
@classmethod
def resolve_variable(cls, data, key):
""" A recursive function that takes the "first level" of the ``key``
and checks if the ``data`` dictionary contains it. If so, it
recursively calls itself on the dictionary if the value of that
key is a dictionary, or returns the value if it's either not
present/is not a dictionary
"""
if not isinstance(key, list):
key = key.split("__")
if key:
current_level = key.pop(0)
current_level, functions = cls.parse_functions(current_level)
# raise ValueError(functions)
if isinstance(data, dict):
# If data is a dictionary, try to get the key from the dictionary
if current_level in data:
value = cls.resolve_functions(
data[current_level], functions)
return cls.resolve_variable(value, key)
return None
elif isinstance(data, list):
# If data is a list, try to get the index for the list
try:
value = data[int(current_level)]
except IndexError:
return None
except ValueError:
raise ValueError(f"`{current_level}` must be an integer since "
f"`{data}` is an array.")
value = cls.resolve_functions(value, functions)
return cls.resolve_variable(value, key)
else:
raise ValueError(
f"Can't query non-dict for value ``{current_level}``."
)
return data
@classmethod
def substitute_variables(cls, value, context):
""" Substitutes all variables in the given ``value`` through the
given ``context``
"""
if isinstance(value, str):
placeholders = cls.find_variables(value)
placeholder_count = len(placeholders)
for placeholder in placeholders:
# Only replaces the placeholder if the resolution is valid
resolved_value = cls.resolve_variable(
context, placeholder)
placeholder_string = "${" + placeholder + "}"
if placeholder_count == 1 and value == placeholder_string:
# This is for cases where we need the placeholder to be
# replaced as is; steps should handle their own conversions
return resolved_value
else:
if resolved_value is not None:
value = value.replace(
placeholder_string,
str(resolved_value)
)
return value
elif isinstance(value, dict):
return {k: cls.substitute_variables(v, context)
for k, v in value.items()}
elif isinstance(value, list):
return [cls.substitute_variables(i, context) for i in value]
return value
def render(self, context):
""" Renders and resolves the variables contained in the ``value``
attribute through the context dictionary
Parameters
----------
``context`` : Dictionary containing context required for resolving
variables in the value
"""
assert isinstance(context, dict)
return self.substitute_variables(self.value, context)
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
def color_activated():
    """Print the dialog's currently selected color as a ``#rrggbb`` hex string."""
    color = colorchooserdialog.get_rgba()
    # Gdk.RGBA channels are floats in [0, 1]; they must be converted to
    # integers because the %x format rejects floats on Python 3.
    red = int(color.red * 255)
    green = int(color.green * 255)
    blue = int(color.blue * 255)
    print('Hex: #%02x%02x%02x' % (red, green, blue))

colorchooserdialog = Gtk.ColorChooserDialog()
if colorchooserdialog.run() == Gtk.ResponseType.OK:
    color_activated()
colorchooserdialog.destroy()
|
#encoding=utf-8
from selenium import webdriver
import requests
from bs4 import BeautifulSoup
import re
print("~~~现仅支持腾讯视频:电视剧/电影/动漫(其他的频道可能会有问题)~~~")
# Base URL of the third-party parsing service; the chosen qq.com page URL is
# appended to it and opened in a Chrome window via selenium.
# urls='https://jx.618g.com/?url='
urls='http://jiexi.92fz.cn/player/vip.php?url='
# Main interactive loop: search v.qq.com, let the user pick a result, then
# branch on media type (movie vs. series) to list and open episodes.
while True:
    name=input("输入名称:")
    url = "https://v.qq.com/x/search/?q=%s" %name
    res=requests.get(url).text
    tree=BeautifulSoup(res,"html.parser")
    # print(tree)
    new=tree.find_all("div",class_=" result_item result_item_v ")
    # print(len(new))
    ress=tree.find_all("h2",class_="result_title")
    # print(ress)
    ti = tree.find_all("h2", class_="result_title")
    # print(ti)
    txt=[]
    txt_nl=[]
    # Keep only result titles that carry a media-type tag.
    for xxx in range(len(ti)):
        if 'type' in str(ti[xxx]):
            txt.append(ti[xxx])
    # print(txt)
    # Collect the landing-page link of each kept result.
    for txt_n in range(len(txt)):
        txt_num=txt[txt_n].a['href']
        txt_nl.append(txt_num)
    # print(txt_nl)
    # print(len(txt))
    # Show the numbered search results to the user.
    for i in range(len(txt)):
        tii = txt[i].get_text()
        print(str(i + 1) + ">>>" + tii)
    # Re-prompt until a valid integer selection is entered.
    while True:
        try:
            nummm = int(input("输入对应序号:"))
        except ValueError:
            continue
        break
    if txt[nummm-1].find("span",class_="type").string=='电影':
        # --- Movie branch ---
        if 'item' in str(new[nummm-1].find("div",class_="item")):
            # Movie with a playlist: query the play-source API for the
            # episode list using the result's data-id and range.
            # print(str(new[nummm-1].find_all("div",class_="item")))
            idss = new[nummm - 1]['data-id']
            new_id = new[nummm - 1].find("div", class_="result_link_list cf").get('r-props')
            # print(new_id)
            new_ids = re.findall(r"range: '(.*)';", new_id)[0]
            # print(new_ids)
            url_g = 'http://s.video.qq.com/get_playsource?id=%s&plat=2&type=4&data_type=2&video_type=2&range=%s ' % (
                idss, new_ids)
            # print(url_g)
            get = requests.get(url_g).text
            gett = BeautifulSoup(get, "html.parser")
            # print(gett)
            ra = gett.find_all("playurl" or "playUrl")
            # print(ra)
            gett_listtt = []
            for i in range(len(ra)):
                gett_listt = gett.find_all("playurl" or "playUrl")[i].string
                gett_listtt.append(gett_listt)
            # print(gett_listtt)
            # gett_list=gett.videoPlayList.playUrl.string
            # print(gett_list)
            ra_title = gett.find_all("episode_number")
            # print(ra_title)
            gett_title = []
            for i in range(len(ra_title)):
                gett_titl = gett.find_all("episode_number")[i].string
                gett_title.append(gett_titl)
            print(gett_title)
            # Map episode number -> play URL.
            num_list = dict(zip(gett_title, gett_listtt))
            # print(num_list)
            # Episode-selection loop: -1 exits, -2 restarts the search,
            # anything else opens the episode in Chrome.
            while True:
                try:
                    try:
                        numm = str(input("输入'-1'退出程序,输入'-2'重新搜索\n输入观看哪一集:"))
                        if int(numm) == -1:
                            exit()
                    except ValueError:
                        continue
                    if int(numm) == -2:
                        break
                    urlss = urls + num_list[numm]
                except KeyError:
                    continue
                # print(urlss)
                driver = webdriver.Chrome()
                driver.get(urlss)
            # NOTE(review): numm is a str here, so this int comparison is
            # always False and the branch never runs — presumably intended
            # as the "restart search" path; confirm.
            if numm == -2 :
                continue
        else:
            # Movie without a playlist: offer the available versions/sources.
            ress_f=tree.find_all("div",class_="_playlist")
            # print(ress_f[1])
            t_f=ress_f[nummm-1].find_all("span",class_="icon_text")
            # print(t_f)
            t_ff=[]
            for i in range(len(t_f)):
                xx=t_f[i].string
                t_ff.append(xx)
            t_ff.remove(None)
            # print(t_ff)
            t_fi = ress_f[nummm-1].find_all("a")
            # print(len(t_fi))
            t_ffi = []
            l_fi = {}
            # Number the version labels for the prompt below.
            for index,ii in enumerate(t_ff):
                l_fi.setdefault(str(index),ii)
                print(str(index+1),">>>",ii)
            # print(l_fi)
            for i in range(len(t_fi)):
                xx = t_fi[i]['href']
                t_ffi.append(xx)
            t_ffi.remove('javascript:;')
            # print(t_ffi)
            # Map version label -> link.
            num_l=dict(zip(t_ff,t_ffi))
            # print(num_l)
            # Version-selection loop: -1 exits, -2 restarts the search.
            while True:
                try:
                    try:
                        nummss = int(input("输入'-1'退出程序,输入'-2'重新搜索\n输入观看版本序号:"))
                        if int(nummss) == -1:
                            exit()
                    except ValueError:
                        continue
                    if int(nummss) == -2:
                        break
                    nummss = l_fi[str(nummss-1)]
                except KeyError:
                    continue
                urlssss = urls + num_l[nummss]
                driver = webdriver.Chrome()
                driver.get(urlssss)
            if nummss == -2:
                continue
    else:
        # --- TV series / anime branch ---
        # a=ress[0]
        # # print(len(ress))
        # # print(a)
        # list=[]
        # b=[]
        # d=[]
        # for i in range(len(ress)):
        #     # z=ress[i]
        #     # print(z)
        #     a=re.sub('( .*?)href=',"",str(ress[i])).replace('<h2',"")
        #     b.append(a)
        #     c=re.sub('target(.*)class="hl"',"",str(b[i]))
        #     d.append(c)
        #     if 'span' in str(d[i]):
        #         q=d[i].replace('</em',"").replace('span class="sub"',"").replace('/span><span class="type"',"").replace('/span></a></h2>',"")
        #         qq=q.replace(" ","")
        #         qqq=qq.replace('>',",").replace("<","").replace('"',"")
        #         qqqq=qqq.split(",")
        #         # print(qqqq)
        #         list.append(qqqq)
        #     else:
        #         f=re.sub('/em(.*)',"",str(d[i]))
        #         ff=f.replace(" ","")
        #         fff=ff.replace('>', ",").replace("<", "").replace('"',"")
        #         ffff=fff.split(',')
        #         # print(ffff)
        #         list.append(ffff)
        # print(list)
        # for ii in range(len(list)):
        #     print("\n")
        #     print(str(ii + 1) + ">>>")
        #     for iii in range(len(list[ii])):
        #         print(list[ii][iii]+"-",end='')
        #     print('\n')
        # num=int(input("序号:"))
        # get_url=list[num-1][0]
        # Fetch the detail page of the selected result and extract the
        # window.__g bootstrap data embedded in a <script> tag.
        get_url=txt_nl[nummm-1]
        # print(get_url)
        reee=requests.get(get_url).text
        tre=BeautifulSoup(reee,"html.parser")
        ccc = tre.find_all('script')
        ccccc=[]
        for nu in range(len(ccc)):
            if 'window.__g' in str(ccc[nu]):
                ccccc.append(ccc[nu])
        ddd=ccccc[0].get_text()
        # print(ddd)
        # print(type(ddd))
        dddd=ddd.replace("\n","").replace("\t","").split("=")
        # print(type(dddd))
        ddddd=dddd[1]
        # print(ddddd)
        query=1
        # NOTE(review): eval() on page-supplied data is unsafe and fragile —
        # a JSON parse would be preferable here.
        cccc=eval(ddddd)
        # print(cccc)
        # print(type(cccc))
        # NOTE(review): ``id`` here is the builtin function used as a dict
        # key — this looks like it should be the string 'id'; confirm.
        ids=cccc[1][id]
        # print(ids)
        # print(tre)
        tr=tre.find("div",class_="mod_episode")
        # print(tr)
        if 'item_all' in str(tr):
            # Series with a paged episode list: query the play-source API
            # with the data-range of the "all episodes" element.
            aaa=tr.find("span",class_="item item_all")
            bbb=aaa.a['data-range']
            # print(bbb)
            url_g='http://s.video.qq.com/get_playsource?id=%s&plat=2&type=4&data_type=2&video_type=2&range=%s '%(ids,bbb)
            # print(url_g)
            get=requests.get(url_g).text
            gett=BeautifulSoup(get,"html.parser")
            # print(gett)
            ra=gett.find_all("playurl"or"playUrl")
            # print(ra)
            gett_listtt=[]
            for i in range(len(ra)):
                gett_listt=gett.find_all("playurl"or"playUrl")[i].string
                gett_listtt.append(gett_listt)
            # print(gett_listtt)
            # gett_list=gett.videoPlayList.playUrl.string
            # print(gett_list)
            ra_title=gett.find_all("episode_number")
            # print(ra_title)
            gett_title=[]
            for i in range(len(ra_title)):
                gett_titl=gett.find_all("episode_number")[i].string
                gett_title.append(gett_titl)
            print(gett_title)
            # Map episode number -> play URL.
            num_list=dict(zip(gett_title,gett_listtt))
            # print(num_list)
            # Episode-selection loop: -1 exits, -2 restarts the search.
            while True:
                try:
                    try:
                        numm=str(input("输入'-1'退出程序,输入'-2'重新搜索\n输入观看哪一集:"))
                        if int(numm) == -1:
                            exit()
                    except ValueError:
                        continue
                    if int(numm) == -2:
                        break
                    urlss=urls+num_list[numm]
                except KeyError:
                    continue
                # print(urlss)
                driver = webdriver.Chrome()
                driver.get(urlss)
            if numm == -2:
                continue
        else:
            # Short series: episodes are listed inline on the page.
            aaaa = tr.find_all("span",class_="item")
            # print(aaaa)
            l=[]
            ll=[]
            for i in range(len(aaaa)):
                aaaaa=aaaa[i].find("a").find('span').string
                l.append(aaaaa)
                aaaaaa=aaaa[i].find("a")['href']
                ll.append(aaaaaa)
            # Map episode label -> episode page link.
            num_lists=dict(zip(l,ll))
            print(l)
            # Episode-selection loop: -1 exits, -2 restarts the search.
            while True:
                try:
                    try:
                        numms = str(input("输入'-1'退出程序,输入'-2'重新搜索\n输入观看哪一集:"))
                        if int(numms)==-1:
                            exit()
                    except ValueError:
                        continue
                    if int(numms)==-2:
                        break
                    urlsss = urls + num_lists[numms]
                except KeyError:
                    continue
                # print(urlsss)
                driver = webdriver.Chrome()
                driver.get(urlsss)
            if numms == -2:
                continue
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.rum.v20210622 import models
class RumClient(AbstractClient):
    """Client for the Tencent Cloud RUM (Real User Monitoring) API."""
    _apiVersion = '2021-06-22'
    _endpoint = 'rum.tencentcloudapi.com'
    _service = 'rum'

    def _call_and_deserialize(self, action, request, response_cls):
        """Serialize *request*, invoke *action* and deserialize the reply.

        Shared implementation for every public API method below; the
        per-method bodies used to be nine identical copy-pasted blocks.

        :param action: API action name, e.g. ``"CreateProject"``
        :param request: request model instance providing ``_serialize()``
        :param response_cls: response model class populated on success
        :raises TencentCloudSDKException: on an API error response or any
            unexpected local failure
        """
        try:
            params = request._serialize()
            body = self.call(action, params)
            response = json.loads(body)
            if "Error" not in response["Response"]:
                model = response_cls()
                model._deserialize(response["Response"])
                return model
            else:
                code = response["Response"]["Error"]["Code"]
                message = response["Response"]["Error"]["Message"]
                reqid = response["Response"]["RequestId"]
                raise TencentCloudSDKException(code, message, reqid)
        except Exception as e:
            if isinstance(e, TencentCloudSDKException):
                raise
            else:
                # Python 3 exceptions have no ``.message`` attribute; the
                # previous ``e.message`` raised AttributeError and masked
                # the real error.
                raise TencentCloudSDKException(type(e).__name__, str(e))

    def CreateProject(self, request):
        """Create a project (owned by a team).

        :param request: Request instance for CreateProject.
        :type request: :class:`tencentcloud.rum.v20210622.models.CreateProjectRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.CreateProjectResponse`
        """
        return self._call_and_deserialize(
            "CreateProject", request, models.CreateProjectResponse)

    def DescribeDataEventUrl(self, request):
        """Fetch DescribeDataEventUrl data.

        :param request: Request instance for DescribeDataEventUrl.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeDataEventUrlRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeDataEventUrlResponse`
        """
        return self._call_and_deserialize(
            "DescribeDataEventUrl", request, models.DescribeDataEventUrlResponse)

    def DescribeDataLogUrlStatistics(self, request):
        """Fetch LogUrlStatistics data.

        :param request: Request instance for DescribeDataLogUrlStatistics.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeDataLogUrlStatisticsRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeDataLogUrlStatisticsResponse`
        """
        return self._call_and_deserialize(
            "DescribeDataLogUrlStatistics", request, models.DescribeDataLogUrlStatisticsResponse)

    def DescribeDataPerformancePage(self, request):
        """Fetch PerformancePage data.

        :param request: Request instance for DescribeDataPerformancePage.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeDataPerformancePageRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeDataPerformancePageResponse`
        """
        return self._call_and_deserialize(
            "DescribeDataPerformancePage", request, models.DescribeDataPerformancePageResponse)

    def DescribeDataPvUrlStatistics(self, request):
        """Fetch DescribeDataPvUrlStatistics data.

        :param request: Request instance for DescribeDataPvUrlStatistics.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeDataPvUrlStatisticsRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeDataPvUrlStatisticsResponse`
        """
        return self._call_and_deserialize(
            "DescribeDataPvUrlStatistics", request, models.DescribeDataPvUrlStatisticsResponse)

    def DescribeError(self, request):
        """Fetch the home-page error information.

        :param request: Request instance for DescribeError.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeErrorRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeErrorResponse`
        """
        return self._call_and_deserialize(
            "DescribeError", request, models.DescribeErrorResponse)

    def DescribeLogList(self, request):
        """Fetch the log list for a project (logs under the instance's project).

        :param request: Request instance for DescribeLogList.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeLogListRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeLogListResponse`
        """
        return self._call_and_deserialize(
            "DescribeLogList", request, models.DescribeLogListResponse)

    def DescribeProjects(self, request):
        """Fetch the project list (projects under the instance's team).

        :param request: Request instance for DescribeProjects.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeProjectsRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeProjectsResponse`
        """
        return self._call_and_deserialize(
            "DescribeProjects", request, models.DescribeProjectsResponse)

    def DescribeScores(self, request):
        """Fetch the home-page score list.

        :param request: Request instance for DescribeScores.
        :type request: :class:`tencentcloud.rum.v20210622.models.DescribeScoresRequest`
        :rtype: :class:`tencentcloud.rum.v20210622.models.DescribeScoresResponse`
        """
        return self._call_and_deserialize(
            "DescribeScores", request, models.DescribeScoresResponse)
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import func
from datetime import date
db = SQLAlchemy()
class User(db.Model):
    """Registered account holding login credentials."""
    __tablename__ = 'user'
    # Surrogate primary key.
    user_id = db.Column(db.Integer, primary_key=True)
    # Login name; uniqueness enforced at the database level.
    username = db.Column(db.String(30), unique=True)
    # NOTE(review): stored in plain text — this should hold a password hash
    # (e.g. werkzeug.security.generate_password_hash); confirm before use.
    password = db.Column(db.String(30))
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __repr__(self):
        return '<User {}>'.format(self.username)
class Favorite(db.Model):
    """A movie a user has marked as a favorite."""
    __tablename__ = 'favorite'
    # Surrogate primary key.
    favorite_id = db.Column(db.Integer, primary_key=True)
    # Owning user.
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))
    # Free-form movie payload — presumably a title or serialized record;
    # confirm the expected format against the callers.
    movie = db.Column(db.Text, nullable=False)
    def __init__(self, user_id, movie):
        self.user_id = user_id
        self.movie = movie
    def __repr__(self):
        return '<Movie {}>'.format(self.movie)
class Subscription(db.Model):
    """A subscription entry associated with a user."""
    __tablename__ = 'subscription'
    # Surrogate primary key.
    subscription_id = db.Column(db.Integer, primary_key=True)
    # Owning user.
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))
    # Free-form subscription payload — presumably a service name or
    # serialized record; confirm the expected format against the callers.
    subscription = db.Column(db.Text, nullable=False)
    def __init__(self, user_id, subscription):
        self.user_id = user_id
        self.subscription = subscription
    def __repr__(self):
        return '<Subscription {}>'.format(self.subscription)
|
from collections import defaultdict
# Count coins of each type
def count_coins(amount, coin, counter):
    """Take as many *coin*-valued coins as possible out of *amount*.

    Uses a single floor division instead of the previous repeated
    subtraction, so it runs in O(1) regardless of the amount.

    :param amount: remaining amount (in pennies)
    :param coin: denomination to take (in pennies)
    :param counter: running count for this denomination
    :return: tuple of (remaining amount, updated counter)
    """
    taken = int(amount // coin)
    counter += taken
    amount -= taken * coin
    return amount, counter
def main():
    """Prompt for a dollar amount and print the fewest coins that make it."""
    coins = [25, 10, 5, 1]
    counters = [0] * len(coins)
    by_coin = defaultdict(int)
    amount = input("Enter amount in dollars: ")
    # Convert dollars to pennies. round() avoids float artifacts such as
    # 4.2 * 100 == 420.00000000000006, which previously dropped a penny.
    amount = round(float(amount) * 100)
    # Greedy change-making: largest denominations first (insertion order
    # into by_coin matches the print order below).
    for coin, counter in zip(coins, counters):
        amount, counter = count_coins(amount, coin, counter)
        by_coin[coin] = counter
    print("Quarters: {0:d}, dimes: {1:d}, nickels: {2:d} and pennies: {3:d}."
          .format(*by_coin.values()))
    print("Total number of coins owed: {0:d}.".format(sum(by_coin.values())))


if __name__ == "__main__":
    main()
|
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework import generics, permissions, status
from .serializers import (
QuestionSerializer,
AnswerSerializer
)
from .models import (
Question,
Answer
)
class IsTeacher(permissions.BasePermission):
    """
    Permission granting access exclusively to staff ("teacher") accounts.
    """

    def has_permission(self, request, view):
        # Same truthiness contract as before: a missing/falsy user short-
        # circuits, otherwise the staff flag decides.
        user = request.user
        return user and user.is_staff
class QuestionPostView(APIView):
    """Create a Question from multipart/form-data and return its id."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request, *args, **kwargs):
        serializer = QuestionSerializer(data=request.data)
        if serializer.is_valid():
            # serializer.save() already returns the created instance;
            # re-querying by Q_text was race-prone and raised
            # MultipleObjectsReturned on duplicate question text.
            question = serializer.save()
            return Response(question.id)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class AnswerPostView(APIView):
    """Create an Answer (teachers only) and return its id."""
    serializer_class = AnswerSerializer
    permission_classes = (IsTeacher,)

    def post(self, request, *args, **kwargs):
        serializer = AnswerSerializer(data=request.data)
        if serializer.is_valid():
            # serializer.save() already returns the created instance;
            # re-querying by A_text was race-prone and raised
            # MultipleObjectsReturned on duplicate answer text.
            answer = serializer.save()
            return Response(answer.id)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class OneQuestionView(APIView):
    """Retrieve a single question together with its answers and attachment URL."""

    def get_answers(self, pk):
        # One dict per answer, keyed the way the frontend expects.
        return [
            {'answer_text': answer.A_text}
            for answer in Answer.objects.filter(Q_id=pk)
        ]

    def get(self, request, pk, format=None):
        question = Question.objects.get(pk=pk)
        payload = {
            'question_text': question.Q_text,
            'answers': self.get_answers(pk),
            'attachment': question.attachment.url,
        }
        return Response(payload)
|
"""
.. _howto_simplelookupdecoder:
Decoding Spots with :py:class:`.SimpleLookupDecoder`
====================================================
Linearly multiplexed assays are designed such that every RNA transcript is labeled in only one of
potentially many imaging rounds (e.g. osmFISH, sequential smFISH, and RNAscope). One way to
decode spots from images produced by these assays is to use :py:class:`.SimpleLookupDecoder`,
which simply looks up the :term:`target <Target>` in the :term:`codebook <Codebook>` whose
:term:`codeword <Codeword>` has ``value: 1`` in the round and channel the spot was found in.
.. warning::
:py:class:`.SimpleLookupDecoder` should never be used on :py:class:`.SpotFindingResults`
found from a ``reference_image``.
.. note::
:py:class:`.PerRoundMaxChannel` decoding with
``trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL`` will return effectively the
same result but with the addition of ``xc``, ``yc``, ``zc``, ``distance``,
and ``passes_threshold`` fields in the :py:class:`.DecodedIntensityTable`.
"""
# Load the example smFISH dataset and detect spots.
import starfish.data
from starfish import FieldOfView
from starfish.types import Levels
from starfish.image import Filter

experiment = starfish.data.allen_smFISH(use_test_data=True)
image = experiment["fov_001"].get_image(FieldOfView.PRIMARY_IMAGES)

# Pre-processing pipeline, applied in place and in order: clip low
# intensities, band-pass, Gaussian blur through z, clip to the top
# percentile.
pipeline = [
    Filter.Clip(p_min=50, p_max=100, level_method=Levels.SCALE_BY_CHUNK),
    Filter.Bandpass(lshort=.5, llong=7, threshold=0.0),
    Filter.GaussianLowPass(sigma=(1, 0, 0), is_volume=True),
    Filter.Clip(p_min=99, p_max=100, is_volume=True, level_method=Levels.SCALE_BY_CHUNK),
]
for step in pipeline:
    step.run(image, in_place=True)

# Find spots with Trackpy's local-max peak finder on the filtered volume.
peak_finder = starfish.spots.FindSpots.TrackpyLocalMaxPeakFinder(
    spot_diameter=5,
    min_mass=0.02,
    max_size=2,
    separation=7,
    noise_size=0.65,
    preprocess=False,
    percentile=10,
    verbose=True,
    is_volume=True,
)
spots = peak_finder.run(image)

# Decode spots with SimpleLookupDecoder
from starfish.spots import DecodeSpots

decoder = DecodeSpots.SimpleLookupDecoder(codebook=experiment.codebook)
decoded_intensities = decoder.run(spots=spots)
"""
@Author: yshhuang@foxmail.com
@Date: 2020-07-27 16:57:35
@LastEditors: yshhuang@foxmail.com
@LastEditTime: 2020-07-30 20:05:42
@FilePath: /d2l-zh/srcnn/train.py
"""
from preprocessing import (generate_data, try_gpu, data_iter)
import os
import h5py
from mxnet import nd, gluon, autograd
from model import SrCnn
from mxnet.gluon import loss as gloss
import time
import random
# Training hyper-parameters.
train_data = '../data/srcnn/Train/'
lr = 1e-4
epoch = 10
batch_size = 128

if __name__ == "__main__":
    # Build the HDF5 training set on first run, then load it into NDArrays.
    if not os.path.exists("train.h5"):
        generate_data(train_data, "train.h5")
    with h5py.File("train.h5", 'r') as hf:
        train_input = nd.array(hf.get('input'))
        train_label = nd.array(hf.get('label'))
    net = SrCnn()
    # Resolve the compute context once and reuse it (previously try_gpu()
    # was called twice: once for initialize and once for ctx).
    ctx = try_gpu()
    net.initialize(ctx=ctx)
    # Resume from a previous checkpoint when one exists.
    if os.path.exists("srcnn.params"):
        net.load_parameters("srcnn.params")
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    print('training on', ctx)
    loss = gloss.L2Loss()
    for ep in range(epoch):
        train_l_sum, n, start = 0.0, 0, time.time()
        for X, y in data_iter(batch_size, train_input, train_label):
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            # HWC -> CHW layout expected by the convolutional layers.
            X = nd.transpose(X, (0, 3, 1, 2))
            y = nd.transpose(y, (0, 3, 1, 2))
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            # (leftover debug print of y.size removed)
            n += y.size
        print('epoch %d,loss %f' % (ep+1, train_l_sum/n))
        # Checkpoint after every epoch.
        net.save_parameters("srcnn.params")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
######################################################################################################################
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
# This file reads the AWS cloudwatch metrics for a given workspace
# This is where we will change the algorithm to determine billing preference
import boto3
import logging
import os
import math
from botocore.config import Config
from datetime import timedelta, datetime
# Shared boto3 client configuration: a large connection pool plus
# standard-mode retries for the bulk CloudWatch queries below.
botoConfig = Config(
    max_pool_connections=100,
    retries={
        'max_attempts': 20,
        'mode': 'standard'
    },
)
log = logging.getLogger()
# Log level is taken from the environment; defaults to INFO.
LOG_LEVEL = str(os.getenv('LogLevel', 'INFO'))
log.setLevel(LOG_LEVEL)
# WorkSpaces running-mode constants.
AUTO_STOP = 'AUTO_STOP'
ALWAYS_ON = 'ALWAYS_ON'
TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# CloudWatch queries are chunked into time ranges of this many days.
NUMBER_OF_DAYS = 5
START_TIME = 'start_time'
END_TIME = 'end_time'
# NOTE(review): the int() below raises TypeError at import time if the
# AutoStopTimeoutHours environment variable is unset — confirm it is
# always configured for this deployment.
AUTO_STOP_TIMEOUT_HOURS = os.getenv('AutoStopTimeoutHours')
# This constant represents the number of 5 minutes sessions in AUTO_STOP_TIMEOUT_HOURS
ZERO_COUNT = int(AUTO_STOP_TIMEOUT_HOURS) * 60 / 5
class MetricsHelper(object):
def __init__(self, region):
self.region = region
self.client = boto3.client('cloudwatch', region_name=self.region, config=botoConfig)
def get_billable_hours(self, start_time, end_time, workspace):
"""
This method returns the billable hours for the given workspace
:param start_time: Start time for the calculating hours
:param end_time: End time for calculating hours
:param workspace: Workspace object to use to calculate hours
:return: billable hours for the workspace
"""
log.debug("Calculating user connected hours for the workspace {} with start time {} and end time {}".
format(workspace, start_time, end_time))
list_time_ranges = self.get_list_time_ranges(start_time, end_time)
list_metric_data_points_user_connected = \
self.get_cloudwatch_metric_data_points(workspace['WorkspaceId'], list_time_ranges, 'UserConnected')
if list_metric_data_points_user_connected:
list_user_session_data_points = self.get_list_user_session_data_points(list_metric_data_points_user_connected)
list_user_sessions = self.get_user_sessions(list_user_session_data_points, workspace)
user_connected_hours = self.get_user_connected_hours(list_user_sessions, workspace)
log.debug("Calculated user connected hours: {}".format(user_connected_hours))
return user_connected_hours
else:
return None
def get_list_time_ranges(self, start_time, end_time):
"""
This method returns list of time ranges for the given start and end time. Each time range if of 5 days.
:param start_time:
:param end_time:
:return: list of time ranges
"""
log.debug("Getting time ranges for start time {} and end time {}".format(start_time, end_time))
list_time_ranges = []
start_time_new_format = datetime.strptime(start_time, TIME_FORMAT)
end_time_new_format = datetime.strptime(end_time, TIME_FORMAT)
time_diff = end_time_new_format - start_time_new_format
number_of_time_ranges = math.ceil(
time_diff / timedelta(days=NUMBER_OF_DAYS)) # Round the number to the next integer
for item in range(number_of_time_ranges):
start_time = start_time_new_format + item * timedelta(days=NUMBER_OF_DAYS)
end_time = start_time + timedelta(days=NUMBER_OF_DAYS)
time_range = {
START_TIME: start_time.strftime(TIME_FORMAT),
END_TIME: end_time.strftime(TIME_FORMAT)
}
list_time_ranges.append(time_range)
log.debug("List of time ranges for start time {} and end time {} is {}".
format(start_time, end_time, list_time_ranges))
return list_time_ranges
def get_cloudwatch_metric_data_points(self, workspace_id, list_time_ranges, metric):
"""
This method returns the cloudwatch metric datapoints for given workspace id and time ranges.
:param metric: metric to use to query cloudwatch metrics
:param workspace_id:
:param list_time_ranges: List of time ranges to query and get the metrics for
:return: list of Datapoints for the cloudwatch metrics
"""
log.debug("Getting the cloudwatch metrics for the workspace id {}".format(workspace_id))
list_data_points = []
for time_range in list_time_ranges:
try:
metrics = self.client.get_metric_statistics(
Dimensions=[{
'Name': 'WorkspaceId',
'Value': workspace_id
}],
Namespace='AWS/WorkSpaces',
MetricName=metric,
StartTime=time_range[START_TIME],
EndTime=time_range[END_TIME],
Period=300,
Statistics=['Maximum']
)
except Exception as error:
log.error("Error occurred while processing workspace {}, {}".format(workspace_id, error))
return None
for metric_data in metrics['Datapoints']:
list_data_points.append(metric_data)
log.debug("The cloudwatch metrics list for workspace id {} is {}".format(workspace_id, list_data_points))
return list_data_points
def get_list_user_session_data_points(self, list_metric_data_points):
"""
This method returns the sorted list of data points
:param list_metric_data_points:
:return: sorted list of data points
"""
log.debug("Getting the list of user session data points for metric data points {}".
format(list_metric_data_points))
list_user_session_data_points = []
sorted_list_metric_data_points = sorted(list_metric_data_points, key=lambda x: x['Timestamp'])
for metric in sorted_list_metric_data_points:
list_user_session_data_points.append(metric['Maximum'])
log.debug("List of user sessions is {}".format(list_user_session_data_points))
return list_user_session_data_points
def get_user_connected_hours(self, list_user_sessions, workspace):
"""
This method returns user connected hours from list of user sessions for a given workspace
:param list_user_sessions:
:param workspace:
:return:
"""
log.debug("Calculating user connected hours for workspace {} and user sessions {}".
format(workspace, list_user_sessions))
user_connected_hours = 0
if workspace['WorkspaceProperties']['RunningMode'] == ALWAYS_ON:
idle_time_in_hours = int(AUTO_STOP_TIMEOUT_HOURS)
else:
idle_time_in_hours = workspace['WorkspaceProperties']['RunningModeAutoStopTimeoutInMinutes'] / 60
for session in list_user_sessions:
user_connected_hours = user_connected_hours + session + idle_time_in_hours ## ADD PATCHING HOURS TO WORKSPACES
return user_connected_hours
def get_user_sessions(self, list_user_session_data_points, workspace):
    """Split a 0/1 connectivity series into per-session durations in hours.

    The input is a chronologically sorted list of 5-minute datapoints
    (1 = user connected in that interval, 0 = idle).  A session is closed
    once the number of consecutive zeroes reaches self.get_zero_count(workspace);
    its length in samples is converted to whole hours (12 samples/hour,
    rounded up).

    :param list_user_session_data_points: ordered list of 0/1 datapoints
    :param workspace: workspace record used to derive the idle cutoff
    :return: list of session durations in whole hours
    """
    list_user_sessions = []
    session_start = False
    zeroes_count = 0
    end_session_index = 0
    start_session_index = 0
    for i in range(len(list_user_session_data_points)):
        if list_user_session_data_points[i] == 1:
            if not session_start:
                # First 1 after idle: open a new session window.
                session_start = True
                zeroes_count = 0
                start_session_index = i
                end_session_index = i + 1  # set this to account for user session [1,0,0....0]
            else:
                zeroes_count = 0  # Reset the zero count if a value of 1 is encountered
                end_session_index = i + 1
        elif list_user_session_data_points[i] == 0 and session_start:
            zeroes_count = zeroes_count + 1
            # Enough consecutive zeroes -> session considered closed.
            if zeroes_count == self.get_zero_count(workspace):
                # 12 five-minute samples per hour; round partial hours up.
                user_session_hours = math.ceil((end_session_index - start_session_index) / 12)
                list_user_sessions.append(user_session_hours)
                session_start = False
                end_session_index = 0
                start_session_index = 0
    # Flush a session still open at the end of the series.  When the last
    # session closed inside the loop both indices were reset to 0, so this
    # evaluates to 0 (falsy) and nothing extra is appended.
    user_session_hours = math.ceil((end_session_index - start_session_index) / 12)
    if user_session_hours:
        list_user_sessions.append(user_session_hours)
    return list_user_sessions
def get_zero_count(self, workspace):
    """Return how many consecutive zero datapoints mark the end of a user session.

    Always-on workspaces use the fixed ZERO_COUNT; auto-stop workspaces
    derive it from RunningModeAutoStopTimeoutInMinutes (one datapoint per
    5 minutes).

    :param workspace: workspace record (dict-like)
    :return: zero-datapoint threshold as an int
    """
    properties = workspace['WorkspaceProperties']
    if properties['RunningMode'] == ALWAYS_ON:
        threshold = ZERO_COUNT
    else:
        threshold = properties['RunningModeAutoStopTimeoutInMinutes'] / 5
    log.debug("The zero count for the workspace {} is {}".format(workspace, threshold))
    return int(threshold)
|
"""
This problem was asked by Uber.
Given an array of integers, return a new array such that each element at index i of the new array is the product of all
the numbers in the original array except the one at i.
For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24].
If our input was [3, 2, 1], the expected output would be [2, 3, 6].
Follow-up: what if you can't use division?
"""
def array_multiplier(array):
    """Print and return the array of products of all other elements.

    result[i] is the product of every element of ``array`` except
    ``array[i]``.  Computed without division (the follow-up constraint)
    using prefix and suffix products in O(n) instead of the original
    O(n^2) nested loop.

    :param array: list of numbers
    :return: list of products (also printed, preserving original output)
    """
    n = len(array)
    final_array = [1] * n
    running = 1
    for i in range(n):  # product of everything to the left of i
        final_array[i] = running
        running *= array[i]
    running = 1
    for i in range(n - 1, -1, -1):  # fold in product of everything to the right
        final_array[i] *= running
        running *= array[i]
    print(final_array)
    return final_array
if __name__ == '__main__':
    # Example from the problem statement; expected output: [120, 60, 40, 30, 24]
    sample_input = [1, 2, 3, 4, 5]
    array_multiplier(sample_input)
"""sample implementation for IntegrationPlugin"""
from plugin import InvenTreePlugin
from plugin.mixins import UrlsMixin
class NoIntegrationPlugin(InvenTreePlugin):
    """A basic plugin with no mixins — presumably a minimal sample/fixture; confirm intent with callers."""

    NAME = "NoIntegrationPlugin"
class WrongIntegrationPlugin(UrlsMixin, InvenTreePlugin):
    """A basic "wrong" plugin with URLs: mixes in UrlsMixin but configures nothing — presumably a negative test fixture; confirm intent with callers."""

    NAME = "WrongIntegrationPlugin"
|
from scipy.signal import welch, filtfilt
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import butter, hilbert
import networkx as nx
from time import time
import numpy as np
import pylab as pl
import igraph
import os
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Function(object):
    """Base class for mathematical functions.

    A bare ``callable`` is enough when a function only ever needs to be
    invoked, but richer operations (domain/range queries, sparsity
    information, altered computational representations such as
    memoization) need an object to hang them on.  This class is that
    common, callable root.
    """

    def __init__(self, f):
        """Wrap the callable *f*."""
        self._f = f

    def __call__(self, x):
        """Apply the wrapped callable to *x*."""
        return self._f(x)

    def map(self, g):
        """Return a new function of the same type computing ``g(self(x))``.

        Args:
            g (callable): the function to post-compose.

        Returns:
            An instance of ``type(self)``.  Note that although
            ``SomeFunction(f).map(g)`` equals
            ``SomeFunction(lambda x: g(f(x)))`` mathematically, they can
            differ operationally: e.g. for ``MemoizedFunction`` the former
            memoizes the intermediate ``f(x)`` values, the latter does not.
        """
        def composed(x):
            return g(self(x))
        return self.__class__(composed)
class MemoizedFunction(Function):
    """A Function that caches its result for every argument it has seen."""

    def __init__(self, f):
        """Wrap *f* and start with an empty cache."""
        super(MemoizedFunction, self).__init__(f)
        self._memos = {}

    def ClearMemos(self):
        """Discard all memoized results of this function."""
        self._memos = {}

    def __call__(self, x):
        """Return the cached value for *x*, computing it once on a miss."""
        if x not in self._memos:
            self._memos[x] = self._f(x)
        return self._memos[x]
|
# -*- coding: utf-8 -*-
u"""test invalid method for guest
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_invalid_method(auth_fc):
    """Guest login must stop counting once 'guest' is removed from auth methods.

    Logs in as a guest, then rewires the live ``sirepo.auth`` module so
    'email' is the only valid method, and asserts the auth state reports a
    logged-out user.

    :param auth_fc: flask test-client fixture preconfigured for auth tests
    """
    fc = auth_fc
    # Imports are deferred so the fixture configures the app first.
    from pykern import pkconfig, pkunit, pkio
    from pykern.pkunit import pkok, pkre
    from pykern.pkdebug import pkdp
    import re
    # Establish a guest session.
    r = fc.sr_get('authGuestLogin', {'simulation_type': fc.sr_sim_type})
    fc.sr_post('listSimulations', {'simulationType': fc.sr_sim_type})
    import sirepo.auth
    # Runtime surgery: only 'email' remains valid and the guest module is
    # dropped entirely.  Order matters — the method tuples are derived from
    # cfg.methods after it is replaced.
    sirepo.auth.cfg.methods = set(['email'])
    sirepo.auth.cfg.deprecated_methods = set()
    sirepo.auth.non_guest_methods \
        = sirepo.auth.visible_methods = sirepo.auth.valid_methods = tuple(sirepo.auth.cfg.methods)
    del sirepo.auth._METHOD_MODULES['guest']
    # The previously-established guest session must no longer count as a login.
    fc.sr_auth_state(
        displayName=None,
        isLoggedIn=False,
        needCompleteRegistration=False,
        uid=None,
        userName=None,
    )
|
__author__ = 'Властелин Вселенной'
from model.parameters import Contact, Group
import random
def test_add_contact_to_group(app, orm):
    """Adding a contact to a group shifts both ORM-side counts by exactly one.

    Ensures the target group and at least one unassigned contact exist,
    then adds a random unassigned contact to the group through the UI and
    verifies the in-group / not-in-group sets changed accordingly.

    :param app: application fixture driving the UI
    :param orm: ORM fixture used for database-level verification
    """
    group_name = "test_group"
    group = Group(name=group_name)
    # Preconditions: the group exists and there is a contact to add.
    if len(orm.find_group_in_list_by_name(group)) == 0:
        app.group.create_new_group(group)
    if len(orm.get_contacts_not_in_group(group)) == 0:
        app.contact.create_new_contact_short(Contact(firstname="add", middlename="add", lastname="add"))
    contacts_not_in_group = orm.get_contacts_not_in_group(group)
    contacts_in_group = orm.get_contacts_in_group(group)
    contact = random.choice(contacts_not_in_group)
    # Drive the UI: select the contact, pick the group, click "add to group".
    app.contact.select_contact_by_id_for_contact_to_group(contact.id)
    app.contact.select_group_bottom_dropdown(group_name)
    app.contact.click_add_contact_to_group()
    new_contacts_not_in_group = orm.get_contacts_not_in_group(group)
    new_contacts_in_group = orm.get_contacts_in_group(group)
    # Exactly one contact moved between the two sets.
    assert len(contacts_in_group) == len(new_contacts_in_group) - 1
    assert len(contacts_not_in_group) == len(new_contacts_not_in_group) + 1
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import corepy.spre.spe as spe
import corepy.arch.cal.isa as cal
import corepy.lib.extarray as extarray
def load_word(code, r_target, word):
    """Emit instructions that load the integer *word* into r_target.

    Acquires a literal register with the word replicated across all four
    components, moves its x component into the target, and releases the
    literal register again.
    """
    literal = code.prgm.acquire_register((word,) * 4)
    code.add(cal.mov(r_target, literal.x))
    code.prgm.release_register(literal)
    return
def load_float(code, reg, val):
    """Load the raw IEEE-754 bit pattern of float *val* into *reg*.

    The float is stored into an extarray and reinterpreted as an unsigned
    integer word (bitwise, no numeric conversion), then loaded via
    load_word().
    """
    bits = extarray.extarray('f', (val,))
    bits.change_type('I')
    return load_word(code, reg, bits[0])
def vector_from_array(code, r_target, a):
    """Emit instructions filling vector register r_target from the first
    four elements of array *a*."""
    literal = code.prgm.acquire_register((a[0], a[1], a[2], a[3]))
    code.add(cal.mov(r_target, literal))
    code.prgm.release_register(literal)
    return
def get_param_reg(code, param, dict, copy = True):
    """Return a register containing the value of *param*.

    *param* may be a literal value or a register/variable holding one.
    If copy is True, a new register is always returned; otherwise a
    register param is returned unchanged.

    dict is a dictionary used internally between get_param_reg() and
    put_param_reg() to track which registers were acquired here (and so
    must be released).  A function should use one (initially empty)
    dictionary for all of its parameters.
    """
    reg = None
    if isinstance(param, (spe.Register, spe.Variable)):
        if copy == True:
            # TODO - behave differently if at an even/odd spot
            reg = code.prgm.acquire_register()
            # BUGFIX: the original called spu.ori(reg, param, 0), but no
            # 'spu' module is imported in this CAL-backend file, so this
            # path raised NameError at runtime.  Copy the register with
            # cal.mov() as the other helpers in this module do.
            code.add(cal.mov(reg, param))
            dict[reg] = True
        else:
            reg = param
            dict[reg] = False
    else:  # TODO - check types?
        reg = code.prgm.acquire_register()
        load_word(code, reg, param)
        dict[reg] = True
    return reg
def put_param_reg(code, reg, dict):
    """Release *reg* if the bookkeeping dict says get_param_reg() acquired it."""
    if dict[reg]:
        code.prgm.release_register(reg)
# ------------------------------------------------------------
# Unit Test Code
# ------------------------------------------------------------
if __name__=='__main__':
    # No standalone tests for this helper module yet.
    pass
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import sys
import logging
import argparse
import logging
import threading
import time
import syndicate.ag.curation.specfile as AG_specfile
# Module-wide logger: timestamped, level- and location-tagged messages.
# Default verbosity is ERROR; callers may lower it for debugging.
logging.basicConfig( format='[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
log = logging.getLogger()
log.setLevel( logging.ERROR )
class crawler_callbacks(object):
    """
    Bundle of callbacks the crawler invokes to generate a specfile.

    include_cb(path, is_directory) -> bool : whether to keep/explore an entry
    listdir_cb(path) -> [names of children]
    isdir_cb(path) -> bool
    """
    include_cb = None
    listdir_cb = None
    isdir_cb = None

    def __init__(self, include_cb=None, listdir_cb=None, isdir_cb=None):
        # Default filter accepts everything.
        if include_cb is None:
            include_cb = lambda path, is_dir: True
        self.include_cb = include_cb
        self.listdir_cb = listdir_cb
        self.isdir_cb = isdir_cb
# do this in its own thread
class crawl_thread( threading.Thread ):
def __init__(self, threadno, context, callbacks, max_retries ):
"""
Make a new crawler thread.
* listdir_cb is a function that takes (context, absolute path to a directory) as arguments
and returns a list of names (not paths) of its immediate children.
* isdir_cb is a function that takes (context, absolute path to a dataset entry) as arguments and returns
True if it is a directory.
"""
super( crawl_thread, self ).__init__()
self.callbacks = callbacks
self.threadno = threadno
self.producer_sem = threading.Semaphore(0)
self.context = context
self.max_retires = max_retries
self.running = True
self.result_files = None
self.result_dirs = None
self.working = False
self.cur_dir = None
self.crawl_status = True
@classmethod
def crawl( cls, threadno, cur_dir, context, callbacks, max_retries ):
log.info( "thread %s: listdir %s" % (threadno, cur_dir ) )
names = None
for i in xrange(0, max_retries):
try:
names = callbacks.listdir_cb( context, cur_dir )
break
except Exception, e:
log.exception(e)
log.info("thread %s: Trying to crawl %s again" % (threadno, cur_dir) )
time.sleep(1)
pass
if names is None:
return (None, None, False)
# harvest the work
files = []
dirs = []
for name in names:
abs_path = "/" + os.path.join( cur_dir.strip("/"), name.strip("/") )
is_directory = False
for i in xrange(0, max_retries):
try:
is_directory = callbacks.isdir_cb( context, abs_path )
break
except Exception, e:
log.exception(e)
log.info("thread %s: Trying to isdir %s again" % (threadno, abs_path))
time.sleep(1)
pass
if is_directory:
dirs.append( name )
else:
files.append( name )
# done!
return (files, dirs, True)
def run(self):
while self.running:
# wait for work
self.producer_sem.acquire()
if not self.running:
return
self.result_files, self.result_dirs, self.crawl_status = crawl_thread.crawl( self.threadno, self.cur_dir, self.context, self.callbacks, self.max_retires )
self.working = False
log.info("thread %s: expored %s" % (self.threadno, self.cur_dir ))
def stop_working(self):
self.running = False
self.producer_sem.release()
def is_working(self):
return self.working
def consume_files(self):
ret = self.result_files
self.result_files = None
return ret
def consume_dirs( self ):
ret = self.result_dirs
self.result_dirs = None
return ret
def consume_crawl_status( self ):
ret = self.crawl_status
self.crawl_status = True
return ret
def get_cur_dir( self ):
return self.cur_dir
def next_dir( self, cur_dir ):
if self.is_working():
raise Exception("thread %s: Thread is still working on %s" % (self.threadno, self.cur_dir))
self.cur_dir = cur_dir
self.working = True
self.producer_sem.release()
# walk a dataset, and do something with each directory
# give each thread one of context_list's items
# include_cb takes (absolute path to a dataset entry, whether or not it is a directory) as arguments,
# and must return True for a directory to be explored further.
def walk_dataset( context_list, root_dir, callbacks, max_retries ):
    """Breadth-first walk of the dataset rooted at root_dir using one crawl_thread per context.

    include_cb is invoked on every discovered entry; for directories its
    return value decides whether the directory is queued for further
    exploration.  Returns True if every directory was explored, False if
    any failed (failures are logged at the end).
    """
    dir_queue = []
    failed = []
    log.info("Starting %s threads for crawling" % len(context_list) )
    total_processed = 1
    running = []
    walk_stats = {}  # map directories to child counts
    i = 0
    for context in context_list:
        ct = crawl_thread( i, context, callbacks, max_retries )
        ct.start()
        running.append( ct )
        i += 1
    dir_queue.append( root_dir )
    while True:
        # Poll once per second: first gather finished threads' results,
        # then hand out queued directories to idle threads.
        time.sleep(1)
        working_list = [th.is_working() for th in running]
        added_work = False
        thread_working = False
        log.info("Gather thread results")
        # find the finished thread(s) and given them more work
        for i in xrange(0, len(running)):
            if not running[i].is_working():
                # did the thread do work?
                files = running[i].consume_files()
                dirs = running[i].consume_dirs()
                status = running[i].consume_crawl_status()
                if not status:
                    log.error("Failed to explore %s" % running[i].get_cur_dir())
                    failed.append( running[i].get_cur_dir() )
                    continue
                processed_here = 0
                explore = []
                if files is not None and dirs is not None:
                    log.info("Gather thread %s's results (%s items gathered)", i, len(files) + len(dirs))
                    # process files
                    for name in files:
                        abs_path = os.path.join( running[i].get_cur_dir(), name )
                        rc = callbacks.include_cb( abs_path, False )
                    processed_here += len(files)
                    # process dirs
                    for dir_name in dirs:
                        abs_path = os.path.join( running[i].get_cur_dir(), dir_name )
                        rc = callbacks.include_cb( abs_path, True )
                        if rc:
                            # include_cb approved: explore this directory too
                            explore.append( abs_path )
                    processed_here += len(dirs)
                if processed_here > 0:
                    total_processed += processed_here
                    log.info("%s: %s entries processed (total: %s)" % (running[i].get_cur_dir(), processed_here, total_processed))
                    if not walk_stats.has_key( running[i].get_cur_dir() ):
                        walk_stats[ running[i].get_cur_dir() ] = processed_here
                    else:
                        walk_stats[ running[i].get_cur_dir() ] += processed_here
                if len(explore) > 0:
                    dir_queue += explore
        log.info("Assign thread work")
        for i in xrange(0, len(running)):
            if not running[i].is_working():
                # queue up more work
                if len(dir_queue) > 0:
                    next_dir = dir_queue[0]
                    dir_queue.pop(0)
                    log.info("Thread %s: explore %s" % (i, next_dir))
                    running[i].next_dir( next_dir )
                    added_work = True
                else:
                    log.info("Thread %s is not working, but no directories queued", i)
            else:
                log.info("Thread %s is working" % i)
                thread_working = True
        log.info("Directories left to explore: %s" % len(dir_queue))
        # Termination: nothing queued and nobody busy.
        if not added_work and not thread_working:
            break
    stats_buf = ""
    for (dirname, count) in walk_stats.items():
        stats_buf += "% 15s %s\n" % (count, dirname)
    log.info("Walk stats:\n%s" % stats_buf )
    log.info("Finished exploring %s, shutting down..." % root_dir)
    # stop all threads
    for ct in running:
        ct.stop_working()
    for ct in running:
        ct.join()
    if len(failed) == 0:
        return True
    else:
        log.error("Failed to explore the following files and directories:\n%s\n" % ("\n".join( ["   %s" % failed_path for failed_path in failed] )) )
        return False
# build a hierarchy
def build_hierarchy( contexts, root_dir, driver_name, crawler_cbs, specfile_cbs, allow_partial_failure=False, max_retries=1 ):
    """
    Crawl the dataset rooted at root_dir and build a specfile hierarchy dict.

    One crawler thread is spawned per entry in contexts.  The caller's
    include_cb still decides what to keep, but each decision is routed
    through AG_specfile.add_hierarchy_element so the hierarchy dict is
    populated as a side effect of the walk.

    Returns the hierarchy dict, or None if the crawl failed and
    allow_partial_failure is False.
    """
    hierarchy = {}

    # generate and store data for each entry, based on the caller's include_cb
    def record_entry( abs_path, is_dir ):
        return AG_specfile.add_hierarchy_element( abs_path, is_dir, driver_name, crawler_cbs.include_cb, specfile_cbs, hierarchy )

    # override include_cb so the walk builds the hierarchy as it goes
    walk_cbs = crawler_callbacks( include_cb=record_entry,
                                  listdir_cb=crawler_cbs.listdir_cb,
                                  isdir_cb=crawler_cbs.isdir_cb )

    ok = walk_dataset( contexts, root_dir, walk_cbs, max_retries )
    if not ok and not allow_partial_failure:
        return None

    AG_specfile.add_hierarchy_prefixes( root_dir, driver_name, crawler_cbs.include_cb, specfile_cbs, hierarchy )
    return hierarchy
|
import json
import logging
import requests
from core.channel import (Channel, NotSupportedTrigger, NotSupportedAction,
ConditionNotMet, ChannelStateForUser)
from core.core import Core
from channel_github.models import GithubAccount
from channel_github.config import (TRIGGER_TYPE, CHANNEL_NAME, CLIENT_ID,
CLIENT_SECRET, TRIGGER_OUTPUT,
API_URL, get_webhook_url, WEBHOOK_TEST,
REPO_HOOKS_URL)
log = logging.getLogger('channel')
class GithubChannel(Channel):
    """Channel implementation for GitHub: manages repo webhooks and
    dispatches push/issue triggers into the Core."""

    def _check_for_webhook(self, repo_name, auth_header):
        """
        Returns True if a webhook pointing at our webhook URL already
        exists for the given repo.

        :param repo_name: full repo name, "owner/repository"
        :param auth_header: dict with the GitHub token Authorization header
        """
        check_url = REPO_HOOKS_URL.format(repo_name)
        response = requests.get(check_url, headers=auth_header)
        data = json.loads(response.content.decode('utf-8'))
        # check if our webhook url is associated with any webhook of the repo
        return any(get_webhook_url() in e['config']['url'] for e in data)

    def repo_exists(self, repo_name, owner):
        """
        Check if a github repository exists.
        Args:
            repo_name: The name of the repository
            owner: Github username of the owner.
                NOTE(review): 'owner' is currently unused — API_URL is
                formatted with repo_name only; confirm API_URL's template.
        Returns:
            True if the repository exists, false otherwise.
        """
        resp = requests.get(API_URL.format(repo_name))
        return resp.ok

    def create_webhook(self,
                       github_account,
                       repository,
                       events,
                       owner=None):
        """
        Create a webhook subscription on a repository of an authenticated
        github user.  Returns the full repo name on success (or if a hook
        already exists), None if GitHub rejected the request.

        :param github_account: GithubAccount with a valid access token
        :param repository: repository name (without owner)
        :param events: list of GitHub event names to subscribe to
        :param owner: repo owner; defaults to the account's username
        """
        if not owner:
            owner = github_account.username
        # full repo name
        repo_name = '/'.join([owner, repository])
        auth_header = {'Authorization': 'token ' + github_account.access_token}
        # check whether a webhook already exists
        if self._check_for_webhook(repo_name, auth_header):
            # no need to create another!
            return repo_name
        data = {
            'name': 'web',
            'active': True,
            'events': events,
            'config': {
                'url': get_webhook_url(),
                'content_type': 'json'
            }
        }
        subscribe_url = REPO_HOOKS_URL.format(repo_name)
        resp = requests.post(subscribe_url,
                             json=data,
                             headers=auth_header)
        if resp.ok:
            return repo_name
        else:
            return None

    def fire_trigger(self, trigger_data):
        """
        Handles incoming triggers.

        :param trigger_data: dictionary containing the event payload sent
            by github; its keys determine the event type (push vs. issue).
        """
        if 'commits' in trigger_data and 'pusher' in trigger_data:
            # push trigger
            self._fire_push_trigger(data=trigger_data)
        elif 'issue' in trigger_data:
            self._fire_issue_trigger(data=trigger_data)
            # TODO distinguish whether issue was created updated or something?

    def _fire_push_trigger(self, data):
        # Map the GitHub push payload to our flat trigger payload and hand
        # it to the Core, addressed to the owning user's account.
        username = data['repository']['owner']['name']
        github_account = GithubAccount.objects.get(username=username)
        user_id = github_account.user.id
        trigger_type = TRIGGER_TYPE['push']
        payload = {
            'repository_name': data['repository']['name'],
            'repository_url': data['repository']['url'],
            'head_commit_message': data['head_commit']['message'],
            'head_commit_author': data['head_commit']['author']['name'],
            'repository_full_name': data['repository']['full_name']
        }
        # pass the data to the core and let it handle the trigger
        Core().handle_trigger(channel_name=CHANNEL_NAME,
                              trigger_type=trigger_type,
                              userid=user_id,
                              payload=payload)

    def _fire_issue_trigger(self, data):
        # TODO: complete implementation!
        pass

    def fill_recipe_mappings(self, trigger_type, userid,
                             payload, conditions, mappings):
        """Resolve recipe placeholder mappings against the trigger payload.

        Raises ConditionNotMet if the pushed repository does not match the
        recipe's 'repository_name' condition (compared against the payload's
        full name, i.e. "owner/repo"); raises NotSupportedTrigger for
        anything but the push trigger.
        """
        if trigger_type == TRIGGER_TYPE['push']:
            # check whether the the repository, that was pushed
            # matches the repository of the recipe.
            if conditions['repository_name'] != payload['repository_full_name']:
                raise ConditionNotMet()
            return self._replace_mappings(mappings=mappings,
                                          payload=payload,
                                          to_replace=['repository_name',
                                                      'repository_url',
                                                      'head_commit_message',
                                                      'head_commit_author'])
        elif trigger_type == TRIGGER_TYPE['issues']:
            # TODO implement!
            raise NotSupportedTrigger()
        else:
            raise NotSupportedTrigger()

    def _replace_mappings(self, mappings, to_replace, payload):
        # Replace every '%key%' placeholder in string-valued mappings with
        # the corresponding payload value (mutates and returns mappings).
        for key in mappings:
            val = mappings[key]
            if type(val) == str:
                for s in to_replace:
                    # replace any placeholder by its concrete value
                    placeholder = '%{}%'.format(s)
                    val = val.replace(placeholder,
                                      payload[s])
                mappings[key] = val
        return mappings

    def handle_action(self, action_type, userid, inputs):
        # GitHub channel provides triggers only; no actions are supported.
        raise NotSupportedAction()

    def user_is_connected(self, user):
        """
        Check whether the user is authenticated, i.e. whether a
        GithubAccount has been saved for them.
        Args:
            user: The user that is checked.
        Returns:
            ChannelStateForUser.connected if authenticated,
            ChannelStateForUser.initial otherwise.
        """
        if GithubAccount.objects.filter(user=user).count() > 0:
            return ChannelStateForUser.connected
        else:
            return ChannelStateForUser.initial
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from users.models import CustomUser, Profile
class UserProfileInline(admin.StackedInline):
    """Inline editor embedding a user's Profile inside the user admin page."""
    model = Profile
    can_delete = False
    # Russian admin labels: "Profile" / "Profiles".
    verbose_name = 'Профиль'
    verbose_name_plural = 'Профили'
class UserAdmin(BaseUserAdmin):
    """Admin for the custom user model: inlines the profile and lists by email."""
    inlines = (UserProfileInline, )
    ordering = ('email', )
    list_display = ('email', 'first_name', 'last_name', 'is_staff')
# Register the custom user model with the customized admin.
# admin.site.unregister(User)
# admin.site.register(CustomUser)
admin.site.register(CustomUser, UserAdmin)
|
"""
Python mapping for the AppKit framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import sys
# Manually written wrappers:
import Foundation
import objc
from AppKit import _metadata
from AppKit._inlines import _inline_list_
def _setup_conveniences():
    """Register dict-like convenience methods ('get', '__getitem__') on NSFontDescriptor."""

    def fontdescriptor_get(self, key, default=None):
        # dict.get semantics backed by -objectForKey:.
        value = self.objectForKey_(key)
        if value is None:
            return default
        return value

    def fontdescriptor_getitem(self, key, default=None):
        # dict[key] semantics; raises KeyError on a missing key.
        # NOTE(review): the 'default' parameter is never used here —
        # presumably copied from the 'get' variant; confirm before removing.
        value = self.objectForKey_(key)
        if value is None:
            raise KeyError(key)
        return value

    objc.addConvenienceForClass(
        "NSFontDescriptor",
        (("__getitem__", fontdescriptor_getitem), ("get", fontdescriptor_get)),
    )


_setup_conveniences()
def NSDictionaryOfVariableBindings(*names):
    """
    Return a dictionary mapping each given name to its value in the
    caller's local scope (mirrors the AppKit macro of the same name).
    """
    import sys

    caller_locals = sys._getframe(1).f_locals
    return {name: caller_locals[name] for name in names}
# Build the lazy AppKit module object and install it as sys.modules["AppKit"];
# names are resolved on demand from the framework via the metadata tables.
sys.modules["AppKit"] = mod = objc.ObjCLazyModule(
    "AppKit",
    "com.apple.AppKit",
    objc.pathForFramework("/System/Library/Frameworks/AppKit.framework"),
    _metadata.__dict__,
    _inline_list_,
    {
        "__doc__": __doc__,
        "objc": objc,
        "NSDictionaryOfVariableBindings": NSDictionaryOfVariableBindings,
        "__path__": __path__,
        "__loader__": globals().get("__loader__", None),
    },
    (Foundation,),
)
# NSApp is a global variable that can be changed in ObjC,
# somewhat emulate that (it is *not* possible to assign to
# NSApp in Python)
from AppKit._nsapp import NSApp  # isort:skip # noqa: E402
mod.NSApp = NSApp
# Copy every name from the C extension module into the lazy module.
import AppKit._AppKit  # isort:skip # noqa: E402
for nm in dir(AppKit._AppKit):
    setattr(mod, nm, getattr(AppKit._AppKit, nm))
# Fix types for a number of character constants: they are exported as
# integers but are more useful to Python code as 1-character strings.
mod.NSEnterCharacter = chr(mod.NSEnterCharacter)
mod.NSBackspaceCharacter = chr(mod.NSBackspaceCharacter)
mod.NSTabCharacter = chr(mod.NSTabCharacter)
mod.NSNewlineCharacter = chr(mod.NSNewlineCharacter)
mod.NSFormFeedCharacter = chr(mod.NSFormFeedCharacter)
mod.NSCarriageReturnCharacter = chr(mod.NSCarriageReturnCharacter)
mod.NSBackTabCharacter = chr(mod.NSBackTabCharacter)
mod.NSDeleteCharacter = chr(mod.NSDeleteCharacter)
mod.NSLineSeparatorCharacter = chr(mod.NSLineSeparatorCharacter)
mod.NSParagraphSeparatorCharacter = chr(mod.NSParagraphSeparatorCharacter)
# Likewise convert the function-key constants; missing ones are skipped.
for nm in [
    "NSUpArrowFunctionKey",
    "NSDownArrowFunctionKey",
    "NSLeftArrowFunctionKey",
    "NSRightArrowFunctionKey",
    "NSF1FunctionKey",
    "NSF2FunctionKey",
    "NSF3FunctionKey",
    "NSF4FunctionKey",
    "NSF5FunctionKey",
    "NSF6FunctionKey",
    "NSF7FunctionKey",
    "NSF8FunctionKey",
    "NSF9FunctionKey",
    "NSF10FunctionKey",
    "NSF11FunctionKey",
    "NSF12FunctionKey",
    "NSF13FunctionKey",
    "NSF14FunctionKey",
    "NSF15FunctionKey",
    "NSF16FunctionKey",
    "NSF17FunctionKey",
    "NSF18FunctionKey",
    "NSF19FunctionKey",
    "NSF20FunctionKey",
    "NSF21FunctionKey",
    "NSF22FunctionKey",
    "NSF23FunctionKey",
    "NSF24FunctionKey",
    "NSF25FunctionKey",
    "NSF26FunctionKey",
    "NSF27FunctionKey",
    "NSF28FunctionKey",
    "NSF29FunctionKey",
    "NSF30FunctionKey",
    "NSF31FunctionKey",
    "NSF32FunctionKey",
    "NSF33FunctionKey",
    "NSF34FunctionKey",
    "NSF35FunctionKey",
    "NSInsertFunctionKey",
    "NSDeleteFunctionKey",
    "NSHomeFunctionKey",
    "NSBeginFunctionKey",
    "NSEndFunctionKey",
    "NSPageUpFunctionKey",
    "NSPageDownFunctionKey",
    "NSPrintScreenFunctionKey",
    "NSScrollLockFunctionKey",
    "NSPauseFunctionKey",
    "NSSysReqFunctionKey",
    "NSBreakFunctionKey",
    "NSResetFunctionKey",
    "NSStopFunctionKey",
    "NSMenuFunctionKey",
    "NSUserFunctionKey",
    "NSSystemFunctionKey",
    "NSPrintFunctionKey",
    "NSClearLineFunctionKey",
    "NSClearDisplayFunctionKey",
    "NSInsertLineFunctionKey",
    "NSDeleteLineFunctionKey",
    "NSInsertCharFunctionKey",
    "NSDeleteCharFunctionKey",
    "NSPrevFunctionKey",
    "NSNextFunctionKey",
    "NSSelectFunctionKey",
    "NSExecuteFunctionKey",
    "NSUndoFunctionKey",
    "NSRedoFunctionKey",
    "NSFindFunctionKey",
    "NSHelpFunctionKey",
    "NSModeSwitchFunctionKey",
]:
    try:
        setattr(mod, nm, chr(getattr(mod, nm)))
    except AttributeError:
        pass
# Provide a fallback for NSImageNameApplicationIcon on older SDKs.
try:
    mod.NSImageNameApplicationIcon
except AttributeError:
    mod.NSImageNameApplicationIcon = "NSApplicationIcon"
if objc.arch == "arm64":
    # XXX: Temporary adjustment until the metadata
    # is updated
    mod.NSImageResizingModeStretch = 1
    mod.NSImageResizingModeTile = 0
    mod.NSTextAlignmentCenter = 1
    mod.NSTextAlignmentRight = 2
    mod.NSRightTextAlignment = mod.NSTextAlignmentRight
    mod.NSCenterTextAlignment = mod.NSTextAlignmentCenter
# The metadata module is no longer needed once the lazy module is built.
del sys.modules["AppKit._metadata"]
|
import scrapy
class DmozSpider(scrapy.Spider):
    """Tutorial-style spider for dmoz.org Python book/resource listings (Python 2).

    NOTE(review): DmozItem, HtmlXPathSelector and CraigslistSampleItem are
    referenced below but never imported in this file — these parse variants
    look like copies from different tutorials; only one can work as-is.
    """
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse0(self, response):
        # Dump the raw page body to a file named after the URL's last path segment.
        filename = response.url.split("/")[-2]
        with open(filename, 'wb') as f:
            f.write(response.body)

    def parse(self, response):
        # Yield one DmozItem per list entry (title, link, description).
        for sel in response.xpath('//ul/li'):
            item = DmozItem()
            item['title'] = sel.xpath('a/text()').extract()
            item['link'] = sel.xpath('a/@href').extract()
            item['desc'] = sel.xpath('text()').extract()
            yield item

    def parse_items1(self, response):
        # Debug variant: print the extracted fields instead of yielding items.
        for sel in response.xpath('//ul/li'):
            title = sel.xpath('a/text()').extract()
            link = sel.xpath('a/@href').extract()
            desc = sel.xpath('text()').extract()
            print title, link, desc

    def parse_items(self, response):
        # Legacy-selector variant (pre-Selector scrapy API).
        hxs = HtmlXPathSelector(response)
        titles = hxs.select('//span[@class="pl"]')
        items = []
        for titles in titles:
            item = CraigslistSampleItem()
            item ["title"] = titles.select("a/text()").extract()
            item ["link"] = titles.select("a/@href").extract()
            items.append(item)
        return(items)
|
class Solution:
    def generateParenthesis(self, n: int) -> [str]:
        """Return all well-formed strings of n pairs of parentheses.

        Bottom-up dynamic programming over the recurrence
        P(k) = { '(' + left + ')' + right : left in P(c), right in P(k-1-c) },
        replacing the original memo-less recursion, which recomputed each
        subproblem exponentially many times.  Output order is identical to
        the recursive version.
        """
        if n == 0:
            return ['']
        # table[k] holds all valid strings with k pairs.
        table = [['']]
        for k in range(1, n + 1):
            current = []
            for c in range(k):
                for left in table[c]:
                    for right in table[k - 1 - c]:
                        current.append('({}){}'.format(left, right))
            table.append(current)
        return table[n]
import keras
import pickle
import util
from datetime import datetime
from sklearn.neighbors import BallTree
import tensorflow as tf
def train(args, preprocess_manager):
    """Train the two-headed LSTM for suffix (next event + time) prediction.

    Optionally also builds a BallTree over the flattened training tensors
    for case-based candidate selection (args.next_best_event).

    NOTE(review): uses TF1/legacy-keras APIs (ConfigProto,
    tensorflow_backend, keras.layers.recurrent) — requires TF 1.x.

    :param args: CLI namespace (batch size, learning rate, epochs, paths, ...)
    :param preprocess_manager: provides the encoded training set and CV iteration
    :return: wall-clock training time in seconds
    """
    # share gpu capacity
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.2
    keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
    batch_size = args.batch_size_train
    util.llprint("Loading Data starts... \n")
    X, Y_Event, Y_Time, sequence_max_length, num_features_all, num_features_activities = preprocess_manager.create_and_encode_training_set(
        args)
    util.llprint('\n Build model for suffix prediction... \n')
    if args.dnn_architecture == 0:
        # train a 2-layer LSTM with one shared layer
        main_input = keras.layers.Input(shape=(sequence_max_length, num_features_all), name='main_input')
        # the shared layer
        l1 = keras.layers.recurrent.LSTM(100, implementation=2, kernel_initializer='glorot_uniform',
                                         return_sequences=True, dropout=0.2)(main_input)
        b1 = keras.layers.normalization.BatchNormalization()(l1)
        # the layer specialized in activity prediction
        l2_1 = keras.layers.recurrent.LSTM(100, implementation=2, kernel_initializer='glorot_uniform',
                                           return_sequences=False, dropout=0.2)(b1)
        b2_1 = keras.layers.normalization.BatchNormalization()(l2_1)
        # the layer specialized in time prediction
        l2_2 = keras.layers.recurrent.LSTM(100, implementation=2, kernel_initializer='glorot_uniform',
                                           return_sequences=False, dropout=0.2)(b1)
        b2_2 = keras.layers.normalization.BatchNormalization()(l2_2)
    # Two output heads: softmax over activities (+1 for the end marker) and
    # a single linear unit for the time estimate.
    event_output = keras.layers.core.Dense(num_features_activities + 1, activation='softmax',
                                           kernel_initializer='glorot_uniform', name='event_output')(b2_1)
    time_output = keras.layers.core.Dense(1, kernel_initializer='glorot_uniform', name='time_output')(b2_2)
    model_suffix_prediction = keras.models.Model(inputs=[main_input], outputs=[event_output, time_output])
    opt = keras.optimizers.Nadam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8, schedule_decay=0.004,
                                 clipvalue=3)
    model_suffix_prediction.compile(loss={'event_output': 'categorical_crossentropy', 'time_output': 'mae'},
                                    optimizer=opt)
    # Early stopping + best-checkpoint saving + LR decay on plateau.
    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    model_checkpoint = keras.callbacks.ModelCheckpoint(
        '%smodel_suffix_prediction_%s.h5' % (args.checkpoint_dir, preprocess_manager.iteration_cross_validation),
        monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto')
    lr_reducer = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=0, mode='auto',
                                                   min_delta=0.0001, cooldown=0, min_lr=0)
    model_suffix_prediction.summary()
    start_training_time = datetime.now()
    model_suffix_prediction.fit(X, {'event_output': Y_Event, 'time_output': Y_Time},
                                validation_split=1 / args.num_folds, verbose=1,
                                callbacks=[early_stopping, model_checkpoint, lr_reducer], batch_size=batch_size,
                                epochs=args.dnn_num_epochs)
    training_time = datetime.now() - start_training_time
    if args.next_best_event:
        util.llprint('Build model for candidate determination... \n')
        X_case_based_suffix = preprocess_manager.transformTensorToMatrix(X)
        # https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html
        model_candidate_selection = BallTree(X_case_based_suffix, leaf_size=2)
        pickle.dump(model_candidate_selection, open(
            "%smodel_candidate_selection_%s" % (args.checkpoint_dir, preprocess_manager.iteration_cross_validation),
            'wb'))
    return training_time.total_seconds()
|
"""
================================
Data Analysis and Visualizations
================================
Kafka consumers and transformers with data processing and outputs.
"""
import os
import sys
import psutil
from PySide6.QtCore import QTimer, Qt
from PySide6.QtGui import QColor, QBrush
from PySide6.QtWidgets import QTableWidgetItem
from ..config_manager import ConfigManager
from ..extensions_handler import ExtensionWidget
from ..subprocess_handler import run_subprocess
########################################################################
class Visualization:
    """Real-time data analysis and visualizations.

    Manages visualization subwindows inside the main window's MDI area and
    a table of analysis scripts that can be started and stopped as external
    Python subprocesses, whose PID/CPU/memory status is polled periodically.
    """
    # ----------------------------------------------------------------------
    def __init__(self, core):
        """Set up the status-poll timer, build initial UI state and wire signals.

        Parameters
        ----------
        core :
            Application core object; ``core.main`` is the main window frame.
        """
        self.parent_frame = core.main
        self.core = core
        self.config = ConfigManager()
        # Poll running analysis subprocesses once per second (started lazily
        # in start_script, stopped in update_data_analysis when none remain).
        self.process_status_timer = QTimer()
        self.process_status_timer.timeout.connect(self.update_data_analysis)
        self.process_status_timer.setInterval(1000)
        self.on_focus()
        self.add_subwindow()
        self.connect()
        # self.build_analysis()
    # ----------------------------------------------------------------------
    def connect(self) -> None:
        """Connect UI button/table events to their handlers."""
        self.parent_frame.pushButton_load_visualizarion.clicked.connect(
            self.add_subwindow)
        self.parent_frame.pushButton_visualizations_remove_all.clicked.connect(
            self.remove_all)
        self.parent_frame.pushButton_visualizations_reload_all.clicked.connect(
            self.reload_all)
        # Checking/unchecking a row's checkbox starts/stops its script.
        self.parent_frame.tableWidget_anlaysis.itemChanged.connect(
            self.analisys_status_update)
        self.parent_frame.pushButton_visualizations_stop_all.clicked.connect(
            self.stop_all_scripts)
        self.parent_frame.pushButton_visualizations_restart_all.clicked.connect(
            self.restart_running_scripts)
    # ----------------------------------------------------------------------
    def on_focus(self) -> None:
        """Refresh the MDI layout and rebuild the visualizations list.

        Entries whose name starts with '_' or 'Tutorial :' are excluded.
        """
        self.parent_frame.mdiArea.tileSubWindows()
        self.visualizations_list = []
        for i in range(self.parent_frame.listWidget_projects_visualizations.count()):
            item = self.parent_frame.listWidget_projects_visualizations.item(
                i)
            if item.text().startswith('_'):
                continue
            if item.text().startswith('Tutorial :'):
                continue
            # Each entry pairs the display name with its project path.
            self.visualizations_list.append([item.text(), item.path])
        self.build_analysis()
    # ----------------------------------------------------------------------
    def reload_all(self) -> None:
        """Reload every visualization subwindow in the MDI area."""
        for sub in self.parent_frame.mdiArea.subWindowList():
            sub.reload()
        # self.resize_menubar()
    # ----------------------------------------------------------------------
    def remove_all(self) -> None:
        """Remove every visualization subwindow in the MDI area."""
        for sub in self.parent_frame.mdiArea.subWindowList():
            sub.remove()
        # Delay the button refresh so Qt can finish destroying subwindows.
        QTimer().singleShot(100, self.widgets_set_enabled)
    # # ----------------------------------------------------------------------
    # def resize_menubars(self):
    #     """"""
    #     for sub in self.parent_frame.mdiArea.subWindowList():
    #         if hasattr(sub, 'resize_menubar'):
    #             # sub.resize_menubar()
    #             QTimer().singleShot(100, sub.resize_menubar)
    # ----------------------------------------------------------------------
    def add_subwindow(self) -> None:
        """Add a new visualization subwindow and retile the MDI area."""
        sub = ExtensionWidget(
            self.parent_frame.mdiArea, mode='visualization', extensions_list=self.visualizations_list)
        self.parent_frame.mdiArea.addSubWindow(sub)
        sub.show()
        self.parent_frame.mdiArea.tileSubWindows()
        sub.update_menu_bar()
        # Refresh action buttons when the subwindow finishes loading or dies.
        sub.loaded = self.widgets_set_enabled
        sub.destroyed.connect(self.widgets_set_enabled)
        # sub..connect(self.resize_menubars)
        # sub.on_remove(self.resize_menubars)
        self.widgets_set_enabled()
        # self.resize_menubars()
    # ----------------------------------------------------------------------
    def widgets_set_enabled(self) -> None:
        """Enable/disable the remove/reload buttons based on open subwindows.

        'Reload all' is enabled only if at least one subwindow has an active
        stream subprocess.
        """
        subwindows = len(self.parent_frame.mdiArea.subWindowList()) != 0
        self.parent_frame.pushButton_visualizations_remove_all.setEnabled(
            subwindows)
        self.parent_frame.pushButton_visualizations_reload_all.setEnabled(
            False)
        for sub in self.parent_frame.mdiArea.subWindowList():
            if getattr(sub, 'stream_subprocess', False):
                self.parent_frame.pushButton_visualizations_reload_all.setEnabled(
                    True)
                break
    # ----------------------------------------------------------------------
    def build_analysis(self) -> None:
        """Synchronize the analysis table with the project's analysis list.

        On first build (empty table) the columns are created and every list
        entry is added; afterwards only the difference between the table and
        the list is applied. Rows whose script is running (checkbox checked)
        are not removed immediately but flagged with ``to_remove``.
        """
        columns = ['Data analisys',
                   'PID',
                   'CPU%',
                   'Memory',
                   'Status',
                   ]
        # NOTE(review): with an empty table this evaluates to -1, so the
        # first insertRow call below receives -1 — confirm Qt clamps this
        # to the intended position.
        start_index = self.parent_frame.tableWidget_anlaysis.rowCount() - 1
        if self.parent_frame.tableWidget_anlaysis.rowCount() == 0:
            # Fresh table: define columns and add every analysis entry.
            self.parent_frame.tableWidget_anlaysis.clear()
            self.parent_frame.tableWidget_anlaysis.setRowCount(0)
            self.parent_frame.tableWidget_anlaysis.setColumnCount(
                len(columns))
            self.parent_frame.tableWidget_anlaysis.setHorizontalHeaderLabels(
                columns)
            already_items = []
            to_remove = []
            to_add = [self.parent_frame.listWidget_projects_analysis.item(i).text(
            ) for i in range(self.parent_frame.listWidget_projects_analysis.count())]
        else:
            # start_index = 0
            # Incremental update: diff table rows against the list widget.
            already_items = [self.parent_frame.tableWidget_anlaysis.item(
                i, 0).text() for i in range(self.parent_frame.tableWidget_anlaysis.rowCount())]
            new_ones = [self.parent_frame.listWidget_projects_analysis.item(
                i).text() for i in range(self.parent_frame.listWidget_projects_analysis.count())]
            to_remove = set(already_items) - set(new_ones)
            to_add = set(new_ones) - set(already_items)
        for i, script_name in enumerate(to_add):
            if script_name.startswith('_'):
                continue
            if script_name in already_items:
                continue
            # if item.text().startswith('Tutorial |'):
                # continue
            self.parent_frame.tableWidget_anlaysis.insertRow(start_index + i)
            for j in range(len(columns)):
                if j == 0:
                    # First column: checkable name item carrying run state
                    # and the resolved script path.
                    item = QTableWidgetItem(script_name)
                    item.setCheckState(Qt.Unchecked)
                    item.is_running = False
                    item.path = self.core.projects.normalize_path(
                        item.text())
                else:
                    item = QTableWidgetItem()
                if 0 < j < 4:
                    # PID / CPU% / Memory columns are centered.
                    item.setTextAlignment(Qt.AlignCenter)
                item.setFlags(item.flags() &
                              ~Qt.ItemIsEditable &
                              ~Qt.ItemIsSelectable)
                self.parent_frame.tableWidget_anlaysis.setItem(
                    start_index + i, j, item)
                self.parent_frame.tableWidget_anlaysis.cellWidget(
                    start_index + i, j)
        for script_name in to_remove:
            for i in range(self.parent_frame.tableWidget_anlaysis.rowCount()):
                item = self.parent_frame.tableWidget_anlaysis.item(i, 0)
                if item.text() == script_name:
                    if not item.checkState() == Qt.Checked:
                        self.parent_frame.tableWidget_anlaysis.removeRow(i)
                    else:
                        # Still running: defer removal until it is stopped.
                        item.to_remove = True
                    break
        self.parent_frame.tableWidget_anlaysis.sortByColumn(
            0, Qt.SortOrder.DescendingOrder)
    # ----------------------------------------------------------------------
    def analisys_status_update(self, item) -> None:
        """Start or stop a script when its name-column checkbox changes."""
        if item.column() != 0:
            return
        if item.checkState() == Qt.Checked:
            self.start_script(item)
        else:
            self.stop_script(item)
    # ----------------------------------------------------------------------
    def stop_script(self, item) -> None:
        """Terminate the subprocess attached to *item* and update its row."""
        if hasattr(item, 'subprocess'):
            item.subprocess.terminate()
            del item.subprocess
            item.setCheckState(Qt.Unchecked)
            self.update_row_information(item.row(), '', '', '', 'Terminated')
        # Row was scheduled for removal while running (see build_analysis).
        if hasattr(item, 'to_remove'):
            self.parent_frame.tableWidget_anlaysis.removeRow(item.row())
    # ----------------------------------------------------------------------
    def start_script(self, item) -> None:
        """Launch the item's ``main.py`` as a subprocess and start polling."""
        script = item.path
        item.setCheckState(Qt.Checked)
        item.subprocess = run_subprocess([sys.executable, os.path.join(
            self.core.projects.projects_dir, script, 'main.py')])
        if not self.process_status_timer.isActive():
            self.process_status_timer.start()
    # ----------------------------------------------------------------------
    def update_data_analysis(self) -> None:
        """Timer slot: refresh PID/CPU/memory/status for every running script.

        Stops the poll timer when no subprocess is running any more and
        toggles the stop-all/restart-all buttons accordingly.
        """
        running = 0
        for row in range(self.parent_frame.tableWidget_anlaysis.rowCount()):
            item = self.parent_frame.tableWidget_anlaysis.item(row, 0)
            if hasattr(item, 'subprocess'):
                try:
                    process = psutil.Process(item.subprocess.pid)
                    pid = str(item.subprocess.pid)
                    memory = f"{process.memory_info().vms / (1024):.0f} K"
                    cpu = f"{process.cpu_percent()}%"
                    status = 'Running...'
                    running += 1
                # NOTE(review): bare except — presumably meant to catch
                # psutil.NoSuchProcess when the process has exited; confirm.
                except:
                    item.setCheckState(Qt.Unchecked)
                    pid = ''
                    memory = ""
                    cpu = ""
                    status = 'Finalized'
                # NOTE(review): if psutil.Process() raised above, `process`
                # is unbound (or stale from a previous row) here — this
                # check may raise NameError or test the wrong process.
                if process.memory_info().vms == 0:
                    pid = ''
                    memory = ""
                    cpu = ""
                    status = 'Finalized'
                    self.stop_script(item)
                self.update_row_information(row, pid, cpu, memory, status)
        if not running:
            self.process_status_timer.stop()
        enable = self.process_status_timer.isActive()
        self.parent_frame.pushButton_visualizations_stop_all.setEnabled(
            enable)
        self.parent_frame.pushButton_visualizations_restart_all.setEnabled(
            enable)
    # ----------------------------------------------------------------------
    def update_row_information(self, row, pid: str, cpu: str, memory: str, status: str) -> None:
        """Write PID, CPU, memory and status strings into columns 1-4 of *row*."""
        item1 = self.parent_frame.tableWidget_anlaysis.item(row, 1)
        item1.setText(pid)
        item2 = self.parent_frame.tableWidget_anlaysis.item(row, 2)
        item2.setText(cpu)
        item2 = self.parent_frame.tableWidget_anlaysis.item(row, 3)
        item2.setText(memory)
        item3 = self.parent_frame.tableWidget_anlaysis.item(row, 4)
        item3.setText(status)
        # if status in ['Terminated', 'Finalized']:
            # item3.setBackground(QColor(220, 53, 69, 30))
        # elif status in ['Running...']:
            # item3.setBackground(QColor(63, 197, 94, 30))
    # ----------------------------------------------------------------------
    def stop_all_scripts(self) -> None:
        """Terminate every running analysis subprocess."""
        for row in range(self.parent_frame.tableWidget_anlaysis.rowCount()):
            item = self.parent_frame.tableWidget_anlaysis.item(row, 0)
            if hasattr(item, 'subprocess'):
                self.stop_script(item)
    # ----------------------------------------------------------------------
    def restart_running_scripts(self) -> None:
        """Stop and immediately relaunch every running analysis subprocess."""
        for row in range(self.parent_frame.tableWidget_anlaysis.rowCount()):
            item = self.parent_frame.tableWidget_anlaysis.item(row, 0)
            if hasattr(item, 'subprocess'):
                self.stop_script(item)
                self.start_script(item)
|
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
# Global Variables
# Database dialect identifier (e.g. for building SQLAlchemy connection URLs).
SQLITE = 'sqlite'

# Table Names
USERS = 'users'
ADDRESSES = 'addresses'
import datetime
import logging
from django.contrib.auth import get_user_model
from django.utils import timezone
from apps.org.models import Org
from apps.physicaldevice.models import Device
from apps.project.models import Project
from apps.stream.models import StreamId
from apps.streamer.models import StreamerReport
from apps.streamnote.models import StreamNote
from apps.utils.data_helpers.manager import DataManager
logger = logging.getLogger(__name__)
class DbStats(object):
    """Compute row-count statistics over the application's main tables.

    ``_labels`` maps a human-readable label to a dict with:
      - 'qs': the queryset to count;
      - 'creation_field' (optional): name of the datetime field used for
        time-window filtering; defaults to ``created_on`` when absent.

    Results are accumulated in ``self.stats`` keyed by label.
    """

    _labels = {
        'Users': {
            'qs': get_user_model().objects.filter(is_active=True),
            'creation_field': 'created_at'
        },
        'Orgs': {
            'qs': Org.objects.all()
        },
        'Projects': {
            'qs': Project.objects.all()
        },
        'ActiveDevices': {
            'qs': Device.objects.filter(active=True)
        },
        'ClaimedDevices': {
            'qs': Device.objects.filter(active=True, project__isnull=False),
            'creation_field': 'claimed_on'
        },
        'EnabledStreams': {
            'qs': StreamId.objects.filter(enabled=True)
        },
        'StreamData*': {
            'qs': DataManager.all_qs('data'),
            'creation_field': 'timestamp'
        },
        'StreamEvents*': {
            'qs': DataManager.all_qs('event'),
            'creation_field': 'timestamp'
        },
        'StreamNotes': {
            'qs': StreamNote.objects.all()
        },
        'StreamerReports': {
            'qs': StreamerReport.objects.all()
        }
    }
    # Legacy class-level defaults, kept for backward compatibility; instances
    # get their own attributes in __init__ (the previous implementation
    # assigned bare locals there, so all instances shared this mutable dict).
    stats = {}
    start = None
    end = None

    def __init__(self):
        """Initialize per-instance stats storage and an empty time window."""
        # Bug fix: the original assigned locals (`stats = {}`), leaving all
        # instances sharing the class-level mutable `stats` dict.
        self.stats = {}
        self.start = None
        self.end = None

    def compute_stats(self):
        """Store the all-time row count for every labeled queryset."""
        for key in self._labels.keys():
            qs = self._labels[key]['qs']
            self.stats[key] = qs.count()

    def day_stats(self, days=1):
        """Store counts of objects created within the last *days* days.

        Sets ``self.start``/``self.end`` to the computed window. Querysets
        whose model lacks the expected creation field are skipped with a
        warning rather than aborting the whole run.
        """
        self.end = timezone.now()
        self.start = self.end - datetime.timedelta(days=days)
        for key in self._labels.keys():
            qs = self._labels[key]['qs']
            filter_kwargs = {}
            if 'creation_field' in self._labels[key]:
                name = '{0}__gte'.format(self._labels[key]['creation_field'])
                filter_kwargs[name] = self.start
            else:
                # Default creation-timestamp field name used by most models.
                filter_kwargs['created_on__gte'] = self.start
            try:
                self.stats[key] = qs.filter(**filter_kwargs).count()
            except Exception as e:
                # Best-effort: log and continue with the remaining labels.
                logger.warning('{0} Err={1}'.format(key, e))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.